4 from __future__
import absolute_import
, unicode_literals
29 from string
import ascii_letters
34 compat_get_terminal_size
,
40 compat_tokenize_tokenize
,
42 compat_urllib_request
,
43 compat_urllib_request_DataHandler
,
70 PerRequestProxyHandler
,
75 register_socks_protocols
,
85 UnavailableVideoError
,
90 YoutubeDLCookieProcessor
,
93 from .cache
import Cache
94 from .extractor
import get_info_extractor
, gen_extractor_classes
, _LAZY_LOADER
95 from .extractor
.openload
import PhantomJSwrapper
96 from .downloader
import get_suitable_downloader
97 from .downloader
.rtmp
import rtmpdump_version
98 from .postprocessor
import (
101 FFmpegFixupStretchedPP
,
106 from .version
import __version__
108 if compat_os_name
== 'nt':
112 class YoutubeDL(object):
115 YoutubeDL objects are the ones responsible of downloading the
116 actual video file and writing it to disk if the user has requested
117 it, among some other tasks. In most cases there should be one per
118 program. As, given a video URL, the downloader doesn't know how to
119 extract all the needed information, task that InfoExtractors do, it
120 has to pass the URL to one of them.
122 For this, YoutubeDL objects have a method that allows
123 InfoExtractors to be registered in a given order. When it is passed
124 a URL, the YoutubeDL object handles it to the first InfoExtractor it
125 finds that reports being able to handle it. The InfoExtractor extracts
126 all the information about the video or videos the URL refers to, and
127 YoutubeDL process the extracted information, possibly using a File
128 Downloader to download the video.
130 YoutubeDL objects accept a lot of parameters. In order not to saturate
131 the object constructor with arguments, it receives a dictionary of
132 options instead. These options are available through the params
133 attribute for the InfoExtractors to use. The YoutubeDL also
134 registers itself as the downloader in charge for the InfoExtractors
135 that are added to it, so this is a "mutual registration".
139 username: Username for authentication purposes.
140 password: Password for authentication purposes.
141 videopassword: Password for accessing a video.
142 ap_mso: Adobe Pass multiple-system operator identifier.
143 ap_username: Multiple-system operator account username.
144 ap_password: Multiple-system operator account password.
145 usenetrc: Use netrc for authentication instead.
146 verbose: Print additional info to stdout.
147 quiet: Do not print messages to stdout.
148 no_warnings: Do not print out anything for warnings.
149 forceurl: Force printing final URL.
150 forcetitle: Force printing title.
151 forceid: Force printing ID.
152 forcethumbnail: Force printing thumbnail URL.
153 forcedescription: Force printing description.
154 forcefilename: Force printing final filename.
155 forceduration: Force printing duration.
156 forcejson: Force printing info_dict as JSON.
157 dump_single_json: Force printing the info_dict of the whole playlist
158 (or video) as a single JSON line.
159 simulate: Do not download the video files.
160 format: Video format code. See options.py for more information.
161 outtmpl: Template for output names.
162 restrictfilenames: Do not allow "&" and spaces in file names
163 ignoreerrors: Do not stop on download errors.
164 force_generic_extractor: Force downloader to use the generic extractor
165 nooverwrites: Prevent overwriting files.
166 playliststart: Playlist item to start at.
167 playlistend: Playlist item to end at.
168 playlist_items: Specific indices of playlist to download.
169 playlistreverse: Download playlist items in reverse order.
170 playlistrandom: Download playlist items in random order.
171 matchtitle: Download only matching titles.
172 rejecttitle: Reject downloads for matching titles.
173 logger: Log messages to a logging.Logger instance.
174 logtostderr: Log messages to stderr instead of stdout.
175 writedescription: Write the video description to a .description file
176 writeinfojson: Write the video description to a .info.json file
177 writeannotations: Write the video annotations to a .annotations.xml file
178 writethumbnail: Write the thumbnail image to a file
179 write_all_thumbnails: Write all thumbnail formats to files
180 writesubtitles: Write the video subtitles to a file
181 writeautomaticsub: Write the automatically generated subtitles to a file
182 allsubtitles: Downloads all the subtitles of the video
183 (requires writesubtitles or writeautomaticsub)
184 listsubtitles: Lists all available subtitles for the video
185 subtitlesformat: The format code for subtitles
186 subtitleslangs: List of languages of the subtitles to download
187 keepvideo: Keep the video file after post-processing
188 daterange: A DateRange object, download only if the upload_date is in the range.
189 skip_download: Skip the actual download of the video file
190 cachedir: Location of the cache files in the filesystem.
191 False to disable filesystem cache.
192 noplaylist: Download single video instead of a playlist if in doubt.
193 age_limit: An integer representing the user's age in years.
194 Unsuitable videos for the given age are skipped.
195 min_views: An integer representing the minimum view count the video
196 must have in order to not be skipped.
197 Videos without view count information are always
198 downloaded. None for no limit.
199 max_views: An integer representing the maximum view count.
200 Videos that are more popular than that are not
202 Videos without view count information are always
203 downloaded. None for no limit.
204 download_archive: File name of a file where all downloads are recorded.
205 Videos already present in the file are not downloaded
207 cookiefile: File name where cookies should be read from and dumped to.
208 nocheckcertificate:Do not verify SSL certificates
209 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
210 At the moment, this is only supported by YouTube.
211 proxy: URL of the proxy server to use
212 geo_verification_proxy: URL of the proxy to use for IP address verification
213 on geo-restricted sites. (Experimental)
214 socket_timeout: Time to wait for unresponsive hosts, in seconds
215 bidi_workaround: Work around buggy terminals without bidirectional text
216 support, using fribidi
217 debug_printtraffic:Print out sent and received HTTP traffic
218 include_ads: Download ads as well
219 default_search: Prepend this string if an input url is not valid.
220 'auto' for elaborate guessing
221 encoding: Use this encoding instead of the system-specified.
222 extract_flat: Do not resolve URLs, return the immediate result.
223 Pass in 'in_playlist' to only show this behavior for
225 postprocessors: A list of dictionaries, each with an entry
226 * key: The name of the postprocessor. See
227 youtube_dl/postprocessor/__init__.py for a list.
228 as well as any further keyword arguments for the
230 progress_hooks: A list of functions that get called on download
231 progress, with a dictionary with the entries
232 * status: One of "downloading", "error", or "finished".
233 Check this first and ignore unknown values.
235 If status is one of "downloading", or "finished", the
236 following properties may also be present:
237 * filename: The final filename (always present)
238 * tmpfilename: The filename we're currently writing to
239 * downloaded_bytes: Bytes on disk
240 * total_bytes: Size of the whole file, None if unknown
241 * total_bytes_estimate: Guess of the eventual file size,
243 * elapsed: The number of seconds since download started.
244 * eta: The estimated time in seconds, None if unknown
245 * speed: The download speed in bytes/second, None if
247 * fragment_index: The counter of the currently
248 downloaded video fragment.
249 * fragment_count: The number of fragments (= individual
250 files that will be merged)
252 Progress hooks are guaranteed to be called at least once
253 (with status "finished") if the download is successful.
254 merge_output_format: Extension to use when merging formats.
255 fixup: Automatically correct known faults of the file.
257 - "never": do nothing
258 - "warn": only emit a warning
259 - "detect_or_warn": check whether we can do anything
260 about it, warn otherwise (default)
261 source_address: (Experimental) Client-side IP address to bind to.
262 call_home: Boolean, true iff we are allowed to contact the
263 youtube-dl servers for debugging.
264 sleep_interval: Number of seconds to sleep before each download when
265 used alone or a lower bound of a range for randomized
266 sleep before each download (minimum possible number
267 of seconds to sleep) when used along with
269 max_sleep_interval:Upper bound of a range for randomized sleep before each
270 download (maximum possible number of seconds to sleep).
271 Must only be used along with sleep_interval.
272 Actual sleep time will be a random float from range
273 [sleep_interval; max_sleep_interval].
274 listformats: Print an overview of available video formats and exit.
275 list_thumbnails: Print a table of all thumbnails and exit.
276 match_filter: A function that gets called with the info_dict of
278 If it returns a message, the video is ignored.
279 If it returns None, the video is downloaded.
280 match_filter_func in utils.py is one example for this.
281 no_color: Do not emit color codes in output.
282 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
283 HTTP header (experimental)
285 Two-letter ISO 3166-2 country code that will be used for
286 explicit geographic restriction bypassing via faking
287 X-Forwarded-For HTTP header (experimental)
289 The following options determine which downloader is picked:
290 external_downloader: Executable of the external downloader to call.
291 None or unset for standard (built-in) downloader.
292 hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
293 if True, otherwise use ffmpeg/avconv if False, otherwise
294 use downloader suggested by extractor if None.
296 The following parameters are not used by YoutubeDL itself, they are used by
297 the downloader (see youtube_dl/downloader/common.py):
298 nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
299 noresizebuffer, retries, continuedl, noprogress, consoletitle,
300 xattr_set_filesize, external_downloader_args, hls_use_mpegts.
302 The following options are used by the post processors:
303 prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available,
304 otherwise prefer avconv.
305 postprocessor_args: A list of additional command-line arguments for the
309 _NUMERIC_FIELDS
= set((
310 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
311 'timestamp', 'upload_year', 'upload_month', 'upload_day',
312 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
313 'average_rating', 'comment_count', 'age_limit',
314 'start_time', 'end_time',
315 'chapter_number', 'season_number', 'episode_number',
316 'track_number', 'disc_number', 'release_year',
323 _download_retcode
= None
324 _num_downloads
= None
327 def __init__(self
, params
=None, auto_init
=True):
328 """Create a FileDownloader object with the given options."""
332 self
._ies
_instances
= {}
334 self
._progress
_hooks
= []
335 self
._download
_retcode
= 0
336 self
._num
_downloads
= 0
337 self
._screen
_file
= [sys
.stdout
, sys
.stderr
][params
.get('logtostderr', False)]
338 self
._err
_file
= sys
.stderr
341 'nocheckcertificate': False,
343 self
.params
.update(params
)
344 self
.cache
= Cache(self
)
346 def check_deprecated(param
, option
, suggestion
):
347 if self
.params
.get(param
) is not None:
349 '%s is deprecated. Use %s instead.' % (option
, suggestion
))
353 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
354 if self
.params
.get('geo_verification_proxy') is None:
355 self
.params
['geo_verification_proxy'] = self
.params
['cn_verification_proxy']
357 check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits')
358 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
359 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
361 if params
.get('bidi_workaround', False):
364 master
, slave
= pty
.openpty()
365 width
= compat_get_terminal_size().columns
369 width_args
= ['-w', str(width
)]
371 stdin
=subprocess
.PIPE
,
373 stderr
=self
._err
_file
)
375 self
._output
_process
= subprocess
.Popen(
376 ['bidiv'] + width_args
, **sp_kwargs
379 self
._output
_process
= subprocess
.Popen(
380 ['fribidi', '-c', 'UTF-8'] + width_args
, **sp_kwargs
)
381 self
._output
_channel
= os
.fdopen(master
, 'rb')
382 except OSError as ose
:
383 if ose
.errno
== errno
.ENOENT
:
384 self
.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
388 if (sys
.platform
!= 'win32' and
389 sys
.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
390 not params
.get('restrictfilenames', False)):
391 # Unicode filesystem API will throw errors (#1474, #13027)
393 'Assuming --restrict-filenames since file system encoding '
394 'cannot encode all characters. '
395 'Set the LC_ALL environment variable to fix this.')
396 self
.params
['restrictfilenames'] = True
398 if isinstance(params
.get('outtmpl'), bytes):
400 'Parameter outtmpl is bytes, but should be a unicode string. '
401 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
406 self
.print_debug_header()
407 self
.add_default_info_extractors()
409 for pp_def_raw
in self
.params
.get('postprocessors', []):
410 pp_class
= get_postprocessor(pp_def_raw
['key'])
411 pp_def
= dict(pp_def_raw
)
413 pp
= pp_class(self
, **compat_kwargs(pp_def
))
414 self
.add_post_processor(pp
)
416 for ph
in self
.params
.get('progress_hooks', []):
417 self
.add_progress_hook(ph
)
419 register_socks_protocols()
421 def warn_if_short_id(self
, argv
):
422 # short YouTube ID starting with dash?
424 i
for i
, a
in enumerate(argv
)
425 if re
.match(r
'^-[0-9A-Za-z_-]{10}$', a
)]
429 [a
for i
, a
in enumerate(argv
) if i
not in idxs
] +
430 ['--'] + [argv
[i
] for i
in idxs
]
433 'Long argument string detected. '
434 'Use -- to separate parameters and URLs, like this:\n%s\n' %
435 args_to_str(correct_argv
))
437 def add_info_extractor(self
, ie
):
438 """Add an InfoExtractor object to the end of the list."""
440 if not isinstance(ie
, type):
441 self
._ies
_instances
[ie
.ie_key()] = ie
442 ie
.set_downloader(self
)
444 def get_info_extractor(self
, ie_key
):
446 Get an instance of an IE with name ie_key, it will try to get one from
447 the _ies list, if there's no instance it will create a new one and add
448 it to the extractor list.
450 ie
= self
._ies
_instances
.get(ie_key
)
452 ie
= get_info_extractor(ie_key
)()
453 self
.add_info_extractor(ie
)
456 def add_default_info_extractors(self
):
458 Add the InfoExtractors returned by gen_extractors to the end of the list
460 for ie
in gen_extractor_classes():
461 self
.add_info_extractor(ie
)
463 def add_post_processor(self
, pp
):
464 """Add a PostProcessor object to the end of the chain."""
466 pp
.set_downloader(self
)
468 def add_progress_hook(self
, ph
):
469 """Add the progress hook (currently only for the file downloader)"""
470 self
._progress
_hooks
.append(ph
)
472 def _bidi_workaround(self
, message
):
473 if not hasattr(self
, '_output_channel'):
476 assert hasattr(self
, '_output_process')
477 assert isinstance(message
, compat_str
)
478 line_count
= message
.count('\n') + 1
479 self
._output
_process
.stdin
.write((message
+ '\n').encode('utf-8'))
480 self
._output
_process
.stdin
.flush()
481 res
= ''.join(self
._output
_channel
.readline().decode('utf-8')
482 for _
in range(line_count
))
483 return res
[:-len('\n')]
485 def to_screen(self
, message
, skip_eol
=False):
486 """Print message to stdout if not in quiet mode."""
487 return self
.to_stdout(message
, skip_eol
, check_quiet
=True)
489 def _write_string(self
, s
, out
=None):
490 write_string(s
, out
=out
, encoding
=self
.params
.get('encoding'))
492 def to_stdout(self
, message
, skip_eol
=False, check_quiet
=False):
493 """Print message to stdout if not in quiet mode."""
494 if self
.params
.get('logger'):
495 self
.params
['logger'].debug(message
)
496 elif not check_quiet
or not self
.params
.get('quiet', False):
497 message
= self
._bidi
_workaround
(message
)
498 terminator
= ['\n', ''][skip_eol
]
499 output
= message
+ terminator
501 self
._write
_string
(output
, self
._screen
_file
)
503 def to_stderr(self
, message
):
504 """Print message to stderr."""
505 assert isinstance(message
, compat_str
)
506 if self
.params
.get('logger'):
507 self
.params
['logger'].error(message
)
509 message
= self
._bidi
_workaround
(message
)
510 output
= message
+ '\n'
511 self
._write
_string
(output
, self
._err
_file
)
513 def to_console_title(self
, message
):
514 if not self
.params
.get('consoletitle', False):
516 if compat_os_name
== 'nt':
517 if ctypes
.windll
.kernel32
.GetConsoleWindow():
518 # c_wchar_p() might not be necessary if `message` is
519 # already of type unicode()
520 ctypes
.windll
.kernel32
.SetConsoleTitleW(ctypes
.c_wchar_p(message
))
521 elif 'TERM' in os
.environ
:
522 self
._write
_string
('\033]0;%s\007' % message
, self
._screen
_file
)
524 def save_console_title(self
):
525 if not self
.params
.get('consoletitle', False):
527 if compat_os_name
!= 'nt' and 'TERM' in os
.environ
:
528 # Save the title on stack
529 self
._write
_string
('\033[22;0t', self
._screen
_file
)
531 def restore_console_title(self
):
532 if not self
.params
.get('consoletitle', False):
534 if compat_os_name
!= 'nt' and 'TERM' in os
.environ
:
535 # Restore the title from stack
536 self
._write
_string
('\033[23;0t', self
._screen
_file
)
539 self
.save_console_title()
542 def __exit__(self
, *args
):
543 self
.restore_console_title()
545 if self
.params
.get('cookiefile') is not None:
546 self
.cookiejar
.save()
548 def trouble(self
, message
=None, tb
=None):
549 """Determine action to take when a download problem appears.
551 Depending on if the downloader has been configured to ignore
552 download errors or not, this method may throw an exception or
553 not when errors are found, after printing the message.
555 tb, if given, is additional traceback information.
557 if message
is not None:
558 self
.to_stderr(message
)
559 if self
.params
.get('verbose'):
561 if sys
.exc_info()[0]: # if .trouble has been called from an except block
563 if hasattr(sys
.exc_info()[1], 'exc_info') and sys
.exc_info()[1].exc_info
[0]:
564 tb
+= ''.join(traceback
.format_exception(*sys
.exc_info()[1].exc_info
))
565 tb
+= encode_compat_str(traceback
.format_exc())
567 tb_data
= traceback
.format_list(traceback
.extract_stack())
568 tb
= ''.join(tb_data
)
570 if not self
.params
.get('ignoreerrors', False):
571 if sys
.exc_info()[0] and hasattr(sys
.exc_info()[1], 'exc_info') and sys
.exc_info()[1].exc_info
[0]:
572 exc_info
= sys
.exc_info()[1].exc_info
574 exc_info
= sys
.exc_info()
575 raise DownloadError(message
, exc_info
)
576 self
._download
_retcode
= 1
578 def report_warning(self
, message
):
580 Print the message to stderr, it will be prefixed with 'WARNING:'
581 If stderr is a tty file the 'WARNING:' will be colored
583 if self
.params
.get('logger') is not None:
584 self
.params
['logger'].warning(message
)
586 if self
.params
.get('no_warnings'):
588 if not self
.params
.get('no_color') and self
._err
_file
.isatty() and compat_os_name
!= 'nt':
589 _msg_header
= '\033[0;33mWARNING:\033[0m'
591 _msg_header
= 'WARNING:'
592 warning_message
= '%s %s' % (_msg_header
, message
)
593 self
.to_stderr(warning_message
)
595 def report_error(self
, message
, tb
=None):
597 Do the same as trouble, but prefixes the message with 'ERROR:', colored
598 in red if stderr is a tty file.
600 if not self
.params
.get('no_color') and self
._err
_file
.isatty() and compat_os_name
!= 'nt':
601 _msg_header
= '\033[0;31mERROR:\033[0m'
603 _msg_header
= 'ERROR:'
604 error_message
= '%s %s' % (_msg_header
, message
)
605 self
.trouble(error_message
, tb
)
607 def report_file_already_downloaded(self
, file_name
):
608 """Report file has already been fully downloaded."""
610 self
.to_screen('[download] %s has already been downloaded' % file_name
)
611 except UnicodeEncodeError:
612 self
.to_screen('[download] The file has already been downloaded')
614 def prepare_filename(self
, info_dict
):
615 """Generate the output filename."""
617 template_dict
= dict(info_dict
)
619 template_dict
['epoch'] = int(time
.time())
620 autonumber_size
= self
.params
.get('autonumber_size')
621 if autonumber_size
is None:
623 template_dict
['autonumber'] = self
.params
.get('autonumber_start', 1) - 1 + self
._num
_downloads
624 if template_dict
.get('resolution') is None:
625 if template_dict
.get('width') and template_dict
.get('height'):
626 template_dict
['resolution'] = '%dx%d' % (template_dict
['width'], template_dict
['height'])
627 elif template_dict
.get('height'):
628 template_dict
['resolution'] = '%sp' % template_dict
['height']
629 elif template_dict
.get('width'):
630 template_dict
['resolution'] = '%dx?' % template_dict
['width']
632 sanitize
= lambda k
, v
: sanitize_filename(
634 restricted
=self
.params
.get('restrictfilenames'),
635 is_id
=(k
== 'id' or k
.endswith('_id')))
636 template_dict
= dict((k
, v
if isinstance(v
, compat_numeric_types
) else sanitize(k
, v
))
637 for k
, v
in template_dict
.items()
638 if v
is not None and not isinstance(v
, (list, tuple, dict)))
639 template_dict
= collections
.defaultdict(lambda: 'NA', template_dict
)
641 outtmpl
= self
.params
.get('outtmpl', DEFAULT_OUTTMPL
)
643 # For fields playlist_index and autonumber convert all occurrences
644 # of %(field)s to %(field)0Nd for backward compatibility
645 field_size_compat_map
= {
646 'playlist_index': len(str(template_dict
['n_entries'])),
647 'autonumber': autonumber_size
,
649 FIELD_SIZE_COMPAT_RE
= r
'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
650 mobj
= re
.search(FIELD_SIZE_COMPAT_RE
, outtmpl
)
653 FIELD_SIZE_COMPAT_RE
,
654 r
'%%(\1)0%dd' % field_size_compat_map
[mobj
.group('field')],
657 # Missing numeric fields used together with integer presentation types
658 # in format specification will break the argument substitution since
659 # string 'NA' is returned for missing fields. We will patch output
660 # template for missing fields to meet string presentation type.
661 for numeric_field
in self
._NUMERIC
_FIELDS
:
662 if numeric_field
not in template_dict
:
663 # As of [1] format syntax is:
664 # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
665 # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
669 \({0}\) # mapping key
670 (?:[#0\-+ ]+)? # conversion flags (optional)
671 (?:\d+)? # minimum field width (optional)
672 (?:\.\d+)? # precision (optional)
673 [hlL]? # length modifier (optional)
674 [diouxXeEfFgGcrs%] # conversion type
677 FORMAT_RE
.format(numeric_field
),
678 r
'%({0})s'.format(numeric_field
), outtmpl
)
680 # expand_path translates '%%' into '%' and '$$' into '$'
681 # correspondingly that is not what we want since we need to keep
682 # '%%' intact for template dict substitution step. Working around
683 # with boundary-alike separator hack.
684 sep
= ''.join([random
.choice(ascii_letters
) for _
in range(32)])
685 outtmpl
= outtmpl
.replace('%%', '%{0}%'.format(sep
)).replace('$$', '${0}$'.format(sep
))
687 # outtmpl should be expand_path'ed before template dict substitution
688 # because meta fields may contain env variables we don't want to
689 # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
690 # title "Hello $PATH", we don't want `$PATH` to be expanded.
691 filename
= expand_path(outtmpl
).replace(sep
, '') % template_dict
693 # Temporary fix for #4787
694 # 'Treat' all problem characters by passing filename through preferredencoding
695 # to workaround encoding issues with subprocess on python2 @ Windows
696 if sys
.version_info
< (3, 0) and sys
.platform
== 'win32':
697 filename
= encodeFilename(filename
, True).decode(preferredencoding())
698 return sanitize_path(filename
)
699 except ValueError as err
:
700 self
.report_error('Error in output template: ' + str(err
) + ' (encoding: ' + repr(preferredencoding()) + ')')
703 def _match_entry(self
, info_dict
, incomplete
):
704 """ Returns None iff the file should be downloaded """
706 video_title
= info_dict
.get('title', info_dict
.get('id', 'video'))
707 if 'title' in info_dict
:
708 # This can happen when we're just evaluating the playlist
709 title
= info_dict
['title']
710 matchtitle
= self
.params
.get('matchtitle', False)
712 if not re
.search(matchtitle
, title
, re
.IGNORECASE
):
713 return '"' + title
+ '" title did not match pattern "' + matchtitle
+ '"'
714 rejecttitle
= self
.params
.get('rejecttitle', False)
716 if re
.search(rejecttitle
, title
, re
.IGNORECASE
):
717 return '"' + title
+ '" title matched reject pattern "' + rejecttitle
+ '"'
718 date
= info_dict
.get('upload_date')
720 dateRange
= self
.params
.get('daterange', DateRange())
721 if date
not in dateRange
:
722 return '%s upload date is not in range %s' % (date_from_str(date
).isoformat(), dateRange
)
723 view_count
= info_dict
.get('view_count')
724 if view_count
is not None:
725 min_views
= self
.params
.get('min_views')
726 if min_views
is not None and view_count
< min_views
:
727 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title
, view_count
, min_views
)
728 max_views
= self
.params
.get('max_views')
729 if max_views
is not None and view_count
> max_views
:
730 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title
, view_count
, max_views
)
731 if age_restricted(info_dict
.get('age_limit'), self
.params
.get('age_limit')):
732 return 'Skipping "%s" because it is age restricted' % video_title
733 if self
.in_download_archive(info_dict
):
734 return '%s has already been recorded in archive' % video_title
737 match_filter
= self
.params
.get('match_filter')
738 if match_filter
is not None:
739 ret
= match_filter(info_dict
)
def add_extra_info(info_dict, extra_info):
    '''Copy each entry of extra_info into info_dict unless the key is already set.'''
    for k, v in extra_info.items():
        info_dict.setdefault(k, v)
751 def extract_info(self
, url
, download
=True, ie_key
=None, extra_info
={},
752 process
=True, force_generic_extractor
=False):
754 Returns a list with a dictionary for each video we find.
755 If 'download', also downloads the videos.
756 extra_info is a dict containing the extra values to add to each result
759 if not ie_key
and force_generic_extractor
:
763 ies
= [self
.get_info_extractor(ie_key
)]
768 if not ie
.suitable(url
):
771 ie
= self
.get_info_extractor(ie
.ie_key())
773 self
.report_warning('The program functionality for this site has been marked as broken, '
774 'and will probably not work.')
777 ie_result
= ie
.extract(url
)
778 if ie_result
is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
780 if isinstance(ie_result
, list):
781 # Backwards compatibility: old IE result format
783 '_type': 'compat_list',
784 'entries': ie_result
,
786 self
.add_default_extra_info(ie_result
, ie
, url
)
788 return self
.process_ie_result(ie_result
, download
, extra_info
)
791 except GeoRestrictedError
as e
:
794 msg
+= '\nThis video is available in %s.' % ', '.join(
795 map(ISO3166Utils
.short2full
, e
.countries
))
796 msg
+= '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
797 self
.report_error(msg
)
799 except ExtractorError
as e
: # An error we somewhat expected
800 self
.report_error(compat_str(e
), e
.format_traceback())
802 except MaxDownloadsReached
:
804 except Exception as e
:
805 if self
.params
.get('ignoreerrors', False):
806 self
.report_error(error_to_compat_str(e
), tb
=encode_compat_str(traceback
.format_exc()))
811 self
.report_error('no suitable InfoExtractor for URL %s' % url
)
813 def add_default_extra_info(self
, ie_result
, ie
, url
):
814 self
.add_extra_info(ie_result
, {
815 'extractor': ie
.IE_NAME
,
817 'webpage_url_basename': url_basename(url
),
818 'extractor_key': ie
.ie_key(),
821 def process_ie_result(self
, ie_result
, download
=True, extra_info
={}):
823 Take the result of the ie(may be modified) and resolve all unresolved
824 references (URLs, playlist items).
826 It will also download the videos if 'download'.
827 Returns the resolved ie_result.
829 result_type
= ie_result
.get('_type', 'video')
831 if result_type
in ('url', 'url_transparent'):
832 ie_result
['url'] = sanitize_url(ie_result
['url'])
833 extract_flat
= self
.params
.get('extract_flat', False)
834 if ((extract_flat
== 'in_playlist' and 'playlist' in extra_info
) or
835 extract_flat
is True):
836 if self
.params
.get('forcejson', False):
837 self
.to_stdout(json
.dumps(ie_result
))
840 if result_type
== 'video':
841 self
.add_extra_info(ie_result
, extra_info
)
842 return self
.process_video_result(ie_result
, download
=download
)
843 elif result_type
== 'url':
844 # We have to add extra_info to the results because it may be
845 # contained in a playlist
846 return self
.extract_info(ie_result
['url'],
848 ie_key
=ie_result
.get('ie_key'),
849 extra_info
=extra_info
)
850 elif result_type
== 'url_transparent':
851 # Use the information from the embedding page
852 info
= self
.extract_info(
853 ie_result
['url'], ie_key
=ie_result
.get('ie_key'),
854 extra_info
=extra_info
, download
=False, process
=False)
856 # extract_info may return None when ignoreerrors is enabled and
857 # extraction failed with an error, don't crash and return early
862 force_properties
= dict(
863 (k
, v
) for k
, v
in ie_result
.items() if v
is not None)
864 for f
in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
865 if f
in force_properties
:
866 del force_properties
[f
]
867 new_result
= info
.copy()
868 new_result
.update(force_properties
)
870 # Extracted info may not be a video result (i.e.
871 # info.get('_type', 'video') != video) but rather an url or
872 # url_transparent. In such cases outer metadata (from ie_result)
873 # should be propagated to inner one (info). For this to happen
874 # _type of info should be overridden with url_transparent. This
875 # fixes issue from https://github.com/rg3/youtube-dl/pull/11163.
876 if new_result
.get('_type') == 'url':
877 new_result
['_type'] = 'url_transparent'
879 return self
.process_ie_result(
880 new_result
, download
=download
, extra_info
=extra_info
)
881 elif result_type
in ('playlist', 'multi_video'):
882 # We process each entry in the playlist
883 playlist
= ie_result
.get('title') or ie_result
.get('id')
884 self
.to_screen('[download] Downloading playlist: %s' % playlist
)
886 playlist_results
= []
888 playliststart
= self
.params
.get('playliststart', 1) - 1
889 playlistend
= self
.params
.get('playlistend')
890 # For backwards compatibility, interpret -1 as whole list
891 if playlistend
== -1:
894 playlistitems_str
= self
.params
.get('playlist_items')
896 if playlistitems_str
is not None:
897 def iter_playlistitems(format
):
898 for string_segment
in format
.split(','):
899 if '-' in string_segment
:
900 start
, end
= string_segment
.split('-')
901 for item
in range(int(start
), int(end
) + 1):
904 yield int(string_segment
)
905 playlistitems
= iter_playlistitems(playlistitems_str
)
907 ie_entries
= ie_result
['entries']
908 if isinstance(ie_entries
, list):
909 n_all_entries
= len(ie_entries
)
912 ie_entries
[i
- 1] for i
in playlistitems
913 if -n_all_entries
<= i
- 1 < n_all_entries
]
915 entries
= ie_entries
[playliststart
:playlistend
]
916 n_entries
= len(entries
)
918 '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
919 (ie_result
['extractor'], playlist
, n_all_entries
, n_entries
))
920 elif isinstance(ie_entries
, PagedList
):
923 for item
in playlistitems
:
924 entries
.extend(ie_entries
.getslice(
928 entries
= ie_entries
.getslice(
929 playliststart
, playlistend
)
930 n_entries
= len(entries
)
932 '[%s] playlist %s: Downloading %d videos' %
933 (ie_result
['extractor'], playlist
, n_entries
))
936 entry_list
= list(ie_entries
)
937 entries
= [entry_list
[i
- 1] for i
in playlistitems
]
939 entries
= list(itertools
.islice(
940 ie_entries
, playliststart
, playlistend
))
941 n_entries
= len(entries
)
943 '[%s] playlist %s: Downloading %d videos' %
944 (ie_result
['extractor'], playlist
, n_entries
))
946 if self
.params
.get('playlistreverse', False):
947 entries
= entries
[::-1]
949 if self
.params
.get('playlistrandom', False):
950 random
.shuffle(entries
)
952 x_forwarded_for
= ie_result
.get('__x_forwarded_for_ip')
954 for i
, entry
in enumerate(entries
, 1):
955 self
.to_screen('[download] Downloading video %s of %s' % (i
, n_entries
))
956 # This __x_forwarded_for_ip thing is a bit ugly but requires
959 entry
['__x_forwarded_for_ip'] = x_forwarded_for
961 'n_entries': n_entries
,
962 'playlist': playlist
,
963 'playlist_id': ie_result
.get('id'),
964 'playlist_title': ie_result
.get('title'),
965 'playlist_index': i
+ playliststart
,
966 'extractor': ie_result
['extractor'],
967 'webpage_url': ie_result
['webpage_url'],
968 'webpage_url_basename': url_basename(ie_result
['webpage_url']),
969 'extractor_key': ie_result
['extractor_key'],
972 reason
= self
._match
_entry
(entry
, incomplete
=True)
973 if reason
is not None:
974 self
.to_screen('[download] ' + reason
)
977 entry_result
= self
.process_ie_result(entry
,
980 playlist_results
.append(entry_result
)
981 ie_result
['entries'] = playlist_results
982 self
.to_screen('[download] Finished downloading playlist: %s' % playlist
)
984 elif result_type
== 'compat_list':
986 'Extractor %s returned a compat_list result. '
987 'It needs to be updated.' % ie_result
.get('extractor'))
993 'extractor': ie_result
['extractor'],
994 'webpage_url': ie_result
['webpage_url'],
995 'webpage_url_basename': url_basename(ie_result
['webpage_url']),
996 'extractor_key': ie_result
['extractor_key'],
1000 ie_result
['entries'] = [
1001 self
.process_ie_result(_fixup(r
), download
, extra_info
)
1002 for r
in ie_result
['entries']
1006 raise Exception('Invalid result type: %s' % result_type
)
1008 def _build_format_filter(self
, filter_spec
):
1009 " Returns a function to filter the formats according to the filter_spec "
1019 operator_rex
= re
.compile(r
'''(?x)\s*
1020 (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps)
1021 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1022 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
1024 ''' % '|'.join(map(re
.escape
, OPERATORS
.keys())))
1025 m
= operator_rex
.search(filter_spec
)
1028 comparison_value
= int(m
.group('value'))
1030 comparison_value
= parse_filesize(m
.group('value'))
1031 if comparison_value
is None:
1032 comparison_value
= parse_filesize(m
.group('value') + 'B')
1033 if comparison_value
is None:
1035 'Invalid value %r in format specification %r' % (
1036 m
.group('value'), filter_spec
))
1037 op
= OPERATORS
[m
.group('op')]
1043 '^=': lambda attr
, value
: attr
.startswith(value
),
1044 '$=': lambda attr
, value
: attr
.endswith(value
),
1045 '*=': lambda attr
, value
: value
in attr
,
1047 str_operator_rex
= re
.compile(r
'''(?x)
1048 \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
1049 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?
1050 \s*(?P<value>[a-zA-Z0-9._-]+)
1052 ''' % '|'.join(map(re
.escape
, STR_OPERATORS
.keys())))
1053 m
= str_operator_rex
.search(filter_spec
)
1055 comparison_value
= m
.group('value')
1056 op
= STR_OPERATORS
[m
.group('op')]
1059 raise ValueError('Invalid filter specification %r' % filter_spec
)
1062 actual_value
= f
.get(m
.group('key'))
1063 if actual_value
is None:
1064 return m
.group('none_inclusive')
1065 return op(actual_value
, comparison_value
)
1068 def _default_format_spec(self
, info_dict
, download
=True):
1069 req_format_list
= []
1071 def can_have_partial_formats():
1072 if self
.params
.get('simulate', False):
1076 if self
.params
.get('outtmpl', DEFAULT_OUTTMPL
) == '-':
1078 if info_dict
.get('is_live'):
1080 merger
= FFmpegMergerPP(self
)
1081 return merger
.available
and merger
.can_merge()
1082 if can_have_partial_formats():
1083 req_format_list
.append('bestvideo+bestaudio')
1084 req_format_list
.append('best')
1085 return '/'.join(req_format_list
)
1087 def build_format_selector(self
, format_spec
):
1088 def syntax_error(note
, start
):
1090 'Invalid format specification: '
1091 '{0}\n\t{1}\n\t{2}^'.format(note
, format_spec
, ' ' * start
[1]))
1092 return SyntaxError(message
)
1094 PICKFIRST
= 'PICKFIRST'
1098 FormatSelector
= collections
.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1100 def _parse_filter(tokens
):
1102 for type, string
, start
, _
, _
in tokens
:
1103 if type == tokenize
.OP
and string
== ']':
1104 return ''.join(filter_parts
)
1106 filter_parts
.append(string
)
1108 def _remove_unused_ops(tokens
):
1109 # Remove operators that we don't use and join them with the surrounding strings
1110 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1111 ALLOWED_OPS
= ('/', '+', ',', '(', ')')
1112 last_string
, last_start
, last_end
, last_line
= None, None, None, None
1113 for type, string
, start
, end
, line
in tokens
:
1114 if type == tokenize
.OP
and string
== '[':
1116 yield tokenize
.NAME
, last_string
, last_start
, last_end
, last_line
1118 yield type, string
, start
, end
, line
1119 # everything inside brackets will be handled by _parse_filter
1120 for type, string
, start
, end
, line
in tokens
:
1121 yield type, string
, start
, end
, line
1122 if type == tokenize
.OP
and string
== ']':
1124 elif type == tokenize
.OP
and string
in ALLOWED_OPS
:
1126 yield tokenize
.NAME
, last_string
, last_start
, last_end
, last_line
1128 yield type, string
, start
, end
, line
1129 elif type in [tokenize
.NAME
, tokenize
.NUMBER
, tokenize
.OP
]:
1131 last_string
= string
1135 last_string
+= string
1137 yield tokenize
.NAME
, last_string
, last_start
, last_end
, last_line
1139 def _parse_format_selection(tokens
, inside_merge
=False, inside_choice
=False, inside_group
=False):
1141 current_selector
= None
1142 for type, string
, start
, _
, _
in tokens
:
1143 # ENCODING is only defined in python 3.x
1144 if type == getattr(tokenize
, 'ENCODING', None):
1146 elif type in [tokenize
.NAME
, tokenize
.NUMBER
]:
1147 current_selector
= FormatSelector(SINGLE
, string
, [])
1148 elif type == tokenize
.OP
:
1150 if not inside_group
:
1151 # ')' will be handled by the parentheses group
1152 tokens
.restore_last_token()
1154 elif inside_merge
and string
in ['/', ',']:
1155 tokens
.restore_last_token()
1157 elif inside_choice
and string
== ',':
1158 tokens
.restore_last_token()
1161 if not current_selector
:
1162 raise syntax_error('"," must follow a format selector', start
)
1163 selectors
.append(current_selector
)
1164 current_selector
= None
1166 if not current_selector
:
1167 raise syntax_error('"/" must follow a format selector', start
)
1168 first_choice
= current_selector
1169 second_choice
= _parse_format_selection(tokens
, inside_choice
=True)
1170 current_selector
= FormatSelector(PICKFIRST
, (first_choice
, second_choice
), [])
1172 if not current_selector
:
1173 current_selector
= FormatSelector(SINGLE
, 'best', [])
1174 format_filter
= _parse_filter(tokens
)
1175 current_selector
.filters
.append(format_filter
)
1177 if current_selector
:
1178 raise syntax_error('Unexpected "("', start
)
1179 group
= _parse_format_selection(tokens
, inside_group
=True)
1180 current_selector
= FormatSelector(GROUP
, group
, [])
1182 video_selector
= current_selector
1183 audio_selector
= _parse_format_selection(tokens
, inside_merge
=True)
1184 if not video_selector
or not audio_selector
:
1185 raise syntax_error('"+" must be between two format selectors', start
)
1186 current_selector
= FormatSelector(MERGE
, (video_selector
, audio_selector
), [])
1188 raise syntax_error('Operator not recognized: "{0}"'.format(string
), start
)
1189 elif type == tokenize
.ENDMARKER
:
1191 if current_selector
:
1192 selectors
.append(current_selector
)
1195 def _build_selector_function(selector
):
1196 if isinstance(selector
, list):
1197 fs
= [_build_selector_function(s
) for s
in selector
]
1199 def selector_function(ctx
):
1201 for format
in f(ctx
):
1203 return selector_function
1204 elif selector
.type == GROUP
:
1205 selector_function
= _build_selector_function(selector
.selector
)
1206 elif selector
.type == PICKFIRST
:
1207 fs
= [_build_selector_function(s
) for s
in selector
.selector
]
1209 def selector_function(ctx
):
1211 picked_formats
= list(f(ctx
))
1213 return picked_formats
1215 elif selector
.type == SINGLE
:
1216 format_spec
= selector
.selector
1218 def selector_function(ctx
):
1219 formats
= list(ctx
['formats'])
1222 if format_spec
== 'all':
1225 elif format_spec
in ['best', 'worst', None]:
1226 format_idx
= 0 if format_spec
== 'worst' else -1
1227 audiovideo_formats
= [
1229 if f
.get('vcodec') != 'none' and f
.get('acodec') != 'none']
1230 if audiovideo_formats
:
1231 yield audiovideo_formats
[format_idx
]
1232 # for extractors with incomplete formats (audio only (soundcloud)
1233 # or video only (imgur)) we will fallback to best/worst
1234 # {video,audio}-only format
1235 elif ctx
['incomplete_formats']:
1236 yield formats
[format_idx
]
1237 elif format_spec
== 'bestaudio':
1240 if f
.get('vcodec') == 'none']
1242 yield audio_formats
[-1]
1243 elif format_spec
== 'worstaudio':
1246 if f
.get('vcodec') == 'none']
1248 yield audio_formats
[0]
1249 elif format_spec
== 'bestvideo':
1252 if f
.get('acodec') == 'none']
1254 yield video_formats
[-1]
1255 elif format_spec
== 'worstvideo':
1258 if f
.get('acodec') == 'none']
1260 yield video_formats
[0]
1262 extensions
= ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
1263 if format_spec
in extensions
:
1264 filter_f
= lambda f
: f
['ext'] == format_spec
1266 filter_f
= lambda f
: f
['format_id'] == format_spec
1267 matches
= list(filter(filter_f
, formats
))
1270 elif selector
.type == MERGE
:
1271 def _merge(formats_info
):
1272 format_1
, format_2
= [f
['format_id'] for f
in formats_info
]
1273 # The first format must contain the video and the
1275 if formats_info
[0].get('vcodec') == 'none':
1276 self
.report_error('The first format must '
1277 'contain the video, try using '
1278 '"-f %s+%s"' % (format_2
, format_1
))
1280 # Formats must be opposite (video+audio)
1281 if formats_info
[0].get('acodec') == 'none' and formats_info
[1].get('acodec') == 'none':
1283 'Both formats %s and %s are video-only, you must specify "-f video+audio"'
1284 % (format_1
, format_2
))
1287 formats_info
[0]['ext']
1288 if self
.params
.get('merge_output_format') is None
1289 else self
.params
['merge_output_format'])
1291 'requested_formats': formats_info
,
1292 'format': '%s+%s' % (formats_info
[0].get('format'),
1293 formats_info
[1].get('format')),
1294 'format_id': '%s+%s' % (formats_info
[0].get('format_id'),
1295 formats_info
[1].get('format_id')),
1296 'width': formats_info
[0].get('width'),
1297 'height': formats_info
[0].get('height'),
1298 'resolution': formats_info
[0].get('resolution'),
1299 'fps': formats_info
[0].get('fps'),
1300 'vcodec': formats_info
[0].get('vcodec'),
1301 'vbr': formats_info
[0].get('vbr'),
1302 'stretched_ratio': formats_info
[0].get('stretched_ratio'),
1303 'acodec': formats_info
[1].get('acodec'),
1304 'abr': formats_info
[1].get('abr'),
1307 video_selector
, audio_selector
= map(_build_selector_function
, selector
.selector
)
1309 def selector_function(ctx
):
1310 for pair
in itertools
.product(
1311 video_selector(copy
.deepcopy(ctx
)), audio_selector(copy
.deepcopy(ctx
))):
1314 filters
= [self
._build
_format
_filter
(f
) for f
in selector
.filters
]
1316 def final_selector(ctx
):
1317 ctx_copy
= copy
.deepcopy(ctx
)
1318 for _filter
in filters
:
1319 ctx_copy
['formats'] = list(filter(_filter
, ctx_copy
['formats']))
1320 return selector_function(ctx_copy
)
1321 return final_selector
1323 stream
= io
.BytesIO(format_spec
.encode('utf-8'))
1325 tokens
= list(_remove_unused_ops(compat_tokenize_tokenize(stream
.readline
)))
1326 except tokenize
.TokenError
:
1327 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec
)))
1329 class TokenIterator(object):
1330 def __init__(self
, tokens
):
1331 self
.tokens
= tokens
1338 if self
.counter
>= len(self
.tokens
):
1339 raise StopIteration()
1340 value
= self
.tokens
[self
.counter
]
1346 def restore_last_token(self
):
1349 parsed_selector
= _parse_format_selection(iter(TokenIterator(tokens
)))
1350 return _build_selector_function(parsed_selector
)
def _calc_headers(self, info_dict):
    """Build the effective HTTP headers for downloading *info_dict*.

    Starts from the global default headers, layers the extractor-supplied
    'http_headers' on top, then attaches cookies and — when not already
    set — the faked X-Forwarded-For IP used for geo bypass.
    """
    headers = dict(std_headers)

    extractor_headers = info_dict.get('http_headers')
    if extractor_headers:
        headers.update(extractor_headers)

    cookie_header = self._calc_cookies(info_dict)
    if cookie_header:
        headers['Cookie'] = cookie_header

    # Propagate the spoofed client IP unless the extractor already
    # provided an X-Forwarded-For header explicitly.
    if 'X-Forwarded-For' not in headers:
        forwarded_ip = info_dict.get('__x_forwarded_for_ip')
        if forwarded_ip:
            headers['X-Forwarded-For'] = forwarded_ip

    return headers
def _calc_cookies(self, info_dict):
    """Return the Cookie header value for the video URL, or None.

    Builds a throwaway request for the URL and lets the cookie jar
    decide which stored cookies apply to it.
    """
    probe_request = sanitized_Request(info_dict['url'])
    self.cookiejar.add_cookie_header(probe_request)
    return probe_request.get_header('Cookie')
1375 def process_video_result(self
, info_dict
, download
=True):
1376 assert info_dict
.get('_type', 'video') == 'video'
1378 if 'id' not in info_dict
:
1379 raise ExtractorError('Missing "id" field in extractor result')
1380 if 'title' not in info_dict
:
1381 raise ExtractorError('Missing "title" field in extractor result')
1383 def report_force_conversion(field
, field_not
, conversion
):
1384 self
.report_warning(
1385 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
1386 % (field
, field_not
, conversion
))
1388 def sanitize_string_field(info
, string_field
):
1389 field
= info
.get(string_field
)
1390 if field
is None or isinstance(field
, compat_str
):
1392 report_force_conversion(string_field
, 'a string', 'string')
1393 info
[string_field
] = compat_str(field
)
1395 def sanitize_numeric_fields(info
):
1396 for numeric_field
in self
._NUMERIC
_FIELDS
:
1397 field
= info
.get(numeric_field
)
1398 if field
is None or isinstance(field
, compat_numeric_types
):
1400 report_force_conversion(numeric_field
, 'numeric', 'int')
1401 info
[numeric_field
] = int_or_none(field
)
1403 sanitize_string_field(info_dict
, 'id')
1404 sanitize_numeric_fields(info_dict
)
1406 if 'playlist' not in info_dict
:
1407 # It isn't part of a playlist
1408 info_dict
['playlist'] = None
1409 info_dict
['playlist_index'] = None
1411 thumbnails
= info_dict
.get('thumbnails')
1412 if thumbnails
is None:
1413 thumbnail
= info_dict
.get('thumbnail')
1415 info_dict
['thumbnails'] = thumbnails
= [{'url': thumbnail
}]
1417 thumbnails
.sort(key
=lambda t
: (
1418 t
.get('preference') if t
.get('preference') is not None else -1,
1419 t
.get('width') if t
.get('width') is not None else -1,
1420 t
.get('height') if t
.get('height') is not None else -1,
1421 t
.get('id') if t
.get('id') is not None else '', t
.get('url')))
1422 for i
, t
in enumerate(thumbnails
):
1423 t
['url'] = sanitize_url(t
['url'])
1424 if t
.get('width') and t
.get('height'):
1425 t
['resolution'] = '%dx%d' % (t
['width'], t
['height'])
1426 if t
.get('id') is None:
1429 if self
.params
.get('list_thumbnails'):
1430 self
.list_thumbnails(info_dict
)
1433 thumbnail
= info_dict
.get('thumbnail')
1435 info_dict
['thumbnail'] = sanitize_url(thumbnail
)
1437 info_dict
['thumbnail'] = thumbnails
[-1]['url']
1439 if 'display_id' not in info_dict
and 'id' in info_dict
:
1440 info_dict
['display_id'] = info_dict
['id']
1442 if info_dict
.get('upload_date') is None and info_dict
.get('timestamp') is not None:
1443 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
1444 # see http://bugs.python.org/issue1646728)
1446 upload_date
= datetime
.datetime
.utcfromtimestamp(info_dict
['timestamp'])
1447 info_dict
['upload_date'] = upload_date
.strftime('%Y%m%d')
1448 except (ValueError, OverflowError, OSError):
1451 # Auto generate title fields corresponding to the *_number fields when missing
1452 # in order to always have clean titles. This is very common for TV series.
1453 for field
in ('chapter', 'season', 'episode'):
1454 if info_dict
.get('%s_number' % field
) is not None and not info_dict
.get(field
):
1455 info_dict
[field
] = '%s %d' % (field
.capitalize(), info_dict
['%s_number' % field
])
1457 subtitles
= info_dict
.get('subtitles')
1459 for _
, subtitle
in subtitles
.items():
1460 for subtitle_format
in subtitle
:
1461 if subtitle_format
.get('url'):
1462 subtitle_format
['url'] = sanitize_url(subtitle_format
['url'])
1463 if subtitle_format
.get('ext') is None:
1464 subtitle_format
['ext'] = determine_ext(subtitle_format
['url']).lower()
1466 if self
.params
.get('listsubtitles', False):
1467 if 'automatic_captions' in info_dict
:
1468 self
.list_subtitles(info_dict
['id'], info_dict
.get('automatic_captions'), 'automatic captions')
1469 self
.list_subtitles(info_dict
['id'], subtitles
, 'subtitles')
1471 info_dict
['requested_subtitles'] = self
.process_subtitles(
1472 info_dict
['id'], subtitles
,
1473 info_dict
.get('automatic_captions'))
1475 # We now pick which formats have to be downloaded
1476 if info_dict
.get('formats') is None:
1477 # There's only one format available
1478 formats
= [info_dict
]
1480 formats
= info_dict
['formats']
1483 raise ExtractorError('No video formats found!')
1485 def is_wellformed(f
):
1488 self
.report_warning(
1489 '"url" field is missing or empty - skipping format, '
1490 'there is an error in extractor')
1492 if isinstance(url
, bytes):
1493 sanitize_string_field(f
, 'url')
1496 # Filter out malformed formats for better extraction robustness
1497 formats
= list(filter(is_wellformed
, formats
))
1501 # We check that all the formats have the format and format_id fields
1502 for i
, format
in enumerate(formats
):
1503 sanitize_string_field(format
, 'format_id')
1504 sanitize_numeric_fields(format
)
1505 format
['url'] = sanitize_url(format
['url'])
1506 if not format
.get('format_id'):
1507 format
['format_id'] = compat_str(i
)
1509 # Sanitize format_id from characters used in format selector expression
1510 format
['format_id'] = re
.sub(r
'[\s,/+\[\]()]', '_', format
['format_id'])
1511 format_id
= format
['format_id']
1512 if format_id
not in formats_dict
:
1513 formats_dict
[format_id
] = []
1514 formats_dict
[format_id
].append(format
)
1516 # Make sure all formats have unique format_id
1517 for format_id
, ambiguous_formats
in formats_dict
.items():
1518 if len(ambiguous_formats
) > 1:
1519 for i
, format
in enumerate(ambiguous_formats
):
1520 format
['format_id'] = '%s-%d' % (format_id
, i
)
1522 for i
, format
in enumerate(formats
):
1523 if format
.get('format') is None:
1524 format
['format'] = '{id} - {res}{note}'.format(
1525 id=format
['format_id'],
1526 res
=self
.format_resolution(format
),
1527 note
=' ({0})'.format(format
['format_note']) if format
.get('format_note') is not None else '',
1529 # Automatically determine file extension if missing
1530 if format
.get('ext') is None:
1531 format
['ext'] = determine_ext(format
['url']).lower()
1532 # Automatically determine protocol if missing (useful for format
1533 # selection purposes)
1534 if format
.get('protocol') is None:
1535 format
['protocol'] = determine_protocol(format
)
1536 # Add HTTP headers, so that external programs can use them from the
1538 full_format_info
= info_dict
.copy()
1539 full_format_info
.update(format
)
1540 format
['http_headers'] = self
._calc
_headers
(full_format_info
)
1541 # Remove private housekeeping stuff
1542 if '__x_forwarded_for_ip' in info_dict
:
1543 del info_dict
['__x_forwarded_for_ip']
1545 # TODO Central sorting goes here
1547 if formats
[0] is not info_dict
:
1548 # only set the 'formats' fields if the original info_dict list them
1549 # otherwise we end up with a circular reference, the first (and unique)
1550 # element in the 'formats' field in info_dict is info_dict itself,
1551 # which can't be exported to json
1552 info_dict
['formats'] = formats
1553 if self
.params
.get('listformats'):
1554 self
.list_formats(info_dict
)
1557 req_format
= self
.params
.get('format')
1558 if req_format
is None:
1559 req_format
= self
._default
_format
_spec
(info_dict
, download
=download
)
1560 if self
.params
.get('verbose'):
1561 self
.to_stdout('[debug] Default format spec: %s' % req_format
)
1563 format_selector
= self
.build_format_selector(req_format
)
1565 # While in format selection we may need to have an access to the original
1566 # format set in order to calculate some metrics or do some processing.
1567 # For now we need to be able to guess whether original formats provided
1568 # by extractor are incomplete or not (i.e. whether extractor provides only
1569 # video-only or audio-only formats) for proper formats selection for
1570 # extractors with such incomplete formats (see
1571 # https://github.com/rg3/youtube-dl/pull/5556).
1572 # Since formats may be filtered during format selection and may not match
1573 # the original formats the results may be incorrect. Thus original formats
1574 # or pre-calculated metrics should be passed to format selection routines
1576 # We will pass a context object containing all necessary additional data
1577 # instead of just formats.
1578 # This fixes incorrect format selection issue (see
1579 # https://github.com/rg3/youtube-dl/issues/10083).
1580 incomplete_formats
= (
1581 # All formats are video-only or
1582 all(f
.get('vcodec') != 'none' and f
.get('acodec') == 'none' for f
in formats
) or
1583 # all formats are audio-only
1584 all(f
.get('vcodec') == 'none' and f
.get('acodec') != 'none' for f
in formats
))
1588 'incomplete_formats': incomplete_formats
,
1591 formats_to_download
= list(format_selector(ctx
))
1592 if not formats_to_download
:
1593 raise ExtractorError('requested format not available',
1597 if len(formats_to_download
) > 1:
1598 self
.to_screen('[info] %s: downloading video in %s formats' % (info_dict
['id'], len(formats_to_download
)))
1599 for format
in formats_to_download
:
1600 new_info
= dict(info_dict
)
1601 new_info
.update(format
)
1602 self
.process_info(new_info
)
1603 # We update the info dict with the best quality format (backwards compatibility)
1604 info_dict
.update(formats_to_download
[-1])
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
    """Select the requested subtitles and their format"""
    # Pool of candidate subtitle tracks, keyed by language.  Manually
    # provided subtitles win over automatic captions for a language.
    candidates = {}
    if normal_subtitles and self.params.get('writesubtitles'):
        candidates.update(normal_subtitles)
    if automatic_captions and self.params.get('writeautomaticsub'):
        for lang, cap_info in automatic_captions.items():
            candidates.setdefault(lang, cap_info)

    wants_subs = self.params.get('writesubtitles')
    wants_auto = self.params.get('writeautomaticsub')
    if (not wants_subs and not wants_auto) or not candidates:
        return None

    # Work out which languages the user asked for.
    if self.params.get('allsubtitles', False):
        requested_langs = candidates.keys()
    elif self.params.get('subtitleslangs', False):
        requested_langs = self.params.get('subtitleslangs')
    elif 'en' in candidates:
        requested_langs = ['en']
    else:
        requested_langs = [list(candidates.keys())[0]]

    formats_query = self.params.get('subtitlesformat', 'best')
    formats_preference = formats_query.split('/') if formats_query else []

    selected = {}
    for lang in requested_langs:
        formats = candidates.get(lang)
        if formats is None:
            self.report_warning('%s subtitles not available for %s' % (lang, video_id))
            continue
        for ext in formats_preference:
            if ext == 'best':
                f = formats[-1]
                break
            matches = [f for f in formats if f['ext'] == ext]
            if matches:
                f = matches[-1]
                break
        else:
            # No preferred extension matched: fall back to the last
            # listed format and tell the user which one was picked.
            f = formats[-1]
            self.report_warning(
                'No subtitle format found matching "%s" for language %s, '
                'using %s' % (formats_query, lang, f['ext']))
        selected[lang] = f
    return selected
1656 def process_info(self
, info_dict
):
1657 """Process a single resolved IE result."""
1659 assert info_dict
.get('_type', 'video') == 'video'
1661 max_downloads
= self
.params
.get('max_downloads')
1662 if max_downloads
is not None:
1663 if self
._num
_downloads
>= int(max_downloads
):
1664 raise MaxDownloadsReached()
1666 info_dict
['fulltitle'] = info_dict
['title']
1667 if len(info_dict
['title']) > 200:
1668 info_dict
['title'] = info_dict
['title'][:197] + '...'
1670 if 'format' not in info_dict
:
1671 info_dict
['format'] = info_dict
['ext']
1673 reason
= self
._match
_entry
(info_dict
, incomplete
=False)
1674 if reason
is not None:
1675 self
.to_screen('[download] ' + reason
)
1678 self
._num
_downloads
+= 1
1680 info_dict
['_filename'] = filename
= self
.prepare_filename(info_dict
)
1683 if self
.params
.get('forcetitle', False):
1684 self
.to_stdout(info_dict
['fulltitle'])
1685 if self
.params
.get('forceid', False):
1686 self
.to_stdout(info_dict
['id'])
1687 if self
.params
.get('forceurl', False):
1688 if info_dict
.get('requested_formats') is not None:
1689 for f
in info_dict
['requested_formats']:
1690 self
.to_stdout(f
['url'] + f
.get('play_path', ''))
1692 # For RTMP URLs, also include the playpath
1693 self
.to_stdout(info_dict
['url'] + info_dict
.get('play_path', ''))
1694 if self
.params
.get('forcethumbnail', False) and info_dict
.get('thumbnail') is not None:
1695 self
.to_stdout(info_dict
['thumbnail'])
1696 if self
.params
.get('forcedescription', False) and info_dict
.get('description') is not None:
1697 self
.to_stdout(info_dict
['description'])
1698 if self
.params
.get('forcefilename', False) and filename
is not None:
1699 self
.to_stdout(filename
)
1700 if self
.params
.get('forceduration', False) and info_dict
.get('duration') is not None:
1701 self
.to_stdout(formatSeconds(info_dict
['duration']))
1702 if self
.params
.get('forceformat', False):
1703 self
.to_stdout(info_dict
['format'])
1704 if self
.params
.get('forcejson', False):
1705 self
.to_stdout(json
.dumps(info_dict
))
1707 # Do nothing else if in simulate mode
1708 if self
.params
.get('simulate', False):
1711 if filename
is None:
1714 def ensure_dir_exists(path
):
1716 dn
= os
.path
.dirname(path
)
1717 if dn
and not os
.path
.exists(dn
):
1720 except (OSError, IOError) as err
:
1721 self
.report_error('unable to create directory ' + error_to_compat_str(err
))
1724 if not ensure_dir_exists(sanitize_path(encodeFilename(filename
))):
1727 if self
.params
.get('writedescription', False):
1728 descfn
= replace_extension(filename
, 'description', info_dict
.get('ext'))
1729 if self
.params
.get('nooverwrites', False) and os
.path
.exists(encodeFilename(descfn
)):
1730 self
.to_screen('[info] Video description is already present')
1731 elif info_dict
.get('description') is None:
1732 self
.report_warning('There\'s no description to write.')
1735 self
.to_screen('[info] Writing video description to: ' + descfn
)
1736 with io
.open(encodeFilename(descfn
), 'w', encoding
='utf-8') as descfile
:
1737 descfile
.write(info_dict
['description'])
1738 except (OSError, IOError):
1739 self
.report_error('Cannot write description file ' + descfn
)
1742 if self
.params
.get('writeannotations', False):
1743 annofn
= replace_extension(filename
, 'annotations.xml', info_dict
.get('ext'))
1744 if self
.params
.get('nooverwrites', False) and os
.path
.exists(encodeFilename(annofn
)):
1745 self
.to_screen('[info] Video annotations are already present')
1748 self
.to_screen('[info] Writing video annotations to: ' + annofn
)
1749 with io
.open(encodeFilename(annofn
), 'w', encoding
='utf-8') as annofile
:
1750 annofile
.write(info_dict
['annotations'])
1751 except (KeyError, TypeError):
1752 self
.report_warning('There are no annotations to write.')
1753 except (OSError, IOError):
1754 self
.report_error('Cannot write annotations file: ' + annofn
)
1757 subtitles_are_requested
= any([self
.params
.get('writesubtitles', False),
1758 self
.params
.get('writeautomaticsub')])
1760 if subtitles_are_requested
and info_dict
.get('requested_subtitles'):
1761 # subtitles download errors are already managed as troubles in relevant IE
1762 # that way it will silently go on when used with unsupporting IE
1763 subtitles
= info_dict
['requested_subtitles']
1764 ie
= self
.get_info_extractor(info_dict
['extractor_key'])
1765 for sub_lang
, sub_info
in subtitles
.items():
1766 sub_format
= sub_info
['ext']
1767 sub_filename
= subtitles_filename(filename
, sub_lang
, sub_format
)
1768 if self
.params
.get('nooverwrites', False) and os
.path
.exists(encodeFilename(sub_filename
)):
1769 self
.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang
, sub_format
))
1771 self
.to_screen('[info] Writing video subtitles to: ' + sub_filename
)
1772 if sub_info
.get('data') is not None:
1774 # Use newline='' to prevent conversion of newline characters
1775 # See https://github.com/rg3/youtube-dl/issues/10268
1776 with io
.open(encodeFilename(sub_filename
), 'w', encoding
='utf-8', newline
='') as subfile
:
1777 subfile
.write(sub_info
['data'])
1778 except (OSError, IOError):
1779 self
.report_error('Cannot write subtitles file ' + sub_filename
)
1783 sub_data
= ie
._request
_webpage
(
1784 sub_info
['url'], info_dict
['id'], note
=False).read()
1785 with io
.open(encodeFilename(sub_filename
), 'wb') as subfile
:
1786 subfile
.write(sub_data
)
1787 except (ExtractorError
, IOError, OSError, ValueError) as err
:
1788 self
.report_warning('Unable to download subtitle for "%s": %s' %
1789 (sub_lang
, error_to_compat_str(err
)))
1792 if self
.params
.get('writeinfojson', False):
1793 infofn
= replace_extension(filename
, 'info.json', info_dict
.get('ext'))
1794 if self
.params
.get('nooverwrites', False) and os
.path
.exists(encodeFilename(infofn
)):
1795 self
.to_screen('[info] Video description metadata is already present')
1797 self
.to_screen('[info] Writing video description metadata as JSON to: ' + infofn
)
1799 write_json_file(self
.filter_requested_info(info_dict
), infofn
)
1800 except (OSError, IOError):
1801 self
.report_error('Cannot write metadata to JSON file ' + infofn
)
1804 self
._write
_thumbnails
(info_dict
, filename
)
1806 if not self
.params
.get('skip_download', False):
1809 fd
= get_suitable_downloader(info
, self
.params
)(self
, self
.params
)
1810 for ph
in self
._progress
_hooks
:
1811 fd
.add_progress_hook(ph
)
1812 if self
.params
.get('verbose'):
1813 self
.to_stdout('[debug] Invoking downloader on %r' % info
.get('url'))
1814 return fd
.download(name
, info
)
1816 if info_dict
.get('requested_formats') is not None:
1819 merger
= FFmpegMergerPP(self
)
1820 if not merger
.available
:
1822 self
.report_warning('You have requested multiple '
1823 'formats but ffmpeg or avconv are not installed.'
1824 ' The formats won\'t be merged.')
1826 postprocessors
= [merger
]
1828 def compatible_formats(formats
):
1829 video
, audio
= formats
1831 video_ext
, audio_ext
= audio
.get('ext'), video
.get('ext')
1832 if video_ext
and audio_ext
:
1834 ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
1837 for exts
in COMPATIBLE_EXTS
:
1838 if video_ext
in exts
and audio_ext
in exts
:
1840 # TODO: Check acodec/vcodec
1843 filename_real_ext
= os
.path
.splitext(filename
)[1][1:]
1845 os
.path
.splitext(filename
)[0]
1846 if filename_real_ext
== info_dict
['ext']
1848 requested_formats
= info_dict
['requested_formats']
1849 if self
.params
.get('merge_output_format') is None and not compatible_formats(requested_formats
):
1850 info_dict
['ext'] = 'mkv'
1851 self
.report_warning(
1852 'Requested formats are incompatible for merge and will be merged into mkv.')
1853 # Ensure filename always has a correct extension for successful merge
1854 filename
= '%s.%s' % (filename_wo_ext
, info_dict
['ext'])
1855 if os
.path
.exists(encodeFilename(filename
)):
1857 '[download] %s has already been downloaded and '
1858 'merged' % filename
)
1860 for f
in requested_formats
:
1861 new_info
= dict(info_dict
)
1863 fname
= prepend_extension(
1864 self
.prepare_filename(new_info
),
1865 'f%s' % f
['format_id'], new_info
['ext'])
1866 if not ensure_dir_exists(fname
):
1868 downloaded
.append(fname
)
1869 partial_success
= dl(fname
, new_info
)
1870 success
= success
and partial_success
1871 info_dict
['__postprocessors'] = postprocessors
1872 info_dict
['__files_to_merge'] = downloaded
1874 # Just a single file
1875 success
= dl(filename
, info_dict
)
1876 except (compat_urllib_error
.URLError
, compat_http_client
.HTTPException
, socket
.error
) as err
:
1877 self
.report_error('unable to download video data: %s' % error_to_compat_str(err
))
1879 except (OSError, IOError) as err
:
1880 raise UnavailableVideoError(err
)
1881 except (ContentTooShortError
, ) as err
:
1882 self
.report_error('content too short (expected %s bytes and served %s)' % (err
.expected
, err
.downloaded
))
1885 if success
and filename
!= '-':
1887 fixup_policy
= self
.params
.get('fixup')
1888 if fixup_policy
is None:
1889 fixup_policy
= 'detect_or_warn'
1891 INSTALL_FFMPEG_MESSAGE
= 'Install ffmpeg or avconv to fix this automatically.'
1893 stretched_ratio
= info_dict
.get('stretched_ratio')
1894 if stretched_ratio
is not None and stretched_ratio
!= 1:
1895 if fixup_policy
== 'warn':
1896 self
.report_warning('%s: Non-uniform pixel ratio (%s)' % (
1897 info_dict
['id'], stretched_ratio
))
1898 elif fixup_policy
== 'detect_or_warn':
1899 stretched_pp
= FFmpegFixupStretchedPP(self
)
1900 if stretched_pp
.available
:
1901 info_dict
.setdefault('__postprocessors', [])
1902 info_dict
['__postprocessors'].append(stretched_pp
)
1904 self
.report_warning(
1905 '%s: Non-uniform pixel ratio (%s). %s'
1906 % (info_dict
['id'], stretched_ratio
, INSTALL_FFMPEG_MESSAGE
))
1908 assert fixup_policy
in ('ignore', 'never')
1910 if (info_dict
.get('requested_formats') is None and
1911 info_dict
.get('container') == 'm4a_dash'):
1912 if fixup_policy
== 'warn':
1913 self
.report_warning(
1914 '%s: writing DASH m4a. '
1915 'Only some players support this container.'
1917 elif fixup_policy
== 'detect_or_warn':
1918 fixup_pp
= FFmpegFixupM4aPP(self
)
1919 if fixup_pp
.available
:
1920 info_dict
.setdefault('__postprocessors', [])
1921 info_dict
['__postprocessors'].append(fixup_pp
)
1923 self
.report_warning(
1924 '%s: writing DASH m4a. '
1925 'Only some players support this container. %s'
1926 % (info_dict
['id'], INSTALL_FFMPEG_MESSAGE
))
1928 assert fixup_policy
in ('ignore', 'never')
1930 if (info_dict
.get('protocol') == 'm3u8_native' or
1931 info_dict
.get('protocol') == 'm3u8' and
1932 self
.params
.get('hls_prefer_native')):
1933 if fixup_policy
== 'warn':
1934 self
.report_warning('%s: malformed AAC bitstream detected.' % (
1936 elif fixup_policy
== 'detect_or_warn':
1937 fixup_pp
= FFmpegFixupM3u8PP(self
)
1938 if fixup_pp
.available
:
1939 info_dict
.setdefault('__postprocessors', [])
1940 info_dict
['__postprocessors'].append(fixup_pp
)
1942 self
.report_warning(
1943 '%s: malformed AAC bitstream detected. %s'
1944 % (info_dict
['id'], INSTALL_FFMPEG_MESSAGE
))
1946 assert fixup_policy
in ('ignore', 'never')
1949 self
.post_process(filename
, info_dict
)
1950 except (PostProcessingError
) as err
:
1951 self
.report_error('postprocessing: %s' % str(err
))
1953 self
.record_download_archive(info_dict
)
def download(self, url_list):
    """Download a given list of URLs.

    Each URL is passed through extract_info (which also performs the
    actual download). Per-video failures are reported but do not abort
    the remaining URLs; MaxDownloadsReached does abort.

    Returns the accumulated download return code.
    """
    outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
    # Refuse a fixed output template for multiple downloads: every video
    # would be written to the same file ('-' means stdout and is allowed).
    if (len(url_list) > 1 and
            outtmpl != '-' and
            '%' not in outtmpl and
            self.params.get('max_downloads') != 1):
        raise SameFileError(outtmpl)

    for url in url_list:
        try:
            # It also downloads the videos
            res = self.extract_info(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))
        except UnavailableVideoError:
            self.report_error('unable to download video')
        except MaxDownloadsReached:
            self.to_screen('[info] Maximum number of downloaded files reached.')
            raise
        else:
            if self.params.get('dump_single_json', False):
                self.to_stdout(json.dumps(res))

    return self._download_retcode
def download_with_info_file(self, info_filename):
    """Download using a .info.json file previously written by youtube-dl.

    Falls back to re-extracting from the recorded webpage_url if
    processing the stored info fails.  Returns the download return code.
    """
    with contextlib.closing(fileinput.FileInput(
            [info_filename], mode='r',
            openhook=fileinput.hook_encoded('utf-8'))) as f:
        # FileInput doesn't have a read method, we can't call json.load
        info = self.filter_requested_info(json.loads('\n'.join(f)))
    try:
        self.process_ie_result(info, download=True)
    except DownloadError:
        webpage_url = info.get('webpage_url')
        if webpage_url is not None:
            # Retry from scratch with a fresh extraction of the same page.
            self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
            return self.download([webpage_url])
        else:
            raise
    return self._download_retcode
1998 def filter_requested_info(info_dict
):
2000 (k
, v
) for k
, v
in info_dict
.items()
2001 if k
not in ['requested_formats', 'requested_subtitles'])
def post_process(self, filename, ie_info):
    """Run all the postprocessors on the given file."""
    info = dict(ie_info)
    info['filepath'] = filename
    # Format-specific postprocessors (e.g. the merger) run before the
    # user-configured chain in self._pps.
    pps_chain = []
    if ie_info.get('__postprocessors') is not None:
        pps_chain.extend(ie_info['__postprocessors'])
    pps_chain.extend(self._pps)
    for pp in pps_chain:
        files_to_delete = []
        try:
            files_to_delete, info = pp.run(info)
        except PostProcessingError as e:
            self.report_error(e.msg)
        if files_to_delete and not self.params.get('keepvideo', False):
            for old_filename in files_to_delete:
                self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
                try:
                    os.remove(encodeFilename(old_filename))
                except (IOError, OSError):
                    # Best effort: a leftover original is not fatal.
                    self.report_warning('Unable to remove downloaded original file')
2025 def _make_archive_id(self
, info_dict
):
2026 # Future-proof against any change in case
2027 # and backwards compatibility with prior versions
2028 extractor
= info_dict
.get('extractor_key')
2029 if extractor
is None:
2030 if 'id' in info_dict
:
2031 extractor
= info_dict
.get('ie_key') # key in a playlist
2032 if extractor
is None:
2033 return None # Incomplete video information
2034 return extractor
.lower() + ' ' + info_dict
['id']
def in_download_archive(self, info_dict):
    """Return True if info_dict's archive id is recorded in the archive file.

    Returns False when no archive is configured, when the video
    information is incomplete, or when the archive file does not exist.
    """
    fn = self.params.get('download_archive')
    if fn is None:
        return False

    vid_id = self._make_archive_id(info_dict)
    if vid_id is None:
        return False  # Incomplete video information

    try:
        with locked_file(fn, 'r', encoding='utf-8') as archive_file:
            for line in archive_file:
                if line.strip() == vid_id:
                    return True
    except IOError as ioe:
        # A missing archive file simply means nothing was recorded yet;
        # any other I/O failure is a real error.
        if ioe.errno != errno.ENOENT:
            raise
    return False
def record_download_archive(self, info_dict):
    """Append info_dict's archive id to the download archive file, if configured."""
    fn = self.params.get('download_archive')
    if fn is None:
        return
    vid_id = self._make_archive_id(info_dict)
    # By the time a download finished, the id must be derivable.
    assert vid_id
    with locked_file(fn, 'a', encoding='utf-8') as archive_file:
        archive_file.write(vid_id + '\n')
2065 def format_resolution(format
, default
='unknown'):
2066 if format
.get('vcodec') == 'none':
2068 if format
.get('resolution') is not None:
2069 return format
['resolution']
2070 if format
.get('height') is not None:
2071 if format
.get('width') is not None:
2072 res
= '%sx%s' % (format
['width'], format
['height'])
2074 res
= '%sp' % format
['height']
2075 elif format
.get('width') is not None:
2076 res
= '%dx?' % format
['width']
2081 def _format_note(self
, fdict
):
2083 if fdict
.get('ext') in ['f4f', 'f4m']:
2084 res
+= '(unsupported) '
2085 if fdict
.get('language'):
2088 res
+= '[%s] ' % fdict
['language']
2089 if fdict
.get('format_note') is not None:
2090 res
+= fdict
['format_note'] + ' '
2091 if fdict
.get('tbr') is not None:
2092 res
+= '%4dk ' % fdict
['tbr']
2093 if fdict
.get('container') is not None:
2096 res
+= '%s container' % fdict
['container']
2097 if (fdict
.get('vcodec') is not None and
2098 fdict
.get('vcodec') != 'none'):
2101 res
+= fdict
['vcodec']
2102 if fdict
.get('vbr') is not None:
2104 elif fdict
.get('vbr') is not None and fdict
.get('abr') is not None:
2106 if fdict
.get('vbr') is not None:
2107 res
+= '%4dk' % fdict
['vbr']
2108 if fdict
.get('fps') is not None:
2111 res
+= '%sfps' % fdict
['fps']
2112 if fdict
.get('acodec') is not None:
2115 if fdict
['acodec'] == 'none':
2118 res
+= '%-5s' % fdict
['acodec']
2119 elif fdict
.get('abr') is not None:
2123 if fdict
.get('abr') is not None:
2124 res
+= '@%3dk' % fdict
['abr']
2125 if fdict
.get('asr') is not None:
2126 res
+= ' (%5dHz)' % fdict
['asr']
2127 if fdict
.get('filesize') is not None:
2130 res
+= format_bytes(fdict
['filesize'])
2131 elif fdict
.get('filesize_approx') is not None:
2134 res
+= '~' + format_bytes(fdict
['filesize_approx'])
def list_formats(self, info_dict):
    """Print a table of all available formats for info_dict."""
    formats = info_dict.get('formats', [info_dict])
    table = [
        [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)]
        for f in formats
        # Formats below -1000 preference are hidden from the listing.
        if f.get('preference') is None or f['preference'] >= -1000]
    if len(formats) > 1:
        # The last row is the best format by sorting convention.
        table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'

    header_line = ['format code', 'extension', 'resolution', 'note']
    self.to_screen(
        '[info] Available formats for %s:\n%s' %
        (info_dict['id'], render_table(header_line, table)))
def list_thumbnails(self, info_dict):
    """Print a table of all available thumbnails for info_dict."""
    thumbnails = info_dict.get('thumbnails')
    if not thumbnails:
        self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
        return

    self.to_screen(
        '[info] Thumbnails for %s:' % info_dict['id'])
    self.to_screen(render_table(
        ['ID', 'width', 'height', 'URL'],
        [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
def list_subtitles(self, video_id, subtitles, name='subtitles'):
    """Print a table of available subtitle languages and their formats.

    *name* distinguishes regular subtitles from automatic captions in
    the printed messages.
    """
    if not subtitles:
        self.to_screen('%s has no %s' % (video_id, name))
        return
    self.to_screen(
        'Available %s for %s:' % (name, video_id))
    self.to_screen(render_table(
        ['Language', 'formats'],
        # reversed(): formats are sorted worst-first internally.
        [[lang, ', '.join(f['ext'] for f in reversed(formats))]
            for lang, formats in subtitles.items()]))
def urlopen(self, req):
    """ Start an HTTP download """
    # Accept a plain URL string as well as a Request object.
    if isinstance(req, compat_basestring):
        req = sanitized_Request(req)
    return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
    """Write version/environment diagnostics when --verbose is active.

    Covers encodings, youtube-dl version, git revision (when run from a
    checkout), Python/platform, external program versions, the proxy map
    and optionally (--call-home) the public IP and latest release check.
    """
    if not self.params.get('verbose'):
        return

    if type('') is not compat_str:
        # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
        self.report_warning(
            'Your Python is broken! Update to a newer and supported version')

    stdout_encoding = getattr(
        sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
    encoding_str = (
        '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            stdout_encoding,
            self.get_encoding()))
    write_string(encoding_str, encoding=None)

    self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
    if _LAZY_LOADER:
        self._write_string('[debug] Lazy loading extractors enabled' + '\n')
    try:
        # Report the git revision when running from a source checkout.
        sp = subprocess.Popen(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            cwd=os.path.dirname(os.path.abspath(__file__)))
        out, err = sp.communicate()
        out = out.decode().strip()
        if re.match('[0-9a-f]+', out):
            self._write_string('[debug] Git HEAD: ' + out + '\n')
    except Exception:
        # Not a git checkout / git unavailable - purely informational.
        try:
            sys.exc_clear()
        except Exception:
            pass
    self._write_string('[debug] Python version %s - %s\n' % (
        platform.python_version(), platform_name()))

    exe_versions = FFmpegPostProcessor.get_versions(self)
    exe_versions['rtmpdump'] = rtmpdump_version()
    exe_versions['phantomjs'] = PhantomJSwrapper._version()
    exe_str = ', '.join(
        '%s %s' % (exe, v)
        for exe, v in sorted(exe_versions.items())
        if v
    )
    if not exe_str:
        exe_str = 'none'
    self._write_string('[debug] exe versions: %s\n' % exe_str)

    proxy_map = {}
    for handler in self._opener.handlers:
        if hasattr(handler, 'proxies'):
            proxy_map.update(handler.proxies)
    self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')

    if self.params.get('call_home', False):
        ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
        self._write_string('[debug] Public IP address: %s\n' % ipaddr)
        latest_version = self.urlopen(
            'https://yt-dl.org/latest/version').read().decode('utf-8')
        if version_tuple(latest_version) > version_tuple(__version__):
            self.report_warning(
                'You are using an outdated version (newest version: %s)! '
                'See https://yt-dl.org/update if you need help updating.' %
                latest_version)
def _setup_opener(self):
    """Build the urllib opener used for all HTTP(S) traffic.

    Configures the socket timeout, cookie jar (in-memory or Mozilla
    cookie file), per-request proxy handling, our HTTPS/YoutubeDL
    handlers, data: URL support and a disabled file: handler.
    """
    timeout_val = self.params.get('socket_timeout')
    self._socket_timeout = 600 if timeout_val is None else float(timeout_val)

    opts_cookiefile = self.params.get('cookiefile')
    opts_proxy = self.params.get('proxy')

    if opts_cookiefile is None:
        self.cookiejar = compat_cookiejar.CookieJar()
    else:
        opts_cookiefile = expand_path(opts_cookiefile)
        self.cookiejar = compat_cookiejar.MozillaCookieJar(
            opts_cookiefile)
        if os.access(opts_cookiefile, os.R_OK):
            self.cookiejar.load()

    cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
    if opts_proxy is not None:
        if opts_proxy == '':
            # --proxy "" explicitly disables any proxy.
            proxies = {}
        else:
            proxies = {'http': opts_proxy, 'https': opts_proxy}
    else:
        proxies = compat_urllib_request.getproxies()
        # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
        if 'http' in proxies and 'https' not in proxies:
            proxies['https'] = proxies['http']
    proxy_handler = PerRequestProxyHandler(proxies)

    debuglevel = 1 if self.params.get('debug_printtraffic') else 0
    https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
    ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
    data_handler = compat_urllib_request_DataHandler()

    # When passing our own FileHandler instance, build_opener won't add the
    # default FileHandler and allows us to disable the file protocol, which
    # can be used for malicious purposes (see
    # https://github.com/rg3/youtube-dl/issues/8227)
    file_handler = compat_urllib_request.FileHandler()

    def file_open(*args, **kwargs):
        raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons')
    file_handler.file_open = file_open

    opener = compat_urllib_request.build_opener(
        proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler)

    # Delete the default user-agent header, which would otherwise apply in
    # cases where our custom HTTP handler doesn't come into play
    # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
    opener.addheaders = []
    self._opener = opener
def encode(self, s):
    """Encode *s* to bytes using the configured output encoding.

    Bytes pass through unchanged.  On encode failure, the error reason
    is augmented with a hint about --encoding before re-raising.
    """
    if isinstance(s, bytes):
        return s  # Already encoded

    try:
        return s.encode(self.get_encoding())
    except UnicodeEncodeError as err:
        err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
        raise
def get_encoding(self):
    """Return the output encoding: the 'encoding' param or the system preference."""
    encoding = self.params.get('encoding')
    if encoding is None:
        encoding = preferredencoding()
    return encoding
def _write_thumbnails(self, info_dict, filename):
    """Download thumbnail image(s) next to the video file.

    --writethumbnail saves only the last (best) thumbnail;
    --write-all-thumbnails saves every one.  Records each written path
    in the thumbnail dict under 'filename'.
    """
    if self.params.get('writethumbnail', False):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails:
            # Thumbnails are sorted worst-first; keep only the best one.
            thumbnails = [thumbnails[-1]]
    elif self.params.get('write_all_thumbnails', False):
        thumbnails = info_dict.get('thumbnails')
    else:
        return

    if not thumbnails:
        # No thumbnails present, so return immediately
        return

    for t in thumbnails:
        thumb_ext = determine_ext(t['url'], 'jpg')
        # Only disambiguate names/messages when writing several thumbnails.
        suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
        thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
        t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext

        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
            self.to_screen('[%s] %s: Thumbnail %sis already present' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id))
        else:
            self.to_screen('[%s] %s: Downloading thumbnail %s...' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id))
            try:
                uf = self.urlopen(t['url'])
                with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                    shutil.copyfileobj(uf, thumbf)
                self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                # Thumbnail failure must not fail the video download.
                self.report_warning('Unable to download thumbnail "%s": %s' %
                                    (t['url'], error_to_compat_str(err)))