from __future__ import unicode_literals

import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time

from ..compat import (
    compat_cookiejar, compat_cookies, compat_etree_fromstring,
    compat_getpass, compat_http_client, compat_str, compat_urllib_error,
    compat_urllib_parse, compat_urllib_parse_urlparse, compat_urlparse,
)
from ..utils import (
    NO_DEFAULT, ExtractorError, RegexNotFoundError, age_restricted,
    bug_reports_message, clean_html, compiled_regex_type, determine_ext,
    fix_xml_ampersands, float_or_none, int_or_none, sanitize_filename,
    sanitized_Request, unescapeHTML, unified_strdate, url_basename,
    xpath_text, xpath_with_ns,
)

class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, the author,
    and so on. The information is stored in a dictionary which is then
    passed to the YoutubeDL, which processes this information, possibly
    downloading the video to the file system, among other possible outcomes.

    The _type field determines the type of the result.
    By far the most common value (and the default if _type is missing) is
    "video", which indicates a single video.

    For a video, the dictionaries must include the following fields:
    id:             Video identifier.
    title:          Video title, unescaped.

    Additionally, it must contain either a formats entry or a url one:
    formats:        A list of dictionaries for each format available, ordered
                    from worst to best quality.

                    Potential fields:
                    * url        Mandatory. The URL of the video file
                    * ext        Will be calculated from URL if missing
                    * format     A human-readable description of the format
                                 ("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
                    * format_id  A short description of the format
                                 ("mp4_h264_opus" or "19").
                                 Technically optional, but strongly recommended.
                    * format_note Additional info about the format
                                 ("3D" or "DASH video")
                    * width      Width of the video, if known
                    * height     Height of the video, if known
                    * resolution Textual description of width and height
                    * tbr        Average bitrate of audio and video in KBit/s
                    * abr        Average audio bitrate in KBit/s
                    * acodec     Name of the audio codec in use
                    * asr        Audio sampling rate in Hertz
                    * vbr        Average video bitrate in KBit/s
                    * fps        Frame rate
                    * vcodec     Name of the video codec in use
                    * container  Name of the container format
                    * filesize   The number of bytes, if known in advance
                    * filesize_approx  An estimate for the number of bytes
                    * player_url SWF Player URL (used for rtmpdump).
                    * protocol   The protocol that will be used for the actual
                                 download, lower-case.
                                 "http", "https", "rtsp", "rtmp", "rtmpe",
                                 "m3u8", or "m3u8_native".
                    * preference Order number of this format. If this field is
                                 present and not None, the formats get sorted
                                 by this field, regardless of all other values.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                                 < -1000 to hide the format (if there is
                                 another one which is strictly better)
                    * language_preference  Is this in the correct requested
                                 language?
                                 10 if it's what the URL is about,
                                 -1 for default (don't know),
                                 -10 otherwise, other values reserved for now.
                    * quality    Order number of the video quality of this
                                 format, irrespective of the file format.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * source_preference  Order number for this video source
                                 (quality takes higher priority)
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * http_headers  A dictionary of additional HTTP headers
                                 to add to the request.
                    * stretched_ratio  If given and not 1, indicates that the
                                 video's pixels are not square.
                                 width : height ratio as float.
                    * no_resume  The server does not support resuming the
                                 (HTTP or RTMP) download. Boolean.
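
                    As an illustration only (hypothetical values, not any
                    real extractor's output), a single formats entry might
                    look like:

                        {
                            'url': 'https://example.com/video_hd.mp4',
                            'format_id': 'hd',
                            'ext': 'mp4',
                            'width': 1280,
                            'height': 720,
                            'tbr': 1500,
                            'vcodec': 'h264',
                            'acodec': 'aac',
                        }
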
    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
    player_url:     SWF Player URL (used for rtmpdump).
    The following fields are optional:

    alt_title:      A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats"
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
                                        deprecated)
    thumbnail:      Full URL to a video thumbnail image.
    description:    Full video description.
    uploader:       Full name of the video uploader.
    creator:        The main artist who created the video.
    release_date:   The date (YYYYMMDD) when the video was released.
    timestamp:      UNIX timestamp of the moment the video became available.
    upload_date:    Video upload date (YYYYMMDD).
                    If not explicitly set, calculated from timestamp.
    uploader_id:    Nickname or id of the video uploader.
    location:       Physical location where the video was filmed.
    subtitles:      The available subtitles as a dictionary in the format
                    {language: subformats}. "subformats" is a list sorted from
                    lower to higher preference, each element is a dictionary
                    with the "ext" entry and one of:
                        * "data": The subtitles file contents
                        * "url": A URL pointing to the subtitles file
                    "ext" will be calculated from URL if missing
    automatic_captions: Like 'subtitles', used by the YoutubeIE for
                    automatically generated captions
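
                    For illustration, a hypothetical subtitles (or
                    automatic_captions) value:

                        {'en': [{'ext': 'vtt',
                                 'url': 'https://example.com/subs.en.vtt'}]}
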
    duration:       Length of the video in seconds, as an integer.
    view_count:     How many users have watched the video on the platform.
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    repost_count:   Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on
                    the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all but one of text or html optional):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "id" - Comment ID
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage; if given to youtube-dl it
                    should allow getting the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream rather than a fixed-length video.
    start_time:     Time in seconds where the playback should start, as
                    specified in the URL.
    end_time:       Time in seconds where the playback should end, as
                    specified in the URL.
    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.
    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "title", "description" and "id" attributes
    with the same semantics as videos (see above).
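
    For illustration, a minimal (hypothetical) playlist result:

        {
            '_type': 'playlist',
            'id': '42',
            'title': 'Some playlist',
            'entries': [...],  # list of video info dictionaries
        }
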
    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.
    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.
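
    For illustration, a hypothetical "url" result:

        {'_type': 'url',
         'url': 'https://example.com/other/video',
         'ie_key': 'Example'}
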
    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.
    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Probably, they should also be added to the list of extractors.

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    _ready = False
    _downloader = None
    _WORKING = True

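    # A minimal subclass sketch (hypothetical site and patterns, for
    # illustration only):
    #
    #     class ExampleIE(InfoExtractor):
    #         _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
    #
    #         def _real_extract(self, url):
    #             video_id = self._match_id(url)
    #             webpage = self._download_webpage(url, video_id)
    #             return {
    #                 'id': video_id,
    #                 'title': self._og_search_title(webpage),
    #                 'url': self._og_search_video_url(webpage),
    #             }
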
    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self.set_downloader(downloader)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""

        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None

    @classmethod
    def _match_id(cls, url):
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        m = cls._VALID_URL_RE.match(url)
        assert m
        return m.group('id')

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            self.initialize()
            return self._real_extract(url)
        except ExtractorError:
            raise
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        return compat_str(cls.__name__[:-2])

    @property
    def IE_NAME(self):
        return compat_str(type(self).__name__[:-2])

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
        """ Returns the response handle """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))
        try:
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'
            errmsg = '%s: %s' % (errnote, compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
        """ Returns a tuple (page content as string, URL handle) """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)

    @staticmethod
    def _guess_encoding_from_content(content_type, webpage_bytes):
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
                encoding = 'utf-16'
            else:
                encoding = 'utf-8'

        return encoding

    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen('Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            basen = '%s_%s' % (video_id, url)
            if len(basen) > 240:
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if os.name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            content = webpage_bytes.decode('utf-8', 'replace')

        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in content[:512]):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in content[:512]:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)

        return content

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
        """ Returns the data of the page as a string """
        success = False
        try_count = 0
        while success is False:
            try:
                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
                success = True
            except compat_http_client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)
        if res is False:
            return res
        else:
            content, _ = res
            return content

    def _download_xml(self, url_or_request, video_id,
                      note='Downloading XML', errnote='Unable to download XML',
                      transform_source=None, fatal=True, encoding=None):
        """Return the xml as an xml.etree.ElementTree.Element"""
        xml_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
        if xml_string is False:
            return xml_string
        if transform_source:
            xml_string = transform_source(xml_string)
        return compat_etree_fromstring(xml_string.encode('utf-8'))

    def _download_json(self, url_or_request, video_id,
                       note='Downloading JSON metadata',
                       errnote='Unable to download JSON metadata',
                       transform_source=None,
                       fatal=True, encoding=None):
        json_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal,
            encoding=encoding)
        if (not fatal) and json_string is False:
            return None
        return self._parse_json(
            json_string, video_id, transform_source=transform_source, fatal=fatal)

    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
        if transform_source:
            json_string = transform_source(json_string)
        try:
            return json.loads(json_string)
        except ValueError as ve:
            errmsg = '%s: Failed to parse JSON ' % video_id
            if fatal:
                raise ExtractorError(errmsg, cause=ve)
            else:
                self.report_warning(errmsg + str(ve))

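    # For illustration, a typical (hypothetical) call from a subclass:
    #
    #     data = self._download_json(
    #         'https://example.com/api/video/%s' % video_id, video_id,
    #         note='Downloading video metadata')
    #     title = data['title']
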
    def report_warning(self, msg, video_id=None):
        idstr = '' if video_id is None else '%s: ' % video_id
        self._downloader.report_warning(
            '[%s] %s%s' % (self.IE_NAME, idstr, msg))

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')

    @staticmethod
    def raise_login_required(msg='This video is only available for registered users'):
        raise ExtractorError(
            '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
            expected=True)

    @staticmethod
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
        raise ExtractorError(
            '%s. You might want to use --proxy to work around it.' % msg,
            expected=True)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None):
        """Returns a URL that points to a page that should be processed"""
        # TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        if video_id is not None:
            video_info['id'] = video_id
        if video_title is not None:
            video_info['title'] = video_title
        return video_info

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        if playlist_description:
            video_info['description'] = playlist_description
        return video_info

    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or a
        RegexNotFoundError, depending on fatal, specifying the field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
            _name = '\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None

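    # For illustration, a hypothetical call extracting a title:
    #
    #     title = self._search_regex(
    #         r'<h1 class="title">([^<]+)</h1>', webpage, 'title',
    #         default=None)
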
    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if res:
            return clean_html(res).strip()
        else:
            return res

    def _get_login_info(self):
        """
        Get the login info as (username, password)
        It will look in the netrc file using the _NETRC_MACHINE value
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))

        return (username, password)

    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info
        TODO - asking the user will be required for sms/phone verify
        currently just uses the command line option
        If there's no info available, return None
        """
        if self._downloader is None:
            return None
        downloader_params = self._downloader.params

        if downloader_params.get('twofactor', None) is not None:
            return downloader_params['twofactor']

        return compat_getpass('Type %s and press [Return]: ' % note)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
        property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
                       % {'prop': re.escape(prop)})
        template = r'<meta[^>]+?%s[^>]+?%s'
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]

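    # The two templates above match both attribute orders, e.g.
    # (illustrative HTML only):
    #
    #     <meta property="og:title" content="Some title">
    #     <meta content="Some title" property="og:title">
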
    @staticmethod
    def _meta_regex(prop):
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        if name is None:
            name = 'OpenGraph %s' % prop
        escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
        if escaped is None:
            return None
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, **kargs):
        return self._og_search_property('title', html, **kargs)

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _og_search_url(self, html, **kargs):
        return self._og_search_property('url', html, **kargs)

    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        if display_name is None:
            display_name = name
        return self._html_search_regex(
            self._meta_regex(name),
            html, display_name, fatal=fatal, group='content', **kwargs)

    def _dc_search_uploader(self, html):
        return self._html_search_meta('dc.creator', html, 'uploader')

    def _rta_search(self, html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r'     content="RTA-5042-1996-1400-1577-RTA"',
                     html):
            return 18
        return 0

    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)

        if not rating:
            return None

        RATING_TABLE = {
            'safe for kids': 0,
            'general': 8,
            '14 years': 14,
            'mature': 17,
            'restricted': 19,
        }
        return RATING_TABLE.get(rating.lower(), None)

    def _family_friendly_search(self, html):
        # See http://schema.org/VideoObject
        family_friendly = self._html_search_meta('isFamilyFriendly', html)

        if not family_friendly:
            return None

        RATING_TABLE = {
            '1': 0,
            'true': 0,
            '0': 18,
            'false': 18,
        }
        return RATING_TABLE.get(family_friendly.lower(), None)

    def _twitter_search_player(self, html):
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')

    @staticmethod
    def _hidden_inputs(html):
        html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
        hidden_inputs = {}
        for input in re.findall(r'(?i)<input([^>]+)>', html):
            if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
                continue
            name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
            if not name:
                continue
            value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
            if not value:
                continue
            hidden_inputs[name.group('value')] = value.group('value')
        return hidden_inputs

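    # For illustration, given the (hypothetical) HTML
    #     <input type="hidden" name="token" value="abc123">
    # _hidden_inputs returns {'token': 'abc123'}.
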
    def _form_hidden_inputs(self, form_id, html):
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)

    def _sort_formats(self, formats, field_preference=None):
        if not formats:
            raise ExtractorError('No video formats found')

        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])

            if isinstance(field_preference, (list, tuple)):
                return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)

            preference = f.get('preference')
            if preference is None:
                proto = f.get('protocol')
                if proto is None:
                    proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme

                preference = 0 if proto in ['http', 'https'] else -0.1
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5

            if f.get('vcodec') == 'none':  # audio only
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0

            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)
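        # For illustration: with two hypothetical formats
        #     {'format_id': 'low', 'tbr': 300} and {'format_id': 'hd', 'tbr': 1500}
        # the sort above places the 1500 KBit/s format last, i.e. best.
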
    def _check_formats(self, formats, video_id):
        if formats:
            formats[:] = filter(
                lambda f: self._is_valid_url(
                    f['url'], video_id,
                    item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                formats)

    def _is_valid_url(self, url, video_id, item='video'):
        url = self._proto_relative_url(url, scheme='http:')
        # For now assume non HTTP(S) URLs always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item)
            return True
        except ExtractorError as e:
            if isinstance(e.cause, compat_urllib_error.URLError):
                self.to_screen(
                    '%s: %s URL is invalid, skipping' % (video_id, item))
                return False
            raise

    def http_scheme(self):
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self._downloader.params.get('prefer_insecure', False)
            else 'https:')

    def _proto_relative_url(self, url, scheme=None):
        if url is None:
            return url
        if url.startswith('//'):
            if scheme is None:
                scheme = self.http_scheme()
            return scheme + url
        else:
            return url

    def _sleep(self, timeout, video_id, msg_template=None):
        if msg_template is None:
            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
        self.to_screen(msg)
        time.sleep(timeout)

    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True):
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal)

        if manifest is False:
            return []

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        base_url = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
            'base URL', default=None)
        if base_url:
            base_url = base_url.strip()
        for i, media_el in enumerate(media_nodes):
            if manifest_version == '2.0':
                media_url = media_el.attrib.get('href') or media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                if determine_ext(manifest_url) == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference, f4m_id, fatal=fatal)
                    if f4m_formats:
                        formats.extend(f4m_formats)
                    continue
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            formats.append({
                'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])),
                'url': manifest_url,
                'ext': 'flv',
                'tbr': tbr,
                'width': int_or_none(media_el.attrib.get('width')),
                'height': int_or_none(media_el.attrib.get('height')),
                'preference': preference,
            })
        self._sort_formats(formats)

        return formats

    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                              entry_protocol='m3u8', preference=None,
                              m3u8_id=None, note=None, errnote=None,
                              fatal=True):

        formats = [{
            'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
            'url': m3u8_url,
            'ext': ext,
            'protocol': 'm3u8',
            'preference': preference - 1 if preference else -1,
            'resolution': 'multiple',
            'format_note': 'Quality selection URL',
        }]

        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))

        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note=note or 'Downloading m3u8 information',
            errnote=errnote or 'Failed to download m3u8 information',
            fatal=fatal)
        if res is False:
            return []
        m3u8_doc, urlh = res
        m3u8_url = urlh.geturl()
        last_info = None
        last_media = None
        kv_rex = re.compile(
            r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_info = {}
                for m in kv_rex.finditer(line):
                    v = m.group('val')
                    if v.startswith('"'):
                        v = v[1:-1]
                    last_info[m.group('key')] = v
            elif line.startswith('#EXT-X-MEDIA:'):
                last_media = {}
                for m in kv_rex.finditer(line):
                    v = m.group('val')
                    if v.startswith('"'):
                        v = v[1:-1]
                    last_media[m.group('key')] = v
            elif line.startswith('#') or not line.strip():
                continue
            else:
                if last_info is None:
                    formats.append({'url': format_url(line)})
                    continue
                tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None
                format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(line.strip()),
                    'tbr': tbr,
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                codecs = last_info.get('CODECS')
                if codecs:
                    # TODO: the video codec does not always necessarily go first
                    va_codecs = codecs.split(',')
                    if va_codecs[0]:
                        f['vcodec'] = va_codecs[0].partition('.')[0]
                    if len(va_codecs) > 1 and va_codecs[1]:
                        f['acodec'] = va_codecs[1].partition('.')[0]
                resolution = last_info.get('RESOLUTION')
                if resolution:
                    width_str, height_str = resolution.split('x')
                    f['width'] = int(width_str)
                    f['height'] = int(height_str)
                if last_media is not None:
                    f['m3u8_media'] = last_media
                    last_media = None
                formats.append(f)
        self._sort_formats(formats)
        return formats

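    # For illustration, a variant playlist line pair handled above
    # (hypothetical values):
    #
    #     #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=1280x720,CODECS="avc1.4d401f,mp4a.40.2"
    #     http://example.com/hi.m3u8
    #
    # yields tbr=1280, width=1280, height=720, vcodec='avc1', acodec='mp4a'.
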
    @staticmethod
    def _xpath_ns(path, namespace=None):
        if not namespace:
            return path
        out = []
        for c in path.split('/'):
            if not c or c == '.':
                out.append(c)
            else:
                out.append('{%s}%s' % (namespace, c))
        return '/'.join(out)

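    # For illustration (hypothetical namespace):
    #     _xpath_ns('./head/meta', 'http://www.w3.org/ns/SMIL')
    # returns './{http://www.w3.org/ns/SMIL}head/{http://www.w3.org/ns/SMIL}meta'
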
    def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)

        if smil is False:
            assert not fatal
            return []

        namespace = self._parse_smil_namespace(smil)

        return self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)

    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)
        if smil is False:
            return {}
        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

    def _download_smil(self, smil_url, video_id, fatal=True):
        return self._download_xml(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal)

    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)

        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _parse_smil_namespace(self, smil):
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0

        videos = smil.findall(self._xpath_ns('.//video', namespace))
        for video in videos:
            src = video.get('src')
            if not src:
                continue
            bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
            filesize = int_or_none(video.get('size') or video.get('fileSize'))
            width = int_or_none(video.get('width'))
            height = int_or_none(video.get('height'))
            proto = video.get('proto')
            ext = video.get('ext')
            src_ext = determine_ext(src)
            streamer = video.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                if m3u8_formats:
                    formats.extend(m3u8_formats)
                continue

            if src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse.urlencode(f4m_params)
                f4m_formats = self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)
                if f4m_formats:
                    formats.extend(f4m_formats)
                continue

            if src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                continue

        self._sort_formats(formats)

        return formats

    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src:
                continue
            ext = textstream.get('ext') or determine_ext(src)
            if not ext:
                type_ = textstream.get('type')
                SUBTITLES_TYPES = {
                    'text/vtt': 'vtt',
                    'text/srt': 'srt',
                    'application/smptett+xml': 'tt',
                }
                if type_ in SUBTITLES_TYPES:
                    ext = SUBTITLES_TYPES[type_]
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles

    def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
        xspf = self._download_xml(
            playlist_url, playlist_id, 'Downloading xspf playlist',
            'Unable to download xspf manifest', fatal=fatal)
        if xspf is False:
            return []
        return self._parse_xspf(xspf, playlist_id)

    def _parse_xspf(self, playlist, playlist_id):
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }

        entries = []
        for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

            formats = [{
                'url': location.text,
                'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
            } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
            self._sort_formats(formats)

            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries

    def _live_title(self, name):
        """ Generate the title for a live video """
        now = datetime.datetime.now()
        now_str = now.strftime("%Y-%m-%d %H:%M")
        return name + ' ' + now_str

    def _int(self, v, name, fatal=False, **kwargs):
        res = int_or_none(v, **kwargs)
        if 'get_attr' in kwargs:
            print(getattr(v, kwargs['get_attr']))
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _float(self, v, name, fatal=False, **kwargs):
        res = float_or_none(v, **kwargs)
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _set_cookie(self, domain, name, value, expire_time=None):
        cookie = compat_cookiejar.Cookie(
            0, name, value, None, None, domain, None,
            None, '/', True, False, expire_time, '', None, None, None)
        self._downloader.cookiejar.set_cookie(cookie)

    def _get_cookies(self, url):
        """ Return a compat_cookies.SimpleCookie with the cookies for the url """
        req = sanitized_Request(url)
        self._downloader.cookiejar.add_cookie_header(req)
        return compat_cookies.SimpleCookie(req.get_header('Cookie'))

    def get_testcases(self, include_onlymatching=False):
        t = getattr(self, '_TEST', None)
        if t:
            assert not hasattr(self, '_TESTS'), \
                '%s has _TEST and _TESTS' % type(self).__name__
            tests = [t]
        else:
            tests = getattr(self, '_TESTS', [])
        for t in tests:
            if not include_onlymatching and t.get('only_matching', False):
                continue
            t['name'] = type(self).__name__[:-len('IE')]
            yield t

    def is_suitable(self, age_limit):
        """ Test whether the extractor is generally suitable for the given
        age limit (i.e. pornographic sites are not, all others usually are) """

        any_restricted = False
        for tc in self.get_testcases(include_onlymatching=False):
            if 'playlist' in tc:
                tc = tc['playlist'][0]
            is_restricted = age_restricted(
                tc.get('info_dict', {}).get('age_limit'), age_limit)
            if not is_restricted:
                return True
            any_restricted = any_restricted or is_restricted
        return not any_restricted

    def extract_subtitles(self, *args, **kwargs):
        if (self._downloader.params.get('writesubtitles', False) or
                self._downloader.params.get('listsubtitles')):
            return self._get_subtitles(*args, **kwargs)
        return {}

    def _get_subtitles(self, *args, **kwargs):
        raise NotImplementedError("This method must be implemented by subclasses")

    @staticmethod
    def _merge_subtitle_items(subtitle_list1, subtitle_list2):
        """ Merge subtitle items for one language. Items with duplicated URLs
        will be dropped. """
        list1_urls = set([item['url'] for item in subtitle_list1])
        ret = list(subtitle_list1)
        ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
        return ret

    @classmethod
    def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
        """ Merge two subtitle dictionaries, language by language. """
        ret = dict(subtitle_dict1)
        for lang in subtitle_dict2:
            ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
        return ret

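    # For illustration (hypothetical values): merging
    #     {'en': [{'url': 'a.vtt', 'ext': 'vtt'}]}
    # with
    #     {'en': [{'url': 'a.vtt', 'ext': 'vtt'}, {'url': 'b.srt', 'ext': 'srt'}]}
    # keeps a single 'a.vtt' item and adds 'b.srt'.
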
    def extract_automatic_captions(self, *args, **kwargs):
        if (self._downloader.params.get('writeautomaticsub', False) or
                self._downloader.params.get('listsubtitles')):
            return self._get_automatic_captions(*args, **kwargs)
        return {}

    def _get_automatic_captions(self, *args, **kwargs):
        raise NotImplementedError("This method must be implemented by subclasses")

class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

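    # For illustration, with a hypothetical _SEARCH_KEY = 'examplesearch':
    #     'examplesearch:foo'    -> 1 result for "foo"
    #     'examplesearch5:foo'   -> 5 results
    #     'examplesearchall:foo' -> up to _MAX_RESULTS results
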
    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError("This method must be implemented by subclasses")

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY