1 from __future__ 
import unicode_literals
 
  13 import xml
.etree
.ElementTree
 
  15 from ..compat 
import ( 
  20     compat_urllib_parse_urlparse
, 
  41 class InfoExtractor(object): 
  42     """Information Extractor class. 
  44     Information extractors are the classes that, given a URL, extract 
  45     information about the video (or videos) the URL refers to. This 
  46     information includes the real video URL, the video title, author and 
  47     others. The information is stored in a dictionary which is then 
  48     passed to the YoutubeDL. The YoutubeDL processes this 
  49     information possibly downloading the video to the file system, among 
  50     other possible outcomes. 
  52     The type field determines the type of the result. 
  53     By far the most common value (and the default if _type is missing) is 
  54     "video", which indicates a single video. 
  56     For a video, the dictionaries must include the following fields: 
  59     title:          Video title, unescaped. 
  61     Additionally, it must contain either a formats entry or a url one: 
  63     formats:        A list of dictionaries for each format available, ordered 
  64                     from worst to best quality. 
  67                     * url        Mandatory. The URL of the video file 
  68                     * ext        Will be calculated from url if missing 
  69                     * format     A human-readable description of the format 
  70                                  ("mp4 container with h264/opus"). 
71                                  Calculated from the format_id, width, height,
  72                                  and format_note fields if missing. 
  73                     * format_id  A short description of the format 
  74                                  ("mp4_h264_opus" or "19"). 
  75                                 Technically optional, but strongly recommended. 
  76                     * format_note Additional info about the format 
  77                                  ("3D" or "DASH video") 
  78                     * width      Width of the video, if known 
  79                     * height     Height of the video, if known 
  80                     * resolution Textual description of width and height 
  81                     * tbr        Average bitrate of audio and video in KBit/s 
  82                     * abr        Average audio bitrate in KBit/s 
  83                     * acodec     Name of the audio codec in use 
  84                     * asr        Audio sampling rate in Hertz 
  85                     * vbr        Average video bitrate in KBit/s 
  87                     * vcodec     Name of the video codec in use 
  88                     * container  Name of the container format 
  89                     * filesize   The number of bytes, if known in advance 
  90                     * filesize_approx  An estimate for the number of bytes 
  91                     * player_url SWF Player URL (used for rtmpdump). 
  92                     * protocol   The protocol that will be used for the actual 
  94                                  "http", "https", "rtsp", "rtmp", "rtmpe", 
  95                                  "m3u8", or "m3u8_native". 
  96                     * preference Order number of this format. If this field is 
  97                                  present and not None, the formats get sorted 
  98                                  by this field, regardless of all other values. 
  99                                  -1 for default (order by other properties), 
 100                                  -2 or smaller for less than default. 
 101                                  < -1000 to hide the format (if there is 
 102                                     another one which is strictly better) 
 103                     * language_preference  Is this in the correct requested 
 105                                  10 if it's what the URL is about, 
 106                                  -1 for default (don't know), 
 107                                  -10 otherwise, other values reserved for now. 
 108                     * quality    Order number of the video quality of this 
 109                                  format, irrespective of the file format. 
 110                                  -1 for default (order by other properties), 
 111                                  -2 or smaller for less than default. 
 112                     * source_preference  Order number for this video source 
 113                                   (quality takes higher priority) 
 114                                  -1 for default (order by other properties), 
 115                                  -2 or smaller for less than default. 
 116                     * http_headers  A dictionary of additional HTTP headers 
 117                                  to add to the request. 
 118                     * stretched_ratio  If given and not 1, indicates that the 
 119                                  video's pixels are not square. 
 120                                  width : height ratio as float. 
 121                     * no_resume  The server does not support resuming the 
 122                                  (HTTP or RTMP) download. Boolean. 
 124     url:            Final video URL. 
 125     ext:            Video filename extension. 
 126     format:         The video format, defaults to ext (used for --get-format) 
 127     player_url:     SWF Player URL (used for rtmpdump). 
 129     The following fields are optional: 
 131     alt_title:      A secondary title of the video. 
 132     display_id      An alternative identifier for the video, not necessarily 
 133                     unique, but available before title. Typically, id is 
 134                     something like "4234987", title "Dancing naked mole rats", 
 135                     and display_id "dancing-naked-mole-rats" 
 136     thumbnails:     A list of dictionaries, with the following entries: 
 137                         * "id" (optional, string) - Thumbnail format ID 
 139                         * "preference" (optional, int) - quality of the image 
 140                         * "width" (optional, int) 
 141                         * "height" (optional, int) 
142                         * "resolution" (optional, string "{width}x{height}",
 144     thumbnail:      Full URL to a video thumbnail image. 
 145     description:    Full video description. 
 146     uploader:       Full name of the video uploader. 
 147     creator:        The main artist who created the video. 
 148     timestamp:      UNIX timestamp of the moment the video became available. 
 149     upload_date:    Video upload date (YYYYMMDD). 
 150                     If not explicitly set, calculated from timestamp. 
 151     uploader_id:    Nickname or id of the video uploader. 
 152     location:       Physical location where the video was filmed. 
 153     subtitles:      The available subtitles as a dictionary in the format 
 154                     {language: subformats}. "subformats" is a list sorted from 
 155                     lower to higher preference, each element is a dictionary 
 156                     with the "ext" entry and one of: 
 157                         * "data": The subtitles file contents 
 158                         * "url": A url pointing to the subtitles file 
 159     automatic_captions: Like 'subtitles', used by the YoutubeIE for 
 160                     automatically generated captions 
 161     duration:       Length of the video in seconds, as an integer. 
 162     view_count:     How many users have watched the video on the platform. 
 163     like_count:     Number of positive ratings of the video 
 164     dislike_count:  Number of negative ratings of the video 
165     average_rating: Average rating given by users, the scale used depends on the webpage
 166     comment_count:  Number of comments on the video 
 167     comments:       A list of comments, each with one or more of the following 
 168                     properties (all but one of text or html optional): 
 169                         * "author" - human-readable name of the comment author 
 170                         * "author_id" - user ID of the comment author 
 172                         * "html" - Comment as HTML 
 173                         * "text" - Plain text of the comment 
 174                         * "timestamp" - UNIX timestamp of comment 
 175                         * "parent" - ID of the comment this one is replying to. 
 176                                      Set to "root" to indicate that this is a 
 177                                      comment to the original video. 
 178     age_limit:      Age restriction for the video, as an integer (years) 
 179     webpage_url:    The url to the video webpage, if given to youtube-dl it 
 180                     should allow to get the same result again. (It will be set 
 181                     by YoutubeDL if it's missing) 
 182     categories:     A list of categories that the video falls in, for example 
 184     is_live:        True, False, or None (=unknown). Whether this video is a 
 185                     live stream that goes on instead of a fixed-length video. 
 187     Unless mentioned otherwise, the fields should be Unicode strings. 
 189     Unless mentioned otherwise, None is equivalent to absence of information. 
 192     _type "playlist" indicates multiple videos. 
 193     There must be a key "entries", which is a list, an iterable, or a PagedList 
 194     object, each element of which is a valid dictionary by this specification. 
 196     Additionally, playlists can have "title" and "id" attributes with the same 
 197     semantics as videos (see above). 
 200     _type "multi_video" indicates that there are multiple videos that 
201     form a single show, for example multiple acts of an opera or TV episode.
 202     It must have an entries key like a playlist and contain all the keys 
 203     required for a video at the same time. 
 206     _type "url" indicates that the video must be extracted from another 
 207     location, possibly by a different extractor. Its only required key is: 
 208     "url" - the next URL to extract. 
 209     The key "ie_key" can be set to the class name (minus the trailing "IE", 
 210     e.g. "Youtube") if the extractor class is known in advance. 
 211     Additionally, the dictionary may have any properties of the resolved entity 
 212     known in advance, for example "title" if the title of the referred video is 
 216     _type "url_transparent" entities have the same specification as "url", but 
 217     indicate that the given additional information is more precise than the one 
 218     associated with the resolved URL. 
 219     This is useful when a site employs a video service that hosts the video and 
 220     its technical metadata, but that video service does not embed a useful 
 221     title, description etc. 
 224     Subclasses of this one should re-define the _real_initialize() and 
 225     _real_extract() methods and define a _VALID_URL regexp. 
 226     Probably, they should also be added to the list of extractors. 
 228     Finally, the _WORKING attribute should be set to False for broken IEs 
 229     in order to warn the users and skip the tests. 
    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader (a YoutubeDL instance)."""
        # NOTE(review): source appears truncated here; the original likely
        # initialized a readiness flag before this call -- confirm in full file.
        self.set_downloader(downloader)
 242     def suitable(cls
, url
): 
 243         """Receives a URL and returns True if suitable for this IE.""" 
 245         # This does not use has/getattr intentionally - we want to know whether 
 246         # we have cached the regexp for *this* class, whereas getattr would also 
 247         # match the superclass 
 248         if '_VALID_URL_RE' not in cls
.__dict
__: 
 249             cls
._VALID
_URL
_RE 
= re
.compile(cls
._VALID
_URL
) 
 250         return cls
._VALID
_URL
_RE
.match(url
) is not None 
 253     def _match_id(cls
, url
): 
 254         if '_VALID_URL_RE' not in cls
.__dict
__: 
 255             cls
._VALID
_URL
_RE 
= re
.compile(cls
._VALID
_URL
) 
 256         m 
= cls
._VALID
_URL
_RE
.match(url
) 
 262         """Getter method for _WORKING.""" 
    def initialize(self):
        """Initializes an instance (authentication, etc).

        Delegates to _real_initialize(), which subclasses override.
        """
            # NOTE(review): guard line missing in this excerpt (presumably a
            # 'run once' check) -- confirm against the full file.
            self._real_initialize()
 271     def extract(self
, url
): 
 272         """Extracts URL information and returns it in list of dicts.""" 
 275             return self
._real
_extract
(url
) 
 276         except ExtractorError
: 
 278         except compat_http_client
.IncompleteRead 
as e
: 
 279             raise ExtractorError('A network error has occured.', cause
=e
, expected
=True) 
 280         except (KeyError, StopIteration) as e
: 
 281             raise ExtractorError('An extractor error has occured.', cause
=e
) 
 283     def set_downloader(self
, downloader
): 
 284         """Sets the downloader for this IE.""" 
 285         self
._downloader 
= downloader
 
    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses.

        Called once per instance via initialize(); base implementation is a
        no-op stub (body not visible in this excerpt).
        """
    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses.

        Must return an info dict (or playlist/url result) as described in the
        class docstring; base implementation is a stub.
        """
        # ie_key(): derives the key from the class name by stripping the
        # trailing 'IE' suffix (e.g. 'YoutubeIE' -> 'Youtube').
        """A string for getting the InfoExtractor with get_info_extractor"""
        return cls.__name__[:-2]
        # IE_NAME: same derivation, computed from the instance's type.
        # NOTE(review): the enclosing def/property lines are missing from
        # this excerpt.
        return type(self).__name__[:-2]
    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
        """ Returns the response handle """
        # note semantics: None -> default "Downloading webpage" message,
        # False -> silent, anything else -> printed (prefixed with video_id
        # when available). NOTE(review): several if/try lines are missing
        # from this excerpt.
            self.report_download_webpage(video_id)
        elif note is not False:
                self.to_screen('%s' % (note,))
                self.to_screen('%s: %s' % (video_id, note))
            # The actual HTTP round-trip is delegated to the downloader.
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                errnote = 'Unable to download webpage'
            errmsg = '%s: %s' % (errnote, compat_str(err))
                # Fatal path: re-raise with the original traceback attached.
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
                # Non-fatal path: just warn and fall through.
                self._downloader.report_warning(errmsg)
    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
        """ Returns a tuple (page content as string, URL handle) """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]
        # Fetch the response handle, then decode its body separately so
        # callers can also access headers/final URL via urlh.
        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)
    def _guess_encoding_from_content(content_type, webpage_bytes):
        # Guess the text encoding: first from the Content-Type header's
        # charset parameter, then from a <meta charset=...> tag in the first
        # 1024 bytes, then from a BOM. NOTE(review): the if/else scaffolding
        # and the tail of this method are missing from this excerpt.
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
            encoding = m.group(1)
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        # Read the response body, optionally dump/save it for debugging,
        # decode it to text, and reject pages blocked by known filters.
        # NOTE(review): several try/if lines are missing from this excerpt.
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
            # No explicit encoding given: sniff it from headers/markup/BOM.
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
                url = url_or_request.get_full_url()
            except AttributeError:
            self.to_screen('Dumping request to ' + url)
            # base64 keeps binary-safe output on the console.
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
                url = url_or_request.get_full_url()
            except AttributeError:
            basen = '%s_%s' % (video_id, url)
                # Over-long names: truncate and append an md5 to keep them unique.
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)
            content = webpage_bytes.decode(encoding, 'replace')
            # Fallback decode when the guessed encoding fails.
            content = webpage_bytes.decode('utf-8', 'replace')
        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in content[:512]):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in content[:512]:
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
        """ Returns the data of the page as a string """
        # Retry loop: on IncompleteRead, retry up to *tries* times, sleeping
        # *timeout* seconds between attempts. NOTE(review): loop setup and
        # success handling lines are missing from this excerpt.
        while success is False:
                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
            except compat_http_client.IncompleteRead as e:
                if try_count >= tries:
                self._sleep(timeout, video_id)
    def _download_xml(self, url_or_request, video_id,
                      note='Downloading XML', errnote='Unable to download XML',
                      transform_source=None, fatal=True, encoding=None):
        """Return the xml as an xml.etree.ElementTree.Element"""
        xml_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
        # _download_webpage returns False on a non-fatal failure.
        if xml_string is False:
            # Optional caller-supplied fixup (e.g. repairing broken markup).
            xml_string = transform_source(xml_string)
        return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
    def _download_json(self, url_or_request, video_id,
                       note='Downloading JSON metadata',
                       errnote='Unable to download JSON metadata',
                       transform_source=None,
                       fatal=True, encoding=None):
        # Download a page and parse it as JSON; parsing is delegated to
        # _parse_json so fatal/transform semantics match other helpers.
        json_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal,
        if (not fatal) and json_string is False:
        return self._parse_json(
            json_string, video_id, transform_source=transform_source, fatal=fatal)
 464     def _parse_json(self, json_string, video_id, transform_source=None, fatal=True): 
 466             json_string = transform_source(json_string) 
 468             return json.loads(json_string) 
 469         except ValueError as ve: 
 470             errmsg = '%s: Failed to parse JSON 
' % video_id 
 472                 raise ExtractorError(errmsg, cause=ve) 
 474                 self.report_warning(errmsg + str(ve)) 
 476     def report_warning(self, msg, video_id=None): 
 477         idstr = '' if video_id is None else '%s: ' % video_id 
 478         self._downloader.report_warning( 
 479             '[%s] %s%s' % (self.IE_NAME, idstr, msg)) 
 481     def to_screen(self, msg): 
 482         """Print msg to screen, prefixing it with '[ie_name
]'""" 
 483         self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg)) 
 485     def report_extraction(self, id_or_name): 
 486         """Report information extraction.""" 
 487         self.to_screen('%s: Extracting information
' % id_or_name) 
 489     def report_download_webpage(self, video_id): 
 490         """Report webpage download.""" 
 491         self.to_screen('%s: Downloading webpage
' % video_id) 
 493     def report_age_confirmation(self): 
 494         """Report attempt to confirm age.""" 
 495         self.to_screen('Confirming age
') 
 497     def report_login(self): 
 498         """Report attempt to log in.""" 
 499         self.to_screen('Logging 
in') 
 501     # Methods for following #608 
    def url_result(url, ie=None, video_id=None, video_title=None):
        """Returns a url that points to a page that should be processed"""
        # TODO: ie should be the class used for getting the info
        # Builds a '_type': 'url' info dict; optional id/title are attached
        # when known. NOTE(review): the dict literal's remaining entries and
        # the return statement are missing from this excerpt.
        video_info = {'_type': 'url',
        if video_id is not None:
            video_info['id'] = video_id
        if video_title is not None:
            video_info['title'] = video_title
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
        """Returns a playlist"""
        # Builds a '_type': 'playlist' info dict; id/title/description are
        # attached only when truthy. NOTE(review): the entries assignment and
        # the return statement are missing from this excerpt.
        video_info = {'_type': 'playlist',
            video_info['id'] = playlist_id
            video_info['title'] = playlist_title
        if playlist_description:
            video_info['description'] = playlist_description
    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or a
        RegexNotFoundError, depending on fatal, specifying the field name.
        # Single pattern vs. list of patterns: the first match wins.
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
                mobj = re.search(p, string, flags)
        # Colorize the field name when printing to a tty (unless disabled).
        if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
            _name = '\033[0;34m%s\033[0m' % name
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
                # Named/indexed group explicitly requested by the caller.
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            raise RegexNotFoundError('Unable to extract %s' % _name)
            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        Like _search_regex, but strips HTML tags and unescapes entities.
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
            # Clean only when a match was found (guard line missing in excerpt).
            return clean_html(res).strip()
    def _get_login_info(self):
        Get the login info as (username, password)
        It will look in the netrc file using the _NETRC_MACHINE value
        If there's no info available, return (None, None)
        if self._downloader is None:
        downloader_params = self._downloader.params
        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                    # No entry for this machine in ~/.netrc.
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                # Best-effort: a broken .netrc only warns, never aborts.
                self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
        return (username, password)
    def _get_tfa_info(self):
        Get the two-factor authentication info
        TODO - asking the user will be required for sms/phone verify
        currently just uses the command line option
        If there's no info available, return None
        if self._downloader is None:
        downloader_params = self._downloader.params
        # The --twofactor command-line value, when supplied.
        if downloader_params.get('twofactor', None) is not None:
            return downloader_params['twofactor']
 618     # Helper functions for extracting OpenGraph info 
    def _og_regexes(prop):
        # Build regexes matching an OpenGraph <meta> tag for *prop*, covering
        # both attribute orders (property-then-content and content-then-property).
        # NOTE(review): the surrounding 'return [' line is missing from this
        # excerpt.
        content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')'
        property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
        template = r'<meta[^>]+?%s[^>]+?%s'
            template % (property_re, content_re),
            template % (content_re, property_re),
    def _og_search_property(self, prop, html, name=None, **kargs):
            # Default display name when the caller gave none.
            name = 'OpenGraph %s' % prop
        escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
        # Content attribute values are HTML-escaped in the page source.
        return unescapeHTML(escaped)
 637     def _og_search_thumbnail(self, html, **kargs): 
 638         return self._og_search_property('image
', html, 'thumbnail url
', fatal=False, **kargs) 
 640     def _og_search_description(self, html, **kargs): 
 641         return self._og_search_property('description
', html, fatal=False, **kargs) 
 643     def _og_search_title(self, html, **kargs): 
 644         return self._og_search_property('title
', html, **kargs) 
    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        # Try both og:video and og:video:url; when *secure* is set, the
        # og:video:secure_url variants are tried first (guard line missing
        # from this excerpt).
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)
 652     def _og_search_url(self, html, **kargs): 
 653         return self._og_search_property('url
', html, **kargs) 
    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        if display_name is None:
        # Match a <meta> tag by itemprop/name/property and capture its
        # content attribute. NOTE(review): the pattern's opening lines are
        # missing from this excerpt.
        return self._html_search_regex(
                    (?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(name),
            html, display_name, fatal=fatal, group='content', **kwargs)
 664     def _dc_search_uploader(self, html): 
 665         return self._html_search_meta('dc
.creator
', html, 'uploader
') 
    def _rta_search(self, html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        # Detect the RTA (adult content) label meta tag.
        # NOTE(review): the branch bodies and return values are missing from
        # this excerpt.
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r'     content="RTA-5042-1996-1400-1577-RTA"',
    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)
        # NOTE(review): the RATING_TABLE mapping definition is missing from
        # this excerpt; lookup is case-insensitive, None when unknown.
        return RATING_TABLE.get(rating.lower(), None)
    def _family_friendly_search(self, html):
        # See http://schema.org/VideoObject
        family_friendly = self._html_search_meta('isFamilyFriendly', html)
        if not family_friendly:
        # NOTE(review): the RATING_TABLE mapping definition is missing from
        # this excerpt; lookup is case-insensitive, None when unknown.
        return RATING_TABLE.get(family_friendly.lower(), None)
 706     def _twitter_search_player(self, html): 
 707         return self._html_search_meta('twitter
:player
', html, 
 708                                       'twitter card player
') 
    def _hidden_inputs(html):
            # Collect (name, value) pairs from <input type="hidden"> tags.
            # NOTE(review): the enclosing return/dict() and the raw-string
            # pattern opening are missing from this excerpt.
            (input.group('name'), input.group('value')) for input in re.finditer(
                        type=(?P<q_hidden>["\'])hidden(?P=q_hidden)\s+
                        name=(?P<q_name>["\'])(?P<name>.+?)(?P=q_name)\s+
                        (?:id=(?P<q_id>["\']).+?(?P=q_id)\s+)?
                        value=(?P<q_value>["\'])(?P<value>.*?)(?P=q_value)
 723     def _form_hidden_inputs(self, form_id, html): 
 724         form = self._search_regex( 
 725             r'(?s
)<form
[^
>]+?
id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id, 
 726             html, '%s form' % form_id, group='form') 
 727         return self._hidden_inputs(form) 
    def _sort_formats(self, formats, field_preference=None):
        # Sort *formats* in place from worst to best. The key function builds
        # a tuple of preferences (protocol, codec-derived, bitrates, sizes...)
        # so formats compare field by field. NOTE(review): several control-flow
        # lines of the nested key function are missing from this excerpt.
            raise ExtractorError('No video formats found')
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])
            # Caller-supplied field list overrides the default ordering.
            if isinstance(field_preference, (list, tuple)):
                return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)
            preference = f.get('preference')
            if preference is None:
                proto = f.get('protocol')
                    proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
                # Plain HTTP(S) is preferred over other protocols.
                preference = 0 if proto in ['http', 'https'] else -0.1
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
            if f.get('vcodec') == 'none':  # audio only
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                    audio_ext_preference = ORDER.index(f['ext'])
                    audio_ext_preference = -1
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                    ORDER = ['webm', 'flv', 'mp4']
                    ext_preference = ORDER.index(f['ext'])
                audio_ext_preference = 0
                # Tuple components below: missing values default to -1 ('' for
                # format_id) so comparisons never hit None.
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
        formats.sort(key=_formats_key)
 792     def _check_formats(self, formats, video_id): 
 795                 lambda f: self._is_valid_url( 
 797                     item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'), 
 800     def _is_valid_url(self, url, video_id, item='video'): 
 801         url = self._proto_relative_url(url, scheme='http:') 
 802         # For now assume non HTTP(S) URLs always valid 
 803         if not (url.startswith('http://') or url.startswith('https://')): 
 806             self._request_webpage(url, video_id, 'Checking %s URL' % item) 
 808         except ExtractorError as e: 
 809             if isinstance(e.cause, compat_HTTPError): 
 811                     '%s: %s URL is invalid, skipping' % (video_id, item)) 
 815     def http_scheme(self): 
 816         """ Either "http
:" or "https
:", depending on the user's preferences """ 
 819             if self._downloader.params.get('prefer_insecure', False) 
 822     def _proto_relative_url(self, url, scheme=None): 
 825         if url.startswith('//'): 
 827                 scheme = self.http_scheme() 
 832     def _sleep(self, timeout, video_id, msg_template=None): 
 833         if msg_template is None: 
 834             msg_template = '%(video_id)s: Waiting for %(timeout)s seconds' 
 835         msg = msg_template % {'video_id': video_id, 'timeout': timeout} 
 839     def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None, 
 840                              transform_source=lambda s: fix_xml_ampersands(s).strip()): 
 841         manifest = self._download_xml( 
 842             manifest_url, video_id, 'Downloading f4m manifest', 
 843             'Unable to download f4m manifest', 
 844             # Some manifests may be malformed, e.g. prosiebensat1 generated manifests 
 845             # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244) 
 846             transform_source=transform_source) 
 849         manifest_version = '1.0' 
 850         media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media') 
 852             manifest_version = '2.0' 
 853             media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media') 
 854         for i, media_el in enumerate(media_nodes): 
 855             if manifest_version == '2.0': 
 856                 media_url = media_el.attrib.get('href') or media_el.attrib.get('url') 
 860                     media_url if media_url.startswith('http://') or media_url.startswith('https://') 
 861                     else ('/'.join(manifest_url.split('/')[:-1]) + '/' + media_url)) 
 862                 # If media_url is itself a f4m manifest do the recursive extraction 
 863                 # since bitrates in parent manifest (this one) and media_url manifest 
 864                 # may differ leading to inability to resolve the format by requested 
 865                 # bitrate in f4m downloader 
 866                 if determine_ext(manifest_url) == 'f4m': 
 867                     formats.extend(self._extract_f4m_formats(manifest_url, video_id, preference, f4m_id)) 
 869             tbr = int_or_none(media_el.attrib.get('bitrate')) 
 871                 'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])), 
 875                 'width': int_or_none(media_el.attrib.get('width')), 
 876                 'height': int_or_none(media_el.attrib.get('height')), 
 877                 'preference': preference, 
 879         self._sort_formats(formats) 
 883     def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None, 
 884                               entry_protocol='m3u8', preference=None, 
 885                               m3u8_id=None, note=None, errnote=None, 
 889             'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])), 
 893             'preference': preference - 1 if preference else -1, 
 894             'resolution': 'multiple', 
 895             'format_note': 'Quality selection URL', 
 898         format_url = lambda u: ( 
 900             if re.match(r'^https?://', u) 
 901             else compat_urlparse.urljoin(m3u8_url, u)) 
 903         m3u8_doc = self._download_webpage( 
 905             note=note or 'Downloading m3u8 information', 
 906             errnote=errnote or 'Failed to download m3u8 information', 
 908         if m3u8_doc is False: 
 913             r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^
"]+"|
[^
",]+)(?:,|$)') 
 914         for line in m3u8_doc.splitlines(): 
 915             if line.startswith('#EXT-X-STREAM-INF:'): 
 917                 for m in kv_rex.finditer(line): 
 919                     if v.startswith('"'): 
 921                     last_info[m.group('key
')] = v 
 922             elif line.startswith('#EXT-X-MEDIA:'): 
 924                 for m 
in kv_rex
.finditer(line
): 
 926                     if v
.startswith('"'): 
 928                     last_media
[m
.group('key')] = v
 
 929             elif line
.startswith('#') or not line
.strip(): 
 932                 if last_info 
is None: 
 933                     formats
.append({'url': format_url(line
)}) 
 935                 tbr 
= int_or_none(last_info
.get('BANDWIDTH'), scale
=1000) 
 938                     format_id
.append(m3u8_id
) 
 939                 last_media_name 
= last_media
.get('NAME') if last_media 
and last_media
.get('TYPE') != 'SUBTITLES' else None 
 940                 format_id
.append(last_media_name 
if last_media_name 
else '%d' % (tbr 
if tbr 
else len(formats
))) 
 942                     'format_id': '-'.join(format_id
), 
 943                     'url': format_url(line
.strip()), 
 946                     'protocol': entry_protocol
, 
 947                     'preference': preference
, 
 949                 codecs 
= last_info
.get('CODECS') 
 951                     # TODO: looks like video codec is not always necessarily goes first 
 952                     va_codecs 
= codecs
.split(',') 
 954                         f
['vcodec'] = va_codecs
[0].partition('.')[0] 
 955                     if len(va_codecs
) > 1 and va_codecs
[1]: 
 956                         f
['acodec'] = va_codecs
[1].partition('.')[0] 
 957                 resolution 
= last_info
.get('RESOLUTION') 
 959                     width_str
, height_str 
= resolution
.split('x') 
 960                     f
['width'] = int(width_str
) 
 961                     f
['height'] = int(height_str
) 
 962                 if last_media 
is not None: 
 963                     f
['m3u8_media'] = last_media
 
 967         self
._sort
_formats
(formats
) 
 970     # TODO: improve extraction 
 971     def _extract_smil_formats(self
, smil_url
, video_id
, fatal
=True): 
 972         smil 
= self
._download
_xml
( 
 973             smil_url
, video_id
, 'Downloading SMIL file', 
 974             'Unable to download SMIL file', fatal
=fatal
) 
 979         base 
= smil
.find('./head/meta').get('base') 
 983         if smil
.findall('./body/seq/video'): 
 984             video 
= smil
.findall('./body/seq/video')[0] 
 985             fmts
, rtmp_count 
= self
._parse
_smil
_video
(video
, video_id
, base
, rtmp_count
) 
 988             for video 
in smil
.findall('./body/switch/video'): 
 989                 fmts
, rtmp_count 
= self
._parse
_smil
_video
(video
, video_id
, base
, rtmp_count
) 
 992         self
._sort
_formats
(formats
) 
 996     def _parse_smil_video(self
, video
, video_id
, base
, rtmp_count
): 
 997         src 
= video
.get('src') 
 999             return [], rtmp_count
 
1000         bitrate 
= int_or_none(video
.get('system-bitrate') or video
.get('systemBitrate'), 1000) 
1001         width 
= int_or_none(video
.get('width')) 
1002         height 
= int_or_none(video
.get('height')) 
1003         proto 
= video
.get('proto') 
1006                 if base
.startswith('rtmp'): 
1008                 elif base
.startswith('http'): 
1010         ext 
= video
.get('ext') 
1012             return self
._extract
_m
3u8_formats
(src
, video_id
, ext
), rtmp_count
 
1013         elif proto 
== 'rtmp': 
1015             streamer 
= video
.get('streamer') or base
 
1020                 'format_id': 'rtmp-%d' % (rtmp_count 
if bitrate 
is None else bitrate
), 
1025         elif proto
.startswith('http'): 
1028                 'ext': ext 
or 'flv', 
1034     def _live_title(self
, name
): 
1035         """ Generate the title for a live video """ 
1036         now 
= datetime
.datetime
.now() 
1037         now_str 
= now
.strftime("%Y-%m-%d %H:%M") 
1038         return name 
+ ' ' + now_str
 
1040     def _int(self
, v
, name
, fatal
=False, **kwargs
): 
1041         res 
= int_or_none(v
, **kwargs
) 
1042         if 'get_attr' in kwargs
: 
1043             print(getattr(v
, kwargs
['get_attr'])) 
1045             msg 
= 'Failed to extract %s: Could not parse value %r' % (name
, v
) 
1047                 raise ExtractorError(msg
) 
1049                 self
._downloader
.report_warning(msg
) 
1052     def _float(self
, v
, name
, fatal
=False, **kwargs
): 
1053         res 
= float_or_none(v
, **kwargs
) 
1055             msg 
= 'Failed to extract %s: Could not parse value %r' % (name
, v
) 
1057                 raise ExtractorError(msg
) 
1059                 self
._downloader
.report_warning(msg
) 
1062     def _set_cookie(self
, domain
, name
, value
, expire_time
=None): 
1063         cookie 
= compat_cookiejar
.Cookie( 
1064             0, name
, value
, None, None, domain
, None, 
1065             None, '/', True, False, expire_time
, '', None, None, None) 
1066         self
._downloader
.cookiejar
.set_cookie(cookie
) 
1068     def get_testcases(self
, include_onlymatching
=False): 
1069         t 
= getattr(self
, '_TEST', None) 
1071             assert not hasattr(self
, '_TESTS'), \
 
1072                 '%s has _TEST and _TESTS' % type(self
).__name
__ 
1075             tests 
= getattr(self
, '_TESTS', []) 
1077             if not include_onlymatching 
and t
.get('only_matching', False): 
1079             t
['name'] = type(self
).__name
__[:-len('IE')] 
1082     def is_suitable(self
, age_limit
): 
1083         """ Test whether the extractor is generally suitable for the given 
1084         age limit (i.e. pornographic sites are not, all others usually are) """ 
1086         any_restricted 
= False 
1087         for tc 
in self
.get_testcases(include_onlymatching
=False): 
1088             if 'playlist' in tc
: 
1089                 tc 
= tc
['playlist'][0] 
1090             is_restricted 
= age_restricted( 
1091                 tc
.get('info_dict', {}).get('age_limit'), age_limit
) 
1092             if not is_restricted
: 
1094             any_restricted 
= any_restricted 
or is_restricted
 
1095         return not any_restricted
 
1097     def extract_subtitles(self
, *args
, **kwargs
): 
1098         if (self
._downloader
.params
.get('writesubtitles', False) or 
1099                 self
._downloader
.params
.get('listsubtitles')): 
1100             return self
._get
_subtitles
(*args
, **kwargs
) 
1103     def _get_subtitles(self
, *args
, **kwargs
): 
1104         raise NotImplementedError("This method must be implemented by subclasses") 
1106     def extract_automatic_captions(self
, *args
, **kwargs
): 
1107         if (self
._downloader
.params
.get('writeautomaticsub', False) or 
1108                 self
._downloader
.params
.get('listsubtitles')): 
1109             return self
._get
_automatic
_captions
(*args
, **kwargs
) 
1112     def _get_automatic_captions(self
, *args
, **kwargs
): 
1113         raise NotImplementedError("This method must be implemented by subclasses") 
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # prefix is empty (one result), 'all', or a positive result count.
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        """Dispatch a '<key><prefix>:<terms>' query to _get_n_results."""
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                # Clamp oversized requests to the extractor's maximum.
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError("This method must be implemented by subclasses")

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY