9 import xml
.etree
.ElementTree
 
  14     compat_urllib_parse_urlparse
, 
  24 _NO_DEFAULT 
= object() 
  27 class InfoExtractor(object): 
  28     """Information Extractor class. 
  30     Information extractors are the classes that, given a URL, extract 
  31     information about the video (or videos) the URL refers to. This 
  32     information includes the real video URL, the video title, author and 
  33     others. The information is stored in a dictionary which is then 
  34     passed to the FileDownloader. The FileDownloader processes this 
  35     information possibly downloading the video to the file system, among 
  36     other possible outcomes. 
  38     The dictionaries must include the following fields: 
  41     title:          Video title, unescaped. 
  43     Additionally, it must contain either a formats entry or a url one: 
  45     formats:        A list of dictionaries for each format available, ordered 
  46                     from worst to best quality. 
  49                     * url        Mandatory. The URL of the video file 
  50                     * ext        Will be calculated from url if missing 
  51                     * format     A human-readable description of the format 
  52                                  ("mp4 container with h264/opus"). 
  53                                  Calculated from the format_id, width, height,
  54                                  and format_note fields if missing.
  55                     * format_id  A short description of the format 
  56                                  ("mp4_h264_opus" or "19"). 
  57                                 Technically optional, but strongly recommended. 
  58                     * format_note Additional info about the format 
  59                                  ("3D" or "DASH video") 
  60                     * width      Width of the video, if known 
  61                     * height     Height of the video, if known 
  62                     * resolution Textual description of width and height 
  63                     * tbr        Average bitrate of audio and video in KBit/s 
  64                     * abr        Average audio bitrate in KBit/s 
  65                     * acodec     Name of the audio codec in use 
  66                     * asr        Audio sampling rate in Hertz 
  67                     * vbr        Average video bitrate in KBit/s 
  68                     * vcodec     Name of the video codec in use 
  69                     * container  Name of the container format 
  70                     * filesize   The number of bytes, if known in advance 
  71                     * player_url SWF Player URL (used for rtmpdump). 
  72                     * protocol   The protocol that will be used for the actual 
  74                                  "http", "https", "rtsp", "rtmp", "m3u8" or so. 
  75                     * preference Order number of this format. If this field is 
  76                                  present and not None, the formats get sorted 
  78                                  -1 for default (order by other properties), 
  79                                  -2 or smaller for less than default. 
  80                     * quality    Order number of the video quality of this 
  81                                  format, irrespective of the file format. 
  82                                  -1 for default (order by other properties), 
  83                                  -2 or smaller for less than default. 
  85     ext:            Video filename extension. 
  86     format:         The video format, defaults to ext (used for --get-format) 
  87     player_url:     SWF Player URL (used for rtmpdump). 
  89     The following fields are optional: 
  91     thumbnails:     A list of dictionaries (with the entries "resolution" and 
  92                     "url") for the varying thumbnails 
  93     thumbnail:      Full URL to a video thumbnail image. 
  94     description:    One-line video description. 
  95     uploader:       Full name of the video uploader. 
  96     upload_date:    Video upload date (YYYYMMDD). 
  97     uploader_id:    Nickname or id of the video uploader. 
  98     location:       Physical location of the video. 
  99     subtitles:      The subtitle file contents as a dictionary in the format 
 100                     {language: subtitles}. 
 101     duration:       Length of the video in seconds, as an integer. 
 102     view_count:     How many users have watched the video on the platform. 
 103     like_count:     Number of positive ratings of the video 
 104     dislike_count:  Number of negative ratings of the video 
 105     comment_count:  Number of comments on the video 
 106     age_limit:      Age restriction for the video, as an integer (years) 
 107     webpage_url:    The url to the video webpage, if given to youtube-dl it 
 108                     should allow to get the same result again. (It will be set 
 109                     by YoutubeDL if it's missing) 
 111     Unless mentioned otherwise, the fields should be Unicode strings. 
 113     Subclasses of this one should re-define the _real_initialize() and 
 114     _real_extract() methods and define a _VALID_URL regexp. 
 115     Probably, they should also be added to the list of extractors. 
 117     _real_extract() must return a *list* of information dictionaries as 
 120     Finally, the _WORKING attribute should be set to False for broken IEs 
 121     in order to warn the users and skip the tests. 
    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        # The downloader may be attached later; None is accepted so an
        # extractor can be created before a FileDownloader exists.
        self.set_downloader(downloader)
 134     def suitable(cls
, url
): 
 135         """Receives a URL and returns True if suitable for this IE.""" 
 137         # This does not use has/getattr intentionally - we want to know whether 
 138         # we have cached the regexp for *this* class, whereas getattr would also 
 139         # match the superclass 
 140         if '_VALID_URL_RE' not in cls
.__dict
__: 
 141             cls
._VALID
_URL
_RE 
= re
.compile(cls
._VALID
_URL
) 
 142         return cls
._VALID
_URL
_RE
.match(url
) is not None 
 146         """Getter method for _WORKING.""" 
    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        # NOTE(review): upstream guards this so the real initialization runs
        # only once per instance; the guard is not visible in this chunk —
        # confirm against the full file.
        self._real_initialize()
    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        # NOTE(review): callers are expected to have the instance initialized
        # (authentication etc.) before extraction; confirm whether an explicit
        # initialize() call belongs here in the full file.
        return self._real_extract(url)
    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        # Stored for later use by the download/report helpers; may be None.
        self._downloader = downloader
    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        # Intentionally a no-op in the base class.
    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        # Intentionally left unimplemented here; subclasses must override.
 174         """A string for getting the InfoExtractor with get_info_extractor""" 
 175         return cls
.__name
__[:-2] 
 179         return type(self
).__name
__[:-2] 
 181     def _request_webpage(self
, url_or_request
, video_id
, note
=None, errnote
=None, fatal
=True): 
 182         """ Returns the response handle """ 
 184             self
.report_download_webpage(video_id
) 
 185         elif note 
is not False: 
 187                 self
.to_screen(u
'%s' % (note
,)) 
 189                 self
.to_screen(u
'%s: %s' % (video_id
, note
)) 
 191             return self
._downloader
.urlopen(url_or_request
) 
 192         except (compat_urllib_error
.URLError
, compat_http_client
.HTTPException
, socket
.error
) as err
: 
 196                 errnote 
= u
'Unable to download webpage' 
 197             errmsg 
= u
'%s: %s' % (errnote
, compat_str(err
)) 
 199                 raise ExtractorError(errmsg
, sys
.exc_info()[2], cause
=err
) 
 201                 self
._downloader
.report_warning(errmsg
) 
 204     def _download_webpage_handle(self
, url_or_request
, video_id
, note
=None, errnote
=None, fatal
=True): 
 205         """ Returns a tuple (page content as string, URL handle) """ 
 207         # Strip hashes from the URL (#1038) 
 208         if isinstance(url_or_request
, (compat_str
, str)): 
 209             url_or_request 
= url_or_request
.partition('#')[0] 
 211         urlh 
= self
._request
_webpage
(url_or_request
, video_id
, note
, errnote
, fatal
) 
 215         content_type 
= urlh
.headers
.get('Content-Type', '') 
 216         webpage_bytes 
= urlh
.read() 
 217         m 
= re
.match(r
'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type
) 
 219             encoding 
= m
.group(1) 
 221             m 
= re
.search(br
'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]', 
 222                           webpage_bytes[:1024]) 
 224                 encoding = m.group(1).decode('ascii') 
 225             elif webpage_bytes.startswith(b'\xff\xfe'): 
 229         if self._downloader.params.get('dump_intermediate_pages', False): 
 231                 url = url_or_request.get_full_url() 
 232             except AttributeError: 
 234             self.to_screen(u'Dumping request to ' + url) 
 235             dump = base64.b64encode(webpage_bytes).decode('ascii') 
 236             self._downloader.to_screen(dump) 
 237         if self._downloader.params.get('write_pages', False): 
 239                 url = url_or_request.get_full_url() 
 240             except AttributeError: 
 243                 h = u'___' + hashlib.md5(url.encode('utf-8')).hexdigest() 
 244                 url = url[:200 - len(h)] + h 
 245             raw_filename = ('%s_%s.dump' % (video_id, url)) 
 246             filename = sanitize_filename(raw_filename, restricted=True) 
 247             self.to_screen(u'Saving request to ' + filename) 
 248             with open(filename, 'wb') as outf: 
 249                 outf.write(webpage_bytes) 
 251         content = webpage_bytes.decode(encoding, 'replace') 
 252         return (content, urlh) 
 254     def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True): 
 255         """ Returns the data of the page as a string """ 
 256         res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal) 
 263     def _download_xml(self, url_or_request, video_id, 
 264                       note=u'Downloading XML', errnote=u'Unable to download XML', 
 265                       transform_source=None): 
 266         """Return the xml as an xml.etree.ElementTree.Element""" 
 267         xml_string = self._download_webpage(url_or_request, video_id, note, errnote) 
 269             xml_string = transform_source(xml_string) 
 270         return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8')) 
 272     def _download_json(self, url_or_request, video_id, 
 273                        note=u'Downloading JSON metadata', 
 274                        errnote=u'Unable to download JSON metadata', 
 275                        transform_source=None): 
 276         json_string = self._download_webpage(url_or_request, video_id, note, errnote) 
 278             json_string = transform_source(json_string) 
 280             return json.loads(json_string) 
 281         except ValueError as ve: 
 282             raise ExtractorError('Failed to download JSON', cause=ve) 
 284     def report_warning(self, msg, video_id=None): 
 285         idstr = u'' if video_id is None else u'%s: ' % video_id 
 286         self._downloader.report_warning( 
 287             u'[%s] %s%s' % (self.IE_NAME, idstr, msg)) 
    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        # All extractor console output funnels through the downloader so
        # quiet/verbose handling stays in one place.
        self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
    def report_extraction(self, id_or_name):
        """Report information extraction."""
        # Progress message shown once per video/playlist being processed.
        self.to_screen(u'%s: Extracting information' % id_or_name)
    def report_download_webpage(self, video_id):
        """Report webpage download."""
        # Default progress note used by _request_webpage when none is given.
        self.to_screen(u'%s: Downloading webpage' % video_id)
    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen(u'Confirming age')
    def report_login(self):
        """Report attempt to log in."""
        self.to_screen(u'Logging in')
 309     #Methods for following #608 
 311     def url_result(url, ie=None, video_id=None): 
 312         """Returns a url that points to a page that should be processed""" 
 313         #TODO: ie should be the class used for getting the info 
 314         video_info = {'_type': 'url', 
 317         if video_id is not None: 
 318             video_info['id'] = video_id 
 321     def playlist_result(entries, playlist_id=None, playlist_title=None): 
 322         """Returns a playlist""" 
 323         video_info = {'_type': 'playlist', 
 326             video_info['id'] = playlist_id 
 328             video_info['title'] = playlist_title 
 331     def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0): 
 333         Perform a regex search on the given string, using a single or a list of 
 334         patterns returning the first matching group. 
 335         In case of failure return a default value or raise a WARNING or a 
 336         RegexNotFoundError, depending on fatal, specifying the field name. 
 338         if isinstance(pattern, (str, compat_str, compiled_regex_type)): 
 339             mobj = re.search(pattern, string, flags) 
 342                 mobj = re.search(p, string, flags) 
 345         if os.name != 'nt' and sys.stderr.isatty(): 
 346             _name = u'\033[0;34m%s\033[0m' % name 
 351             # return the first matching group 
 352             return next(g for g in mobj.groups() if g is not None) 
 353         elif default is not _NO_DEFAULT: 
 356             raise RegexNotFoundError(u'Unable to extract %s' % _name) 
 358             self._downloader.report_warning(u'unable to extract %s; ' 
 359                 u'please report this issue on http://yt-dl.org/bug' % _name) 
 362     def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0): 
 364         Like _search_regex, but strips HTML tags and unescapes entities. 
 366         res = self._search_regex(pattern, string, name, default, fatal, flags) 
 368             return clean_html(res).strip() 
 372     def _get_login_info(self): 
  374         Get the login info as (username, password)
 375         It will look in the netrc file using the _NETRC_MACHINE value 
 376         If there's no info available, return (None, None) 
 378         if self._downloader is None: 
 383         downloader_params = self._downloader.params 
 385         # Attempt to use provided username and password or .netrc data 
 386         if downloader_params.get('username', None) is not None: 
 387             username = downloader_params['username'] 
 388             password = downloader_params['password'] 
 389         elif downloader_params.get('usenetrc', False): 
 391                 info = netrc.netrc().authenticators(self._NETRC_MACHINE) 
 396                     raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) 
 397             except (IOError, netrc.NetrcParseError) as err: 
 398                 self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err)) 
 400         return (username, password) 
 402     # Helper functions for extracting OpenGraph info 
 404     def _og_regexes(prop): 
 405         content_re = r'content=(?:"([^
>]+?
)"|\'([^>]+?)\')' 
 406         property_re = r'(?:name|property)=[\'"]og
:%s[\'"]' % re.escape(prop) 
 407         template = r'<meta[^>]+?%s[^>]+?%s' 
 409             template % (property_re, content_re), 
 410             template % (content_re, property_re), 
 413     def _og_search_property(self, prop, html, name=None, **kargs): 
 415             name = 'OpenGraph %s' % prop 
 416         escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs) 
 419         return unescapeHTML(escaped) 
    def _og_search_thumbnail(self, html, **kargs):
        # og:image; non-fatal because many pages simply lack a thumbnail.
        return self._og_search_property('image', html, u'thumbnail url', fatal=False, **kargs)
    def _og_search_description(self, html, **kargs):
        # og:description; non-fatal — descriptions are optional metadata.
        return self._og_search_property('description', html, fatal=False, **kargs)
    def _og_search_title(self, html, **kargs):
        # og:title; fatal by default since a title is a mandatory field.
        return self._og_search_property('title', html, **kargs)
 430     def _og_search_video_url(self, html, name='video url', secure=True, **kargs): 
 431         regexes = self._og_regexes('video') 
 432         if secure: regexes = self._og_regexes('video:secure_url') + regexes 
 433         return self._html_search_regex(regexes, html, name, **kargs) 
 435     def _html_search_meta(self, name, html, display_name=None): 
 436         if display_name is None: 
 438         return self._html_search_regex( 
 440                     (?=[^>]+(?:itemprop|name|property)=["\']%s["\']) 
 441                     [^>]+content=["\']([^
"\']+)["\']''' % re.escape(name), 
 442             html, display_name, fatal=False) 
    def _dc_search_uploader(self, html):
        # Dublin Core metadata: <meta name="dc.creator" ...> holds the author.
        return self._html_search_meta('dc.creator', html, 'uploader')
 447     def _rta_search(self, html): 
 448         # See http://www.rtalabel.org/index.php?content=howtofaq#single 
 449         if re.search(r'(?ix)<meta\s+name="rating"\s+' 
 450                      r'     content="RTA-5042-1996-1400-1577-RTA"', 
 455     def _media_rating_search(self, html): 
 456         # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/ 
 457         rating = self._html_search_meta('rating', html) 
 469         return RATING_TABLE.get(rating.lower(), None) 
    def _twitter_search_player(self, html):
        # Twitter Card metadata: the embedded player URL, if advertised.
        return self._html_search_meta('twitter:player', html,
            'twitter card player')
 475     def _sort_formats(self, formats): 
 477             raise ExtractorError(u'No video formats found') 
 480             # TODO remove the following workaround 
 481             from ..utils import determine_ext 
 482             if not f.get('ext') and 'url' in f: 
 483                 f['ext'] = determine_ext(f['url']) 
 485             preference = f.get('preference') 
 486             if preference is None: 
 487                 proto = f.get('protocol') 
 489                     proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme 
 491                 preference = 0 if proto in ['http', 'https'] else -0.1 
 492                 if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported 
 495             if f.get('vcodec') == 'none':  # audio only 
 496                 if self._downloader.params.get('prefer_free_formats'): 
 497                     ORDER = [u'aac', u'mp3', u'm4a', u'webm', u'ogg', u'opus'] 
 499                     ORDER = [u'webm', u'opus', u'ogg', u'mp3', u'aac', u'm4a'] 
 502                     audio_ext_preference = ORDER.index(f['ext']) 
 504                     audio_ext_preference = -1 
 506                 if self._downloader.params.get('prefer_free_formats'): 
 507                     ORDER = [u'flv', u'mp4', u'webm'] 
 509                     ORDER = [u'webm', u'flv', u'mp4'] 
 511                     ext_preference = ORDER.index(f['ext']) 
 514                 audio_ext_preference = 0 
 518                 f.get('quality') if f.get('quality') is not None else -1, 
 519                 f.get('height') if f.get('height') is not None else -1, 
 520                 f.get('width') if f.get('width') is not None else -1, 
 522                 f.get('tbr') if f.get('tbr') is not None else -1, 
 523                 f.get('vbr') if f.get('vbr') is not None else -1, 
 524                 f.get('abr') if f.get('abr') is not None else -1, 
 525                 audio_ext_preference, 
 526                 f.get('filesize') if f.get('filesize') is not None else -1, 
 529         formats.sort(key=_formats_key) 
 532 class SearchInfoExtractor(InfoExtractor): 
 534     Base class for paged search queries extractors. 
 535     They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query} 
 536     Instances should define _SEARCH_KEY and _MAX_RESULTS. 
 540     def _make_valid_url(cls): 
 541         return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY 
 544     def suitable(cls, url): 
 545         return re.match(cls._make_valid_url(), url) is not None 
 547     def _real_extract(self, query): 
 548         mobj = re.match(self._make_valid_url(), query) 
 550             raise ExtractorError(u'Invalid search query "%s"' % query) 
 552         prefix = mobj.group('prefix') 
 553         query = mobj.group('query') 
 555             return self._get_n_results(query, 1) 
 556         elif prefix == 'all': 
 557             return self._get_n_results(query, self._MAX_RESULTS) 
 561                 raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query)) 
 562             elif n > self._MAX_RESULTS: 
 563                 self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n)) 
 564                 n = self._MAX_RESULTS 
 565             return self._get_n_results(query, n) 
    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        # Abstract hook: concrete search extractors must override this.
        raise NotImplementedError("This method must be implemented by subclasses")
    def SEARCH_KEY(self):
        # Public accessor for the class-level _SEARCH_KEY (upstream exposes
        # this as a @property; the decorator is not visible in this chunk).
        return self._SEARCH_KEY