import base64
import os
import re
import socket
import sys
import netrc

from ..utils import (
    compat_http_client,
    compat_urllib_error,
    compat_urllib_request,
    compat_str,

    clean_html,
    compiled_regex_type,
    ExtractorError,
)

class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the FileDownloader. The FileDownloader processes this
    information, possibly downloading the video to the file system, among
    other possible outcomes.

    The dictionaries must include the following fields:

    id:             Video identifier.
    url:            Final video URL.
    title:          Video title, unescaped.
    ext:            Video filename extension.

    The following fields are optional:

    format:         The video format, defaults to ext (used for --get-format)
    thumbnails:     A list of dictionaries (with the entries "resolution" and
                    "url") for the varying thumbnails
    thumbnail:      Full URL to a video thumbnail image.
    description:    One-line video description.
    uploader:       Full name of the video uploader.
    upload_date:    Video upload date (YYYYMMDD).
    uploader_id:    Nickname or id of the video uploader.
    location:       Physical location of the video.
    player_url:     SWF Player URL (used for rtmpdump).
    subtitles:      The subtitle file contents.
    view_count:     How many users have watched the video on the platform.
    urlhandle:      [internal] The urlHandle to be used to download the file,
                    as returned by urllib.request.urlopen

    The fields should all be Unicode strings.

    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Probably, they should also be added to the list of extractors.

    _real_extract() must return a *list* of information dictionaries as
    described above.

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    _ready = False
    _downloader = None
    _WORKING = True
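
    # Illustrative sketch only (not part of the upstream file): a dictionary of
    # the shape described above might look roughly like this, assuming a
    # hypothetical extractor that resolved a single MP4 download link.
    #
    #     info = {
    #         'id': u'abc123',
    #         'url': u'http://video.example.com/abc123.mp4',
    #         'title': u'Example video',
    #         'ext': u'mp4',
    #         'uploader': u'Example Uploader',
    #         'upload_date': u'20130101',
    #     }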

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self.set_downloader(downloader)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._VALID_URL, url) is not None

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        self.initialize()
        return self._real_extract(url)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @property
    def IE_NAME(self):
        return type(self).__name__[:-2]

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns the response handle """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            self.to_screen(u'%s: %s' % (video_id, note))
        try:
            return compat_urllib_request.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is None:
                errnote = u'Unable to download webpage'
            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns a tuple (page content as string, URL handle) """
        urlh = self._request_webpage(url_or_request, video_id, note, errnote)
        content_type = urlh.headers.get('Content-Type', '')
        # Read the charset from the Content-Type header; fall back to UTF-8
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            encoding = 'utf-8'
        webpage_bytes = urlh.read()
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen(u'Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        content = webpage_bytes.decode(encoding, 'replace')
        return (content, urlh)

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns the data of the page as a string """
        return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]
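
    # Illustrative sketch only (not from the upstream file): a subclass would
    # typically fetch and parse a page roughly like this, where "url" and
    # "video_id" are hypothetical values inside _real_extract():
    #
    #     webpage = self._download_webpage(url, video_id)
    #     title = self._html_search_regex(r'<title>(.+?)</title>', webpage, u'title')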

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen(u'%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen(u'%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen(u'Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen(u'Logging in')

    # Methods for following #608
    # They set the correct value of the '_type' key
    def video_result(self, video_info):
        """Returns a video"""
        video_info['_type'] = 'video'
        return video_info

    def url_result(self, url, ie=None):
        """Returns a url that points to a page that should be processed"""
        # TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        return video_info

    def playlist_result(self, entries, playlist_id=None, playlist_title=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        return video_info
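
    # Illustrative sketch only (not from the upstream file): a playlist
    # extractor could hand each entry back through url_result() and wrap the
    # collection with playlist_result(); "entry_urls", "channel_id" and
    # "channel_title" are hypothetical names.
    #
    #     entries = [self.url_result(u, ie='Youtube') for u in entry_urls]
    #     return self.playlist_result(entries, channel_id, channel_title)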
 
    def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or an
        ExtractorError, depending on fatal, specifying the field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        if sys.stderr.isatty() and os.name != 'nt':
            _name = u'\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            # return the first matching group
            return next(g for g in mobj.groups() if g is not None)
        elif default is not None:
            return default
        elif fatal:
            raise ExtractorError(u'Unable to extract %s' % _name)
        else:
            self._downloader.report_warning(u'unable to extract %s; '
                u'please report this issue on http://yt-dl.org/bug' % _name)
            return None
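
    # Illustrative sketch only (not from the upstream file): pulling fields out
    # of a downloaded page, with a non-fatal lookup for an optional field;
    # "webpage" is a hypothetical string obtained via _download_webpage().
    #
    #     video_id = self._search_regex(r'data-video-id="(\d+)"', webpage, u'video id')
    #     duration = self._search_regex(r'duration=(\d+)', webpage, u'duration', fatal=False)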

    def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags)
        if res:
            return clean_html(res).strip()
        else:
            return res

    def _get_login_info(self):
        """
        Get the login info as (username, password)
        It will look in the netrc file using the _NETRC_MACHINE value
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))

        return (username, password)
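
    # Illustrative sketch only (not from the upstream file): an extractor for a
    # site that requires authentication would typically call this from
    # _real_initialize():
    #
    #     (username, password) = self._get_login_info()
    #     if username is None:
    #         return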

class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search query extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """
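
    # For instance, an extractor defining _SEARCH_KEY = 'ytsearch' would accept
    # queries such as "ytsearch:some terms" (first result),
    # "ytsearch5:some terms" (five results) and "ytsearchall:some terms"
    # (up to _MAX_RESULTS results). Illustrative comment, not from the
    # upstream file.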

    @classmethod
    def _make_valid_url(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError(u'Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        # An empty prefix means a single result; 'all' means up to _MAX_RESULTS
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError("This method must be implemented by subclasses")

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY