]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/common.py
Finalize changelog.
[youtubedl] / youtube_dl / extractor / common.py
1 import base64
2 import os
3 import re
4 import socket
5 import sys
6 import netrc
7
8 from ..utils import (
9 compat_http_client,
10 compat_urllib_error,
11 compat_urllib_request,
12 compat_str,
13
14 clean_html,
15 compiled_regex_type,
16 ExtractorError,
17 unescapeHTML,
18 )
19
class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the FileDownloader. The FileDownloader processes this
    information possibly downloading the video to the file system, among
    other possible outcomes.

    The dictionaries must include the following fields:

    id:             Video identifier.
    url:            Final video URL.
    title:          Video title, unescaped.
    ext:            Video filename extension.

    The following fields are optional:

    format:         The video format, defaults to ext (used for --get-format)
    thumbnails:     A list of dictionaries (with the entries "resolution" and
                    "url") for the varying thumbnails
    thumbnail:      Full URL to a video thumbnail image.
    description:    One-line video description.
    uploader:       Full name of the video uploader.
    upload_date:    Video upload date (YYYYMMDD).
    uploader_id:    Nickname or id of the video uploader.
    location:       Physical location of the video.
    player_url:     SWF Player URL (used for rtmpdump).
    subtitles:      The subtitle file contents as a dictionary in the format
                    {language: subtitles}.
    view_count:     How many users have watched the video on the platform.
    urlhandle:      [internal] The urlHandle to be used to download the file,
                    like returned by urllib.request.urlopen

    The fields should all be Unicode strings.

    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Probably, they should also be added to the list of extractors.

    _real_extract() must return a *list* of information dictionaries as
    described above.

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    # Whether _real_initialize() has run for this instance
    _ready = False
    # The FileDownloader this IE reports to (set via set_downloader())
    _downloader = None
    # Set to False in subclasses that are known to be broken
    _WORKING = True

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self.set_downloader(downloader)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""

        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc).

        _real_initialize() runs at most once per instance.
        """
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        self.initialize()
        return self._real_extract(url)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        # Strips the conventional 'IE' suffix from the class name
        return cls.__name__[:-2]

    @property
    def IE_NAME(self):
        # Same convention as ie_key(): class name without the 'IE' suffix
        return type(self).__name__[:-2]

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns the response handle """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            # note=False suppresses any status output
            self.to_screen(u'%s: %s' % (video_id, note))
        try:
            return compat_urllib_request.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is None:
                errnote = u'Unable to download webpage'
            # Preserve the original traceback and cause for better error reports
            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2], cause=err)

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns a tuple (page content as string, URL handle) """

        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote)
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        # Prefer the charset declared in the Content-Type header ...
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            # ... fall back to a <meta charset=...> tag near the top of the
            # document, and finally to UTF-8.
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            else:
                encoding = 'utf-8'
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen(u'Dumping request to ' + url)
            # base64 keeps the dump safe for terminals regardless of content
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        content = webpage_bytes.decode(encoding, 'replace')
        return (content, urlh)

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns the data of the page as a string """
        return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen(u'%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen(u'%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen(u'Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen(u'Logging in')

    #Methods for following #608
    def url_result(self, url, ie=None):
        """Returns a url that points to a page that should be processed"""
        #TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        return video_info

    def playlist_result(self, entries, playlist_id=None, playlist_title=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        return video_info

    def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or a
        ExtractorError, depending on fatal, specifying the field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            # Try each pattern in turn, keeping the first that matches.
            # Initialize mobj so an empty pattern sequence does not raise
            # UnboundLocalError below.
            mobj = None
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj: break

        # Colorize the field name when writing to a real terminal (not on
        # Windows, whose console does not understand ANSI escapes)
        if sys.stderr.isatty() and os.name != 'nt':
            _name = u'\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            # return the first matching group
            return next(g for g in mobj.groups() if g is not None)
        elif default is not None:
            return default
        elif fatal:
            raise ExtractorError(u'Unable to extract %s' % _name)
        else:
            self._downloader.report_warning(u'unable to extract %s; '
                u'please report this issue on http://yt-dl.org/bug' % _name)
            return None

    def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags)
        if res:
            return clean_html(res).strip()
        else:
            return res

    def _get_login_info(self):
        """
        Get the login info as (username, password)
        It will look in the netrc file using the _NETRC_MACHINE value
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                # Best-effort: a bad .netrc only produces a warning
                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))

        return (username, password)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regex(prop):
        return r'<meta.+?property=[\'"]og:%s[\'"].+?content=(?:"(.+?)"|\'(.+?)\')' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        if name is None:
            name = 'OpenGraph %s' % prop
        escaped = self._search_regex(self._og_regex(prop), html, name, flags=re.DOTALL, **kargs)
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, u'thumbnail url', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, **kargs):
        return self._og_search_property('title', html, **kargs)

    def _og_search_video_url(self, html, name='video url', **kargs):
        # Prefer the HTTPS variant of the video URL when present
        return self._html_search_regex([self._og_regex('video:secure_url'),
                                        self._og_regex('video')],
                                       html, name, **kargs)
307
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # prefix is empty (one result), a positive integer, or the word 'all'
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        """Parse the search query and fetch the requested number of results."""
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError(u'Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            # No count prefix: return a single result
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                # Clamp to the service limit, but tell the user about it
                self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError("This method must be implemented by subclasses")

    @property
    def SEARCH_KEY(self):
        # Public accessor for the search URL prefix defined by subclasses
        return self._SEARCH_KEY