from __future__ import unicode_literals

import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time

from ..compat import (
    compat_cookiejar,
    compat_cookies,
    compat_getpass,
    compat_http_client,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlparse,
    compat_urllib_request,
    compat_urlparse,
    compat_str,
    compat_etree_fromstring,
)
from ..utils import (
    NO_DEFAULT,
    age_restricted,
    bug_reports_message,
    clean_html,
    compiled_regex_type,
    determine_ext,
    ExtractorError,
    fix_xml_ampersands,
    float_or_none,
    int_or_none,
    RegexNotFoundError,
    sanitize_filename,
    unescapeHTML,
    unified_strdate,
    url_basename,
    xpath_text,
    xpath_with_ns,
)


class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the YoutubeDL. The YoutubeDL then processes this
    information, possibly downloading the video to the file system, among
    other possible outcomes.

    The type field determines the type of the result.
    By far the most common value (and the default if _type is missing) is
    "video", which indicates a single video.

    For a video, the dictionaries must include the following fields:

    id:             Video identifier.
    title:          Video title, unescaped.

    Additionally, it must contain either a formats entry or a url one:

    formats:        A list of dictionaries for each format available, ordered
                    from worst to best quality.

                    Potential fields:
                    * url          Mandatory. The URL of the video file
                    * ext          Will be calculated from URL if missing
                    * format       A human-readable description of the format
                                   ("mp4 container with h264/opus").
                                   Calculated from the format_id, width, height,
                                   and format_note fields if missing.
                    * format_id    A short description of the format
                                   ("mp4_h264_opus" or "19").
                                   Technically optional, but strongly recommended.
                    * format_note  Additional info about the format
                                   ("3D" or "DASH video")
                    * width        Width of the video, if known
                    * height       Height of the video, if known
                    * resolution   Textual description of width and height
                    * tbr          Average bitrate of audio and video in KBit/s
                    * abr          Average audio bitrate in KBit/s
                    * acodec       Name of the audio codec in use
                    * asr          Audio sampling rate in Hertz
                    * vbr          Average video bitrate in KBit/s
                    * fps          Frame rate
                    * vcodec       Name of the video codec in use
                    * container    Name of the container format
                    * filesize     The number of bytes, if known in advance
                    * filesize_approx  An estimate for the number of bytes
                    * player_url   SWF Player URL (used for rtmpdump).
                    * protocol     The protocol that will be used for the actual
                                   download, lower-case.
                                   "http", "https", "rtsp", "rtmp", "rtmpe",
                                   "m3u8", or "m3u8_native".
                    * preference   Order number of this format. If this field is
                                   present and not None, the formats get sorted
                                   by this field, regardless of all other values.
                                   -1 for default (order by other properties),
                                   -2 or smaller for less than default.
                                   < -1000 to hide the format (if there is
                                   another one which is strictly better)
                    * language_preference  Is this in the correct requested
                                   language?
                                   10 if it's what the URL is about,
                                   -1 for default (don't know),
                                   -10 otherwise, other values reserved for now.
                    * quality      Order number of the video quality of this
                                   format, irrespective of the file format.
                                   -1 for default (order by other properties),
                                   -2 or smaller for less than default.
                    * source_preference  Order number for this video source
                                   (quality takes higher priority)
                                   -1 for default (order by other properties),
                                   -2 or smaller for less than default.
                    * http_headers  A dictionary of additional HTTP headers
                                   to add to the request.
                    * stretched_ratio  If given and not 1, indicates that the
                                   video's pixels are not square.
                                   width : height ratio as float.
                    * no_resume    The server does not support resuming the
                                   (HTTP or RTMP) download. Boolean.

    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
    player_url:     SWF Player URL (used for rtmpdump).

    The following fields are optional:

    alt_title:      A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats"
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
                                        deprecated)
    thumbnail:      Full URL to a video thumbnail image.
    description:    Full video description.
    uploader:       Full name of the video uploader.
    creator:        The main artist who created the video.
    release_date:   The date (YYYYMMDD) when the video was released.
    timestamp:      UNIX timestamp of the moment the video became available.
    upload_date:    Video upload date (YYYYMMDD).
                    If not explicitly set, calculated from timestamp.
    uploader_id:    Nickname or id of the video uploader.
    location:       Physical location where the video was filmed.
    subtitles:      The available subtitles as a dictionary in the format
                    {language: subformats}. "subformats" is a list sorted from
                    lower to higher preference, each element is a dictionary
                    with the "ext" entry and one of:
                        * "data": The subtitles file contents
                        * "url": A URL pointing to the subtitles file
                    "ext" will be calculated from URL if missing
    automatic_captions: Like 'subtitles', used by the YoutubeIE for
                    automatically generated captions
    duration:       Length of the video in seconds, as an integer.
    view_count:     How many users have watched the video on the platform.
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    repost_count:   Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all but one of text or html optional):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "id" - Comment ID
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage; if given to youtube-dl it
                    should allow getting the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.
    start_time:     Time in seconds where playback should start, as
                    specified in the URL.
    end_time:       Time in seconds where playback should end, as
                    specified in the URL.

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.
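
    For example, a minimal (purely illustrative) video result could look like:

        {
            'id': '4234987',
            'title': 'Dancing naked mole rats',
            'url': 'https://example.com/videos/4234987.mp4',
            'ext': 'mp4',
        }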


    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "title", "description" and "id" attributes
    with the same semantics as videos (see above).
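
    For example (again, only illustrative):

        {
            '_type': 'playlist',
            'id': 'mole-rats',
            'title': 'Mole rat compilation',
            'entries': [],  # a list of video dictionaries as described above
        }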


    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.


    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.


    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.


    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Probably, they should also be added to the list of extractors.

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    _ready = False
    _downloader = None
    _WORKING = True

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self.set_downloader(downloader)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""

        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None

    @classmethod
    def _match_id(cls, url):
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        m = cls._VALID_URL_RE.match(url)
        assert m
        return m.group('id')
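
    # Illustrative note (not from upstream): _match_id relies on _VALID_URL
    # declaring a named group "id", e.g.
    #   _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
    # so that ExampleIE._match_id('https://example.com/watch/42') returns '42'.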

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            self.initialize()
            return self._real_extract(url)
        except ExtractorError:
            raise
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        return compat_str(cls.__name__[:-2])

    @property
    def IE_NAME(self):
        return compat_str(type(self).__name__[:-2])

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
        """ Returns the response handle """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))
        try:
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'
            errmsg = '%s: %s' % (errnote, compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
        """ Returns a tuple (page content as string, URL handle) """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)

    @staticmethod
    def _guess_encoding_from_content(content_type, webpage_bytes):
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
                encoding = 'utf-16'
            else:
                encoding = 'utf-8'

        return encoding

    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen('Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            basen = '%s_%s' % (video_id, url)
            if len(basen) > 240:
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if os.name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            content = webpage_bytes.decode('utf-8', 'replace')

        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in content[:512]):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in content[:512]:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)

        return content

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
        """ Returns the data of the page as a string """
        success = False
        try_count = 0
        while success is False:
            try:
                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
                success = True
            except compat_http_client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)
        if res is False:
            return res
        else:
            content, _ = res
            return content

    def _download_xml(self, url_or_request, video_id,
                      note='Downloading XML', errnote='Unable to download XML',
                      transform_source=None, fatal=True, encoding=None):
        """Return the xml as an xml.etree.ElementTree.Element"""
        xml_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
        if xml_string is False:
            return xml_string
        if transform_source:
            xml_string = transform_source(xml_string)
        return compat_etree_fromstring(xml_string.encode('utf-8'))

    def _download_json(self, url_or_request, video_id,
                       note='Downloading JSON metadata',
                       errnote='Unable to download JSON metadata',
                       transform_source=None,
                       fatal=True, encoding=None):
        json_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal,
            encoding=encoding)
        if (not fatal) and json_string is False:
            return None
        return self._parse_json(
            json_string, video_id, transform_source=transform_source, fatal=fatal)
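
    # Illustrative usage (not from upstream): a subclass would typically call
    #   data = self._download_json(
    #       'https://example.com/api/videos/%s' % video_id, video_id)
    # and then read fields such as data['title'].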

    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
        if transform_source:
            json_string = transform_source(json_string)
        try:
            return json.loads(json_string)
        except ValueError as ve:
            errmsg = '%s: Failed to parse JSON ' % video_id
            if fatal:
                raise ExtractorError(errmsg, cause=ve)
            else:
                self.report_warning(errmsg + str(ve))

    def report_warning(self, msg, video_id=None):
        idstr = '' if video_id is None else '%s: ' % video_id
        self._downloader.report_warning(
            '[%s] %s%s' % (self.IE_NAME, idstr, msg))

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')

    @staticmethod
    def raise_login_required(msg='This video is only available for registered users'):
        raise ExtractorError(
            '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
            expected=True)

    @staticmethod
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
        raise ExtractorError(
            '%s. You might want to use --proxy to work around this.' % msg,
            expected=True)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None):
        """Returns a URL that points to a page that should be processed"""
        # TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        if video_id is not None:
            video_info['id'] = video_id
        if video_title is not None:
            video_info['title'] = video_title
        return video_info
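
    # Illustrative usage (not from upstream):
    #   return self.url_result(embed_url, ie='Youtube', video_id=video_id)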

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        if playlist_description:
            video_info['description'] = playlist_description
        return video_info

    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single pattern or
        a list of patterns, returning the first matching group.
        In case of failure, return a default value, issue a warning, or raise
        a RegexNotFoundError, depending on fatal, specifying the field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
            _name = '\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None
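
    # Illustrative usage (not from upstream):
    #   title = self._search_regex(
    #       r'<h1[^>]*>([^<]+)</h1>', webpage, 'title', default=None)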

    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if res:
            return clean_html(res).strip()
        else:
            return res

    def _get_login_info(self):
        """
        Get the login info as (username, password).
        It will look in the netrc file using the _NETRC_MACHINE value.
        If there's no info available, return (None, None).
        """
        if self._downloader is None:
            return (None, None)

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))

        return (username, password)

    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info.
        TODO - asking the user will be required for sms/phone verification;
        currently this just uses the command line option.
        If there's no info available, return None.
        """
        if self._downloader is None:
            return None
        downloader_params = self._downloader.params

        if downloader_params.get('twofactor', None) is not None:
            return downloader_params['twofactor']

        return compat_getpass('Type %s and press [Return]: ' % note)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
        property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
                       % {'prop': re.escape(prop)})
        template = r'<meta[^>]+?%s[^>]+?%s'
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]

    @staticmethod
    def _meta_regex(prop):
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        if name is None:
            name = 'OpenGraph %s' % prop
        escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
        if escaped is None:
            return None
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, **kargs):
        return self._og_search_property('title', html, **kargs)

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _og_search_url(self, html, **kargs):
        return self._og_search_property('url', html, **kargs)

    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        if display_name is None:
            display_name = name
        return self._html_search_regex(
            self._meta_regex(name),
            html, display_name, fatal=fatal, group='content', **kwargs)
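
    # Illustrative usage (not from upstream):
    #   duration = int_or_none(self._html_search_meta(
    #       'duration', webpage, 'duration', fatal=False))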

    def _dc_search_uploader(self, html):
        return self._html_search_meta('dc.creator', html, 'uploader')

    def _rta_search(self, html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r' content="RTA-5042-1996-1400-1577-RTA"',
                     html):
            return 18
        return 0

    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)

        if not rating:
            return None

        RATING_TABLE = {
            'safe for kids': 0,
            'general': 8,
            '14 years': 14,
            'mature': 17,
            'restricted': 19,
        }
        return RATING_TABLE.get(rating.lower(), None)

    def _family_friendly_search(self, html):
        # See http://schema.org/VideoObject
        family_friendly = self._html_search_meta('isFamilyFriendly', html)

        if not family_friendly:
            return None

        RATING_TABLE = {
            '1': 0,
            'true': 0,
            '0': 18,
            'false': 18,
        }
        return RATING_TABLE.get(family_friendly.lower(), None)

    def _twitter_search_player(self, html):
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')

    @staticmethod
    def _hidden_inputs(html):
        html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
        hidden_inputs = {}
        for input in re.findall(r'(?i)<input([^>]+)>', html):
            if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
                continue
            name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
            if not name:
                continue
            value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
            if not value:
                continue
            hidden_inputs[name.group('value')] = value.group('value')
        return hidden_inputs
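
    # Illustrative example (not from upstream): given
    #   <input type="hidden" name="token" value="abc123">
    # _hidden_inputs returns {'token': 'abc123'}.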

    def _form_hidden_inputs(self, form_id, html):
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)

    def _sort_formats(self, formats, field_preference=None):
        if not formats:
            raise ExtractorError('No video formats found')

        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])

            if isinstance(field_preference, (list, tuple)):
                return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)

            preference = f.get('preference')
            if preference is None:
                proto = f.get('protocol')
                if proto is None:
                    proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme

                preference = 0 if proto in ['http', 'https'] else -0.1
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5

            if f.get('vcodec') == 'none':  # audio only
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0

            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)

    def _check_formats(self, formats, video_id):
        if formats:
            formats[:] = filter(
                lambda f: self._is_valid_url(
                    f['url'], video_id,
                    item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                formats)

    def _is_valid_url(self, url, video_id, item='video'):
        url = self._proto_relative_url(url, scheme='http:')
        # For now, assume non-HTTP(S) URLs are always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item)
            return True
        except ExtractorError as e:
            if isinstance(e.cause, compat_urllib_error.URLError):
                self.to_screen(
                    '%s: %s URL is invalid, skipping' % (video_id, item))
                return False
            raise

    def http_scheme(self):
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self._downloader.params.get('prefer_insecure', False)
            else 'https:')

    def _proto_relative_url(self, url, scheme=None):
        if url is None:
            return url
        if url.startswith('//'):
            if scheme is None:
                scheme = self.http_scheme()
            return scheme + url
        else:
            return url
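
    # Illustrative usage (not from upstream):
    #   self._proto_relative_url('//example.com/video.mp4')
    #   # -> 'https://example.com/video.mp4' (or 'http://...' with --prefer-insecure)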

    def _sleep(self, timeout, video_id, msg_template=None):
        if msg_template is None:
            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
        self.to_screen(msg)
        time.sleep(timeout)

    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True):
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal)

        if manifest is False:
            return manifest

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        for i, media_el in enumerate(media_nodes):
            if manifest_version == '2.0':
                media_url = media_el.attrib.get('href') or media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ('/'.join(manifest_url.split('/')[:-1]) + '/' + media_url))
                # If media_url is itself an f4m manifest, do the recursive extraction,
                # since bitrates in the parent manifest (this one) and in the media_url
                # manifest may differ, making it impossible to resolve the format by
                # the requested bitrate in the f4m downloader
                if determine_ext(manifest_url) == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference, f4m_id, fatal=fatal)
                    if f4m_formats:
                        formats.extend(f4m_formats)
                    continue
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            formats.append({
                'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])),
                'url': manifest_url,
                'ext': 'flv',
                'tbr': tbr,
                'width': int_or_none(media_el.attrib.get('width')),
                'height': int_or_none(media_el.attrib.get('height')),
                'preference': preference,
            })
        self._sort_formats(formats)

        return formats

    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                              entry_protocol='m3u8', preference=None,
                              m3u8_id=None, note=None, errnote=None,
                              fatal=True):

        formats = [{
            'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
            'url': m3u8_url,
            'ext': ext,
            'protocol': 'm3u8',
            'preference': preference - 1 if preference else -1,
            'resolution': 'multiple',
            'format_note': 'Quality selection URL',
        }]

        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))

        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note=note or 'Downloading m3u8 information',
            errnote=errnote or 'Failed to download m3u8 information',
            fatal=fatal)
        if res is False:
            return res
        m3u8_doc, urlh = res
        m3u8_url = urlh.geturl()
        last_info = None
        last_media = None
        kv_rex = re.compile(
            r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_info = {}
                for m in kv_rex.finditer(line):
                    v = m.group('val')
                    if v.startswith('"'):
                        v = v[1:-1]
                    last_info[m.group('key')] = v
            elif line.startswith('#EXT-X-MEDIA:'):
                last_media = {}
                for m in kv_rex.finditer(line):
                    v = m.group('val')
                    if v.startswith('"'):
                        v = v[1:-1]
                    last_media[m.group('key')] = v
            elif line.startswith('#') or not line.strip():
                continue
            else:
                if last_info is None:
                    formats.append({'url': format_url(line)})
                    continue
                tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None
                format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(line.strip()),
                    'tbr': tbr,
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                codecs = last_info.get('CODECS')
                if codecs:
                    # TODO: it looks like the video codec does not always go first
                    va_codecs = codecs.split(',')
                    if va_codecs[0]:
                        f['vcodec'] = va_codecs[0].partition('.')[0]
                    if len(va_codecs) > 1 and va_codecs[1]:
                        f['acodec'] = va_codecs[1].partition('.')[0]
                resolution = last_info.get('RESOLUTION')
                if resolution:
                    width_str, height_str = resolution.split('x')
                    f['width'] = int(width_str)
                    f['height'] = int(height_str)
                if last_media is not None:
                    f['m3u8_media'] = last_media
                    last_media = None
                formats.append(f)
                last_info = {}
        self._sort_formats(formats)
        return formats

    @staticmethod
    def _xpath_ns(path, namespace=None):
        if not namespace:
            return path
        out = []
        for c in path.split('/'):
            if not c or c == '.':
                out.append(c)
            else:
                out.append('{%s}%s' % (namespace, c))
        return '/'.join(out)
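
    # Illustrative usage (not from upstream): with namespace 'urn:example',
    #   _xpath_ns('./head/meta', 'urn:example')
    #   # -> './{urn:example}head/{urn:example}meta'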

    def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)

        if smil is False:
            assert not fatal
            return []

        namespace = self._parse_smil_namespace(smil)

        return self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)

    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)
        if smil is False:
            return {}
        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

    def _download_smil(self, smil_url, video_id, fatal=True):
        return self._download_xml(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal)

    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)

        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _parse_smil_namespace(self, smil):
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0

        videos = smil.findall(self._xpath_ns('.//video', namespace))
        for video in videos:
            src = video.get('src')
            if not src:
                continue

            bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
            filesize = int_or_none(video.get('size') or video.get('fileSize'))
            width = int_or_none(video.get('width'))
            height = int_or_none(video.get('height'))
            proto = video.get('proto')
            ext = video.get('ext')
            src_ext = determine_ext(src)
            streamer = video.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                if m3u8_formats:
                    formats.extend(m3u8_formats)
                continue

            if src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse.urlencode(f4m_params)
                f4m_formats = self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)
                if f4m_formats:
                    formats.extend(f4m_formats)
                continue

            if src_url.startswith('http') and self._is_valid_url(src_url, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                continue

        self._sort_formats(formats)

        return formats

    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src:
                continue
            ext = textstream.get('ext') or determine_ext(src)
            if not ext:
                type_ = textstream.get('type')
                SUBTITLES_TYPES = {
                    'text/vtt': 'vtt',
                    'text/srt': 'srt',
                    'application/smptett+xml': 'tt',
                }
                if type_ in SUBTITLES_TYPES:
                    ext = SUBTITLES_TYPES[type_]
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles

    def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
        xspf = self._download_xml(
            playlist_url, playlist_id, 'Downloading xspf playlist',
            'Unable to download xspf manifest', fatal=fatal)
        if xspf is False:
            return []
        return self._parse_xspf(xspf, playlist_id)

    def _parse_xspf(self, playlist, playlist_id):
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }

        entries = []
        for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

            formats = [{
                'url': location.text,
                'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
            } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
            self._sort_formats(formats)

            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries

    def _live_title(self, name):
        """ Generate the title for a live video """
        now = datetime.datetime.now()
        now_str = now.strftime("%Y-%m-%d %H:%M")
        return name + ' ' + now_str

    def _int(self, v, name, fatal=False, **kwargs):
        res = int_or_none(v, **kwargs)
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _float(self, v, name, fatal=False, **kwargs):
        res = float_or_none(v, **kwargs)
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _set_cookie(self, domain, name, value, expire_time=None):
        cookie = compat_cookiejar.Cookie(
            0, name, value, None, None, domain, None,
            None, '/', True, False, expire_time, '', None, None, None)
        self._downloader.cookiejar.set_cookie(cookie)

    def _get_cookies(self, url):
        """ Return a compat_cookies.SimpleCookie with the cookies for the url """
        req = compat_urllib_request.Request(url)
        self._downloader.cookiejar.add_cookie_header(req)
        return compat_cookies.SimpleCookie(req.get_header('Cookie'))

    def get_testcases(self, include_onlymatching=False):
        t = getattr(self, '_TEST', None)
        if t:
            assert not hasattr(self, '_TESTS'), \
                '%s has _TEST and _TESTS' % type(self).__name__
            tests = [t]
        else:
            tests = getattr(self, '_TESTS', [])
        for t in tests:
            if not include_onlymatching and t.get('only_matching', False):
                continue
            t['name'] = type(self).__name__[:-len('IE')]
            yield t

    def is_suitable(self, age_limit):
        """ Test whether the extractor is generally suitable for the given
        age limit (i.e. pornographic sites are not, all others usually are) """

        any_restricted = False
        for tc in self.get_testcases(include_onlymatching=False):
            if 'playlist' in tc:
                tc = tc['playlist'][0]
            is_restricted = age_restricted(
                tc.get('info_dict', {}).get('age_limit'), age_limit)
            if not is_restricted:
                return True
            any_restricted = any_restricted or is_restricted
        return not any_restricted

    def extract_subtitles(self, *args, **kwargs):
        if (self._downloader.params.get('writesubtitles', False) or
                self._downloader.params.get('listsubtitles')):
            return self._get_subtitles(*args, **kwargs)
        return {}

    def _get_subtitles(self, *args, **kwargs):
        raise NotImplementedError("This method must be implemented by subclasses")

    @staticmethod
    def _merge_subtitle_items(subtitle_list1, subtitle_list2):
        """ Merge subtitle items for one language. Items with duplicated URLs
        will be dropped. """
        list1_urls = set([item['url'] for item in subtitle_list1])
        ret = list(subtitle_list1)
        ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
        return ret

    @classmethod
    def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
        """ Merge two subtitle dictionaries, language by language. """
        ret = dict(subtitle_dict1)
        for lang in subtitle_dict2:
            ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
        return ret

    def extract_automatic_captions(self, *args, **kwargs):
        if (self._downloader.params.get('writeautomaticsub', False) or
                self._downloader.params.get('listsubtitles')):
            return self._get_automatic_captions(*args, **kwargs)
        return {}

    def _get_automatic_captions(self, *args, **kwargs):
        raise NotImplementedError("This method must be implemented by subclasses")


class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search query extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
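
    For example, with _SEARCH_KEY = 'ytsearch' (the key used by the YouTube
    search extractor), "ytsearch3:some query" requests the first three results
    for "some query" and "ytsearchall:some query" requests up to _MAX_RESULTS
    of them.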
    """

    @classmethod
    def _make_valid_url(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError("This method must be implemented by subclasses")

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY