from __future__ import unicode_literals

import itertools
import json
import os
import re
import time
import traceback

from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
    compat_chr,
    compat_parse_qs,
    compat_str,
    compat_urllib_parse,
    compat_urllib_parse_unquote,
    compat_urllib_parse_unquote_plus,
    compat_urllib_parse_urlparse,
    compat_urlparse,
)
from ..utils import (
    clean_html,
    encode_dict,
    ExtractorError,
    float_or_none,
    get_element_by_attribute,
    get_element_by_id,
    int_or_none,
    ISO3166Utils,
    orderedSet,
    parse_duration,
    remove_start,
    sanitized_Request,
    smuggle_url,
    str_to_int,
    unescapeHTML,
    unified_strdate,
    unsmuggle_url,
    uppercase_escape,
)


class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    def _set_language(self):
        self._set_cookie(
            '.youtube.com', 'PREF', 'f1=50000000&hl=en',
            # YouTube sets the expire time to about two months
            expire_time=time.time() + 2 * 30 * 24 * 3600)

    def _ids_to_results(self, ids):
        return [
            self.url_result(vid_id, 'Youtube', video_id=vid_id)
            for vid_id in ids]

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True
        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            return False

        galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
                                  login_page, 'Login GALX parameter')

        login_form_strs = {
            'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
            'PersistentCookie': 'yes',
            'bgresponse': 'js_disabled',
            'checkConnection': '',
            'checkedDomains': 'youtube',
            'service': 'youtube',
        }

        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')

        req = sanitized_Request(self._LOGIN_URL, login_data)
        login_results = self._download_webpage(
            req, None,
            note='Logging in', errnote='unable to log in', fatal=False)
        if login_results is False:
            return False

        if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
            raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)

        # TODO add SMS and phone call support - these require making a request and then prompting the user
        if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
            tfa_code = self._get_tfa_info('2-step verification code')

            if not tfa_code:
                self._downloader.report_warning(
                    'Two-factor authentication required. Provide it either interactively or with --twofactor <code> '
                    '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                return False

            tfa_code = remove_start(tfa_code, 'G-')

            tfa_form_strs = self._form_hidden_inputs('challenge', login_results)

            tfa_form_strs.update({
            })

            tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')

            tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
            tfa_results = self._download_webpage(
                tfa_req, None,
                note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)

            if tfa_results is False:
                return False

            if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
                self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
                return False
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
                self._downloader.report_warning('unable to log in - did the page structure change?')
                return False
            if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
                self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
                return False

        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
            return False
        return True

    def _real_initialize(self):
        if self._downloader is None:
            return
        if not self._login():
            return


class YoutubeEntryListBaseInfoExtractor(InfoExtractor):
    # Extract entries from page with "Load more" button
    def _entries(self, page, playlist_id):
        more_widget_html = content_html = page
        for page_num in itertools.count(1):
            for entry in self._process_page(content_html):
                yield entry

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            if not content_html.strip():
                # Some webpages show a "Load more" button but they don't
                # have any content
                break
            more_widget_html = more['load_more_widget_html']


class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
    def _process_page(self, content):
        for video_id, video_title in self.extract_videos_from_page(content):
            yield self.url_result(video_id, 'Youtube', video_id, video_title)

    def extract_videos_from_page(self, page):
        ids_in_page = []
        titles_in_page = []
        for mobj in re.finditer(self._VIDEO_RE, page):
            # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
            if 'index' in mobj.groupdict() and mobj.group('id') == '0':
                continue
            video_id = mobj.group('id')
            video_title = unescapeHTML(mobj.group('title'))
            if video_title:
                video_title = video_title.strip()
            try:
                idx = ids_in_page.index(video_id)
                if video_title and not titles_in_page[idx]:
                    titles_in_page[idx] = video_title
            except ValueError:
                ids_in_page.append(video_id)
                titles_in_page.append(video_title)
        return zip(ids_in_page, titles_in_page)


class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
    def _process_page(self, content):
        for playlist_id in re.findall(r'href="/?playlist\?list=(.+?)"', content):
            yield self.url_result(
                'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        title = self._og_search_title(webpage, fatal=False)
        return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)


class YoutubeIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube.com'
    _VALID_URL = r"""(?x)^
                     (?:https?://|//)                                    # http(s):// or protocol-independent URL
                     (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                        (?:www\.)?deturl\.com/www\.youtube\.com/|
                        (?:www\.)?pwnyoutube\.com/|
                        (?:www\.)?yourepeat\.com/|
                        tube\.majestyc\.net/|
                        youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                     (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                     (?:                                                  # the various things that can precede the ID:
                         (?:(?:v|embed|e)/(?!videoseries))                # v/ or embed/ or e/
                         |(?:                                             # or the v= param in all its forms
                             (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                             (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                             (?:.*?&)??                                   # any other preceding param (like /?s=tuff&v=xxxx)
                        youtu\.be|                                        # just youtu.be/xxxx
                        vid\.plus                                         # or vid.plus/xxxx
                        |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                     )?                                                   # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]{11})                                  # here is it! the YouTube video ID
                     (?!.*?&list=)                                        # combined list/video URLs are handled by the playlist IE
                     (?(1).+)?                                            # if we found the ID, everything can follow
                     $"""
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
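    # Illustrative example (not part of the original source): _NEXT_URL_RE pulls the
    # target out of redirect-style URLs such as
    #   https://www.youtube.com/verify_age?next_url=%2Fwatch%3Fv%3DXXXXXXXXXXX
    # where the captured, percent-encoded value is unquoted and re-appended to
    # www.youtube.com in _real_extract before extraction continues.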
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240},
        '6': {'ext': 'flv', 'width': 450, 'height': 270},
        '13': {'ext': '3gp'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720},
        '34': {'ext': 'flv', 'width': 640, 'height': 360},
        '35': {'ext': 'flv', 'width': 854, 'height': 480},
        '36': {'ext': '3gp', 'width': 320, 'height': 240},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
        '43': {'ext': 'webm', 'width': 640, 'height': 360},
        '44': {'ext': 'webm', 'width': 854, 'height': 480},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080},
        '59': {'ext': 'mp4', 'width': 854, 'height': 480},
        '78': {'ext': 'mp4', 'width': 854, 'height': 480},

        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},

        # Apple HTTP Live Streaming
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},

        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '138': {'ext': 'mp4', 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},  # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
        '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},

        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},

        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'vp9'},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
        '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
        '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'vp9'},
        '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},

        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
        '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},

        # Dash webm audio with opus inside
        '249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
        '250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
        '251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},

        '_rtmp': {'protocol': 'rtmp'},
    }
        'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&t=1s&end=9',
        'title': 'youtube-dl test video "\'/\\ä↭𝕐',
        'uploader': 'Philipp Hagemeister',
        'uploader_id': 'phihag',
        'upload_date': '20121002',
        'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
        'categories': ['Science & Technology'],
        'tags': ['youtube-dl'],
        'dislike_count': int,

        'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
        'note': 'Test generic use_cipher_signature video (#897)',
        'upload_date': '20120506',
        'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
        'description': 'md5:782e8651347686cba06e58f71ab51773',
        'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
                 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
                 'iconic ep', 'iconic', 'love', 'it'],
        'uploader': 'Icona Pop',
        'uploader_id': 'IconaPop',

        'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
        'note': 'Test VEVO video with age protection (#956)',
        'upload_date': '20130703',
        'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
        'description': 'md5:64249768eec3bc4276236606ea996373',
        'uploader': 'justintimberlakeVEVO',
        'uploader_id': 'justintimberlakeVEVO',

        'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
        'note': 'Embed-only video (#1746)',
        'upload_date': '20120608',
        'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
        'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
        'uploader': 'SET India',
        'uploader_id': 'setindia',

        'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&v=UxxajLWwzqY',
        'note': 'Use the first video ID in the URL',
        'title': 'youtube-dl test video "\'/\\ä↭𝕐',
        'uploader': 'Philipp Hagemeister',
        'uploader_id': 'phihag',
        'upload_date': '20121002',
        'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
        'categories': ['Science & Technology'],
        'tags': ['youtube-dl'],
        'dislike_count': int,
        'skip_download': True,

        'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
        'note': '256k DASH audio (format 141) via DASH manifest',
        'upload_date': '20121002',
        'uploader_id': '8KVIDEO',
        'uploader': '8KVIDEO',
        'title': 'UHDTV TEST 8K VIDEO.mp4'
        'youtube_include_dash_manifest': True,

        # DASH manifest with encrypted signature
        'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
        'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
        'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
        'uploader': 'AfrojackVEVO',
        'uploader_id': 'AfrojackVEVO',
        'upload_date': '20131011',
        'youtube_include_dash_manifest': True,

        # JS player signature function name containing $
        'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
        'title': 'Taylor Swift - Shake It Off',
        'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
        'uploader': 'TaylorSwiftVEVO',
        'uploader_id': 'TaylorSwiftVEVO',
        'upload_date': '20140818',
        'youtube_include_dash_manifest': True,

        'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
        'upload_date': '20100909',
        'uploader': 'The Amazing Atheist',
        'uploader_id': 'TheAmazingAtheist',
        'title': 'Burning Everyone\'s Koran',
        'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',

        # Normal age-gate video (No vevo, embed allowed)
        'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
        'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
        'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
        'uploader': 'The Witcher',
        'uploader_id': 'WitcherGame',
        'upload_date': '20140605',

        # Age-gate video with encrypted signature
        'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
        'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
        'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
        'uploader': 'LloydVEVO',
        'uploader_id': 'LloydVEVO',
        'upload_date': '20110629',

        # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
        'url': '__2ABJjxzNo',
        'upload_date': '20100430',
        'uploader_id': 'deadmau5',
        'description': 'md5:12c56784b8032162bb936a5f76d55360',
        'uploader': 'deadmau5',
        'title': 'Deadmau5 - Some Chords (HD)',
        'expected_warnings': [
            'DASH manifest missing',

        # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
        'url': 'lqQg6PlCWgI',
        'upload_date': '20150827',
        'uploader_id': 'olympic',
        'description': 'HO09 - Women -  GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
        'uploader': 'Olympics',
        'title': 'Hockey - Women -  GER-AUS - London 2012 Olympic Games',
        'skip_download': 'requires avconv',

        'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
        'stretched_ratio': 16 / 9.,
        'upload_date': '20110310',
        'uploader_id': 'AllenMeow',
        'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
        'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',

        # url_encoded_fmt_stream_map is empty string
        'url': 'qEJwOuvDf7I',
        'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
        'upload_date': '20150404',
        'uploader_id': 'spbelect',
        'uploader': 'Наблюдатели Петербурга',
        'skip_download': 'requires avconv',

        # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
        'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
        'title': 'md5:7b81415841e02ecd4313668cde88737a',
        'description': 'md5:116377fd2963b81ec4ce64b542173306',
        'upload_date': '20150625',
        'uploader_id': 'dorappi2000',
        'uploader': 'dorappi2000',
        'formats': 'mincount:33',

        # DASH manifest with segment_list
        'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
        'md5': '8ce563a1d667b599d21064e982ab9e31',
        'upload_date': '20150501',  # According to '<meta itemprop="datePublished"', but in other places it's 20150510
        'uploader': 'Airtek',
        'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
        'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
        'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
        'youtube_include_dash_manifest': True,
        'format': '135',  # bestvideo

        # Multifeed videos (multiple cameras), URL is for Main Camera
        'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
        'title': 'teamPGP: Rocket League Noob Stream',
        'description': 'md5:dc7872fb300e143831327f1bae3af010',
        'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
        'description': 'md5:dc7872fb300e143831327f1bae3af010',
        'upload_date': '20150721',
        'uploader': 'Beer Games Beer',
        'uploader_id': 'beergamesbeer',
        'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
        'description': 'md5:dc7872fb300e143831327f1bae3af010',
        'upload_date': '20150721',
        'uploader': 'Beer Games Beer',
        'uploader_id': 'beergamesbeer',
        'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
        'description': 'md5:dc7872fb300e143831327f1bae3af010',
        'upload_date': '20150721',
        'uploader': 'Beer Games Beer',
        'uploader_id': 'beergamesbeer',
        'title': 'teamPGP: Rocket League Noob Stream (zim)',
        'description': 'md5:dc7872fb300e143831327f1bae3af010',
        'upload_date': '20150721',
        'uploader': 'Beer Games Beer',
        'uploader_id': 'beergamesbeer',
        'skip_download': True,

        'url': 'http://vid.plus/FlRa-iH7PGw',
        'only_matching': True,

        # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
        'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
        'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
        'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
        'upload_date': '20151119',
        'uploader_id': 'IronSoulElf',
        'uploader': 'IronSoulElf',
        'skip_download': True,

        # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
        'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
        'only_matching': True,

    def __init__(self, *args, **kwargs):
        super(YoutubeIE, self).__init__(*args, **kwargs)
        self._player_cache = {}

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self.to_screen('%s: Downloading video info webpage' % video_id)

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self.to_screen('%s: Extracting video information' % video_id)

    def report_unavailable_format(self, video_id, format):
        """Report that the requested format is not available."""
        self.to_screen('%s: Format %s not available' % (video_id, format))

    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self.to_screen('RTMP download detected')

    def _signature_cache_id(self, example_sig):
        """ Return a string representation of a signature """
        return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
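    # For example (illustrative values only): a signature split into dot-separated
    # parts of lengths 8, 40 and 3 yields the cache id '8.40.3'; only the part
    # lengths matter, so different signatures with the same layout share one id.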

    def _extract_signature_function(self, video_id, player_url, example_sig):
        id_m = re.match(
            r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
            player_url)
        if not id_m:
            raise ExtractorError('Cannot identify player %r' % player_url)
        player_type = id_m.group('ext')
        player_id = id_m.group('id')

        # Read from filesystem cache
        func_id = '%s_%s_%s' % (
            player_type, player_id, self._signature_cache_id(example_sig))
        assert os.path.basename(func_id) == func_id

        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
        if cache_spec is not None:
            return lambda s: ''.join(s[i] for i in cache_spec)

        download_note = (
            'Downloading player %s' % player_url
            if self._downloader.params.get('verbose') else
            'Downloading %s player %s' % (player_type, player_id)
        )
        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            urlh = self._request_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            assert False, 'Invalid player type %r' % player_type

        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = res(test_string)
        cache_spec = [ord(c) for c in cache_res]

        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
        return res

    def _print_sig_code(self, func, example_sig):
        def gen_sig_code(idxs):
            def _genslice(start, end, step):
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)

            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            for i, prev in zip(idxs[1:], idxs[:-1]):
                    yield _genslice(start, prev, step)
                if i - prev in [-1, 1]:
            yield _genslice(start, i, step)

        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)

    def _parse_sig_js(self, jscode):
        funcname = self._search_regex(
            r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
            'Initial JS player signature function name')

        jsi = JSInterpreter(jscode)
        initial_function = jsi.extract_function(funcname)
        return lambda s: initial_function([s])

    def _parse_sig_swf(self, file_contents):
        swfi = SWFInterpreter(file_contents)
        TARGET_CLASSNAME = 'SignatureDecipher'
        searched_class = swfi.extract_class(TARGET_CLASSNAME)
        initial_function = swfi.extract_function(searched_class, 'decipher')
        return lambda s: initial_function([s])

    def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
        """Turn the encrypted s field into a working signature"""

        if player_url is None:
            raise ExtractorError('Cannot decrypt signature without player_url')

        if player_url.startswith('//'):
            player_url = 'https:' + player_url
        try:
            player_id = (player_url, self._signature_cache_id(s))
            if player_id not in self._player_cache:
                func = self._extract_signature_function(
                    video_id, player_url, s)
                self._player_cache[player_id] = func
            func = self._player_cache[player_id]
            if self._downloader.params.get('youtube_print_sig_code'):
                self._print_sig_code(func, s)
            return func(s)
        except Exception as e:
            tb = traceback.format_exc()
            raise ExtractorError(
                'Signature extraction failed: ' + tb, cause=e)

    def _get_subtitles(self, video_id, webpage):
        try:
            subs_doc = self._download_xml(
                'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
            return {}

        sub_lang_list = {}
        for track in subs_doc.findall('track'):
            lang = track.attrib['lang_code']
            if lang in sub_lang_list:
                continue
            sub_formats = []
            for ext in ['sbv', 'vtt', 'srt']:
                params = compat_urllib_parse.urlencode({
                    'name': track.attrib['name'].encode('utf-8'),
                })
                sub_formats.append({
                    'url': 'https://www.youtube.com/api/timedtext?' + params,
                })
            sub_lang_list[lang] = sub_formats
        if not sub_lang_list:
            self._downloader.report_warning('video doesn\'t have subtitles')
            return {}
        return sub_lang_list

    def _get_ytplayer_config(self, video_id, webpage):
        patterns = (
            # User data may contain arbitrary character sequences that may affect
            # JSON extraction with regex, e.g. when '};' is contained the second
            # regex won't capture the whole JSON. For now we work around this by
            # trying the more specific regex first; proper quoted-string handling,
            # to be implemented in the future, will replace this workaround (see
            # https://github.com/rg3/youtube-dl/issues/7468,
            # https://github.com/rg3/youtube-dl/pull/7599)
            r';ytplayer\.config\s*=\s*({.+?});ytplayer',
            r';ytplayer\.config\s*=\s*({.+?});',
        )
        config = self._search_regex(
            patterns, webpage, 'ytplayer.config', default=None)
        if config:
            return self._parse_json(
                uppercase_escape(config), video_id, fatal=False)
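    # When found, the parsed config is a dict whose 'args' entry carries the player
    # arguments consumed elsewhere in this extractor (e.g. 'ttsurl', 'timestamp',
    # 'url_encoded_fmt_stream_map'); callers check the return value for None before
    # indexing into it, since both the search and the JSON parse are non-fatal.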

    def _get_automatic_captions(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
           argument to speed up the process."""
        self.to_screen('%s: Looking for automatic captions' % video_id)
        player_config = self._get_ytplayer_config(video_id, webpage)
        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
        if not player_config:
            self._downloader.report_warning(err_msg)
            return {}
        try:
            args = player_config['args']
            caption_url = args['ttsurl']
            timestamp = args['timestamp']
            # We get the available subtitles
            list_params = compat_urllib_parse.urlencode({
            })
            list_url = caption_url + '&' + list_params
            caption_list = self._download_xml(list_url, video_id)
            original_lang_node = caption_list.find('track')
            if original_lang_node is None:
                self._downloader.report_warning('Video doesn\'t have automatic captions')
                return {}
            original_lang = original_lang_node.attrib['lang_code']
            caption_kind = original_lang_node.attrib.get('kind', '')

            sub_lang_list = {}
            for lang_node in caption_list.findall('target'):
                sub_lang = lang_node.attrib['lang_code']
                sub_formats = []
                for ext in ['sbv', 'vtt', 'srt']:
                    params = compat_urllib_parse.urlencode({
                        'lang': original_lang,
                        'kind': caption_kind,
                    })
                    sub_formats.append({
                        'url': caption_url + '&' + params,
                    })
                sub_lang_list[sub_lang] = sub_formats
            return sub_lang_list
        # An extractor error can be raised by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}

    @classmethod
    def extract_id(cls, url):
        mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)
        video_id = mobj.group(2)
        return video_id
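    # Example (illustrative): extract_id('https://www.youtube.com/watch?v=BaW_jenozKc')
    # returns 'BaW_jenozKc', i.e. group 2 of _VALID_URL, the bare 11-character video id.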

    def _extract_from_m3u8(self, manifest_url, video_id):
        url_map = {}

        def _get_urls(_manifest):
            lines = _manifest.split('\n')
            urls = filter(lambda l: l and not l.startswith('#'),
                          lines)
            return urls

        manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
        formats_urls = _get_urls(manifest)
        for format_url in formats_urls:
            itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
            url_map[itag] = format_url
        return url_map

    def _extract_annotations(self, video_id):
        url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
        return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')

    def _parse_dash_manifest(
            self, video_id, dash_manifest_url, player_url, age_gate, fatal=True):
        def decrypt_sig(mobj):
            s = mobj.group(1)
            dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
            return '/signature/%s' % dec_s
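        # decrypt_sig serves as the re.sub callback on the next line: an encrypted
        # '/s/<sig>' path component in the manifest URL is rewritten to
        # '/signature/<deciphered>' so the DASH manifest itself can be fetched
        # (illustrative description of the substitution performed below).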
        dash_manifest_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, dash_manifest_url)
        dash_doc = self._download_xml(
            dash_manifest_url, video_id,
            note='Downloading DASH manifest',
            errnote='Could not download DASH manifest',
            fatal=fatal)
        if dash_doc is False:
            return []

        formats = []
        for a in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}AdaptationSet'):
            mime_type = a.attrib.get('mimeType')
            for r in a.findall('{urn:mpeg:DASH:schema:MPD:2011}Representation'):
                url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
                if mime_type == 'text/vtt':
                    # TODO implement WebVTT downloading
                    continue
                elif mime_type.startswith('audio/') or mime_type.startswith('video/'):
                    segment_list = r.find('{urn:mpeg:DASH:schema:MPD:2011}SegmentList')
                    format_id = r.attrib['id']
                    video_url = url_el.text
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
                    f = {
                        'format_id': format_id,
                        'url': video_url,
                        'width': int_or_none(r.attrib.get('width')),
                        'height': int_or_none(r.attrib.get('height')),
                        'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
                        'asr': int_or_none(r.attrib.get('audioSamplingRate')),
                        'filesize': filesize,
                        'fps': int_or_none(r.attrib.get('frameRate')),
                    }
                    if segment_list is not None:
                        f.update({
                            'initialization_url': segment_list.find('{urn:mpeg:DASH:schema:MPD:2011}Initialization').attrib['sourceURL'],
                            'segment_urls': [segment.attrib.get('media') for segment in segment_list.findall('{urn:mpeg:DASH:schema:MPD:2011}SegmentURL')],
                            'protocol': 'http_dash_segments',
                        })
                    try:
                        existing_format = next(
                            fo for fo in formats
                            if fo['format_id'] == format_id)
                    except StopIteration:
                        full_info = self._formats.get(format_id, {}).copy()
                        full_info.update(f)
                        codecs = r.attrib.get('codecs')
                        if codecs:
                            if full_info.get('acodec') == 'none' and 'vcodec' not in full_info:
                                full_info['vcodec'] = codecs
                            elif full_info.get('vcodec') == 'none' and 'acodec' not in full_info:
                                full_info['acodec'] = codecs
                        formats.append(full_info)
                    else:
                        existing_format.update(f)
                else:
                    self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
        return formats

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})

        proto = (
            'http' if self._downloader.params.get('prefer_insecure', False)
            else 'https')

        start_time = None
        end_time = None
        parsed_url = compat_urllib_parse_urlparse(url)
        for component in [parsed_url.fragment, parsed_url.query]:
            query = compat_parse_qs(component)
            if start_time is None and 't' in query:
                start_time = parse_duration(query['t'][0])
            if start_time is None and 'start' in query:
                start_time = parse_duration(query['start'][0])
            if end_time is None and 'end' in query:
                end_time = parse_duration(query['end'][0])

        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
        mobj = re.search(self._NEXT_URL_RE, url)
        if mobj is not None:
            url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
        video_id = self.extract_id(url)

        url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
        video_webpage = self._download_webpage(url, video_id)

        # Attempt to extract SWF player URL
        mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
        if mobj is not None:
            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
        else:
            player_url = None

        dash_mpds = []

        def add_dash_mpd(video_info):
            dash_mpd = video_info.get('dashmpd')
            if dash_mpd and dash_mpd[0] not in dash_mpds:
                dash_mpds.append(dash_mpd[0])

        embed_webpage = None
        is_live = None
        if re.search(r'player-age-gate-content">', video_webpage) is not None:
            age_gate = True
            # We simulate the access to the video from www.youtube.com/v/{video_id}
            # this can be viewed without logging in to Youtube
            url = proto + '://www.youtube.com/embed/%s' % video_id
            embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
            data = compat_urllib_parse.urlencode({
                'video_id': video_id,
                'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                'sts': self._search_regex(
                    r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
            })
            video_info_url = proto + '://www.youtube.com/get_video_info?' + data
            video_info_webpage = self._download_webpage(
                video_info_url, video_id,
                note='Refetching age-gated info webpage',
                errnote='unable to download video info webpage')
            video_info = compat_parse_qs(video_info_webpage)
            add_dash_mpd(video_info)
        else:
            age_gate = False
            video_info = None
            # Try looking directly into the video webpage
            ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
            if ytplayer_config:
                args = ytplayer_config['args']
                if args.get('url_encoded_fmt_stream_map'):
                    # Convert to the same format returned by compat_parse_qs
                    video_info = dict((k, [v]) for k, v in args.items())
                    add_dash_mpd(video_info)
                if args.get('livestream') == '1' or args.get('live_playback') == 1:
                    is_live = True
            if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
                # We also try looking in get_video_info since it may contain a different dashmpd
                # URL that points to a DASH manifest with a possibly different itag set (some itags
                # are missing from the DASH manifest pointed to by the webpage's dashmpd, others
                # from the one pointed to by get_video_info's dashmpd).
                # The general idea is to take a union of itags of both DASH manifests (for an example
                # of a video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
                self.report_video_info_webpage_download(video_id)
                for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
                    video_info_url = (
                        '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                        % (proto, video_id, el_type))
                    video_info_webpage = self._download_webpage(
                        video_info_url,
                        video_id, note=False,
                        errnote='unable to download video info webpage')
                    get_video_info = compat_parse_qs(video_info_webpage)
                    if get_video_info.get('use_cipher_signature') != ['True']:
                        add_dash_mpd(get_video_info)
                    if not video_info:
                        video_info = get_video_info
                    if 'token' in get_video_info:
                        # Different get_video_info requests may report different results, e.g.
                        # some may report video unavailability, but some may serve it without
                        # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
                        # the original webpage as well as el=info and el=embedded get_video_info
                        # requests report video unavailability due to geo restriction while
                        # el=detailpage succeeds and returns valid data). This is probably
                        # due to YouTube measures against IP ranges of hosting providers.
                        # Work around this by preferring the first successful video_info containing
                        # the token if no such video_info has been found yet.
                        if 'token' not in video_info:
                            video_info = get_video_info
                        break
        if 'token' not in video_info:
            if 'reason' in video_info:
                if 'The uploader has not made this video available in your country.' in video_info['reason']:
                    regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
                    if regions_allowed:
                        raise ExtractorError('YouTube said: This video is available in %s only' % (
                            ', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
                            expected=True)
                raise ExtractorError(
                    'YouTube said: %s' % video_info['reason'][0],
                    expected=True, video_id=video_id)
            else:
                raise ExtractorError(
                    '"token" parameter not in video info for unknown reason',
                    video_id=video_id)

        if 'title' in video_info:
            video_title = video_info['title'][0]
        else:
            self._downloader.report_warning('Unable to extract video title')
            video_title = '_'

        video_description = get_element_by_id("eow-description", video_webpage)
        if video_description:
            video_description = re.sub(r'''(?x)
                (?:[a-zA-Z-]+="[^"]+"\s+)*?
                (?:[a-zA-Z-]+="[^"]+"\s+)*?
                class="yt-uix-redirect-link"\s*>
            ''', r'\1', video_description)
            video_description = clean_html(video_description)
        else:
            fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
            if fd_mobj:
                video_description = unescapeHTML(fd_mobj.group(1))
            else:
                video_description = ''

        if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
            if not self._downloader.params.get('noplaylist'):
                entries = []
                feed_ids = []
                multifeed_metadata_list = compat_urllib_parse_unquote_plus(video_info['multifeed_metadata_list'][0])
                for feed in multifeed_metadata_list.split(','):
                    feed_data = compat_parse_qs(feed)
                    entries.append({
                        '_type': 'url_transparent',
                        'ie_key': 'Youtube',
                        'url': smuggle_url(
                            '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
                            {'force_singlefeed': True}),
                        'title': '%s (%s)' % (video_title, feed_data['title'][0]),
                    })
                    feed_ids.append(feed_data['id'][0])
                self.to_screen(
                    'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
                    % (', '.join(feed_ids), video_id))
                return self.playlist_result(entries, video_id, video_title, video_description)
            self.to_screen('Downloading just video %s because of --no-playlist' % video_id)

        if 'view_count' in video_info:
            view_count = int(video_info['view_count'][0])
        else:
            view_count = None

        # Check for "rental" videos
        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
            raise ExtractorError('"rental" videos not supported')

        # Start extracting information
        self.report_information_extraction(video_id)

        if 'author' not in video_info:
            raise ExtractorError('Unable to extract uploader name')
        video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])

        video_uploader_id = None
        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
        if mobj is not None:
            video_uploader_id = mobj.group(1)
        else:
            self._downloader.report_warning('unable to extract uploader nickname')

        # We try first to get a high quality image:
        m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
                            video_webpage, re.DOTALL)
        if m_thumb is not None:
            video_thumbnail = m_thumb.group(1)
        elif 'thumbnail_url' not in video_info:
            self._downloader.report_warning('unable to extract video thumbnail')
            video_thumbnail = None
        else:   # don't panic if we can't find it
            video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])

        upload_date = self._html_search_meta(
            'datePublished', video_webpage, 'upload date', default=None)
        if not upload_date:
            upload_date = self._search_regex(
                [r'(?s)id="eow-date.*?>(.*?)</span>',
                 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
                video_webpage, 'upload date', default=None)
            if upload_date:
                upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
        upload_date = unified_strdate(upload_date)

        m_cat_container = self._search_regex(
            r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
            video_webpage, 'categories', default=None)
        if m_cat_container:
            category = self._html_search_regex(
                r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
                default=None)
            video_categories = None if category is None else [category]
        else:
            video_categories = None

        video_tags = [
            unescapeHTML(m.group('content'))
            for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]

        def _extract_count(count_name):
            return str_to_int(self._search_regex(
                r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
                % re.escape(count_name),
                video_webpage, count_name, default=None))

        like_count = _extract_count('like')
        dislike_count = _extract_count('dislike')

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, video_webpage)
        automatic_captions = self.extract_automatic_captions(video_id, video_webpage)

        if 'length_seconds' not in video_info:
            self._downloader.report_warning('unable to extract video duration')
            video_duration = None
        else:
            video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))

        # annotations
        video_annotations = None
        if self._downloader.params.get('writeannotations', False):
            video_annotations = self._extract_annotations(video_id)

        def _map_to_format_list(urlmap):
            formats = []
            for itag, video_real_url in urlmap.items():
                dct = {
                    'url': video_real_url,
                    'player_url': player_url,
                }
                if itag in self._formats:
                    dct.update(self._formats[itag])
                formats.append(dct)
            return formats

        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
            self.report_rtmp_download()
            formats = [{
                'format_id': '_rtmp',
                'url': video_info['conn'][0],
                'player_url': player_url,
            }]
        elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
            if 'rtmpe%3Dyes' in encoded_url_map:
                raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
            formats = []
            for url_data_str in encoded_url_map.split(','):
                url_data = compat_parse_qs(url_data_str)
                if 'itag' not in url_data or 'url' not in url_data:
                    continue
                format_id = url_data['itag'][0]
                url = url_data['url'][0]

                if 'sig' in url_data:
                    url += '&signature=' + url_data['sig'][0]
                elif 's' in url_data:
                    encrypted_sig = url_data['s'][0]
                    ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'

                    jsplayer_url_json = self._search_regex(
                        ASSETS_RE,
                        embed_webpage if age_gate else video_webpage,
                        'JS player URL (1)', default=None)
                    if not jsplayer_url_json and not age_gate:
                        # We need the embed website after all
                        if embed_webpage is None:
                            embed_url = proto + '://www.youtube.com/embed/%s' % video_id
                            embed_webpage = self._download_webpage(
                                embed_url, video_id, 'Downloading embed webpage')
                        jsplayer_url_json = self._search_regex(
                            ASSETS_RE, embed_webpage, 'JS player URL')

                    player_url = json.loads(jsplayer_url_json)
                    if player_url is None:
                        player_url_json = self._search_regex(
                            r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
                            video_webpage, 'age gate player URL')
                        player_url = json.loads(player_url_json)

                    if self._downloader.params.get('verbose'):
                        if player_url is None:
                            player_version = 'unknown'
                            player_desc = 'unknown'
                        else:
                            if player_url.endswith('swf'):
                                player_version = self._search_regex(
                                    r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
                                    'flash player', fatal=False)
                                player_desc = 'flash player %s' % player_version
                            else:
                                player_version = self._search_regex(
                                    [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
                                    player_url,
                                    'html5 player', fatal=False)
                                player_desc = 'html5 player %s' % player_version

                        parts_sizes = self._signature_cache_id(encrypted_sig)
                        self.to_screen('{%s} signature length %s, %s' %
                                       (format_id, parts_sizes, player_desc))

                    signature = self._decrypt_signature(
                        encrypted_sig, video_id, player_url, age_gate)
                    url += '&signature=' + signature
                if 'ratebypass' not in url:
                    url += '&ratebypass=yes'

                # Some itags are not included in DASH manifest thus corresponding formats will
                # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
                # Trying to extract metadata from url_encoded_fmt_stream_map entry.
                mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
                width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
                dct = {
                    'format_id': format_id,
                    'url': url,
                    'player_url': player_url,
                    'filesize': int_or_none(url_data.get('clen', [None])[0]),
                    'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
                    'width': width,
                    'height': height,
                    'fps': int_or_none(url_data.get('fps', [None])[0]),
                    'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
                }
                type_ = url_data.get('type', [None])[0]
                if type_:
                    type_split = type_.split(';')
                    kind_ext = type_split[0].split('/')
                    if len(kind_ext) == 2:
                        kind, ext = kind_ext
                        dct['ext'] = ext
                        if kind in ('audio', 'video'):
                            codecs = None
                            for mobj in re.finditer(
                                    r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
                                if mobj.group('key') == 'codecs':
                                    codecs = mobj.group('val')
                                    break
                            if codecs:
                                codecs = codecs.split(',')
                                if len(codecs) == 2:
                                    acodec, vcodec = codecs[0], codecs[1]
                                else:
                                    acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
                                dct.update({
                                    'acodec': acodec,
                                    'vcodec': vcodec,
                                })
                if format_id in self._formats:
                    dct.update(self._formats[format_id])
                formats.append(dct)
        elif video_info.get('hlsvp'):
            manifest_url = video_info['hlsvp'][0]
            url_map = self._extract_from_m3u8(manifest_url, video_id)
            formats = _map_to_format_list(url_map)
        else:
            raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')

        # Look for the DASH manifest
        if self._downloader.params.get('youtube_include_dash_manifest', True):
            dash_mpd_fatal = True
            dash_formats = {}
            for dash_manifest_url in dash_mpds:
                try:
                    for df in self._parse_dash_manifest(
                            video_id, dash_manifest_url, player_url, age_gate, dash_mpd_fatal):
                        # Do not overwrite DASH format found in some previous DASH manifest
                        if df['format_id'] not in dash_formats:
                            dash_formats[df['format_id']] = df
                    # Additional DASH manifests may end up in HTTP Error 403 therefore
                    # allow them to fail without bug report message if we already have
                    # some DASH manifest succeeded. This is a temporary workaround to reduce
                    # the burst of bug reports until we figure out the reason and whether it
                    # can be fixed at all.
                    dash_mpd_fatal = False
                except (ExtractorError, KeyError) as e:
                    self.report_warning(
                        'Skipping DASH manifest: %r' % e, video_id)
            # Remove the formats we found through non-DASH, they
            # contain less info and it can be wrong, because we use
            # fixed values (for example the resolution). See
            # https://github.com/rg3/youtube-dl/issues/5774 for an
            # example.
            formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
            formats.extend(dash_formats.values())

        # Check for malformed aspect ratio
        stretched_m = re.search(
            r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
            video_webpage)
        if stretched_m:
            ratio = float(stretched_m.group('w')) / float(stretched_m.group('h'))
            for f in formats:
                if f.get('vcodec') != 'none':
                    f['stretched_ratio'] = ratio

        self._sort_formats(formats)

        return {
            'id': video_id,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': upload_date,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'categories': video_categories,
            'tags': video_tags,
            'subtitles': video_subtitles,
            'automatic_captions': automatic_captions,
            'duration': video_duration,
            'age_limit': 18 if age_gate else 0,
            'annotations': video_annotations,
            'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
            'formats': formats,
            'start_time': start_time,
            'end_time': end_time,
        }


class YoutubePlaylistIE(YoutubeBaseInfoExtractor, YoutubePlaylistBaseInfoExtractor):
    IE_DESC = 'YouTube.com playlists'
    _VALID_URL = r"""(?x)(?:
                        (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
                        \? (?:.*?&)*? (?:p|a|list)=
                        (?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
                        # Top tracks, they can also include dots
                        ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
                     )"""
    _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
    _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
    IE_NAME = 'youtube:playlist'

        'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
        'title': 'ytdl test PL',
        'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
        'playlist_count': 3,

        'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
        'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
        'title': 'YDL_Empty_List',
        'playlist_count': 0,

        'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
        'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
        'title': '29C3: Not my department',
        'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
        'playlist_count': 95,

        'note': 'issue #673',
        'url': 'PLBB231211A4F62143',
        'title': '[OLD]Team Fortress 2 (Class-based LP)',
        'id': 'PLBB231211A4F62143',
        'playlist_mincount': 26,

        'note': 'Large playlist',
        'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
        'title': 'Uploads from Cauchemar',
        'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
        'playlist_mincount': 799,

        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'title': 'YDL_safe_search',
        'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'playlist_count': 2,

        'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
        'playlist_count': 4,
        'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',

        'note': 'Embedded SWF player',
        'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
        'playlist_count': 4,
        'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',

        'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
        'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
        'title': 'Uploads from Interstellar Movie',
        'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
        'playlist_mincount': 21,
1627 def _real_initialize(self
):
1630 def _extract_mix(self
, playlist_id
):
1631 # The mixes are generated from a single video
1632 # the id of the playlist is just 'RD' + video_id
1633 url
= 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id
[-11:], playlist_id
)
1634 webpage
= self
._download
_webpage
(
1635 url
, playlist_id
, 'Downloading Youtube mix')
1636 search_title
= lambda class_name
: get_element_by_attribute('class', class_name
, webpage
)
1638 search_title('playlist-title') or
1639 search_title('title long-title') or
1640 search_title('title'))
1641 title
= clean_html(title_span
)
1642 ids
= orderedSet(re
.findall(
1643 r
'''(?xs)data-video-username=".*?".*?
1644 href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re
.escape(playlist_id
),
1646 url_results
= self
._ids
_to
_results
(ids
)
1648 return self
.playlist_result(url_results
, playlist_id
, title
)

    def _extract_playlist(self, playlist_id):
        url = self._TEMPLATE_URL % playlist_id
        page = self._download_webpage(url, playlist_id)

        for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
            match = match.strip()
            # Check if the playlist exists or is private
            if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
                raise ExtractorError(
                    'The playlist doesn\'t exist or is private, use --username or '
                    '--netrc to access it.',
                    expected=True)
            elif re.match(r'[^<]*Invalid parameters[^<]*', match):
                raise ExtractorError(
                    'Invalid parameters. Maybe URL is incorrect.',
                    expected=True)
            elif re.match(r'[^<]*Choose your language[^<]*', match):
                continue
            else:
                self.report_warning('Youtube gives an alert message: ' + match)

        playlist_title = self._html_search_regex(
            r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
            page, 'title')

        return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)

    def _real_extract(self, url):
        # Extract playlist id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)
        playlist_id = mobj.group(1) or mobj.group(2)

        # Check if it's a video-specific URL
        query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        if 'v' in query_dict:
            video_id = query_dict['v'][0]
            if self._downloader.params.get('noplaylist'):
                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, 'Youtube', video_id=video_id)
            else:
                self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))

        if playlist_id.startswith('RD') or playlist_id.startswith('UL'):
            # Mixes require a custom extraction process
            return self._extract_mix(playlist_id)

        return self._extract_playlist(playlist_id)


class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
    IE_DESC = 'YouTube.com channels'
    _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
    _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
    _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
    IE_NAME = 'youtube:channel'
    _TESTS = [{
        'note': 'paginated channel',
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
        'playlist_mincount': 91,
        'info_dict': {
            'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
            'title': 'Uploads from lex will',
        },
    }, {
        'note': 'Age restricted channel',
        # from https://www.youtube.com/user/DeusExOfficial
        'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
        'playlist_mincount': 64,
        'info_dict': {
            'id': 'UUs0ifCMCm1icqRbqhUINa0w',
            'title': 'Uploads from Deus Ex',
        },
    }]

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        url = self._TEMPLATE_URL % channel_id

        # Channel-by-page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778).
        # Work around this by extracting as a playlist if we managed to obtain the channel playlist URL;
        # otherwise fall back on channel-by-page extraction.
        channel_page = self._download_webpage(
            url + '?view=57', channel_id,
            'Downloading channel page', fatal=False)
        if channel_page is False:
            channel_playlist_id = False
        else:
            channel_playlist_id = self._html_search_meta(
                'channelId', channel_page, 'channel id', default=None)
            if not channel_playlist_id:
                channel_playlist_id = self._search_regex(
                    r'data-(?:channel-external-|yt)id="([^"]+)"',
                    channel_page, 'channel id', default=None)
        if channel_playlist_id and channel_playlist_id.startswith('UC'):
            playlist_id = 'UU' + channel_playlist_id[2:]
            return self.url_result(
                compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')

        channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
        autogenerated = re.search(r'''(?x)
                class="[^"]*?(?:
                    channel-header-autogenerated-label|
                    yt-channel-title-autogenerated
                )[^"]*"''', channel_page) is not None

        if autogenerated:
            # The videos are contained in a single page;
            # the ajax pages can't be used, they are empty.
            entries = [
                self.url_result(
                    video_id, 'Youtube', video_id=video_id,
                    video_title=video_title)
                for video_id, video_title in self.extract_videos_from_page(channel_page)]
            return self.playlist_result(entries, channel_id)

        return self.playlist_result(self._entries(channel_page, channel_id), channel_id)


class YoutubeUserIE(YoutubeChannelIE):
    IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
    IE_NAME = 'youtube:user'

    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
        'playlist_mincount': 320,
        'info_dict': {
            'title': 'TheLinuxFoundation',
        },
    }, {
        'url': 'ytuser:phihag',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Don't return True if the url can be extracted with other youtube
        # extractors; the regex is too permissive and it would match.
        other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in other_ies):
            return False
        else:
            return super(YoutubeUserIE, cls).suitable(url)


class YoutubeUserPlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
    IE_DESC = 'YouTube.com user playlists'
    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/user/(?P<id>[^/]+)/playlists'
    IE_NAME = 'youtube:user:playlists'
    _TESTS = [{
        'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
        'playlist_mincount': 4,
        'info_dict': {
            'id': 'ThirstForScience',
            'title': 'Thirst for Science',
        },
    }, {
        # with "Load more" button
        'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
        'playlist_mincount': 70,
        'info_dict': {
            'id': 'igorkle1',
            'title': 'Игорь Клейнер',
        },
    }]


class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
    IE_DESC = 'YouTube.com searches'
    # there doesn't appear to be a real limit; for example, if you search for
    # 'python' you get more than 8,000,000 results
    _MAX_RESULTS = float('inf')
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'
    _EXTRA_QUERY_ARGS = {}

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        videos = []
        limit = n

        for pagenum in itertools.count(1):
            url_query = {
                'search_query': query.encode('utf-8'),
                'page': pagenum,
                'spf': 'navigate',
            }
            url_query.update(self._EXTRA_QUERY_ARGS)
            result_url = 'https://www.youtube.com/results?' + compat_urllib_parse.urlencode(url_query)
            data = self._download_json(
                result_url, video_id='query "%s"' % query,
                note='Downloading page %s' % pagenum,
                errnote='Unable to download API page')
            html_content = data[1]['body']['content']

            if 'class="search-message' in html_content:
                raise ExtractorError(
                    '[youtube] No video results', expected=True)

            new_videos = self._ids_to_results(orderedSet(re.findall(
                r'href="/watch\?v=(.{11})', html_content)))
            videos += new_videos
            if not new_videos or len(videos) > limit:
                break

        if len(videos) > n:
            videos = videos[:n]
        return self.playlist_result(videos, query)


class YoutubeSearchDateIE(YoutubeSearchIE):
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
    _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}


class YoutubeSearchURLIE(InfoExtractor):
    IE_DESC = 'YouTube.com search URLs'
    IE_NAME = 'youtube:search_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
    _TESTS = [{
        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
        'playlist_mincount': 5,
        'info_dict': {
            'title': 'youtube-dl test video',
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        query = compat_urllib_parse_unquote_plus(mobj.group('query'))

        webpage = self._download_webpage(url, query)
        result_code = self._search_regex(
            r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML')

        part_codes = re.findall(
            r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code)
        entries = []
        for part_code in part_codes:
            part_title = self._html_search_regex(
                [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
            part_url_snippet = self._html_search_regex(
                r'(?s)href="([^"]+)"', part_code, 'item URL')
            part_url = compat_urlparse.urljoin(
                'https://www.youtube.com/', part_url_snippet)
            entries.append({
                '_type': 'url',
                'url': part_url,
                'title': part_title,
            })

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': query,
        }


class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
    IE_DESC = 'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
    IE_NAME = 'youtube:show'
    _TESTS = [{
        'url': 'https://www.youtube.com/show/airdisasters',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'airdisasters',
            'title': 'Air Disasters',
        },
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        return super(YoutubeShowIE, self)._real_extract(
            'https://www.youtube.com/show/%s/playlists' % playlist_id)


class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for feed extractors
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    _LOGIN_REQUIRED = True

    @property
    def IE_NAME(self):
        return 'youtube:%s' % self._FEED_NAME

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        page = self._download_webpage(
            'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)

        # The extraction process is the same as for playlists, but the regex
        # for the video ids doesn't contain an index
        ids = []
        more_widget_html = content_html = page
        for page_num in itertools.count(1):
            matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)

            # The 'recommended' feed has an infinite 'load more' and each new portion spins
            # the same videos in (sometimes) slightly different order, so we check
            # for uniqueness and break when a portion has no new videos.
            # list() around filter() keeps this correct on Python 3, where filter is lazy.
            new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
            if not new_ids:
                break

            ids.extend(new_ids)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        return self.playlist_result(
            self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)


class YoutubeWatchLaterIE(YoutubePlaylistIE):
    IE_NAME = 'youtube:watchlater'
    IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|playlist\?list=WL)|:ytwatchlater'

    _TESTS = []  # override PlaylistIE tests

    def _real_extract(self, url):
        return self._extract_playlist('WL')


class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    IE_NAME = 'youtube:favorites'
    IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
        return self.url_result(playlist_id, 'YoutubePlaylist')


class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = 'Youtube Recommended videos'


class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _PLAYLIST_TITLE = 'Youtube Subscriptions'


class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    _PLAYLIST_TITLE = 'Youtube History'


class YoutubeTruncatedURLIE(InfoExtractor):
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
            attribution_link\?a=[^&]+
        )
        $
    '''

    _TESTS = [{
        'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'http://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like  youtube-dl '
            '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply  youtube-dl BaW_jenozKc .',
            expected=True)


class YoutubeTruncatedIDIE(InfoExtractor):
    IE_NAME = 'youtube:truncated_id'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'

    _TESTS = [{
        'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        raise ExtractorError(
            'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
            expected=True)