from __future__ import unicode_literals

import itertools
import json
import os.path
import random
import re
import time
import traceback

from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
    compat_chr,
    compat_parse_qs,
    compat_str,
    compat_urllib_parse_unquote,
    compat_urllib_parse_unquote_plus,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urlparse,
)
from ..utils import (
    clean_html,
    error_to_compat_str,
    ExtractorError,
    get_element_by_attribute,
    get_element_by_id,
    int_or_none,
    orderedSet,
    parse_duration,
    remove_quotes,
    remove_start,
    smuggle_url,
    str_to_int,
    try_get,
    unescapeHTML,
    unified_strdate,
    unsmuggle_url,
    uppercase_escape,
    urlencode_postdata,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'

    _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
    _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
    _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'

    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    _PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL)[0-9A-Za-z-_]{10,}'
    def _set_language(self):
        self._set_cookie(
            '.youtube.com', 'PREF', 'f1=50000000&hl=en',
            # YouTube sets the expire time to about two months
            expire_time=time.time() + 2 * 30 * 24 * 3600)
    def _ids_to_results(self, ids):
        return [
            self.url_result(vid_id, 'Youtube', video_id=vid_id)
            for vid_id in ids]
    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            return False

        login_form = self._hidden_inputs(login_page)
        def req(url, f_req, note, errnote):
            data = login_form.copy()
            data.update({
                'checkConnection': 'youtube',
                'checkedDomains': 'youtube',
                'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
                'f.req': json.dumps(f_req),
                'flowName': 'GlifWebSignIn',
                'flowEntry': 'ServiceLogin',
            })
            return self._download_json(
                url, None, note=note, errnote=errnote,
                transform_source=lambda s: re.sub(r'^[^[]*', '', s),
                fatal=False,
                data=urlencode_postdata(data), headers={
                    'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
                    'Google-Accounts-XSRF': 1,
                })

        def warn(message):
            self._downloader.report_warning(message)
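
        # The Google sign-in endpoints exchange opaque, positional JSON arrays
        # ("f.req" payloads). The nested lists built below mirror what the web
        # sign-in form sends; most positions have no documented meaning.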
        lookup_req = [
            username,
            None, [], None, 'US', None, None, 2, False, True,
            [
                None, None,
                [2, 1, None, 1,
                 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
                 None, [], 4],
                1, [None, None, []], None, None, None, True
            ],
            username,
        ]

        lookup_results = req(
            self._LOOKUP_URL, lookup_req,
            'Looking up account info', 'Unable to look up account info')

        if lookup_results is False:
            return False

        user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
        if not user_hash:
            warn('Unable to extract user hash')
            return False

        challenge_req = [
            user_hash,
            None, 1, None, [1, None, None, None, [password, None, True]],
            [
                None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
                1, [None, None, []], None, None, None, True
            ]]

        challenge_results = req(
            self._CHALLENGE_URL, challenge_req,
            'Logging in', 'Unable to log in')
        if challenge_results is False:
            return False

        login_res = try_get(challenge_results, lambda x: x[0][5], list)
        if login_res:
            login_msg = try_get(login_res, lambda x: x[5], compat_str)
            warn(
                'Unable to login: %s' % 'Invalid password'
                if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
            return False

        res = try_get(challenge_results, lambda x: x[0][-1], list)
        if not res:
            warn('Unable to extract result entry')
            return False
        tfa = try_get(res, lambda x: x[0][0], list)
        if tfa:
            tfa_str = try_get(tfa, lambda x: x[2], compat_str)
            if tfa_str == 'TWO_STEP_VERIFICATION':
                # SEND_SUCCESS - TFA code has been successfully sent to phone
                # QUOTA_EXCEEDED - reached the limit of TFA codes
                status = try_get(tfa, lambda x: x[5], compat_str)
                if status == 'QUOTA_EXCEEDED':
                    warn('Exceeded the limit of TFA codes, try later')
                    return False

                tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
                if not tl:
                    warn('Unable to extract TL')
                    return False

                tfa_code = self._get_tfa_info('2-step verification code')

                if not tfa_code:
                    warn(
                        'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
                        '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                    return False

                tfa_code = remove_start(tfa_code, 'G-')

                tfa_req = [
                    user_hash, None, 2, None,
                    [
                        9, None, None, None, None, None, None, None,
                        [None, tfa_code, True, 2]
                    ]]

                tfa_results = req(
                    self._TFA_URL.format(tl), tfa_req,
                    'Submitting TFA code', 'Unable to submit TFA code')
                if tfa_results is False:
                    return False

                tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
                if tfa_res:
                    tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
                    warn(
                        'Unable to finish TFA: %s' % 'Invalid TFA code'
                        if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
                    return False

                check_cookie_url = try_get(
                    tfa_results, lambda x: x[0][-1][2], compat_str)
        else:
            check_cookie_url = try_get(res, lambda x: x[2], compat_str)

        if not check_cookie_url:
            warn('Unable to extract CheckCookie URL')
            return False
        check_cookie_results = self._download_webpage(
            check_cookie_url, None, 'Checking cookie', fatal=False)

        if check_cookie_results is False:
            return False

        if 'https://myaccount.google.com/' not in check_cookie_results:
            warn('Unable to log in')
            return False

        return True

    def _real_initialize(self):
        if self._downloader is None:
            return
        self._set_language()
        if not self._login():
            return
class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
    # Extract entries from page with "Load more" button
    def _entries(self, page, playlist_id):
        more_widget_html = content_html = page
        for page_num in itertools.count(1):
            for entry in self._process_page(content_html):
                yield entry

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            if not content_html.strip():
                # Some webpages show a "Load more" button but they don't
                # have more videos
                break
            more_widget_html = more['load_more_widget_html']
class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
    def _process_page(self, content):
        for video_id, video_title in self.extract_videos_from_page(content):
            yield self.url_result(video_id, 'Youtube', video_id, video_title)

    def extract_videos_from_page(self, page):
        ids_in_page = []
        titles_in_page = []
        for mobj in re.finditer(self._VIDEO_RE, page):
            # The link with index 0 is not the first video of the playlist (not sure if still actual)
            if 'index' in mobj.groupdict() and mobj.group('id') == '0':
                continue
            video_id = mobj.group('id')
            video_title = unescapeHTML(mobj.group('title'))
            if video_title:
                video_title = video_title.strip()
            try:
                idx = ids_in_page.index(video_id)
                if video_title and not titles_in_page[idx]:
                    titles_in_page[idx] = video_title
            except ValueError:
                ids_in_page.append(video_id)
                titles_in_page.append(video_title)
        return zip(ids_in_page, titles_in_page)
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
    def _process_page(self, content):
        for playlist_id in orderedSet(re.findall(
                r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
                content)):
            yield self.url_result(
                'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        title = self._og_search_title(webpage, fatal=False)
        return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
class YoutubeIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube.com'
    _VALID_URL = r"""(?x)^
                     (
                         (?:https?://|//)                                    # http(s):// or protocol-independent URL
                         (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                            (?:www\.)?deturl\.com/www\.youtube\.com/|
                            (?:www\.)?pwnyoutube\.com/|
                            (?:www\.)?yourepeat\.com/|
                            tube\.majestyc\.net/|
                            youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                         (?:                                                  # the various things that can precede the ID:
                             (?:(?:v|embed|e)/(?!videoseries))                # v/ or embed/ or e/
                             |(?:                                             # or the v= param in all its forms
                                 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                                 (?:.*?[&;])??                                # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
                                 v=
                             )
                         ))
                         |(?:
                            youtu\.be|                                        # just youtu.be/xxxx
                            vid\.plus|                                        # or vid.plus/xxxx
                            zwearz\.com/watch|                                # or zwearz.com/watch/xxxx
                         )/
                         |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                         )
                     )?                                                       # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]{11})                                      # here is it! the YouTube video ID
                     (?!.*?\blist=
                        (?:
                            %(playlist_id)s|                                  # combined list/video URLs are handled by the playlist IE
                            WL                                                # WL are handled by the watch later IE
                        )
                     )
                     (?(1).+)?                                                # if we found the ID, everything can follow
                     $""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
        '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
        '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
        '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
        '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
        '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
        '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
        '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},

        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},

        # Apple HTTP Live Streaming
        '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},

        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'},  # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
        '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
        '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},

        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
        '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
        '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
        '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
        '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},

        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
        '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
        '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},

        '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
        '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},

        # Dash webm audio with opus inside
        '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
        '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
        '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},

        '_rtmp': {'protocol': 'rtmp'},
    }
    _SUBTITLE_FORMATS = ('ttml', 'vtt')
    _TESTS = [
        {
            'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
            'info_dict': {
                'title': 'youtube-dl test video "\'/\\ä↭𝕐',
                'uploader': 'Philipp Hagemeister',
                'uploader_id': 'phihag',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
                'upload_date': '20121002',
                'license': 'Standard YouTube License',
                'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
                'categories': ['Science & Technology'],
                'tags': ['youtube-dl'],
                'dislike_count': int,
            }
        },
        {
            'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
            'note': 'Test generic use_cipher_signature video (#897)',
            'info_dict': {
                'upload_date': '20120506',
                'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
                'alt_title': 'I Love It (feat. Charli XCX)',
                'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
                'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
                         'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
                         'iconic ep', 'iconic', 'love', 'it'],
                'uploader': 'Icona Pop',
                'uploader_id': 'IconaPop',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
                'license': 'Standard YouTube License',
                'creator': 'Icona Pop',
            }
        },
        {
            'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
            'note': 'Test VEVO video with age protection (#956)',
            'info_dict': {
                'upload_date': '20130703',
                'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
                'alt_title': 'Tunnel Vision',
                'description': 'md5:64249768eec3bc4276236606ea996373',
                'uploader': 'justintimberlakeVEVO',
                'uploader_id': 'justintimberlakeVEVO',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
                'license': 'Standard YouTube License',
                'creator': 'Justin Timberlake',
            }
        },
        {
            'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
            'note': 'Embed-only video (#1746)',
            'info_dict': {
                'upload_date': '20120608',
                'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
                'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
                'uploader': 'SET India',
                'uploader_id': 'setindia',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
                'license': 'Standard YouTube License',
            }
        },
        {
            'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
            'note': 'Use the first video ID in the URL',
            'info_dict': {
                'title': 'youtube-dl test video "\'/\\ä↭𝕐',
                'uploader': 'Philipp Hagemeister',
                'uploader_id': 'phihag',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
                'upload_date': '20121002',
                'license': 'Standard YouTube License',
                'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
                'categories': ['Science & Technology'],
                'tags': ['youtube-dl'],
                'dislike_count': int,
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
            'note': '256k DASH audio (format 141) via DASH manifest',
            'info_dict': {
                'upload_date': '20121002',
                'uploader_id': '8KVIDEO',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
                'uploader': '8KVIDEO',
                'license': 'Standard YouTube License',
                'title': 'UHDTV TEST 8K VIDEO.mp4'
            },
            'params': {
                'youtube_include_dash_manifest': True,
            },
            'skip': 'format 141 not served anymore',
        },
        {
            # DASH manifest with encrypted signature
            'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
            'info_dict': {
                'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
                'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
                'uploader': 'AfrojackVEVO',
                'uploader_id': 'AfrojackVEVO',
                'upload_date': '20131011',
                'license': 'Standard YouTube License',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141/bestaudio[ext=m4a]',
            },
        },
        {
            # JS player signature function name containing $
            'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
            'info_dict': {
                'title': 'Taylor Swift - Shake It Off',
                'alt_title': 'Shake It Off',
                'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
                'uploader': 'TaylorSwiftVEVO',
                'uploader_id': 'TaylorSwiftVEVO',
                'upload_date': '20140818',
                'license': 'Standard YouTube License',
                'creator': 'Taylor Swift',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141/bestaudio[ext=m4a]',
            },
        },
        {
            'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
            'info_dict': {
                'upload_date': '20100909',
                'uploader': 'The Amazing Atheist',
                'uploader_id': 'TheAmazingAtheist',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
                'license': 'Standard YouTube License',
                'title': 'Burning Everyone\'s Koran',
                'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
            }
        },
        {
            # Normal age-gate video (No vevo, embed allowed)
            'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
            'info_dict': {
                'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
                'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
                'uploader': 'The Witcher',
                'uploader_id': 'WitcherGame',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
                'upload_date': '20140605',
                'license': 'Standard YouTube License',
            },
        },
        {
            # Age-gate video with encrypted signature
            'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
            'info_dict': {
                'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
                'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
                'uploader': 'LloydVEVO',
                'uploader_id': 'LloydVEVO',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
                'upload_date': '20110629',
                'license': 'Standard YouTube License',
            },
        },
        {
            # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
            'url': '__2ABJjxzNo',
            'info_dict': {
                'upload_date': '20100430',
                'uploader_id': 'deadmau5',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
                'creator': 'deadmau5',
                'description': 'md5:12c56784b8032162bb936a5f76d55360',
                'uploader': 'deadmau5',
                'license': 'Standard YouTube License',
                'title': 'Deadmau5 - Some Chords (HD)',
                'alt_title': 'Some Chords',
            },
            'expected_warnings': [
                'DASH manifest missing',
            ]
        },
        {
            # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
            'url': 'lqQg6PlCWgI',
            'info_dict': {
                'upload_date': '20150827',
                'uploader_id': 'olympic',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
                'license': 'Standard YouTube License',
                'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
                'uploader': 'Olympic',
                'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
            },
            'params': {
                'skip_download': 'requires avconv',
            },
        },
        {
            'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
            'info_dict': {
                'stretched_ratio': 16 / 9.,
                'upload_date': '20110310',
                'uploader_id': 'AllenMeow',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
                'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
                'uploader': '孫艾倫',
                'license': 'Standard YouTube License',
                'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
            },
        },
        {
            # url_encoded_fmt_stream_map is empty string
            'url': 'qEJwOuvDf7I',
            'info_dict': {
                'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
                'upload_date': '20150404',
                'uploader_id': 'spbelect',
                'uploader': 'Наблюдатели Петербурга',
            },
            'params': {
                'skip_download': 'requires avconv',
            },
            'skip': 'This live event has ended.',
        },
        {
            # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
            'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
            'info_dict': {
                'title': 'md5:7b81415841e02ecd4313668cde88737a',
                'description': 'md5:116377fd2963b81ec4ce64b542173306',
                'upload_date': '20150625',
                'uploader_id': 'dorappi2000',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
                'uploader': 'dorappi2000',
                'license': 'Standard YouTube License',
                'formats': 'mincount:32',
            },
        },
        {
            # DASH manifest with segment_list
            'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
            'md5': '8ce563a1d667b599d21064e982ab9e31',
            'info_dict': {
                'upload_date': '20150501',  # According to '<meta itemprop="datePublished"', but in other places it's 20150510
                'uploader': 'Airtek',
                'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
                'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
                'license': 'Standard YouTube License',
                'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '135',  # bestvideo
            },
            'skip': 'This live event has ended.',
        },
        {
            # Multifeed videos (multiple cameras), URL is for Main Camera
            'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
            'info_dict': {
                'title': 'teamPGP: Rocket League Noob Stream',
                'description': 'md5:dc7872fb300e143831327f1bae3af010',
            },
            'playlist': [{
                'info_dict': {
                    'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                    'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
                    'license': 'Standard YouTube License',
                },
            }, {
                'info_dict': {
                    'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                    'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
                    'license': 'Standard YouTube License',
                },
            }, {
                'info_dict': {
                    'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                    'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
                    'license': 'Standard YouTube License',
                },
            }, {
                'info_dict': {
                    'title': 'teamPGP: Rocket League Noob Stream (zim)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                    'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
                    'license': 'Standard YouTube License',
                },
            }],
            'params': {
                'skip_download': True,
            },
        },
        {
            # Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
            'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
            'info_dict': {
                'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
            },
            'skip': 'Not multifeed anymore',
        },
        {
            'url': 'https://vid.plus/FlRa-iH7PGw',
            'only_matching': True,
        },
        {
            'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
            'only_matching': True,
        },
        {
            # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
            # Also tests cut-off URL expansion in video description (see
            # https://github.com/rg3/youtube-dl/issues/1892,
            # https://github.com/rg3/youtube-dl/issues/8164)
            'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
            'info_dict': {
                'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
                'alt_title': 'Dark Walk',
                'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
                'upload_date': '20151119',
                'uploader_id': 'IronSoulElf',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
                'uploader': 'IronSoulElf',
                'license': 'Standard YouTube License',
                'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
            'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
            'only_matching': True,
        },
        {
            # Video with yt:stretch=17:0
            'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
            'info_dict': {
                'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
                'description': 'md5:ee18a25c350637c8faff806845bddee9',
                'upload_date': '20151107',
                'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
                'uploader': 'CH GAMER DROID',
            },
            'params': {
                'skip_download': True,
            },
            'skip': 'This video does not exist.',
        },
        {
            # Video licensed under Creative Commons
            'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
            'info_dict': {
                'title': 'md5:e41008789470fc2533a3252216f1c1d1',
                'description': 'md5:a677553cf0840649b731a3024aeff4cc',
                'upload_date': '20150127',
                'uploader_id': 'BerkmanCenter',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
                'uploader': 'The Berkman Klein Center for Internet & Society',
                'license': 'Creative Commons Attribution license (reuse allowed)',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            # Channel-like uploader_url
            'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
            'info_dict': {
                'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
                'description': 'md5:dda0d780d5a6e120758d1711d062a867',
                'upload_date': '20151119',
                'uploader': 'Bernie 2016',
                'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
                'license': 'Creative Commons Attribution license (reuse allowed)',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
            'only_matching': True,
        },
        {
            # YouTube Red paid video (https://github.com/rg3/youtube-dl/issues/10059)
            'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
            'only_matching': True,
        },
        {
            # Rental video preview
            'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
            'info_dict': {
                'title': 'Piku - Trailer',
                'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
                'upload_date': '20150811',
                'uploader': 'FlixMatrix',
                'uploader_id': 'FlixMatrixKaravan',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
                'license': 'Standard YouTube License',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            # YouTube Red video with episode data
            'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
            'info_dict': {
                'title': 'Isolation - Mind Field (Ep 1)',
                'description': 'md5:8013b7ddea787342608f63a13ddc9492',
                'upload_date': '20170118',
                'uploader': 'Vsauce',
                'uploader_id': 'Vsauce',
                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
                'license': 'Standard YouTube License',
                'series': 'Mind Field',
            },
            'params': {
                'skip_download': True,
            },
            'expected_warnings': [
                'Skipping DASH manifest',
            ],
        },
        {
            'url': '1t24XAntNCY',
            'only_matching': True,
        },
        {
            # geo restricted to JP
            'url': 'sJL6WA-aGkQ',
            'only_matching': True,
        },
        {
            'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
            'only_matching': True,
        },
    ]
    def __init__(self, *args, **kwargs):
        super(YoutubeIE, self).__init__(*args, **kwargs)
        self._player_cache = {}

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self.to_screen('%s: Downloading video info webpage' % video_id)

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self.to_screen('%s: Extracting video information' % video_id)

    def report_unavailable_format(self, video_id, format):
        """Report that the requested format is not available."""
        self.to_screen('%s: Format %s not available' % (video_id, format))

    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self.to_screen('RTMP download detected')
    def _signature_cache_id(self, example_sig):
        """ Return a string representation of a signature """
        return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))

    def _extract_signature_function(self, video_id, player_url, example_sig):
        id_m = re.match(
            r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2}_[A-Z]{2})?/base)?\.(?P<ext>[a-z]+)$',
            player_url)
        if not id_m:
            raise ExtractorError('Cannot identify player %r' % player_url)
        player_type = id_m.group('ext')
        player_id = id_m.group('id')

        # Read from filesystem cache
        func_id = '%s_%s_%s' % (
            player_type, player_id, self._signature_cache_id(example_sig))
        assert os.path.basename(func_id) == func_id

        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
        if cache_spec is not None:
            return lambda s: ''.join(s[i] for i in cache_spec)

        download_note = (
            'Downloading player %s' % player_url
            if self._downloader.params.get('verbose') else
            'Downloading %s player %s' % (player_type, player_id)
        )
        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            urlh = self._request_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            assert False, 'Invalid player type %r' % player_type
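
        # Probing the decipher routine with a string of distinct characters
        # records which input position lands at each output position; the
        # resulting index list can be cached and replayed on real signatures
        # of the same length without re-running the JS/SWF interpreter.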
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = res(test_string)
        cache_spec = [ord(c) for c in cache_res]

        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
        return res
    def _print_sig_code(self, func, example_sig):
        def gen_sig_code(idxs):
            def _genslice(start, end, step):
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)

            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev

            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)

        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
    def _parse_sig_js(self, jscode):
        funcname = self._search_regex(
            (r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
             r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\('),
            jscode, 'Initial JS player signature function name', group='sig')

        jsi = JSInterpreter(jscode)
        initial_function = jsi.extract_function(funcname)
        return lambda s: initial_function([s])

    def _parse_sig_swf(self, file_contents):
        swfi = SWFInterpreter(file_contents)
        TARGET_CLASSNAME = 'SignatureDecipher'
        searched_class = swfi.extract_class(TARGET_CLASSNAME)
        initial_function = swfi.extract_function(searched_class, 'decipher')
        return lambda s: initial_function([s])
    def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
        """Turn the encrypted s field into a working signature"""

        if player_url is None:
            raise ExtractorError('Cannot decrypt signature without player_url')

        if player_url.startswith('//'):
            player_url = 'https:' + player_url
        elif not re.match(r'https?://', player_url):
            player_url = compat_urlparse.urljoin(
                'https://www.youtube.com', player_url)
        try:
            player_id = (player_url, self._signature_cache_id(s))
            if player_id not in self._player_cache:
                func = self._extract_signature_function(
                    video_id, player_url, s
                )
                self._player_cache[player_id] = func
            func = self._player_cache[player_id]
            if self._downloader.params.get('youtube_print_sig_code'):
                self._print_sig_code(func, s)
            return func(s)
        except Exception as e:
            tb = traceback.format_exc()
            raise ExtractorError(
                'Signature extraction failed: ' + tb, cause=e)
    def _get_subtitles(self, video_id, webpage):
        try:
            subs_doc = self._download_xml(
                'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
            return {}

        sub_lang_list = {}
        for track in subs_doc.findall('track'):
            lang = track.attrib['lang_code']
            if lang in sub_lang_list:
                continue
            sub_formats = []
            for ext in self._SUBTITLE_FORMATS:
                params = compat_urllib_parse_urlencode({
                    'lang': lang,
                    'v': video_id,
                    'fmt': ext,
                    'name': track.attrib['name'].encode('utf-8'),
                })
                sub_formats.append({
                    'url': 'https://www.youtube.com/api/timedtext?' + params,
                    'ext': ext,
                })
            sub_lang_list[lang] = sub_formats
        if not sub_lang_list:
            self._downloader.report_warning('video doesn\'t have subtitles')
            return {}
        return sub_lang_list
    def _get_ytplayer_config(self, video_id, webpage):
        patterns = (
            # User data may contain arbitrary character sequences that may affect
            # JSON extraction with regex, e.g. when '};' is contained the second
            # regex won't capture the whole JSON. Yet working around by trying more
            # concrete regex first keeping in mind proper quoted string handling
            # to be implemented in future that will replace this workaround (see
            # https://github.com/rg3/youtube-dl/issues/7468,
            # https://github.com/rg3/youtube-dl/pull/7599)
            r';ytplayer\.config\s*=\s*({.+?});ytplayer',
            r';ytplayer\.config\s*=\s*({.+?});',
        )
        config = self._search_regex(
            patterns, webpage, 'ytplayer.config', default=None)
        if config:
            return self._parse_json(
                uppercase_escape(config), video_id, fatal=False)
    def _get_automatic_captions(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
           argument to speed up the process."""
        self.to_screen('%s: Looking for automatic captions' % video_id)
        player_config = self._get_ytplayer_config(video_id, webpage)
        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
        if not player_config:
            self._downloader.report_warning(err_msg)
            return {}
        try:
            args = player_config['args']
            caption_url = args.get('ttsurl')
            if caption_url:
                timestamp = args['timestamp']
                # We get the available subtitles
                list_params = compat_urllib_parse_urlencode({
                    'type': 'list',
                    'tlangs': 1,
                    'asrs': 1,
                })
                list_url = caption_url + '&' + list_params
                caption_list = self._download_xml(list_url, video_id)
                original_lang_node = caption_list.find('track')
                if original_lang_node is None:
                    self._downloader.report_warning('Video doesn\'t have automatic captions')
                    return {}
                original_lang = original_lang_node.attrib['lang_code']
                caption_kind = original_lang_node.attrib.get('kind', '')

                sub_lang_list = {}
                for lang_node in caption_list.findall('target'):
                    sub_lang = lang_node.attrib['lang_code']
                    sub_formats = []
                    for ext in self._SUBTITLE_FORMATS:
                        params = compat_urllib_parse_urlencode({
                            'lang': original_lang,
                            'tlang': sub_lang,
                            'fmt': ext,
                            'ts': timestamp,
                            'kind': caption_kind,
                        })
                        sub_formats.append({
                            'url': caption_url + '&' + params,
                            'ext': ext,
                        })
                    sub_lang_list[sub_lang] = sub_formats
                return sub_lang_list

            # Some videos don't provide ttsurl but rather caption_tracks and
            # caption_translation_languages (e.g. 20LmZk1hakA)
            caption_tracks = args['caption_tracks']
            caption_translation_languages = args['caption_translation_languages']
            caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
            parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
            caption_qs = compat_parse_qs(parsed_caption_url.query)

            sub_lang_list = {}
            for lang in caption_translation_languages.split(','):
                lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
                sub_lang = lang_qs.get('lc', [None])[0]
                if not sub_lang:
                    continue
                sub_formats = []
                for ext in self._SUBTITLE_FORMATS:
                    caption_qs.update({
                        'tlang': [sub_lang],
                        'fmt': [ext],
                    })
                    sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
                        query=compat_urllib_parse_urlencode(caption_qs, True)))
                    sub_formats.append({
                        'url': sub_url,
                        'ext': ext,
                    })
                sub_lang_list[sub_lang] = sub_formats
            return sub_lang_list
        # An extractor error can be raise by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}
    def _mark_watched(self, video_id, video_info):
        playback_url = video_info.get('videostats_playback_base_url', [None])[0]
        if not playback_url:
            return
        parsed_playback_url = compat_urlparse.urlparse(playback_url)
        qs = compat_urlparse.parse_qs(parsed_playback_url.query)

        # cpn generation algorithm is reverse engineered from base.js.
        # In fact it works even with dummy cpn.
        CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
        cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))

        qs.update({
            'ver': ['2'],
            'cpn': [cpn],
        })
        playback_url = compat_urlparse.urlunparse(
            parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))

        self._download_webpage(
            playback_url, video_id, 'Marking watched',
            'Unable to mark watched', fatal=False)
    @classmethod
    def extract_id(cls, url):
        mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)
        video_id = mobj.group(2)
        return video_id
    def _extract_annotations(self, video_id):
        url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
        return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')

    @staticmethod
    def _extract_chapters(description, duration):
        if not description:
            return None
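        # A chapter line is a description line containing an onclick seekTo
        # link whose text is a mm:ss or hh:mm:ss timestamp; the rest of the
        # line (minus the link markup) becomes the chapter title.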
        chapter_lines = re.findall(
            r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
            description)
        if not chapter_lines:
            return None
        chapters = []
        for next_num, (chapter_line, time_point) in enumerate(
                chapter_lines, start=1):
            start_time = parse_duration(time_point)
            if start_time is None:
                continue
            end_time = (duration if next_num == len(chapter_lines)
                        else parse_duration(chapter_lines[next_num][1]))
            if end_time is None:
                continue
            chapter_title = re.sub(
                r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
            chapter_title = re.sub(r'\s+', ' ', chapter_title)
            chapters.append({
                'start_time': start_time,
                'end_time': end_time,
                'title': chapter_title,
            })
        return chapters
    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})

        proto = (
            'http' if self._downloader.params.get('prefer_insecure', False)
            else 'https')

        start_time = None
        end_time = None
        parsed_url = compat_urllib_parse_urlparse(url)
        for component in [parsed_url.fragment, parsed_url.query]:
            query = compat_parse_qs(component)
            if start_time is None and 't' in query:
                start_time = parse_duration(query['t'][0])
            if start_time is None and 'start' in query:
                start_time = parse_duration(query['start'][0])
            if end_time is None and 'end' in query:
                end_time = parse_duration(query['end'][0])

        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
        mobj = re.search(self._NEXT_URL_RE, url)
        if mobj:
            url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
        video_id = self.extract_id(url)

        # Get video webpage
        url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
        video_webpage = self._download_webpage(url, video_id)

        # Attempt to extract SWF player URL
        mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
        if mobj is not None:
            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
        else:
            player_url = None
        dash_mpds = []

        def add_dash_mpd(video_info):
            dash_mpd = video_info.get('dashmpd')
            if dash_mpd and dash_mpd[0] not in dash_mpds:
                dash_mpds.append(dash_mpd[0])

        # Get video info
        embed_webpage = None
        is_live = None
        if re.search(r'player-age-gate-content">', video_webpage) is not None:
            age_gate = True
            # We simulate the access to the video from www.youtube.com/v/{video_id}
            # this can be viewed without login into Youtube
            url = proto + '://www.youtube.com/embed/%s' % video_id
            embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
            data = compat_urllib_parse_urlencode({
                'video_id': video_id,
                'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                'sts': self._search_regex(
                    r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
            })
            video_info_url = proto + '://www.youtube.com/get_video_info?' + data
            video_info_webpage = self._download_webpage(
                video_info_url, video_id,
                note='Refetching age-gated info webpage',
                errnote='unable to download video info webpage')
            video_info = compat_parse_qs(video_info_webpage)
            add_dash_mpd(video_info)
        else:
            age_gate = False
            video_info = None
            # Try looking directly into the video webpage
            ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
            if ytplayer_config:
                args = ytplayer_config['args']
                if args.get('url_encoded_fmt_stream_map'):
                    # Convert to the same format returned by compat_parse_qs
                    video_info = dict((k, [v]) for k, v in args.items())
                    add_dash_mpd(video_info)
                # Rental video is not rented but preview is available (e.g.
                # https://www.youtube.com/watch?v=yYr8q0y5Jfg,
                # https://github.com/rg3/youtube-dl/issues/10532)
                if not video_info and args.get('ypc_vid'):
                    return self.url_result(
                        args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
                if args.get('livestream') == '1' or args.get('live_playback') == 1:
                    is_live = True
            if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
                # We also try looking in get_video_info since it may contain different dashmpd
                # URL that points to a DASH manifest with possibly different itag set (some itags
                # are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
                # manifest pointed by get_video_info's dashmpd).
                # The general idea is to take a union of itags of both DASH manifests (for example
                # video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
                self.report_video_info_webpage_download(video_id)
                for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
                    video_info_url = (
                        '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                        % (proto, video_id, el_type))
                    video_info_webpage = self._download_webpage(
                        video_info_url,
                        video_id, note=False,
                        errnote='unable to download video info webpage')
                    get_video_info = compat_parse_qs(video_info_webpage)
                    if get_video_info.get('use_cipher_signature') != ['True']:
                        add_dash_mpd(get_video_info)
                    if not video_info:
                        video_info = get_video_info
                    if 'token' in get_video_info:
                        # Different get_video_info requests may report different results, e.g.
                        # some may report video unavailability, but some may serve it without
                        # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
                        # the original webpage as well as el=info and el=embedded get_video_info
                        # requests report video unavailability due to geo restriction while
                        # el=detailpage succeeds and returns valid data). This is probably
                        # due to YouTube measures against IP ranges of hosting providers.
                        # Working around by preferring the first succeeded video_info containing
                        # the token if no such video_info yet was found.
                        if 'token' not in video_info:
                            video_info = get_video_info
                        break
        if 'token' not in video_info:
            if 'reason' in video_info:
                if 'The uploader has not made this video available in your country.' in video_info['reason']:
                    regions_allowed = self._html_search_meta(
                        'regionsAllowed', video_webpage, default=None)
                    countries = regions_allowed.split(',') if regions_allowed else None
                    self.raise_geo_restricted(
                        msg=video_info['reason'][0], countries=countries)
                raise ExtractorError(
                    'YouTube said: %s' % video_info['reason'][0],
                    expected=True, video_id=video_id)
            else:
                raise ExtractorError(
                    '"token" parameter not in video info for unknown reason',
                    video_id=video_id)

        # title
        if 'title' in video_info:
            video_title = video_info['title'][0]
        else:
            self._downloader.report_warning('Unable to extract video title')
            video_title = '_'
        # description
        description_original = video_description = get_element_by_id("eow-description", video_webpage)
        if video_description:
            description_original = video_description = re.sub(r'''(?x)
                <a\s+
                    (?:[a-zA-Z-]+="[^"]*"\s+)*?
                    (?:title|href)="([^"]+)"\s+
                    (?:[a-zA-Z-]+="[^"]*"\s+)*?
                    class="[^"]*"[^>]*>
                [^<]+\.{3}\s*
                </a>
            ''', r'\1', video_description)
            video_description = clean_html(video_description)
        else:
            fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
            if fd_mobj:
                video_description = unescapeHTML(fd_mobj.group(1))
            else:
                video_description = ''
        if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
            if not self._downloader.params.get('noplaylist'):
                entries = []
                feed_ids = []
                multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
                for feed in multifeed_metadata_list.split(','):
                    # Unquote should take place before split on comma (,) since textual
                    # fields may contain comma as well (see
                    # https://github.com/rg3/youtube-dl/issues/8536)
                    feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
                    entries.append({
                        '_type': 'url_transparent',
                        'ie_key': 'Youtube',
                        'url': smuggle_url(
                            '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
                            {'force_singlefeed': True}),
                        'title': '%s (%s)' % (video_title, feed_data['title'][0]),
                    })
                    feed_ids.append(feed_data['id'][0])
                self.to_screen(
                    'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
                    % (', '.join(feed_ids), video_id))
                return self.playlist_result(entries, video_id, video_title, video_description)
            self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
        if 'view_count' in video_info:
            view_count = int(video_info['view_count'][0])
        else:
            view_count = None

        # Check for "rental" videos
        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
            raise ExtractorError('"rental" videos not supported. See https://github.com/rg3/youtube-dl/issues/359 for more information.', expected=True)

        # Start extracting information
        self.report_information_extraction(video_id)
        # uploader
        if 'author' not in video_info:
            raise ExtractorError('Unable to extract uploader name')
        video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])

        # uploader_id
        video_uploader_id = None
        video_uploader_url = None
        mobj = re.search(
            r'<link itemprop="url" href="(?P<uploader_url>https?://www.youtube.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
            video_webpage)
        if mobj is not None:
            video_uploader_id = mobj.group('uploader_id')
            video_uploader_url = mobj.group('uploader_url')
        else:
            self._downloader.report_warning('unable to extract uploader nickname')
        # thumbnail image
        # We try first to get a high quality image:
        m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
                            video_webpage, re.DOTALL)
        if m_thumb is not None:
            video_thumbnail = m_thumb.group(1)
        elif 'thumbnail_url' not in video_info:
            self._downloader.report_warning('unable to extract video thumbnail')
            video_thumbnail = None
        else:   # don't panic if we can't find it
            video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
        # upload date
        upload_date = self._html_search_meta(
            'datePublished', video_webpage, 'upload date', default=None)
        if not upload_date:
            upload_date = self._search_regex(
                [r'(?s)id="eow-date.*?>(.*?)</span>',
                 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
                video_webpage, 'upload date', default=None)
            if upload_date:
                upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
        upload_date = unified_strdate(upload_date)
        video_license = self._html_search_regex(
            r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
            video_webpage, 'license', default=None)
        m_music = re.search(
            r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
            video_webpage)
        if m_music:
            video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
            video_creator = clean_html(m_music.group('creator'))
        else:
            video_alt_title = video_creator = None
        m_episode = re.search(
            r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
            video_webpage)
        if m_episode:
            series = m_episode.group('series')
            season_number = int(m_episode.group('season'))
            episode_number = int(m_episode.group('episode'))
        else:
            series = season_number = episode_number = None
        m_cat_container = self._search_regex(
            r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
            video_webpage, 'categories', default=None)
        if m_cat_container:
            category = self._html_search_regex(
                r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
                default=None)
            video_categories = None if category is None else [category]
        else:
            video_categories = None

        video_tags = [
            unescapeHTML(m.group('content'))
            for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
        def _extract_count(count_name):
            return str_to_int(self._search_regex(
                r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
                % re.escape(count_name),
                video_webpage, count_name, default=None))

        like_count = _extract_count('like')
        dislike_count = _extract_count('dislike')
1657 video_subtitles = self.extract_subtitles(video_id, video_webpage)
1658 automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
1660 video_duration = try_get(
1661 video_info, lambda x: int_or_none(x['length_seconds'][0]))
1662 if not video_duration:
1663 video_duration = parse_duration(self._html_search_meta(
1664 'duration', video_webpage, 'video duration'))
1667 video_annotations = None
1668 if self._downloader.params.get('writeannotations', False):
1669 video_annotations = self._extract_annotations(video_id)
1671 chapters = self._extract_chapters(description_original, video_duration)
1673 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1674 self.report_rtmp_download()
1676 'format_id': '_rtmp',
1678 'url': video_info['conn'][0],
1679 'player_url': player_url,
1681 elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
1682 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1683 if 'rtmpe%3Dyes' in encoded_url_map:
1684 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
1686 fmt_list = video_info.get('fmt_list', [''])[0]
1688 for fmt in fmt_list.split(','):
1689 spec = fmt.split('/')
1691 width_height = spec[1].split('x')
1692 if len(width_height) == 2:
1693 formats_spec[spec[0]] = {
1694 'resolution': spec[1],
1695 'width': int_or_none(width_height[0]),
1696 'height': int_or_none(width_height[1]),
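# A minimal sketch of the fmt_list parsing above, with plain int() standing in for
# youtube-dl's int_or_none(); the sample value is made up but follows the
# 'itag/WIDTHxHEIGHT/...' shape that the loop assumes.
def parse_fmt_list(fmt_list):
    formats_spec = {}
    for fmt in fmt_list.split(','):
        spec = fmt.split('/')
        if len(spec) > 1:
            width_height = spec[1].split('x')
            if len(width_height) == 2:
                formats_spec[spec[0]] = {
                    'resolution': spec[1],
                    'width': int(width_height[0]),
                    'height': int(width_height[1]),
                }
    return formats_spec

# parse_fmt_list('22/1280x720/9/0/115,18/640x360/9/0/115')
# -> {'22': {'resolution': '1280x720', 'width': 1280, 'height': 720},
#     '18': {'resolution': '640x360', 'width': 640, 'height': 360}}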
1699 for url_data_str in encoded_url_map.split(','):
1700 url_data = compat_parse_qs(url_data_str)
1701 if 'itag' not in url_data or 'url' not in url_data:
1703 format_id = url_data['itag'][0]
1704 url = url_data['url'][0]
1706 if 'sig' in url_data:
1707 url += '&signature=' + url_data['sig'][0]
1708 elif 's' in url_data:
1709 encrypted_sig = url_data['s'][0]
1710 ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
1712 jsplayer_url_json = self._search_regex(
1714 embed_webpage if age_gate else video_webpage,
1715 'JS player URL (1)', default=None)
1716 if not jsplayer_url_json and not age_gate:
1717 # We need the embed website after all
1718 if embed_webpage is None:
1719 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
1720 embed_webpage = self._download_webpage(
1721 embed_url, video_id, 'Downloading embed webpage')
1722 jsplayer_url_json = self._search_regex(
1723 ASSETS_RE, embed_webpage, 'JS player URL')
1725 player_url = json.loads(jsplayer_url_json)
1726 if player_url is None:
1727 player_url_json = self._search_regex(
1728 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
1729 video_webpage, 'age gate player URL')
1730 player_url = json.loads(player_url_json)
1732 if self._downloader.params.get('verbose'):
1733 if player_url is None:
1734 player_version = 'unknown'
1735 player_desc = 'unknown'
1737 if player_url.endswith('swf'):
1738 player_version = self._search_regex(
1739 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
1740 'flash player', fatal=False)
1741 player_desc = 'flash player %s' % player_version
1743 player_version = self._search_regex(
1744 [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
1745 r'(?:www|player)-([^/]+)(?:/[a-z]{2}_[A-Z]{2})?/base\.js'],
1747 'html5 player', fatal=False)
1748 player_desc = 'html5 player %s' % player_version
1750 parts_sizes = self._signature_cache_id(encrypted_sig)
1751 self.to_screen('{%s} signature length %s, %s' %
1752 (format_id, parts_sizes, player_desc))
1754 signature = self._decrypt_signature(
1755 encrypted_sig, video_id, player_url, age_gate)
1756 url += '&signature=' + signature
1757 if 'ratebypass' not in url:
1758 url += '&ratebypass=yes'
1761 'format_id': format_id,
1763 'player_url': player_url,
1765 if format_id in self._formats:
1766 dct.update(self._formats[format_id])
1767 if format_id in formats_spec:
1768 dct.update(formats_spec[format_id])
1770 # Some itags are not included in DASH manifest thus corresponding formats will
1771 # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
1772 # Trying to extract metadata from url_encoded_fmt_stream_map entry.
1773 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
1774 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
1777 'filesize': int_or_none(url_data.get('clen', [None])[0]),
1778 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
1781 'fps': int_or_none(url_data.get('fps', [None])[0]),
1782 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
1784 for key, value in more_fields.items():
1787 type_ = url_data.get('type', [None])[0]
1789 type_split = type_.split(';')
1790 kind_ext = type_split[0].split('/')
1791 if len(kind_ext) == 2:
1793 dct['ext'] = mimetype2ext(type_split[0])
1794 if kind in ('audio', 'video'):
1796 for mobj in re.finditer(
1797 r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
1798 if mobj.group('key') == 'codecs':
1799 codecs = mobj.group('val')
1802 dct.update(parse_codecs(codecs))
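# Worked example of the type-field handling above (the sample value is made up but
# typical of url_encoded_fmt_stream_map entries):
#   type_ = 'video/mp4; codecs="avc1.64001F, mp4a.40.2"'
#   mimetype2ext('video/mp4') gives 'mp4' for dct['ext'], and the key/value regex
#   yields codecs = 'avc1.64001F, mp4a.40.2', which parse_codecs() splits into
#   vcodec 'avc1.64001F' and acodec 'mp4a.40.2'.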
1804 elif video_info.get('hlsvp'):
1805 manifest_url = video_info['hlsvp'][0]
1807 m3u8_formats = self._extract_m3u8_formats(
1808 manifest_url, video_id, 'mp4', fatal=False)
1809 for a_format in m3u8_formats:
1810 itag = self._search_regex(
1811 r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
1813 a_format['format_id'] = itag
1814 if itag in self._formats:
1815 dct = self._formats[itag].copy()
1816 dct.update(a_format)
1818 a_format['player_url'] = player_url
1819 # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
1820 a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
1821 formats.append(a_format)
1823 unavailable_message = self._html_search_regex(
1824 r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
1825 video_webpage, 'unavailable message', default=None)
1826 if unavailable_message:
1827 raise ExtractorError(unavailable_message, expected=True)
1828 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
1830 # Look for the DASH manifest
1831 if self._downloader.params.get('youtube_include_dash_manifest', True):
1832 dash_mpd_fatal = True
1833 for mpd_url in dash_mpds:
1836 def decrypt_sig(mobj):
1838 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
1839 return '/signature/%s' % dec_s
1841 mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
1843 for df in self._extract_mpd_formats(
1844 mpd_url, video_id, fatal=dash_mpd_fatal,
1845 formats_dict=self._formats):
1846 # Do not overwrite DASH format found in some previous DASH manifest
1847 if df['format_id'] not in dash_formats:
1848 dash_formats[df['format_id']] = df
1849 # Additional DASH manifests may end up in HTTP Error 403 therefore
1850 # allow them to fail without bug report message if we already have
1851 # some DASH manifest succeeded. This is temporary workaround to reduce
1852 # burst of bug reports until we figure out the reason and whether it
1853 # can be fixed at all.
1854 dash_mpd_fatal = False
1855 except (ExtractorError, KeyError) as e:
1856 self.report_warning(
1857 'Skipping DASH manifest: %r' % e, video_id)
1859 # Remove the formats we found through non-DASH, they
1860 # contain less info and it can be wrong, because we use
1861 # fixed values (for example the resolution). See
1862 # https://github.com/rg3/youtube-dl/issues/5774 for an example of this.
1864 formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
1865 formats.extend(dash_formats.values())
1867 # Check for malformed aspect ratio
1868 stretched_m = re.search(
1869 r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
1872 w = float(stretched_m.group('w'))
1873 h = float(stretched_m.group('h'))
1874 # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
1875 # We will only process correct ratios.
1879 if f.get('vcodec') != 'none':
1880 f['stretched_ratio'] = ratio
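# A minimal sketch of the ratio guard implied above: only a positive width and
# height produce a usable stretch ratio, so a value like the 17:0 reported for
# Q39EVAstoRM (mentioned in the comment) is ignored. Assumed helper, not the
# original code.
def stretched_ratio(w, h):
    w, h = float(w), float(h)
    return w / h if w > 0 and h > 0 else None

# stretched_ratio('16', '9') -> 1.777...    stretched_ratio('17', '0') -> None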
1882 self._sort_formats(formats)
1884 self.mark_watched(video_id, video_info)
1888 'uploader': video_uploader,
1889 'uploader_id': video_uploader_id,
1890 'uploader_url': video_uploader_url,
1891 'upload_date': upload_date,
1892 'license': video_license,
1893 'creator': video_creator,
1894 'title': video_title,
1895 'alt_title': video_alt_title,
1896 'thumbnail': video_thumbnail,
1897 'description': video_description,
1898 'categories': video_categories,
1900 'subtitles': video_subtitles,
1901 'automatic_captions': automatic_captions,
1902 'duration': video_duration,
1903 'age_limit': 18 if age_gate else 0,
1904 'annotations': video_annotations,
1905 'chapters': chapters,
1906 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1907 'view_count': view_count,
1908 'like_count': like_count,
1909 'dislike_count': dislike_count,
1910 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
1913 'start_time': start_time,
1914 'end_time': end_time,
1916 'season_number': season_number,
1917 'episode_number': episode_number,
1921 class YoutubeSharedVideoIE(InfoExtractor):
1922 _VALID_URL = r'(?:https?:)?//(?:www\.)?youtube\.com/shared\?.*\bci=(?P<id>[0-9A-Za-z_-]{11})'
1923 IE_NAME = 'youtube:shared'
1926 'url': 'https://www.youtube.com/shared?ci=1nEzmT-M4fU',
1928 'id': 'uPDB5I9wfp8',
1930 'title': 'Pocoyo: 90 minutos de episódios completos Português para crianças - PARTE 3',
1931 'description': 'md5:d9e4d9346a2dfff4c7dc4c8cec0f546d',
1932 'upload_date': '20160219',
1933 'uploader': 'Pocoyo - Português (BR)',
1934 'uploader_id': 'PocoyoBrazil',
1936 'add_ie': ['Youtube'],
1938 # There are already too many Youtube downloads
1939 'skip_download': True,
1943 def _real_extract(self, url):
1944 video_id = self._match_id(url)
1946 webpage = self._download_webpage(url, video_id)
1948 real_video_id = self._html_search_meta(
1949 'videoId', webpage, 'YouTube video id', fatal=True)
1951 return self.url_result(real_video_id, YoutubeIE.ie_key())
1954 class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
1955 IE_DESC = 'YouTube.com playlists'
1956 _VALID_URL = r"""(?x)(?:
1962 (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
1963 \? (?:.*?[&;])*? (?:p|a|list)=
1966 youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
1969 (?:PL|LL|EC|UU|FL|RD|UL|TL)?[0-9A-Za-z-_]{10,}
1970 # Top tracks, they can also include dots
1976 )""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
1977 _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s&disable_polymer=true'
1978 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
1979 IE_NAME = 'youtube:playlist'
1981 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1983 'title': 'ytdl test PL',
1984 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1986 'playlist_count': 3,
1988 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1990 'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1991 'title': 'YDL_Empty_List',
1993 'playlist_count': 0,
1994 'skip': 'This playlist is private',
1996 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
1997 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1999 'title': '29C3: Not my department',
2000 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
2002 'playlist_count': 95,
2004 'note': 'issue #673',
2005 'url': 'PLBB231211A4F62143',
2007 'title': '[OLD]Team Fortress 2 (Class-based LP)',
2008 'id': 'PLBB231211A4F62143',
2010 'playlist_mincount': 26,
2012 'note': 'Large playlist',
2013 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
2015 'title': 'Uploads from Cauchemar',
2016 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
2018 'playlist_mincount': 799,
2020 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
2022 'title': 'YDL_safe_search',
2023 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
2025 'playlist_count': 2,
2026 'skip': 'This playlist is private',
2029 'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
2030 'playlist_count': 4,
2033 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
2036 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
2037 'playlist_mincount': 485,
2039 'title': '2017 華語最新單曲 (2/24更新)',
2040 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
2043 'note': 'Embedded SWF player',
2044 'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
2045 'playlist_count': 4,
2048 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
2051 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
2052 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
2054 'title': 'Uploads from Interstellar Movie',
2055 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
2057 'playlist_mincount': 21,
2059 # Playlist URL that does not actually serve a playlist
2060 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
2062 'id': 'FqZTN594JQw',
2064 'title': "Smiley
's People 01 detective, Adventure Series, Action",
2065 'uploader
': 'STREEM
',
2066 'uploader_id
': 'UCyPhqAZgwYWZfxElWVbVJng
',
2067 'uploader_url
': r're
:https?
://(?
:www\
.)?youtube\
.com
/channel
/UCyPhqAZgwYWZfxElWVbVJng
',
2068 'upload_date
': '20150526',
2069 'license
': 'Standard YouTube License
',
2070 'description
': 'md5
:507cdcb5a49ac0da37a920ece610be80
',
2071 'categories
': ['People
& Blogs
'],
2074 'dislike_count
': int,
2077 'skip_download
': True,
2079 'add_ie
': [YoutubeIE.ie_key()],
2081 'url
': 'https
://youtu
.be
/yeWKywCrFtk?
list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5
',
2083 'id': 'yeWKywCrFtk
',
2085 'title
': 'Small Scale Baler
and Braiding Rugs
',
2086 'uploader
': 'Backus
-Page House Museum
',
2087 'uploader_id
': 'backuspagemuseum
',
2088 'uploader_url
': r're
:https?
://(?
:www\
.)?youtube\
.com
/user
/backuspagemuseum
',
2089 'upload_date
': '20161008',
2090 'license
': 'Standard YouTube License
',
2091 'description
': 'md5
:800c0c78d5eb128500bffd4f0b4f2e8a
',
2092 'categories
': ['Nonprofits
& Activism
'],
2095 'dislike_count
': int,
2099 'skip_download
': True,
2102 'url
': 'https
://youtu
.be
/uWyaPkt
-VOI?
list=PL9D9FC436B881BA21
',
2103 'only_matching
': True,
2105 'url
': 'TLGGrESM50VT6acwMjAyMjAxNw
',
2106 'only_matching
': True,
2109 def _real_initialize(self):
2112 def _extract_mix(self, playlist_id):
2113 # The mixes are generated from a single video
2114 # the id of the playlist is just 'RD' + video_id
2116 last_id = playlist_id[-11:]
2117 for n in itertools.count(1):
2118 url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
2119 webpage = self._download_webpage(
2120 url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
2121 new_ids = orderedSet(re.findall(
2122 r'''(?xs)data-video-username=".*?".*?
2123 href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
2125 # Fetch new pages until all the videos are repeated, it seems that
2126 # there are always 51 unique videos.
2127 new_ids = [_id for _id in new_ids if _id not in ids]
2133 url_results = self._ids_to_results(ids)
2135 search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
2137 search_title('playlist-title') or
2138 search_title('title long-title') or
2139 search_title('title'))
2140 title = clean_html(title_span)
2142 return self.playlist_result(url_results, playlist_id, title)
2144 def _extract_playlist(self, playlist_id):
2145 url = self._TEMPLATE_URL % playlist_id
2146 page = self._download_webpage(url, playlist_id)
2148 # the yt-alert-message now has tabindex attribute (see https://github.com/rg3/youtube-dl/issues/11604)
2149 for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
2150 match = match.strip()
2151 # Check if the playlist exists or is private
2152 mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
2154 reason = mobj.group('reason')
2155 message = 'This playlist %s' % reason
2156 if 'private' in reason:
2157 message += ', use --username or --netrc to access it'
2159 raise ExtractorError(message, expected=True)
2160 elif re.match(r'[^<]*Invalid parameters[^<]*', match):
2161 raise ExtractorError(
2162 'Invalid parameters. Maybe URL is incorrect.',
2164 elif re.match(r'[^<]*Choose your language[^<]*', match):
2167 self.report_warning('Youtube gives an alert message: ' + match)
2169 playlist_title = self._html_search_regex(
2170 r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
2171 page, 'title', default=None)
2175 if not playlist_title:
2177 # Some playlist URLs don't actually serve a playlist (e.g.
2178 # https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
2179 next(self._entries(page, playlist_id))
2180 except StopIteration:
2183 return has_videos, self.playlist_result(
2184 self._entries(page, playlist_id), playlist_id, playlist_title)
2186 def _check_download_just_video(self, url, playlist_id):
2187 # Check if it's a video-specific URL
2188 query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
2189 video_id = query_dict.get('v', [None])[0] or self._search_regex(
2190 r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
2191 'video id', default=None)
2193 if self._downloader.params.get('noplaylist'):
2194 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
2195 return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
2197 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
2198 return video_id, None
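# A minimal standalone sketch of the video-id detection above, using the standard
# library in place of the compat_urlparse wrappers; the sample URLs are made up.
import re
try:
    from urllib.parse import urlparse, parse_qs  # Python 3
except ImportError:
    from urlparse import urlparse, parse_qs  # Python 2

def video_id_from_url(url):
    # Prefer the ?v= query parameter, then fall back to youtu.be/embed paths.
    video_id = parse_qs(urlparse(url).query).get('v', [None])[0]
    if not video_id:
        mobj = re.search(
            r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})',
            url)
        video_id = mobj.group(1) if mobj else None
    return video_id

# video_id_from_url('https://www.youtube.com/watch?v=BaW_jenozKc&list=WL') -> 'BaW_jenozKc'
# video_id_from_url('https://youtu.be/BaW_jenozKc?list=WL')                -> 'BaW_jenozKc'
# video_id_from_url('https://www.youtube.com/playlist?list=WL')            -> None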
2201 def _real_extract(self, url):
2202 # Extract playlist id
2203 mobj = re.match(self._VALID_URL, url)
2205 raise ExtractorError('Invalid URL: %s' % url)
2206 playlist_id = mobj.group(1) or mobj.group(2)
2208 video_id, video = self._check_download_just_video(url, playlist_id)
2212 if playlist_id.startswith(('RD', 'UL', 'PU')):
2213 # Mixes require a custom extraction process
2214 return self._extract_mix(playlist_id)
2216 has_videos, playlist = self._extract_playlist(playlist_id)
2217 if has_videos or not video_id:
2220 # Some playlist URLs don't actually serve a playlist (see
2221 # https://github.com/rg3/youtube-dl/issues/10537).
2222 # Fallback to plain video extraction if there is a video id
2223 # along with playlist id.
2224 return self.url_result(video_id, 'Youtube', video_id=video_id)
2227 class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
2228 IE_DESC = 'YouTube.com channels'
2229 _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
2230 _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
2231 _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
2232 IE_NAME = 'youtube:channel'
2234 'note': 'paginated channel',
2235 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
2236 'playlist_mincount': 91,
2238 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
2239 'title': 'Uploads from lex will',
2242 'note': 'Age restricted channel',
2243 # from https://www.youtube.com/user/DeusExOfficial
2244 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
2245 'playlist_mincount': 64,
2247 'id': 'UUs0ifCMCm1icqRbqhUINa0w',
2248 'title': 'Uploads from Deus Ex',
2253 def suitable(cls, url):
2254 return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
2255 else super(YoutubeChannelIE, cls).suitable(url))
2257 def _build_template_url(self, url, channel_id):
2258 return self._TEMPLATE_URL % channel_id
2260 def _real_extract(self, url):
2261 channel_id = self._match_id(url)
2263 url = self._build_template_url(url, channel_id)
2265 # Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
2266 # Workaround by extracting as a playlist if managed to obtain channel playlist URL
2267 # otherwise fallback on channel by page extraction
2268 channel_page = self._download_webpage(
2269 url + '?view=57', channel_id,
2270 'Downloading channel page', fatal=False)
2271 if channel_page is False:
2272 channel_playlist_id = False
2274 channel_playlist_id = self._html_search_meta(
2275 'channelId', channel_page, 'channel id', default=None)
2276 if not channel_playlist_id:
2277 channel_url = self._html_search_meta(
2278 ('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
2279 channel_page, 'channel url', default=None)
2281 channel_playlist_id = self._search_regex(
2282 r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
2283 channel_url, 'channel id', default=None)
2284 if channel_playlist_id and channel_playlist_id.startswith('UC'):
2285 playlist_id = 'UU' + channel_playlist_id[2:]
2286 return self.url_result(
2287 compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
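# A short illustration of the channel-to-uploads mapping used above: a channel id
# that starts with 'UC' has an auto-generated uploads playlist whose id is the same
# string with the prefix swapped to 'UU' (assumed helper name, behaviour as above).
def uploads_playlist_id(channel_id):
    # e.g. 'UCKfVa3S1e4PHvxWcwyMMg8w' -> 'UUKfVa3S1e4PHvxWcwyMMg8w'
    return 'UU' + channel_id[2:] if channel_id.startswith('UC') else None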
2289 channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
2290 autogenerated = re.search(r'''(?x)
2292 channel-header-autogenerated-label|
2293 yt-channel-title-autogenerated
2294 )[^"]*"''', channel_page) is not None
2297 # The videos are contained in a single page
2298 # the ajax pages can't be used, they are empty
2301 video_id, 'Youtube', video_id=video_id,
2302 video_title=video_title)
2303 for video_id, video_title in self.extract_videos_from_page(channel_page)]
2304 return self.playlist_result(entries, channel_id)
2307 next(self._entries(channel_page, channel_id))
2308 except StopIteration:
2309 alert_message = self._html_search_regex(
2310 r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
2311 channel_page, 'alert', default=None, group='alert')
2313 raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
2315 return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
2318 class YoutubeUserIE(YoutubeChannelIE):
2319 IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
2320 _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
2321 _TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
2322 IE_NAME = 'youtube:user'
2325 'url': 'https://www.youtube.com/user/TheLinuxFoundation',
2326 'playlist_mincount': 320,
2328 'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
2329 'title': 'Uploads from The Linux Foundation',
2332 # Only available via https://www.youtube.com/c/12minuteathlete/videos
2333 # but not https://www.youtube.com/user/12minuteathlete/videos
2334 'url': 'https://www.youtube.com/c/12minuteathlete/videos',
2335 'playlist_mincount': 249,
2337 'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
2338 'title': 'Uploads from 12 Minute Athlete',
2341 'url': 'ytuser:phihag',
2342 'only_matching': True,
2344 'url': 'https://www.youtube.com/c/gametrailers',
2345 'only_matching': True,
2347 'url': 'https://www.youtube.com/gametrailers',
2348 'only_matching': True,
2350 # This channel is not available, geo restricted to JP
2351 'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
2352 'only_matching': True,
2356 def suitable(cls, url):
2357 # Don't return True if the url can be extracted with another youtube
2358 # extractor, the regex is too permissive and it would match.
2359 other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
2360 if any(ie.suitable(url) for ie in other_yt_ies):
2363 return super(YoutubeUserIE, cls).suitable(url)
2365 def _build_template_url(self, url, channel_id):
2366 mobj = re.match(self._VALID_URL, url)
2367 return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
2370 class YoutubeLiveIE(YoutubeBaseInfoExtractor):
2371 IE_DESC = 'YouTube.com live streams'
2372 _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
2373 IE_NAME = 'youtube:live'
2376 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
2378 'id': 'a48o2S1cPoo',
2380 'title': 'The Young Turks - Live Main Show',
2381 'uploader': 'The Young Turks',
2382 'uploader_id': 'TheYoungTurks',
2383 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
2384 'upload_date': '20150715',
2385 'license': 'Standard YouTube License',
2386 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
2387 'categories': ['News & Politics'],
2388 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
2390 'dislike_count': int,
2393 'skip_download': True,
2396 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
2397 'only_matching': True,
2399 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
2400 'only_matching': True,
2402 'url': 'https://www.youtube.com/TheYoungTurks/live',
2403 'only_matching': True,
2406 def _real_extract(self, url):
2407 mobj = re.match(self._VALID_URL, url)
2408 channel_id = mobj.group('id')
2409 base_url = mobj.group('base_url')
2410 webpage = self._download_webpage(url, channel_id, fatal=False)
2412 page_type = self._og_search_property(
2413 'type', webpage, 'page type', default=None)
2414 video_id = self._html_search_meta(
2415 'videoId', webpage, 'video id', default=None)
2416 if page_type == 'video' and video_id and re.match(r'^[0-9A-Za-z_-]{11}$', video_id):
2417 return self.url_result(video_id, YoutubeIE.ie_key())
2418 return self.url_result(base_url)
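# Usage illustration of the decision above (URLs taken from the tests): for
# https://www.youtube.com/user/TheYoungTurks/live the page is expected to expose an
# og:type of 'video' plus a videoId <meta> value, so the live video is delegated to
# YoutubeIE; otherwise extraction falls back to the plain channel/user base URL.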
2421 class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
2422 IE_DESC = 'YouTube.com user/channel playlists'
2423 _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
2424 IE_NAME = 'youtube:playlists'
2427 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
2428 'playlist_mincount': 4,
2430 'id': 'ThirstForScience',
2431 'title': 'Thirst for Science',
2434 # with "Load more" button
2435 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
2436 'playlist_mincount': 70,
2439 'title': 'Игорь Клейнер',
2442 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
2443 'playlist_mincount': 17,
2445 'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
2446 'title': 'Chem Player',
2451 class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
2452 IE_DESC = 'YouTube.com searches'
2453 # there doesn't appear to be a real limit, for example if you search for
2454 # 'python' you get more than 8.000.000 results
2455 _MAX_RESULTS = float('inf')
2456 IE_NAME = 'youtube:search'
2457 _SEARCH_KEY = 'ytsearch'
2458 _EXTRA_QUERY_ARGS = {}
2461 def _get_n_results(self, query, n):
2462 """Get a specified number of results for a query"""
2468 'search_query': query.encode('utf-8'),
2470 url_query.update(self._EXTRA_QUERY_ARGS)
2471 result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
2473 for pagenum in itertools.count(1):
2474 data = self._download_json(
2475 result_url, video_id='query "%s"' % query,
2476 note='Downloading page %s' % pagenum,
2477 errnote='Unable to download API page',
2478 query={'spf': 'navigate'})
2479 html_content = data[1]['body']['content']
2481 if 'class="search-message' in html_content:
2482 raise ExtractorError(
2483 '[youtube] No video results', expected=True)
2485 new_videos = self._ids_to_results(orderedSet(re.findall(
2486 r'href="/watch\?v=(.{11})', html_content)))
2487 videos += new_videos
2488 if not new_videos or len(videos) > limit:
2490 next_link = self._html_search_regex(
2491 r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
2492 html_content, 'next link', default=None)
2493 if next_link is None:
2495 result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
2499 return self.playlist_result(videos, query)
2502 class YoutubeSearchDateIE(YoutubeSearchIE):
2503 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
2504 _SEARCH_KEY = 'ytsearchdate'
2505 IE_DESC = 'YouTube.com searches, newest videos first'
2506 _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
2509 class YoutubeSearchURLIE(YoutubePlaylistBaseInfoExtractor):
2510 IE_DESC = 'YouTube.com search URLs'
2511 IE_NAME = 'youtube:search_url'
2512 _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
2513 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
2515 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
2516 'playlist_mincount': 5,
2518 'title': 'youtube-dl test video',
2521 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
2522 'only_matching': True,
2525 def _real_extract(self, url):
2526 mobj = re.match(self._VALID_URL, url)
2527 query = compat_urllib_parse_unquote_plus(mobj.group('query'))
2528 webpage = self._download_webpage(url, query)
2529 return self.playlist_result(self._process_page(webpage), playlist_title=query)
2532 class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
2533 IE_DESC = 'YouTube.com (multi-season) shows'
2534 _VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
2535 IE_NAME = 'youtube:show'
2537 'url': 'https://www.youtube.com/show/airdisasters',
2538 'playlist_mincount': 5,
2540 'id': 'airdisasters',
2541 'title': 'Air Disasters',
2545 def _real_extract(self, url):
2546 playlist_id = self._match_id(url)
2547 return super(YoutubeShowIE, self)._real_extract(
2548 'https://www.youtube.com/show/%s/playlists' % playlist_id)
2551 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
2553 Base class for feed extractors
2554 Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
2556 _LOGIN_REQUIRED = True
2560 return 'youtube:%s' % self._FEED_NAME
2562 def _real_initialize(self):
2565 def _real_extract(self, url):
2566 page = self._download_webpage(
2567 'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
2569 # The extraction process is the same as for playlists, but the regex
2570 # for the video ids doesn't contain an index
2572 more_widget_html = content_html = page
2573 for page_num in itertools.count(1):
2574 matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
2576 # 'recommended' feed has infinite 'load more' and each new portion spins
2577 # the same videos in (sometimes) slightly different order, so we'll check
2578 # for unicity and break when portion has no new videos
2579 new_ids = filter(lambda video_id: video_id not in ids, orderedSet(matches))
2585 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
2589 more = self._download_json(
2590 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
2591 'Downloading page #%s' % page_num,
2592 transform_source=uppercase_escape)
2593 content_html = more['content_html']
2594 more_widget_html = more['load_more_widget_html']
2596 return self.playlist_result(
2597 self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
2600 class YoutubeWatchLaterIE(YoutubePlaylistIE):
2601 IE_NAME = 'youtube:watchlater'
2602 IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
2603 _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
2606 'url': 'https://www.youtube.com/playlist?list=WL',
2607 'only_matching': True,
2609 'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
2610 'only_matching': True,
2613 def _real_extract(self, url):
2614 _, video = self._check_download_just_video(url, 'WL')
2617 _, playlist = self._extract_playlist('WL')
2621 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
2622 IE_NAME = 'youtube:favorites'
2623 IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
2624 _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
2625 _LOGIN_REQUIRED = True
2627 def _real_extract(self, url):
2628 webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
2629 playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
2630 return self.url_result(playlist_id, 'YoutubePlaylist')
2633 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
2634 IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
2635 _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
2636 _FEED_NAME = 'recommended'
2637 _PLAYLIST_TITLE = 'Youtube Recommended videos'
2640 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
2641 IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
2642 _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
2643 _FEED_NAME = 'subscriptions'
2644 _PLAYLIST_TITLE = 'Youtube Subscriptions'
2647 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
2648 IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
2649 _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
2650 _FEED_NAME = 'history'
2651 _PLAYLIST_TITLE = 'Youtube History'
2654 class YoutubeTruncatedURLIE(InfoExtractor):
2655 IE_NAME = 'youtube:truncated_url'
2656 IE_DESC = False # Do not list
2657 _VALID_URL = r'''(?x)
2659 (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
2662 annotation_id=annotation_[^&]+|
2668 attribution_link\?a=[^&]+
2674 'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
2675 'only_matching': True,
2677 'url': 'https://www.youtube.com/watch?',
2678 'only_matching': True,
2680 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
2681 'only_matching': True,
2683 'url': 'https://www.youtube.com/watch?feature=foo',
2684 'only_matching': True,
2686 'url': 'https://www.youtube.com/watch?hl=en-GB',
2687 'only_matching': True,
2689 'url': 'https://www.youtube.com/watch?t=2372',
2690 'only_matching': True,
2693 def _real_extract(self, url):
2694 raise ExtractorError(
2695 'Did you forget to quote the URL? Remember that & is a meta '
2696 'character in most shells, so you want to put the URL in quotes, '
2698 '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
2699 ' or simply youtube-dl BaW_jenozKc .',
2703 class YoutubeTruncatedIDIE(InfoExtractor):
2704 IE_NAME = 'youtube:truncated_id'
2705 IE_DESC = False # Do not list
2706 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
2709 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
2710 'only_matching': True,
2713 def _real_extract(self, url):
2714 video_id = self._match_id(url)
2715 raise ExtractorError(
2716 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),