]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/youtube.py
cfe9eed551088dfac9d2690cac0de03c375c214e
[youtubedl] / youtube_dl / extractor / youtube.py
1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import re
10 import time
11 import traceback
12
13 from .common import InfoExtractor, SearchInfoExtractor
14 from ..jsinterp import JSInterpreter
15 from ..swfinterp import SWFInterpreter
16 from ..compat import (
17 compat_chr,
18 compat_parse_qs,
19 compat_urllib_parse,
20 compat_urllib_parse_unquote,
21 compat_urllib_parse_unquote_plus,
22 compat_urllib_parse_urlparse,
23 compat_urlparse,
24 compat_str,
25 )
26 from ..utils import (
27 clean_html,
28 encode_dict,
29 ExtractorError,
30 float_or_none,
31 get_element_by_attribute,
32 get_element_by_id,
33 int_or_none,
34 orderedSet,
35 parse_duration,
36 remove_start,
37 sanitized_Request,
38 smuggle_url,
39 str_to_int,
40 unescapeHTML,
41 unified_strdate,
42 unsmuggle_url,
43 uppercase_escape,
44 ISO3166Utils,
45 )
46
47
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    def _set_language(self):
        # Ask YouTube for English pages so the HTML-scraping regexes in
        # the extractors keep matching regardless of the user's locale.
        self._set_cookie(
            '.youtube.com', 'PREF', 'f1=50000000&hl=en',
            # YouTube sets the expire time to about two months
            expire_time=time.time() + 2 * 30 * 24 * 3600)

    def _ids_to_results(self, ids):
        """Wrap a list of video ids into url_result dicts for YoutubeIE."""
        return [
            self.url_result(vid_id, 'Youtube', video_id=vid_id)
            for vid_id in ids]

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            # Report failure consistently with the docstring (this used
            # to fall through with a bare "return", i.e. None).
            return False

        galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
                                  login_page, 'Login GALX parameter')

        # Log in
        login_form_strs = {
            'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
            'Email': username,
            'GALX': galx,
            'Passwd': password,

            'PersistentCookie': 'yes',
            '_utf8': '霱',
            'bgresponse': 'js_disabled',
            'checkConnection': '',
            'checkedDomains': 'youtube',
            'dnConn': '',
            'pstMsg': '0',
            'rmShown': '1',
            'secTok': '',
            'signIn': 'Sign in',
            'timeStmp': '',
            'service': 'youtube',
            'uilel': '3',
            'hl': 'en_US',
        }

        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')

        req = sanitized_Request(self._LOGIN_URL, login_data)
        login_results = self._download_webpage(
            req, None,
            note='Logging in', errnote='unable to log in', fatal=False)
        if login_results is False:
            return False

        if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
            raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)

        # Two-Factor
        # TODO add SMS and phone call support - these require making a request and then prompting the user

        if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
            tfa_code = self._get_tfa_info('2-step verification code')

            if not tfa_code:
                self._downloader.report_warning(
                    'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
                    '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                return False

            tfa_code = remove_start(tfa_code, 'G-')

            # Resubmit the hidden fields of the challenge form together
            # with the user-supplied TOTP pin.
            tfa_form_strs = self._form_hidden_inputs('challenge', login_results)

            tfa_form_strs.update({
                'Pin': tfa_code,
                'TrustDevice': 'on',
            })

            tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')

            tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
            tfa_results = self._download_webpage(
                tfa_req, None,
                note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)

            if tfa_results is False:
                return False

            if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
                self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
                return False
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
                self._downloader.report_warning('unable to log in - did the page structure change?')
                return False
            if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
                self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
                return False

        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
            return False
        return True

    def _real_initialize(self):
        # Runs once before the first extraction: force English pages and
        # attempt login (a failed login is not fatal at this point).
        if self._downloader is None:
            return
        self._set_language()
        if not self._login():
            return
179
180
class YoutubeEntryListBaseInfoExtractor(InfoExtractor):
    # Shared logic for paginated listings that use a "Load more" button.
    def _entries(self, page, playlist_id):
        """Yield entries from *page*, following "Load more" continuations."""
        widget_html = page
        current_html = page
        page_num = 0
        while True:
            page_num += 1
            for entry in self._process_page(current_html):
                yield entry

            load_more = re.search(
                r'data-uix-load-more-href="/?(?P<more>[^"]+)"', widget_html)
            if load_more is None:
                return

            more = self._download_json(
                'https://youtube.com/%s' % load_more.group('more'), playlist_id,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            current_html = more['content_html']
            # Some webpages show a "Load more" button even though they
            # don't have more videos; an empty payload ends the iteration.
            if not current_html.strip():
                return
            widget_html = more['load_more_widget_html']
203
204
class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
    """Base class for pages that list individual videos."""

    def _process_page(self, content):
        # Turn every (id, title) pair found on the page into a url_result
        # handled by the YouTube video extractor.
        for video_id, video_title in self.extract_videos_from_page(content):
            yield self.url_result(video_id, 'Youtube', video_id, video_title)

    def extract_videos_from_page(self, page):
        """Return (video_id, title) pairs found in *page*.

        Duplicate ids are collapsed; the first non-empty title seen for a
        given id wins.
        """
        ids_in_page = []
        titles_in_page = []
        for mobj in re.finditer(self._VIDEO_RE, page):
            # The link with index 0 is not the first video of the playlist (not sure if still actual)
            # (fixed: this used to compare the 'id' group, which holds an
            # 11-character video id and can never equal '0', making the
            # guard dead code; the position lives in the 'index' group)
            if 'index' in mobj.groupdict() and mobj.group('index') == '0':
                continue
            video_id = mobj.group('id')
            video_title = unescapeHTML(mobj.group('title'))
            if video_title:
                video_title = video_title.strip()
            try:
                idx = ids_in_page.index(video_id)
                if video_title and not titles_in_page[idx]:
                    titles_in_page[idx] = video_title
            except ValueError:
                ids_in_page.append(video_id)
                titles_in_page.append(video_title)
        return zip(ids_in_page, titles_in_page)
229
230
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
    """Base class for pages that list whole playlists."""

    def _process_page(self, content):
        # Every playlist link found on the page becomes a url_result
        # delegated to the YoutubePlaylist extractor.
        for mobj in re.finditer(r'href="/?playlist\?list=(.+?)"', content):
            playlist_url = 'https://www.youtube.com/playlist?list=%s' % mobj.group(1)
            yield self.url_result(playlist_url, 'YoutubePlaylist')

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        page_title = self._og_search_title(webpage, fatal=False)
        entries = self._entries(webpage, playlist_id)
        return self.playlist_result(entries, playlist_id, page_title)
242
243
244 class YoutubeIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube.com'
    # Matches the many URL shapes that point at a single video; the video
    # id itself is captured by the ([0-9A-Za-z_-]{11}) group near the end.
    _VALID_URL = r"""(?x)^
                     (
                         (?:https?://|//)                                    # http(s):// or protocol-independent URL
                         (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                            (?:www\.)?deturl\.com/www\.youtube\.com/|
                            (?:www\.)?pwnyoutube\.com/|
                            (?:www\.)?yourepeat\.com/|
                            tube\.majestyc\.net/|
                            youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                         (?:                                                  # the various things that can precede the ID:
                             (?:(?:v|embed|e)/(?!videoseries))                # v/ or embed/ or e/
                             |(?:                                             # or the v= param in all its forms
                                 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                                 (?:.*?&)??                                   # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         ))
                         |(?:
                            youtu\.be|                                        # just youtu.be/xxxx
                            vid\.plus                                         # or vid.plus/xxxx
                         )/
                         |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                         )
                     )?                                                       # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]{11})                                      # here is it! the YouTube video ID
                     (?!.*?&list=)                                            # combined list/video URLs are handled by the playlist IE
                     (?(1).+)?                                                # if we found the ID, everything can follow
                     $"""
    # Used by _real_extract to follow the ?next_url= indirection on
    # confirmation pages.
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    # Known format specs keyed by YouTube "itag" id; these augment the
    # information extracted at runtime from the format streams/manifests.
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240},
        '6': {'ext': 'flv', 'width': 450, 'height': 270},
        '13': {'ext': '3gp'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720},
        '34': {'ext': 'flv', 'width': 640, 'height': 360},
        '35': {'ext': 'flv', 'width': 854, 'height': 480},
        '36': {'ext': '3gp', 'width': 320, 'height': 240},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
        '43': {'ext': 'webm', 'width': 640, 'height': 360},
        '44': {'ext': 'webm', 'width': 854, 'height': 480},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080},
        '59': {'ext': 'mp4', 'width': 854, 'height': 480},
        '78': {'ext': 'mp4', 'width': 854, 'height': 480},


        # 3d videos
        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},

        # Apple HTTP Live Streaming
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},

        # DASH mp4 video
        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '138': {'ext': 'mp4', 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},  # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
        '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},

        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},

        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'vp9'},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
        '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
        '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'vp9'},
        '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},

        # Dash webm audio
        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
        '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},

        # Dash webm audio with opus inside
        '249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
        '250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
        '251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},

        # RTMP (unnamed)
        '_rtmp': {'protocol': 'rtmp'},
    }
368
    IE_NAME = 'youtube'
    # Fixtures exercised by test/test_download.py; 'md5:...' values are
    # checksums of the expected field, not literal field contents.
    _TESTS = [
        {
            'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&t=1s&end=9',
            'info_dict': {
                'id': 'BaW_jenozKc',
                'ext': 'mp4',
                'title': 'youtube-dl test video "\'/\\ä↭𝕐',
                'uploader': 'Philipp Hagemeister',
                'uploader_id': 'phihag',
                'upload_date': '20121002',
                'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
                'categories': ['Science & Technology'],
                'tags': ['youtube-dl'],
                'like_count': int,
                'dislike_count': int,
                'start_time': 1,
                'end_time': 9,
            }
        },
        {
            'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
            'note': 'Test generic use_cipher_signature video (#897)',
            'info_dict': {
                'id': 'UxxajLWwzqY',
                'ext': 'mp4',
                'upload_date': '20120506',
                'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
                'description': 'md5:782e8651347686cba06e58f71ab51773',
                'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
                         'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
                         'iconic ep', 'iconic', 'love', 'it'],
                'uploader': 'Icona Pop',
                'uploader_id': 'IconaPop',
            }
        },
        {
            'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
            'note': 'Test VEVO video with age protection (#956)',
            'info_dict': {
                'id': '07FYdnEawAQ',
                'ext': 'mp4',
                'upload_date': '20130703',
                'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
                'description': 'md5:64249768eec3bc4276236606ea996373',
                'uploader': 'justintimberlakeVEVO',
                'uploader_id': 'justintimberlakeVEVO',
                'age_limit': 18,
            }
        },
        {
            'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
            'note': 'Embed-only video (#1746)',
            'info_dict': {
                'id': 'yZIXLfi8CZQ',
                'ext': 'mp4',
                'upload_date': '20120608',
                'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
                'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
                'uploader': 'SET India',
                'uploader_id': 'setindia',
                'age_limit': 18,
            }
        },
        {
            'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&v=UxxajLWwzqY',
            'note': 'Use the first video ID in the URL',
            'info_dict': {
                'id': 'BaW_jenozKc',
                'ext': 'mp4',
                'title': 'youtube-dl test video "\'/\\ä↭𝕐',
                'uploader': 'Philipp Hagemeister',
                'uploader_id': 'phihag',
                'upload_date': '20121002',
                'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
                'categories': ['Science & Technology'],
                'tags': ['youtube-dl'],
                'like_count': int,
                'dislike_count': int,
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
            'note': '256k DASH audio (format 141) via DASH manifest',
            'info_dict': {
                'id': 'a9LDPn-MO4I',
                'ext': 'm4a',
                'upload_date': '20121002',
                'uploader_id': '8KVIDEO',
                'description': '',
                'uploader': '8KVIDEO',
                'title': 'UHDTV TEST 8K VIDEO.mp4'
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
        # DASH manifest with encrypted signature
        {
            'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
            'info_dict': {
                'id': 'IB3lcPjvWLA',
                'ext': 'm4a',
                'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
                'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
                'uploader': 'AfrojackVEVO',
                'uploader_id': 'AfrojackVEVO',
                'upload_date': '20131011',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
        # JS player signature function name containing $
        {
            'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
            'info_dict': {
                'id': 'nfWlot6h_JM',
                'ext': 'm4a',
                'title': 'Taylor Swift - Shake It Off',
                'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
                'uploader': 'TaylorSwiftVEVO',
                'uploader_id': 'TaylorSwiftVEVO',
                'upload_date': '20140818',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
        # Controversy video
        {
            'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
            'info_dict': {
                'id': 'T4XJQO3qol8',
                'ext': 'mp4',
                'upload_date': '20100909',
                'uploader': 'The Amazing Atheist',
                'uploader_id': 'TheAmazingAtheist',
                'title': 'Burning Everyone\'s Koran',
                'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
            }
        },
        # Normal age-gate video (No vevo, embed allowed)
        {
            'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
            'info_dict': {
                'id': 'HtVdAasjOgU',
                'ext': 'mp4',
                'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
                'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
                'uploader': 'The Witcher',
                'uploader_id': 'WitcherGame',
                'upload_date': '20140605',
                'age_limit': 18,
            },
        },
        # Age-gate video with encrypted signature
        {
            'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
            'info_dict': {
                'id': '6kLq3WMV1nU',
                'ext': 'mp4',
                'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
                'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
                'uploader': 'LloydVEVO',
                'uploader_id': 'LloydVEVO',
                'upload_date': '20110629',
                'age_limit': 18,
            },
        },
        # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
        {
            'url': '__2ABJjxzNo',
            'info_dict': {
                'id': '__2ABJjxzNo',
                'ext': 'mp4',
                'upload_date': '20100430',
                'uploader_id': 'deadmau5',
                'description': 'md5:12c56784b8032162bb936a5f76d55360',
                'uploader': 'deadmau5',
                'title': 'Deadmau5 - Some Chords (HD)',
            },
            'expected_warnings': [
                'DASH manifest missing',
            ]
        },
        # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
        {
            'url': 'lqQg6PlCWgI',
            'info_dict': {
                'id': 'lqQg6PlCWgI',
                'ext': 'mp4',
                'upload_date': '20150827',
                'uploader_id': 'olympic',
                'description': 'HO09 - Women -  GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
                'uploader': 'Olympics',
                'title': 'Hockey - Women -  GER-AUS - London 2012 Olympic Games',
            },
            'params': {
                'skip_download': 'requires avconv',
            }
        },
        # Non-square pixels
        {
            'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
            'info_dict': {
                'id': '_b-2C3KPAM0',
                'ext': 'mp4',
                'stretched_ratio': 16 / 9.,
                'upload_date': '20110310',
                'uploader_id': 'AllenMeow',
                'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
                'uploader': '孫艾倫',
                'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
            },
        },
        # url_encoded_fmt_stream_map is empty string
        {
            'url': 'qEJwOuvDf7I',
            'info_dict': {
                'id': 'qEJwOuvDf7I',
                'ext': 'webm',
                'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
                'description': '',
                'upload_date': '20150404',
                'uploader_id': 'spbelect',
                'uploader': 'Наблюдатели Петербурга',
            },
            'params': {
                'skip_download': 'requires avconv',
            }
        },
        # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
        {
            'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
            'info_dict': {
                'id': 'FIl7x6_3R5Y',
                'ext': 'mp4',
                'title': 'md5:7b81415841e02ecd4313668cde88737a',
                'description': 'md5:116377fd2963b81ec4ce64b542173306',
                'upload_date': '20150625',
                'uploader_id': 'dorappi2000',
                'uploader': 'dorappi2000',
                'formats': 'mincount:33',
            },
        },
        # DASH manifest with segment_list
        {
            'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
            'md5': '8ce563a1d667b599d21064e982ab9e31',
            'info_dict': {
                'id': 'CsmdDsKjzN8',
                'ext': 'mp4',
                'upload_date': '20150501',  # According to '<meta itemprop="datePublished"', but in other places it's 20150510
                'uploader': 'Airtek',
                'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
                'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
                'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '135',  # bestvideo
            }
        },
        {
            # Multifeed videos (multiple cameras), URL is for Main Camera
            'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
            'info_dict': {
                'id': 'jqWvoWXjCVs',
                'title': 'teamPGP: Rocket League Noob Stream',
                'description': 'md5:dc7872fb300e143831327f1bae3af010',
            },
            'playlist': [{
                'info_dict': {
                    'id': 'jqWvoWXjCVs',
                    'ext': 'mp4',
                    'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                },
            }, {
                'info_dict': {
                    'id': '6h8e8xoXJzg',
                    'ext': 'mp4',
                    'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                },
            }, {
                'info_dict': {
                    'id': 'PUOgX5z9xZw',
                    'ext': 'mp4',
                    'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                },
            }, {
                'info_dict': {
                    'id': 'teuwxikvS5k',
                    'ext': 'mp4',
                    'title': 'teamPGP: Rocket League Noob Stream (zim)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                },
            }],
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://vid.plus/FlRa-iH7PGw',
            'only_matching': True,
        },
        {
            # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
            'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
            'info_dict': {
                'id': 'lsguqyKfVQg',
                'ext': 'mp4',
                'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
                'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
                'upload_date': '20151119',
                'uploader_id': 'IronSoulElf',
                'uploader': 'IronSoulElf',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
            'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
            'only_matching': True,
        },
    ]
718
719 def __init__(self, *args, **kwargs):
720 super(YoutubeIE, self).__init__(*args, **kwargs)
721 self._player_cache = {}
722
723 def report_video_info_webpage_download(self, video_id):
724 """Report attempt to download video info webpage."""
725 self.to_screen('%s: Downloading video info webpage' % video_id)
726
727 def report_information_extraction(self, video_id):
728 """Report attempt to extract video information."""
729 self.to_screen('%s: Extracting video information' % video_id)
730
731 def report_unavailable_format(self, video_id, format):
732 """Report extracted video URL."""
733 self.to_screen('%s: Format %s not available' % (video_id, format))
734
735 def report_rtmp_download(self):
736 """Indicate the download will use the RTMP protocol."""
737 self.to_screen('RTMP download detected')
738
739 def _signature_cache_id(self, example_sig):
740 """ Return a string representation of a signature """
741 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
742
    def _extract_signature_function(self, video_id, player_url, example_sig):
        """Build (or load from disk cache) the signature-deciphering
        function for the player at *player_url*.

        The result is cached keyed by player type, player id and the
        "shape" of *example_sig*, so each player only has to be
        downloaded and parsed once per signature shape.
        """
        # Derive the player type (js/swf) and id from the player URL.
        id_m = re.match(
            r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
            player_url)
        if not id_m:
            raise ExtractorError('Cannot identify player %r' % player_url)
        player_type = id_m.group('ext')
        player_id = id_m.group('id')

        # Read from filesystem cache
        func_id = '%s_%s_%s' % (
            player_type, player_id, self._signature_cache_id(example_sig))
        # func_id doubles as a cache file name, so it must not contain
        # path separators.
        assert os.path.basename(func_id) == func_id

        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
        if cache_spec is not None:
            # The cached spec is a list of input-character indices: output
            # position k takes input character cache_spec[k].
            return lambda s: ''.join(s[i] for i in cache_spec)

        download_note = (
            'Downloading player %s' % player_url
            if self._downloader.params.get('verbose') else
            'Downloading %s player %s' % (player_type, player_id)
        )
        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            urlh = self._request_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            assert False, 'Invalid player type %r' % player_type

        # Probe the function with a string of distinct characters to
        # record which input index lands at each output position.
        # NOTE(review): this assumes the function only rearranges/drops
        # characters; a player that substituted character values would be
        # cached incorrectly -- confirm if players ever change.
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = res(test_string)
        cache_spec = [ord(c) for c in cache_res]

        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
        return res
788
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the deciphering *func*.

        Used with --youtube-print-sig-code so that a minimal extraction
        snippet can be pasted into bug reports or the extractor itself.
        """
        def gen_sig_code(idxs):
            # Compress the index list into slice expressions wherever the
            # indices form runs with stride +1 or -1.
            def _genslice(start, end, step):
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)

            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    # Inside a run: keep extending while the stride holds.
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    # Two adjacent indices start a new +1/-1 run.
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            # Flush the final element or run.
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)

        # Feed the function a string of unique characters; the output's
        # character codes reveal which input index each position takes.
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
827
828 def _parse_sig_js(self, jscode):
829 funcname = self._search_regex(
830 r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
831 'Initial JS player signature function name')
832
833 jsi = JSInterpreter(jscode)
834 initial_function = jsi.extract_function(funcname)
835 return lambda s: initial_function([s])
836
837 def _parse_sig_swf(self, file_contents):
838 swfi = SWFInterpreter(file_contents)
839 TARGET_CLASSNAME = 'SignatureDecipher'
840 searched_class = swfi.extract_class(TARGET_CLASSNAME)
841 initial_function = swfi.extract_function(searched_class, 'decipher')
842 return lambda s: initial_function([s])
843
844 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
845 """Turn the encrypted s field into a working signature"""
846
847 if player_url is None:
848 raise ExtractorError('Cannot decrypt signature without player_url')
849
850 if player_url.startswith('//'):
851 player_url = 'https:' + player_url
852 try:
853 player_id = (player_url, self._signature_cache_id(s))
854 if player_id not in self._player_cache:
855 func = self._extract_signature_function(
856 video_id, player_url, s
857 )
858 self._player_cache[player_id] = func
859 func = self._player_cache[player_id]
860 if self._downloader.params.get('youtube_print_sig_code'):
861 self._print_sig_code(func, s)
862 return func(s)
863 except Exception as e:
864 tb = traceback.format_exc()
865 raise ExtractorError(
866 'Signature extraction failed: ' + tb, cause=e)
867
868 def _get_subtitles(self, video_id, webpage):
869 try:
870 subs_doc = self._download_xml(
871 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
872 video_id, note=False)
873 except ExtractorError as err:
874 self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
875 return {}
876
877 sub_lang_list = {}
878 for track in subs_doc.findall('track'):
879 lang = track.attrib['lang_code']
880 if lang in sub_lang_list:
881 continue
882 sub_formats = []
883 for ext in ['sbv', 'vtt', 'srt']:
884 params = compat_urllib_parse.urlencode({
885 'lang': lang,
886 'v': video_id,
887 'fmt': ext,
888 'name': track.attrib['name'].encode('utf-8'),
889 })
890 sub_formats.append({
891 'url': 'https://www.youtube.com/api/timedtext?' + params,
892 'ext': ext,
893 })
894 sub_lang_list[lang] = sub_formats
895 if not sub_lang_list:
896 self._downloader.report_warning('video doesn\'t have subtitles')
897 return {}
898 return sub_lang_list
899
900 def _get_ytplayer_config(self, video_id, webpage):
901 patterns = (
902 # User data may contain arbitrary character sequences that may affect
903 # JSON extraction with regex, e.g. when '};' is contained the second
904 # regex won't capture the whole JSON. Yet working around by trying more
905 # concrete regex first keeping in mind proper quoted string handling
906 # to be implemented in future that will replace this workaround (see
907 # https://github.com/rg3/youtube-dl/issues/7468,
908 # https://github.com/rg3/youtube-dl/pull/7599)
909 r';ytplayer\.config\s*=\s*({.+?});ytplayer',
910 r';ytplayer\.config\s*=\s*({.+?});',
911 )
912 config = self._search_regex(
913 patterns, webpage, 'ytplayer.config', default=None)
914 if config:
915 return self._parse_json(
916 uppercase_escape(config), video_id, fatal=False)
917
    def _get_automatic_captions(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
        argument to speed up the process."""
        self.to_screen('%s: Looking for automatic captions' % video_id)
        player_config = self._get_ytplayer_config(video_id, webpage)
        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
        if not player_config:
            self._downloader.report_warning(err_msg)
            return {}
        try:
            # Missing 'args'/'ttsurl'/'timestamp' raises KeyError, which is
            # caught below and treated as "no automatic captions".
            args = player_config['args']
            caption_url = args['ttsurl']
            timestamp = args['timestamp']
            # We get the available subtitles
            list_params = compat_urllib_parse.urlencode({
                'type': 'list',
                'tlangs': 1,
                'asrs': 1,
            })
            list_url = caption_url + '&' + list_params
            caption_list = self._download_xml(list_url, video_id)
            # The first <track> describes the source language of the
            # automatic captions; its 'kind' attribute (if any) is forwarded
            # to the timedtext API below.
            original_lang_node = caption_list.find('track')
            if original_lang_node is None:
                self._downloader.report_warning('Video doesn\'t have automatic captions')
                return {}
            original_lang = original_lang_node.attrib['lang_code']
            caption_kind = original_lang_node.attrib.get('kind', '')

            sub_lang_list = {}
            # Each <target> is a language offered in addition to the source
            # track (presumably via auto-translation — 'tlang' parameter);
            # every supported caption file format is exposed for each.
            for lang_node in caption_list.findall('target'):
                sub_lang = lang_node.attrib['lang_code']
                sub_formats = []
                for ext in ['sbv', 'vtt', 'srt']:
                    params = compat_urllib_parse.urlencode({
                        'lang': original_lang,
                        'tlang': sub_lang,
                        'fmt': ext,
                        'ts': timestamp,
                        'kind': caption_kind,
                    })
                    sub_formats.append({
                        'url': caption_url + '&' + params,
                        'ext': ext,
                    })
                sub_lang_list[sub_lang] = sub_formats
            return sub_lang_list
        # An extractor error can be raise by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}
969
970 @classmethod
971 def extract_id(cls, url):
972 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
973 if mobj is None:
974 raise ExtractorError('Invalid URL: %s' % url)
975 video_id = mobj.group(2)
976 return video_id
977
978 def _extract_from_m3u8(self, manifest_url, video_id):
979 url_map = {}
980
981 def _get_urls(_manifest):
982 lines = _manifest.split('\n')
983 urls = filter(lambda l: l and not l.startswith('#'),
984 lines)
985 return urls
986 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
987 formats_urls = _get_urls(manifest)
988 for format_url in formats_urls:
989 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
990 url_map[itag] = format_url
991 return url_map
992
993 def _extract_annotations(self, video_id):
994 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
995 return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
996
    def _parse_dash_manifest(
            self, video_id, dash_manifest_url, player_url, age_gate, fatal=True):
        """Download a DASH MPD manifest and return a list of format dicts.

        Encrypted '/s/<sig>' path components in the manifest URL are
        deciphered first and replaced with '/signature/<decrypted>'.
        Returns [] when the download fails and *fatal* is False.
        """
        def decrypt_sig(mobj):
            s = mobj.group(1)
            dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
            return '/signature/%s' % dec_s
        dash_manifest_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, dash_manifest_url)
        dash_doc = self._download_xml(
            dash_manifest_url, video_id,
            note='Downloading DASH manifest',
            errnote='Could not download DASH manifest',
            fatal=fatal)

        # _download_xml returns False (not None) on non-fatal failure.
        if dash_doc is False:
            return []

        formats = []
        for a in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}AdaptationSet'):
            mime_type = a.attrib.get('mimeType')
            for r in a.findall('{urn:mpeg:DASH:schema:MPD:2011}Representation'):
                url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
                if url_el is None:
                    continue
                if mime_type == 'text/vtt':
                    # TODO implement WebVTT downloading
                    pass
                elif mime_type.startswith('audio/') or mime_type.startswith('video/'):
                    segment_list = r.find('{urn:mpeg:DASH:schema:MPD:2011}SegmentList')
                    format_id = r.attrib['id']
                    video_url = url_el.text
                    # File size is carried in a YouTube-specific attribute
                    # on the BaseURL element.
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
                    f = {
                        'format_id': format_id,
                        'url': video_url,
                        'width': int_or_none(r.attrib.get('width')),
                        'height': int_or_none(r.attrib.get('height')),
                        'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
                        'asr': int_or_none(r.attrib.get('audioSamplingRate')),
                        'filesize': filesize,
                        'fps': int_or_none(r.attrib.get('frameRate')),
                    }
                    if segment_list is not None:
                        # Segmented representation: expose the initialization
                        # segment and the media segment URLs.
                        f.update({
                            'initialization_url': segment_list.find('{urn:mpeg:DASH:schema:MPD:2011}Initialization').attrib['sourceURL'],
                            'segment_urls': [segment.attrib.get('media') for segment in segment_list.findall('{urn:mpeg:DASH:schema:MPD:2011}SegmentURL')],
                            'protocol': 'http_dash_segments',
                        })
                    try:
                        existing_format = next(
                            fo for fo in formats
                            if fo['format_id'] == format_id)
                    except StopIteration:
                        # New itag: start from the static metadata table and
                        # overlay what the manifest declares.
                        full_info = self._formats.get(format_id, {}).copy()
                        full_info.update(f)
                        codecs = r.attrib.get('codecs')
                        if codecs:
                            # Fill in only the codec side the static table
                            # left undefined while marking the other 'none'.
                            if full_info.get('acodec') == 'none' and 'vcodec' not in full_info:
                                full_info['vcodec'] = codecs
                            elif full_info.get('vcodec') == 'none' and 'acodec' not in full_info:
                                full_info['acodec'] = codecs
                        formats.append(full_info)
                    else:
                        # Duplicate itag within the manifest: merge in place.
                        existing_format.update(f)
                else:
                    self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
        return formats
1063
1064 def _real_extract(self, url):
1065 url, smuggled_data = unsmuggle_url(url, {})
1066
1067 proto = (
1068 'http' if self._downloader.params.get('prefer_insecure', False)
1069 else 'https')
1070
1071 start_time = None
1072 end_time = None
1073 parsed_url = compat_urllib_parse_urlparse(url)
1074 for component in [parsed_url.fragment, parsed_url.query]:
1075 query = compat_parse_qs(component)
1076 if start_time is None and 't' in query:
1077 start_time = parse_duration(query['t'][0])
1078 if start_time is None and 'start' in query:
1079 start_time = parse_duration(query['start'][0])
1080 if end_time is None and 'end' in query:
1081 end_time = parse_duration(query['end'][0])
1082
1083 # Extract original video URL from URL with redirection, like age verification, using next_url parameter
1084 mobj = re.search(self._NEXT_URL_RE, url)
1085 if mobj:
1086 url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1087 video_id = self.extract_id(url)
1088
1089 # Get video webpage
1090 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1091 video_webpage = self._download_webpage(url, video_id)
1092
1093 # Attempt to extract SWF player URL
1094 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1095 if mobj is not None:
1096 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
1097 else:
1098 player_url = None
1099
1100 dash_mpds = []
1101
1102 def add_dash_mpd(video_info):
1103 dash_mpd = video_info.get('dashmpd')
1104 if dash_mpd and dash_mpd[0] not in dash_mpds:
1105 dash_mpds.append(dash_mpd[0])
1106
1107 # Get video info
1108 embed_webpage = None
1109 is_live = None
1110 if re.search(r'player-age-gate-content">', video_webpage) is not None:
1111 age_gate = True
1112 # We simulate the access to the video from www.youtube.com/v/{video_id}
1113 # this can be viewed without login into Youtube
1114 url = proto + '://www.youtube.com/embed/%s' % video_id
1115 embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1116 data = compat_urllib_parse.urlencode({
1117 'video_id': video_id,
1118 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1119 'sts': self._search_regex(
1120 r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1121 })
1122 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1123 video_info_webpage = self._download_webpage(
1124 video_info_url, video_id,
1125 note='Refetching age-gated info webpage',
1126 errnote='unable to download video info webpage')
1127 video_info = compat_parse_qs(video_info_webpage)
1128 add_dash_mpd(video_info)
1129 else:
1130 age_gate = False
1131 video_info = None
1132 # Try looking directly into the video webpage
1133 ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1134 if ytplayer_config:
1135 args = ytplayer_config['args']
1136 if args.get('url_encoded_fmt_stream_map'):
1137 # Convert to the same format returned by compat_parse_qs
1138 video_info = dict((k, [v]) for k, v in args.items())
1139 add_dash_mpd(video_info)
1140 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1141 is_live = True
1142 if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1143 # We also try looking in get_video_info since it may contain different dashmpd
1144 # URL that points to a DASH manifest with possibly different itag set (some itags
1145 # are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
1146 # manifest pointed by get_video_info's dashmpd).
1147 # The general idea is to take a union of itags of both DASH manifests (for example
1148 # video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
1149 self.report_video_info_webpage_download(video_id)
1150 for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
1151 video_info_url = (
1152 '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
1153 % (proto, video_id, el_type))
1154 video_info_webpage = self._download_webpage(
1155 video_info_url,
1156 video_id, note=False,
1157 errnote='unable to download video info webpage')
1158 get_video_info = compat_parse_qs(video_info_webpage)
1159 if get_video_info.get('use_cipher_signature') != ['True']:
1160 add_dash_mpd(get_video_info)
1161 if not video_info:
1162 video_info = get_video_info
1163 if 'token' in get_video_info:
1164 # Different get_video_info requests may report different results, e.g.
1165 # some may report video unavailability, but some may serve it without
1166 # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
1167 # the original webpage as well as el=info and el=embedded get_video_info
1168 # requests report video unavailability due to geo restriction while
1169 # el=detailpage succeeds and returns valid data). This is probably
1170 # due to YouTube measures against IP ranges of hosting providers.
1171 # Working around by preferring the first succeeded video_info containing
1172 # the token if no such video_info yet was found.
1173 if 'token' not in video_info:
1174 video_info = get_video_info
1175 break
1176 if 'token' not in video_info:
1177 if 'reason' in video_info:
1178 if 'The uploader has not made this video available in your country.' in video_info['reason']:
1179 regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
1180 if regions_allowed:
1181 raise ExtractorError('YouTube said: This video is available in %s only' % (
1182 ', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
1183 expected=True)
1184 raise ExtractorError(
1185 'YouTube said: %s' % video_info['reason'][0],
1186 expected=True, video_id=video_id)
1187 else:
1188 raise ExtractorError(
1189 '"token" parameter not in video info for unknown reason',
1190 video_id=video_id)
1191
1192 # title
1193 if 'title' in video_info:
1194 video_title = video_info['title'][0]
1195 else:
1196 self._downloader.report_warning('Unable to extract video title')
1197 video_title = '_'
1198
1199 # description
1200 video_description = get_element_by_id("eow-description", video_webpage)
1201 if video_description:
1202 video_description = re.sub(r'''(?x)
1203 <a\s+
1204 (?:[a-zA-Z-]+="[^"]+"\s+)*?
1205 title="([^"]+)"\s+
1206 (?:[a-zA-Z-]+="[^"]+"\s+)*?
1207 class="yt-uix-redirect-link"\s*>
1208 [^<]+
1209 </a>
1210 ''', r'\1', video_description)
1211 video_description = clean_html(video_description)
1212 else:
1213 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
1214 if fd_mobj:
1215 video_description = unescapeHTML(fd_mobj.group(1))
1216 else:
1217 video_description = ''
1218
1219 if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
1220 if not self._downloader.params.get('noplaylist'):
1221 entries = []
1222 feed_ids = []
1223 multifeed_metadata_list = compat_urllib_parse_unquote_plus(video_info['multifeed_metadata_list'][0])
1224 for feed in multifeed_metadata_list.split(','):
1225 feed_data = compat_parse_qs(feed)
1226 entries.append({
1227 '_type': 'url_transparent',
1228 'ie_key': 'Youtube',
1229 'url': smuggle_url(
1230 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1231 {'force_singlefeed': True}),
1232 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1233 })
1234 feed_ids.append(feed_data['id'][0])
1235 self.to_screen(
1236 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1237 % (', '.join(feed_ids), video_id))
1238 return self.playlist_result(entries, video_id, video_title, video_description)
1239 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1240
1241 if 'view_count' in video_info:
1242 view_count = int(video_info['view_count'][0])
1243 else:
1244 view_count = None
1245
1246 # Check for "rental" videos
1247 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1248 raise ExtractorError('"rental" videos not supported')
1249
1250 # Start extracting information
1251 self.report_information_extraction(video_id)
1252
1253 # uploader
1254 if 'author' not in video_info:
1255 raise ExtractorError('Unable to extract uploader name')
1256 video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
1257
1258 # uploader_id
1259 video_uploader_id = None
1260 mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
1261 if mobj is not None:
1262 video_uploader_id = mobj.group(1)
1263 else:
1264 self._downloader.report_warning('unable to extract uploader nickname')
1265
1266 # thumbnail image
1267 # We try first to get a high quality image:
1268 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
1269 video_webpage, re.DOTALL)
1270 if m_thumb is not None:
1271 video_thumbnail = m_thumb.group(1)
1272 elif 'thumbnail_url' not in video_info:
1273 self._downloader.report_warning('unable to extract video thumbnail')
1274 video_thumbnail = None
1275 else: # don't panic if we can't find it
1276 video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
1277
1278 # upload date
1279 upload_date = self._html_search_meta(
1280 'datePublished', video_webpage, 'upload date', default=None)
1281 if not upload_date:
1282 upload_date = self._search_regex(
1283 [r'(?s)id="eow-date.*?>(.*?)</span>',
1284 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
1285 video_webpage, 'upload date', default=None)
1286 if upload_date:
1287 upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
1288 upload_date = unified_strdate(upload_date)
1289
1290 m_cat_container = self._search_regex(
1291 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
1292 video_webpage, 'categories', default=None)
1293 if m_cat_container:
1294 category = self._html_search_regex(
1295 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
1296 default=None)
1297 video_categories = None if category is None else [category]
1298 else:
1299 video_categories = None
1300
1301 video_tags = [
1302 unescapeHTML(m.group('content'))
1303 for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
1304
1305 def _extract_count(count_name):
1306 return str_to_int(self._search_regex(
1307 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
1308 % re.escape(count_name),
1309 video_webpage, count_name, default=None))
1310
1311 like_count = _extract_count('like')
1312 dislike_count = _extract_count('dislike')
1313
1314 # subtitles
1315 video_subtitles = self.extract_subtitles(video_id, video_webpage)
1316 automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
1317
1318 if 'length_seconds' not in video_info:
1319 self._downloader.report_warning('unable to extract video duration')
1320 video_duration = None
1321 else:
1322 video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
1323
1324 # annotations
1325 video_annotations = None
1326 if self._downloader.params.get('writeannotations', False):
1327 video_annotations = self._extract_annotations(video_id)
1328
1329 def _map_to_format_list(urlmap):
1330 formats = []
1331 for itag, video_real_url in urlmap.items():
1332 dct = {
1333 'format_id': itag,
1334 'url': video_real_url,
1335 'player_url': player_url,
1336 }
1337 if itag in self._formats:
1338 dct.update(self._formats[itag])
1339 formats.append(dct)
1340 return formats
1341
1342 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1343 self.report_rtmp_download()
1344 formats = [{
1345 'format_id': '_rtmp',
1346 'protocol': 'rtmp',
1347 'url': video_info['conn'][0],
1348 'player_url': player_url,
1349 }]
1350 elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
1351 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1352 if 'rtmpe%3Dyes' in encoded_url_map:
1353 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
1354 formats = []
1355 for url_data_str in encoded_url_map.split(','):
1356 url_data = compat_parse_qs(url_data_str)
1357 if 'itag' not in url_data or 'url' not in url_data:
1358 continue
1359 format_id = url_data['itag'][0]
1360 url = url_data['url'][0]
1361
1362 if 'sig' in url_data:
1363 url += '&signature=' + url_data['sig'][0]
1364 elif 's' in url_data:
1365 encrypted_sig = url_data['s'][0]
1366 ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
1367
1368 jsplayer_url_json = self._search_regex(
1369 ASSETS_RE,
1370 embed_webpage if age_gate else video_webpage,
1371 'JS player URL (1)', default=None)
1372 if not jsplayer_url_json and not age_gate:
1373 # We need the embed website after all
1374 if embed_webpage is None:
1375 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
1376 embed_webpage = self._download_webpage(
1377 embed_url, video_id, 'Downloading embed webpage')
1378 jsplayer_url_json = self._search_regex(
1379 ASSETS_RE, embed_webpage, 'JS player URL')
1380
1381 player_url = json.loads(jsplayer_url_json)
1382 if player_url is None:
1383 player_url_json = self._search_regex(
1384 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
1385 video_webpage, 'age gate player URL')
1386 player_url = json.loads(player_url_json)
1387
1388 if self._downloader.params.get('verbose'):
1389 if player_url is None:
1390 player_version = 'unknown'
1391 player_desc = 'unknown'
1392 else:
1393 if player_url.endswith('swf'):
1394 player_version = self._search_regex(
1395 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
1396 'flash player', fatal=False)
1397 player_desc = 'flash player %s' % player_version
1398 else:
1399 player_version = self._search_regex(
1400 [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
1401 player_url,
1402 'html5 player', fatal=False)
1403 player_desc = 'html5 player %s' % player_version
1404
1405 parts_sizes = self._signature_cache_id(encrypted_sig)
1406 self.to_screen('{%s} signature length %s, %s' %
1407 (format_id, parts_sizes, player_desc))
1408
1409 signature = self._decrypt_signature(
1410 encrypted_sig, video_id, player_url, age_gate)
1411 url += '&signature=' + signature
1412 if 'ratebypass' not in url:
1413 url += '&ratebypass=yes'
1414
1415 # Some itags are not included in DASH manifest thus corresponding formats will
1416 # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
1417 # Trying to extract metadata from url_encoded_fmt_stream_map entry.
1418 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
1419 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
1420 dct = {
1421 'format_id': format_id,
1422 'url': url,
1423 'player_url': player_url,
1424 'filesize': int_or_none(url_data.get('clen', [None])[0]),
1425 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
1426 'width': width,
1427 'height': height,
1428 'fps': int_or_none(url_data.get('fps', [None])[0]),
1429 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
1430 }
1431 type_ = url_data.get('type', [None])[0]
1432 if type_:
1433 type_split = type_.split(';')
1434 kind_ext = type_split[0].split('/')
1435 if len(kind_ext) == 2:
1436 kind, ext = kind_ext
1437 dct['ext'] = ext
1438 if kind in ('audio', 'video'):
1439 codecs = None
1440 for mobj in re.finditer(
1441 r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
1442 if mobj.group('key') == 'codecs':
1443 codecs = mobj.group('val')
1444 break
1445 if codecs:
1446 codecs = codecs.split(',')
1447 if len(codecs) == 2:
1448 acodec, vcodec = codecs[0], codecs[1]
1449 else:
1450 acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
1451 dct.update({
1452 'acodec': acodec,
1453 'vcodec': vcodec,
1454 })
1455 if format_id in self._formats:
1456 dct.update(self._formats[format_id])
1457 formats.append(dct)
1458 elif video_info.get('hlsvp'):
1459 manifest_url = video_info['hlsvp'][0]
1460 url_map = self._extract_from_m3u8(manifest_url, video_id)
1461 formats = _map_to_format_list(url_map)
1462 else:
1463 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
1464
1465 # Look for the DASH manifest
1466 if self._downloader.params.get('youtube_include_dash_manifest', True):
1467 dash_mpd_fatal = True
1468 for dash_manifest_url in dash_mpds:
1469 dash_formats = {}
1470 try:
1471 for df in self._parse_dash_manifest(
1472 video_id, dash_manifest_url, player_url, age_gate, dash_mpd_fatal):
1473 # Do not overwrite DASH format found in some previous DASH manifest
1474 if df['format_id'] not in dash_formats:
1475 dash_formats[df['format_id']] = df
1476 # Additional DASH manifests may end up in HTTP Error 403 therefore
1477 # allow them to fail without bug report message if we already have
1478 # some DASH manifest succeeded. This is temporary workaround to reduce
1479 # burst of bug reports until we figure out the reason and whether it
1480 # can be fixed at all.
1481 dash_mpd_fatal = False
1482 except (ExtractorError, KeyError) as e:
1483 self.report_warning(
1484 'Skipping DASH manifest: %r' % e, video_id)
1485 if dash_formats:
1486 # Remove the formats we found through non-DASH, they
1487 # contain less info and it can be wrong, because we use
1488 # fixed values (for example the resolution). See
1489 # https://github.com/rg3/youtube-dl/issues/5774 for an
1490 # example.
1491 formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
1492 formats.extend(dash_formats.values())
1493
1494 # Check for malformed aspect ratio
1495 stretched_m = re.search(
1496 r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
1497 video_webpage)
1498 if stretched_m:
1499 ratio = float(stretched_m.group('w')) / float(stretched_m.group('h'))
1500 for f in formats:
1501 if f.get('vcodec') != 'none':
1502 f['stretched_ratio'] = ratio
1503
1504 self._sort_formats(formats)
1505
1506 return {
1507 'id': video_id,
1508 'uploader': video_uploader,
1509 'uploader_id': video_uploader_id,
1510 'upload_date': upload_date,
1511 'title': video_title,
1512 'thumbnail': video_thumbnail,
1513 'description': video_description,
1514 'categories': video_categories,
1515 'tags': video_tags,
1516 'subtitles': video_subtitles,
1517 'automatic_captions': automatic_captions,
1518 'duration': video_duration,
1519 'age_limit': 18 if age_gate else 0,
1520 'annotations': video_annotations,
1521 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1522 'view_count': view_count,
1523 'like_count': like_count,
1524 'dislike_count': dislike_count,
1525 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
1526 'formats': formats,
1527 'is_live': is_live,
1528 'start_time': start_time,
1529 'end_time': end_time,
1530 }
1531
1532
1533 class YoutubePlaylistIE(YoutubeBaseInfoExtractor, YoutubePlaylistBaseInfoExtractor):
1534 IE_DESC = 'YouTube.com playlists'
1535 _VALID_URL = r"""(?x)(?:
1536 (?:https?://)?
1537 (?:\w+\.)?
1538 youtube\.com/
1539 (?:
1540 (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
1541 \? (?:.*?&)*? (?:p|a|list)=
1542 | p/
1543 )
1544 (
1545 (?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
1546 # Top tracks, they can also include dots
1547 |(?:MC)[\w\.]*
1548 )
1549 .*
1550 |
1551 ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
1552 )"""
1553 _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
1554 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
1555 IE_NAME = 'youtube:playlist'
1556 _TESTS = [{
1557 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1558 'info_dict': {
1559 'title': 'ytdl test PL',
1560 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1561 },
1562 'playlist_count': 3,
1563 }, {
1564 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1565 'info_dict': {
1566 'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1567 'title': 'YDL_Empty_List',
1568 },
1569 'playlist_count': 0,
1570 }, {
1571 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
1572 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1573 'info_dict': {
1574 'title': '29C3: Not my department',
1575 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1576 },
1577 'playlist_count': 95,
1578 }, {
1579 'note': 'issue #673',
1580 'url': 'PLBB231211A4F62143',
1581 'info_dict': {
1582 'title': '[OLD]Team Fortress 2 (Class-based LP)',
1583 'id': 'PLBB231211A4F62143',
1584 },
1585 'playlist_mincount': 26,
1586 }, {
1587 'note': 'Large playlist',
1588 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
1589 'info_dict': {
1590 'title': 'Uploads from Cauchemar',
1591 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
1592 },
1593 'playlist_mincount': 799,
1594 }, {
1595 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1596 'info_dict': {
1597 'title': 'YDL_safe_search',
1598 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1599 },
1600 'playlist_count': 2,
1601 }, {
1602 'note': 'embedded',
1603 'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1604 'playlist_count': 4,
1605 'info_dict': {
1606 'title': 'JODA15',
1607 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1608 }
1609 }, {
1610 'note': 'Embedded SWF player',
1611 'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
1612 'playlist_count': 4,
1613 'info_dict': {
1614 'title': 'JODA7',
1615 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
1616 }
1617 }, {
1618 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
1619 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
1620 'info_dict': {
1621 'title': 'Uploads from Interstellar Movie',
1622 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
1623 },
1624 'playlist_mincout': 21,
1625 }]
1626
    def _real_initialize(self):
        # Log in (when credentials were supplied) before any playlist request.
        self._login()
1629
1630 def _extract_mix(self, playlist_id):
1631 # The mixes are generated from a single video
1632 # the id of the playlist is just 'RD' + video_id
1633 url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
1634 webpage = self._download_webpage(
1635 url, playlist_id, 'Downloading Youtube mix')
1636 search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
1637 title_span = (
1638 search_title('playlist-title') or
1639 search_title('title long-title') or
1640 search_title('title'))
1641 title = clean_html(title_span)
1642 ids = orderedSet(re.findall(
1643 r'''(?xs)data-video-username=".*?".*?
1644 href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
1645 webpage))
1646 url_results = self._ids_to_results(ids)
1647
1648 return self.playlist_result(url_results, playlist_id, title)
1649
1650 def _extract_playlist(self, playlist_id):
1651 url = self._TEMPLATE_URL % playlist_id
1652 page = self._download_webpage(url, playlist_id)
1653
1654 for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
1655 match = match.strip()
1656 # Check if the playlist exists or is private
1657 if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
1658 raise ExtractorError(
1659 'The playlist doesn\'t exist or is private, use --username or '
1660 '--netrc to access it.',
1661 expected=True)
1662 elif re.match(r'[^<]*Invalid parameters[^<]*', match):
1663 raise ExtractorError(
1664 'Invalid parameters. Maybe URL is incorrect.',
1665 expected=True)
1666 elif re.match(r'[^<]*Choose your language[^<]*', match):
1667 continue
1668 else:
1669 self.report_warning('Youtube gives an alert message: ' + match)
1670
1671 playlist_title = self._html_search_regex(
1672 r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
1673 page, 'title')
1674
1675 return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
1676
1677 def _real_extract(self, url):
1678 # Extract playlist id
1679 mobj = re.match(self._VALID_URL, url)
1680 if mobj is None:
1681 raise ExtractorError('Invalid URL: %s' % url)
1682 playlist_id = mobj.group(1) or mobj.group(2)
1683
1684 # Check if it's a video-specific URL
1685 query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
1686 if 'v' in query_dict:
1687 video_id = query_dict['v'][0]
1688 if self._downloader.params.get('noplaylist'):
1689 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1690 return self.url_result(video_id, 'Youtube', video_id=video_id)
1691 else:
1692 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
1693
1694 if playlist_id.startswith('RD') or playlist_id.startswith('UL'):
1695 # Mixes require a custom extraction process
1696 return self._extract_mix(playlist_id)
1697
1698 return self._extract_playlist(playlist_id)
1699
1700
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
    IE_DESC = 'YouTube.com channels'
    _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
    _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
    _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
    IE_NAME = 'youtube:channel'
    _TESTS = [{
        'note': 'paginated channel',
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
        'playlist_mincount': 91,
        'info_dict': {
            'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
            'title': 'Uploads from lex will',
        }
    }, {
        'note': 'Age restricted channel',
        # from https://www.youtube.com/user/DeusExOfficial
        'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
        'playlist_mincount': 64,
        'info_dict': {
            'id': 'UUs0ifCMCm1icqRbqhUINa0w',
            'title': 'Uploads from Deus Ex',
        },
    }]

    def _extract_channel_external_id(self, channel_page):
        # The channel id is exposed either through a meta tag or through a
        # data attribute, depending on the page variant served.
        external_id = self._html_search_meta(
            'channelId', channel_page, 'channel id', default=None)
        if external_id:
            return external_id
        return self._search_regex(
            r'data-(?:channel-external-|yt)id="([^"]+)"',
            channel_page, 'channel id', default=None)

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        url = self._TEMPLATE_URL % channel_id

        # Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
        # Workaround by extracting as a playlist if managed to obtain channel playlist URL
        # otherwise fallback on channel by page extraction
        channel_page = self._download_webpage(
            url + '?view=57', channel_id,
            'Downloading channel page', fatal=False)
        # _download_webpage returns False (not None) on a non-fatal failure.
        channel_playlist_id = False
        if channel_page is not False:
            channel_playlist_id = self._extract_channel_external_id(channel_page)
        if channel_playlist_id and channel_playlist_id.startswith('UC'):
            # The uploads playlist of channel UCxxx is named UUxxx.
            playlist_id = 'UU' + channel_playlist_id[2:]
            return self.url_result(
                compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')

        channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
        autogenerated = re.search(r'''(?x)
                class="[^"]*?(?:
                    channel-header-autogenerated-label|
                    yt-channel-title-autogenerated
                )[^"]*"''', channel_page) is not None

        if autogenerated:
            # The videos are contained in a single page
            # the ajax pages can't be used, they are empty
            entries = [
                self.url_result(
                    video_id, 'Youtube', video_id=video_id,
                    video_title=video_title)
                for video_id, video_title in self.extract_videos_from_page(channel_page)]
            return self.playlist_result(entries, channel_id)

        return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
1769
1770
class YoutubeUserIE(YoutubeChannelIE):
    IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
    IE_NAME = 'youtube:user'

    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
        'playlist_mincount': 320,
        'info_dict': {
            'title': 'TheLinuxFoundation',
        }
    }, {
        'url': 'ytuser:phihag',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Never claim a URL that another YouTube extractor can handle: this
        # class's regex is too permissive and would otherwise match it too.
        competing_ies = (
            klass for name, klass in globals().items()
            if name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in competing_ies):
            return False
        return super(YoutubeUserIE, cls).suitable(url)
1797
1798
class YoutubeUserPlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
    # All extraction logic lives in the playlists base class; this subclass
    # only supplies the URL pattern for a user's "Playlists" tab.
    IE_DESC = 'YouTube.com user playlists'
    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/user/(?P<id>[^/]+)/playlists'
    IE_NAME = 'youtube:user:playlists'

    _TESTS = [{
        'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
        'playlist_mincount': 4,
        'info_dict': {
            'id': 'ThirstForScience',
            'title': 'Thirst for Science',
        },
    }, {
        # with "Load more" button
        'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
        'playlist_mincount': 70,
        'info_dict': {
            'id': 'igorkle1',
            'title': 'Игорь Клейнер',
        },
    }]
1820
1821
class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
    IE_DESC = 'YouTube.com searches'
    # there doesn't appear to be a real limit, for example if you search for
    # 'python' you get more than 8.000.000 results
    _MAX_RESULTS = float('inf')
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'
    _EXTRA_QUERY_ARGS = {}
    _TESTS = []

    def _get_n_results(self, query, n):
        """Fetch result pages until at least n video results are collected."""
        collected = []

        for pagenum in itertools.count(1):
            url_query = {
                'search_query': query.encode('utf-8'),
                'page': pagenum,
                'spf': 'navigate',
            }
            url_query.update(self._EXTRA_QUERY_ARGS)
            result_url = 'https://www.youtube.com/results?' + compat_urllib_parse.urlencode(url_query)
            data = self._download_json(
                result_url, video_id='query "%s"' % query,
                note='Downloading page %s' % pagenum,
                errnote='Unable to download API page')
            html_content = data[1]['body']['content']

            if 'class="search-message' in html_content:
                raise ExtractorError(
                    '[youtube] No video results', expected=True)

            page_results = self._ids_to_results(orderedSet(re.findall(
                r'href="/watch\?v=(.{11})', html_content)))
            collected.extend(page_results)
            # Stop on an empty page or once more than enough were gathered.
            if not page_results or len(collected) > n:
                break

        # Slicing is a no-op when we collected n results or fewer.
        return self.playlist_result(collected[:n], query)
1865
1866
class YoutubeSearchDateIE(YoutubeSearchIE):
    # Same search extractor, but asks YouTube to sort results by upload date.
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
    _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
1872
1873
class YoutubeSearchURLIE(InfoExtractor):
    IE_DESC = 'YouTube.com search URLs'
    IE_NAME = 'youtube:search_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
    _TESTS = [{
        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
        'playlist_mincount': 5,
        'info_dict': {
            'title': 'youtube-dl test video',
        }
    }]

    def _real_extract(self, url):
        """Scrape the rendered search-results page into a playlist of URLs."""
        mobj = re.match(self._VALID_URL, url)
        query = compat_urllib_parse_unquote_plus(mobj.group('query'))

        webpage = self._download_webpage(url, query)
        result_code = self._search_regex(
            r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML')

        # Each result is an <h3 class="...yt-lockup-title..."> fragment
        # holding the link and (usually) the title.
        entries = []
        for fragment in re.findall(
                r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code):
            title = self._html_search_regex(
                [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], fragment, 'item title', fatal=False)
            href = self._html_search_regex(
                r'(?s)href="([^"]+)"', fragment, 'item URL')
            entries.append({
                '_type': 'url',
                'url': compat_urlparse.urljoin('https://www.youtube.com/', href),
                'title': title,
            })

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': query,
        }
1915
1916
class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
    IE_DESC = 'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
    IE_NAME = 'youtube:show'
    _TESTS = [{
        'url': 'https://www.youtube.com/show/airdisasters',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'airdisasters',
            'title': 'Air Disasters',
        }
    }]

    def _real_extract(self, url):
        # A show is just a collection of playlists (one per season), so
        # delegate to the playlists extractor via the show's playlists page.
        show_id = self._match_id(url)
        playlists_url = 'https://www.youtube.com/show/%s/playlists' % show_id
        return super(YoutubeShowIE, self)._real_extract(playlists_url)
1934
1935
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for feed extractors
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    # Feeds are tied to an account, so credentials are mandatory.
    _LOGIN_REQUIRED = True

    @property
    def IE_NAME(self):
        return 'youtube:%s' % self._FEED_NAME

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        page = self._download_webpage(
            'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)

        # The extraction process is the same as for playlists, but the regex
        # for the video ids doesn't contain an index
        ids = []
        more_widget_html = content_html = page
        for page_num in itertools.count(1):
            matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)

            # 'recommended' feed has infinite 'load more' and each new portion spins
            # the same videos in (sometimes) slightly different order, so we'll check
            # for unicity and break when portion has no new videos.
            # Materialize into a list: on Python 3, filter() returns a lazy
            # iterator that is always truthy, so 'if not new_ids' below would
            # never fire and the pagination loop could run forever.
            new_ids = [video_id for video_id in orderedSet(matches) if video_id not in ids]
            if not new_ids:
                break

            ids.extend(new_ids)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        return self.playlist_result(
            self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
1983
1984
class YoutubeWatchLaterIE(YoutubePlaylistIE):
    IE_NAME = 'youtube:watchlater'
    IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|playlist\?list=WL)|:ytwatchlater'

    _TESTS = []  # override PlaylistIE tests

    def _real_extract(self, url):
        # The watch-later list is always the special 'WL' playlist of the
        # logged-in account, regardless of which URL form matched.
        return self._extract_playlist('WL')
1994
1995
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    IE_NAME = 'youtube:favorites'
    IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        # The favourites page embeds the account-specific playlist id;
        # scrape it and hand off to the regular playlist extractor.
        page = self._download_webpage(
            'https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        favourites_list_id = self._search_regex(
            r'list=(.+?)["&]', page, 'favourites playlist id')
        return self.url_result(favourites_list_id, 'YoutubePlaylist')
2006
2007
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    # Concrete feed: extraction logic is inherited from YoutubeFeedsInfoExtractor.
    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = 'Youtube Recommended videos'
2013
2014
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    # Concrete feed: extraction logic is inherited from YoutubeFeedsInfoExtractor.
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _PLAYLIST_TITLE = 'Youtube Subscriptions'
2020
2021
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    # Concrete feed: extraction logic is inherited from YoutubeFeedsInfoExtractor.
    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    # Raw-string prefix added: the previous plain string relied on '\.'
    # surviving unchanged, which is a deprecated escape sequence on Python 3
    # and inconsistent with every sibling extractor. Pattern is unchanged.
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    _PLAYLIST_TITLE = 'Youtube History'
2027
2028
class YoutubeTruncatedURLIE(InfoExtractor):
    """Catch watch/attribution URLs whose 'v=...' part has been cut off.

    This typically happens when a URL is passed to a shell unquoted, so the
    '&v=<id>' suffix is interpreted by the shell; instead of failing with a
    confusing message we raise an error telling the user to quote the URL.
    """
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
        attribution_link\?a=[^&]+
        )
        $
    '''

    _TESTS = [{
        'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'http://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Matching one of these URLs is always an error; explain the likely
        # cause (unquoted '&' eaten by the shell) instead of downloading.
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .',
            expected=True)
2076
2077
class YoutubeTruncatedIDIE(InfoExtractor):
    IE_NAME = 'youtube:truncated_id'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'

    _TESTS = [{
        'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # A video id shorter than the canonical 11 characters means the URL
        # got cut off somewhere; fail with a clear explanation.
        video_id = self._match_id(url)
        message = 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url)
        raise ExtractorError(message, expected=True)