youtube_dl/extractor/youtube.py
1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import random
10 import re
11 import time
12 import traceback
13
14 from .common import InfoExtractor, SearchInfoExtractor
15 from ..jsinterp import JSInterpreter
16 from ..swfinterp import SWFInterpreter
17 from ..compat import (
18 compat_chr,
19 compat_parse_qs,
20 compat_urllib_parse_unquote,
21 compat_urllib_parse_unquote_plus,
22 compat_urllib_parse_urlencode,
23 compat_urllib_parse_urlparse,
24 compat_urlparse,
25 compat_str,
26 )
27 from ..utils import (
28 clean_html,
29 error_to_compat_str,
30 ExtractorError,
31 float_or_none,
32 get_element_by_attribute,
33 get_element_by_id,
34 int_or_none,
35 mimetype2ext,
36 orderedSet,
37 parse_duration,
38 remove_quotes,
39 remove_start,
40 sanitized_Request,
41 smuggle_url,
42 str_to_int,
43 unescapeHTML,
44 unified_strdate,
45 unsmuggle_url,
46 uppercase_escape,
47 urlencode_postdata,
48 ISO3166Utils,
49 )
50
51
52 class YoutubeBaseInfoExtractor(InfoExtractor):
53 """Provide base functions for Youtube extractors"""
54 _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
55 _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
56 _PASSWORD_CHALLENGE_URL = 'https://accounts.google.com/signin/challenge/sl/password'
57 _NETRC_MACHINE = 'youtube'
58 # If True, an error is raised when no login info is provided
59 _LOGIN_REQUIRED = False
60
61 def _set_language(self):
62 self._set_cookie(
63 '.youtube.com', 'PREF', 'f1=50000000&hl=en',
64 # YouTube sets the expire time to about two months
65 expire_time=time.time() + 2 * 30 * 24 * 3600)
66
67 def _ids_to_results(self, ids):
68 return [
69 self.url_result(vid_id, 'Youtube', video_id=vid_id)
70 for vid_id in ids]
71
72 def _login(self):
73 """
74 Attempt to log in to YouTube.
75 True is returned if successful or skipped.
76 False is returned if login failed.
77
78 If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
79 """
80 (username, password) = self._get_login_info()
81 # No authentication to be performed
82 if username is None:
83 if self._LOGIN_REQUIRED:
84 raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
85 return True
86
87 login_page = self._download_webpage(
88 self._LOGIN_URL, None,
89 note='Downloading login page',
90 errnote='unable to fetch login page', fatal=False)
91 if login_page is False:
92 return
93
94 galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
95 login_page, 'Login GALX parameter')
96
97 # Log in
98 login_form_strs = {
99 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
100 'Email': username,
101 'GALX': galx,
102 'Passwd': password,
103
104 'PersistentCookie': 'yes',
105 '_utf8': '霱',
106 'bgresponse': 'js_disabled',
107 'checkConnection': '',
108 'checkedDomains': 'youtube',
109 'dnConn': '',
110 'pstMsg': '0',
111 'rmShown': '1',
112 'secTok': '',
113 'signIn': 'Sign in',
114 'timeStmp': '',
115 'service': 'youtube',
116 'uilel': '3',
117 'hl': 'en_US',
118 }
119
120 login_results = self._download_webpage(
121 self._PASSWORD_CHALLENGE_URL, None,
122 note='Logging in', errnote='unable to log in', fatal=False,
123 data=urlencode_postdata(login_form_strs))
124 if login_results is False:
125 return False
126
127 error_msg = self._html_search_regex(
128 r'<[^>]+id="errormsg_0_Passwd"[^>]*>([^<]+)<',
129 login_results, 'error message', default=None)
130 if error_msg:
131 raise ExtractorError('Unable to login: %s' % error_msg, expected=True)
132
133 if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
134 raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
135
136 # Two-Factor
137 # TODO add SMS and phone call support - these require making a request and then prompting the user
138
139 if re.search(r'(?i)<form[^>]+id="challenge"', login_results) is not None:
140 tfa_code = self._get_tfa_info('2-step verification code')
141
142 if not tfa_code:
143 self._downloader.report_warning(
144 'Two-factor authentication required. Provide it either interactively or with --twofactor <code> '
145 '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
146 return False
147
148 tfa_code = remove_start(tfa_code, 'G-')
149
150 tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
151
152 tfa_form_strs.update({
153 'Pin': tfa_code,
154 'TrustDevice': 'on',
155 })
156
157 tfa_data = urlencode_postdata(tfa_form_strs)
158
159 tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
160 tfa_results = self._download_webpage(
161 tfa_req, None,
162 note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
163
164 if tfa_results is False:
165 return False
166
167 if re.search(r'(?i)<form[^>]+id="challenge"', tfa_results) is not None:
168 self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
169 return False
170 if re.search(r'(?i)<form[^>]+id="gaia_loginform"', tfa_results) is not None:
171 self._downloader.report_warning('unable to log in - did the page structure change?')
172 return False
173 if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
174 self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
175 return False
176
177 if re.search(r'(?i)<form[^>]+id="gaia_loginform"', login_results) is not None:
178 self._downloader.report_warning('unable to log in: bad username or password')
179 return False
180 return True
181
182 def _real_initialize(self):
183 if self._downloader is None:
184 return
185 self._set_language()
186 if not self._login():
187 return
188
189
190 class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
191 # Extract entries from page with "Load more" button
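# The initial page embeds a "Load more" widget whose data-uix-load-more-href
# attribute points to an AJAX endpoint; the JSON returned there carries the
# next batch of entries in 'content_html' and the next widget markup in
# 'load_more_widget_html', which is how the loop below paginates.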
192 def _entries(self, page, playlist_id):
193 more_widget_html = content_html = page
194 for page_num in itertools.count(1):
195 for entry in self._process_page(content_html):
196 yield entry
197
198 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
199 if not mobj:
200 break
201
202 more = self._download_json(
203 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
204 'Downloading page #%s' % page_num,
205 transform_source=uppercase_escape)
206 content_html = more['content_html']
207 if not content_html.strip():
208 # Some webpages show a "Load more" button but they don't
209 # have more videos
210 break
211 more_widget_html = more['load_more_widget_html']
212
213
214 class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
215 def _process_page(self, content):
216 for video_id, video_title in self.extract_videos_from_page(content):
217 yield self.url_result(video_id, 'Youtube', video_id, video_title)
218
219 def extract_videos_from_page(self, page):
220 ids_in_page = []
221 titles_in_page = []
222 for mobj in re.finditer(self._VIDEO_RE, page):
223 # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
224 if 'index' in mobj.groupdict() and mobj.group('id') == '0':
225 continue
226 video_id = mobj.group('id')
227 video_title = unescapeHTML(mobj.group('title'))
228 if video_title:
229 video_title = video_title.strip()
230 try:
231 idx = ids_in_page.index(video_id)
232 if video_title and not titles_in_page[idx]:
233 titles_in_page[idx] = video_title
234 except ValueError:
235 ids_in_page.append(video_id)
236 titles_in_page.append(video_title)
237 return zip(ids_in_page, titles_in_page)
238
239
240 class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
241 def _process_page(self, content):
242 for playlist_id in orderedSet(re.findall(
243 r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
244 content)):
245 yield self.url_result(
246 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
247
248 def _real_extract(self, url):
249 playlist_id = self._match_id(url)
250 webpage = self._download_webpage(url, playlist_id)
251 title = self._og_search_title(webpage, fatal=False)
252 return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
253
254
255 class YoutubeIE(YoutubeBaseInfoExtractor):
256 IE_DESC = 'YouTube.com'
257 _VALID_URL = r"""(?x)^
258 (
259 (?:https?://|//) # http(s):// or protocol-independent URL
260 (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
261 (?:www\.)?deturl\.com/www\.youtube\.com/|
262 (?:www\.)?pwnyoutube\.com/|
263 (?:www\.)?yourepeat\.com/|
264 tube\.majestyc\.net/|
265 youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
266 (?:.*?\#/)? # handle anchor (#/) redirect urls
267 (?: # the various things that can precede the ID:
268 (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
269 |(?: # or the v= param in all its forms
270 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
271 (?:\?|\#!?) # the params delimiter ? or # or #!
272 (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
273 v=
274 )
275 ))
276 |(?:
277 youtu\.be| # just youtu.be/xxxx
278 vid\.plus| # or vid.plus/xxxx
279 zwearz\.com/watch| # or zwearz.com/watch/xxxx
280 )/
281 |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
282 )
283 )? # all until now is optional -> you can pass the naked ID
284 ([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
285 (?!.*?&list=) # combined list/video URLs are handled by the playlist IE
286 (?(1).+)? # if we found the ID, everything can follow
287 $"""
288 _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
289 _formats = {
290 '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
291 '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
292 '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
293 '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
294 '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
295 '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
296 '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
297 '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
298 # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
299 '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
300 '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
301 '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
302 '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
303 '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
304 '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
305 '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
306 '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
307 '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
308
309
310 # 3D videos
311 '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
312 '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
313 '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
314 '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
315 '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
316 '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
317 '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
318
319 # Apple HTTP Live Streaming
320 '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
321 '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
322 '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
323 '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
324 '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
325 '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
326 '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
327 '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
328
329 # DASH mp4 video
330 '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
331 '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
332 '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
333 '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
334 '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
335 '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
336 '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
337 '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
338 '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
339 '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
340 '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
341
342 # Dash mp4 audio
343 '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
344 '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
345 '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
346 '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
347 '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
348
349 # Dash webm
350 '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
351 '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
352 '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
353 '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
354 '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
355 '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
356 '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9', 'preference': -40},
357 '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
358 '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
359 '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
360 '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
361 '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
362 '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
363 '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
364 '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
365 # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
366 '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
367 '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
368 '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
369 '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
370 '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
371 '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
372
373 # Dash webm audio
374 '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
375 '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
376
377 # Dash webm audio with opus inside
378 '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
379 '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
380 '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
381
382 # RTMP (unnamed)
383 '_rtmp': {'protocol': 'rtmp'},
384 }
385 _SUBTITLE_FORMATS = ('ttml', 'vtt')
386
387 IE_NAME = 'youtube'
388 _TESTS = [
389 {
390 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
391 'info_dict': {
392 'id': 'BaW_jenozKc',
393 'ext': 'mp4',
394 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
395 'uploader': 'Philipp Hagemeister',
396 'uploader_id': 'phihag',
397 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
398 'upload_date': '20121002',
399 'license': 'Standard YouTube License',
400 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
401 'categories': ['Science & Technology'],
402 'tags': ['youtube-dl'],
403 'like_count': int,
404 'dislike_count': int,
405 'start_time': 1,
406 'end_time': 9,
407 }
408 },
409 {
410 'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
411 'note': 'Test generic use_cipher_signature video (#897)',
412 'info_dict': {
413 'id': 'UxxajLWwzqY',
414 'ext': 'mp4',
415 'upload_date': '20120506',
416 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
417 'alt_title': 'I Love It (feat. Charli XCX)',
418 'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
419 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
420 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
421 'iconic ep', 'iconic', 'love', 'it'],
422 'uploader': 'Icona Pop',
423 'uploader_id': 'IconaPop',
424 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IconaPop',
425 'license': 'Standard YouTube License',
426 'creator': 'Icona Pop',
427 }
428 },
429 {
430 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
431 'note': 'Test VEVO video with age protection (#956)',
432 'info_dict': {
433 'id': '07FYdnEawAQ',
434 'ext': 'mp4',
435 'upload_date': '20130703',
436 'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
437 'alt_title': 'Tunnel Vision',
438 'description': 'md5:64249768eec3bc4276236606ea996373',
439 'uploader': 'justintimberlakeVEVO',
440 'uploader_id': 'justintimberlakeVEVO',
441 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
442 'license': 'Standard YouTube License',
443 'creator': 'Justin Timberlake',
444 'age_limit': 18,
445 }
446 },
447 {
448 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
449 'note': 'Embed-only video (#1746)',
450 'info_dict': {
451 'id': 'yZIXLfi8CZQ',
452 'ext': 'mp4',
453 'upload_date': '20120608',
454 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
455 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
456 'uploader': 'SET India',
457 'uploader_id': 'setindia',
458 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/setindia',
459 'license': 'Standard YouTube License',
460 'age_limit': 18,
461 }
462 },
463 {
464 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
465 'note': 'Use the first video ID in the URL',
466 'info_dict': {
467 'id': 'BaW_jenozKc',
468 'ext': 'mp4',
469 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
470 'uploader': 'Philipp Hagemeister',
471 'uploader_id': 'phihag',
472 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
473 'upload_date': '20121002',
474 'license': 'Standard YouTube License',
475 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
476 'categories': ['Science & Technology'],
477 'tags': ['youtube-dl'],
478 'like_count': int,
479 'dislike_count': int,
480 },
481 'params': {
482 'skip_download': True,
483 },
484 },
485 {
486 'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
487 'note': '256k DASH audio (format 141) via DASH manifest',
488 'info_dict': {
489 'id': 'a9LDPn-MO4I',
490 'ext': 'm4a',
491 'upload_date': '20121002',
492 'uploader_id': '8KVIDEO',
493 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
494 'description': '',
495 'uploader': '8KVIDEO',
496 'license': 'Standard YouTube License',
497 'title': 'UHDTV TEST 8K VIDEO.mp4'
498 },
499 'params': {
500 'youtube_include_dash_manifest': True,
501 'format': '141',
502 },
503 'skip': 'format 141 not served anymore',
504 },
505 # DASH manifest with encrypted signature
506 {
507 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
508 'info_dict': {
509 'id': 'IB3lcPjvWLA',
510 'ext': 'm4a',
511 'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
512 'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
513 'uploader': 'AfrojackVEVO',
514 'uploader_id': 'AfrojackVEVO',
515 'upload_date': '20131011',
516 'license': 'Standard YouTube License',
517 },
518 'params': {
519 'youtube_include_dash_manifest': True,
520 'format': '141/bestaudio[ext=m4a]',
521 },
522 },
523 # JS player signature function name containing $
524 {
525 'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
526 'info_dict': {
527 'id': 'nfWlot6h_JM',
528 'ext': 'm4a',
529 'title': 'Taylor Swift - Shake It Off',
530 'alt_title': 'Shake It Off',
531 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
532 'uploader': 'TaylorSwiftVEVO',
533 'uploader_id': 'TaylorSwiftVEVO',
534 'upload_date': '20140818',
535 'license': 'Standard YouTube License',
536 'creator': 'Taylor Swift',
537 },
538 'params': {
539 'youtube_include_dash_manifest': True,
540 'format': '141/bestaudio[ext=m4a]',
541 },
542 },
543 # Controversy video
544 {
545 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
546 'info_dict': {
547 'id': 'T4XJQO3qol8',
548 'ext': 'mp4',
549 'upload_date': '20100909',
550 'uploader': 'The Amazing Atheist',
551 'uploader_id': 'TheAmazingAtheist',
552 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
553 'license': 'Standard YouTube License',
554 'title': 'Burning Everyone\'s Koran',
555 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
556 }
557 },
558 # Normal age-gate video (No vevo, embed allowed)
559 {
560 'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
561 'info_dict': {
562 'id': 'HtVdAasjOgU',
563 'ext': 'mp4',
564 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
565 'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
566 'uploader': 'The Witcher',
567 'uploader_id': 'WitcherGame',
568 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
569 'upload_date': '20140605',
570 'license': 'Standard YouTube License',
571 'age_limit': 18,
572 },
573 },
574 # Age-gate video with encrypted signature
575 {
576 'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
577 'info_dict': {
578 'id': '6kLq3WMV1nU',
579 'ext': 'mp4',
580 'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
581 'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
582 'uploader': 'LloydVEVO',
583 'uploader_id': 'LloydVEVO',
584 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
585 'upload_date': '20110629',
586 'license': 'Standard YouTube License',
587 'age_limit': 18,
588 },
589 },
590 # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
591 {
592 'url': '__2ABJjxzNo',
593 'info_dict': {
594 'id': '__2ABJjxzNo',
595 'ext': 'mp4',
596 'upload_date': '20100430',
597 'uploader_id': 'deadmau5',
598 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/deadmau5',
599 'creator': 'deadmau5',
600 'description': 'md5:12c56784b8032162bb936a5f76d55360',
601 'uploader': 'deadmau5',
602 'license': 'Standard YouTube License',
603 'title': 'Deadmau5 - Some Chords (HD)',
604 'alt_title': 'Some Chords',
605 },
606 'expected_warnings': [
607 'DASH manifest missing',
608 ]
609 },
610 # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
611 {
612 'url': 'lqQg6PlCWgI',
613 'info_dict': {
614 'id': 'lqQg6PlCWgI',
615 'ext': 'mp4',
616 'upload_date': '20150827',
617 'uploader_id': 'olympic',
618 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/olympic',
619 'license': 'Standard YouTube License',
620 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
621 'uploader': 'Olympic',
622 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
623 },
624 'params': {
625 'skip_download': 'requires avconv',
626 }
627 },
628 # Non-square pixels
629 {
630 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
631 'info_dict': {
632 'id': '_b-2C3KPAM0',
633 'ext': 'mp4',
634 'stretched_ratio': 16 / 9.,
635 'upload_date': '20110310',
636 'uploader_id': 'AllenMeow',
637 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
638 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
639 'uploader': '孫艾倫',
640 'license': 'Standard YouTube License',
641 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
642 },
643 },
644 # url_encoded_fmt_stream_map is empty string
645 {
646 'url': 'qEJwOuvDf7I',
647 'info_dict': {
648 'id': 'qEJwOuvDf7I',
649 'ext': 'webm',
650 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
651 'description': '',
652 'upload_date': '20150404',
653 'uploader_id': 'spbelect',
654 'uploader': 'Наблюдатели Петербурга',
655 },
656 'params': {
657 'skip_download': 'requires avconv',
658 },
659 'skip': 'This live event has ended.',
660 },
661 # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
662 {
663 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
664 'info_dict': {
665 'id': 'FIl7x6_3R5Y',
666 'ext': 'mp4',
667 'title': 'md5:7b81415841e02ecd4313668cde88737a',
668 'description': 'md5:116377fd2963b81ec4ce64b542173306',
669 'upload_date': '20150625',
670 'uploader_id': 'dorappi2000',
671 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
672 'uploader': 'dorappi2000',
673 'license': 'Standard YouTube License',
674 'formats': 'mincount:32',
675 },
676 },
677 # DASH manifest with segment_list
678 {
679 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
680 'md5': '8ce563a1d667b599d21064e982ab9e31',
681 'info_dict': {
682 'id': 'CsmdDsKjzN8',
683 'ext': 'mp4',
684 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
685 'uploader': 'Airtek',
686 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
687 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
688 'license': 'Standard YouTube License',
689 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
690 },
691 'params': {
692 'youtube_include_dash_manifest': True,
693 'format': '135', # bestvideo
694 },
695 'skip': 'This live event has ended.',
696 },
697 {
698 # Multifeed videos (multiple cameras), URL is for Main Camera
699 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
700 'info_dict': {
701 'id': 'jqWvoWXjCVs',
702 'title': 'teamPGP: Rocket League Noob Stream',
703 'description': 'md5:dc7872fb300e143831327f1bae3af010',
704 },
705 'playlist': [{
706 'info_dict': {
707 'id': 'jqWvoWXjCVs',
708 'ext': 'mp4',
709 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
710 'description': 'md5:dc7872fb300e143831327f1bae3af010',
711 'upload_date': '20150721',
712 'uploader': 'Beer Games Beer',
713 'uploader_id': 'beergamesbeer',
714 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
715 'license': 'Standard YouTube License',
716 },
717 }, {
718 'info_dict': {
719 'id': '6h8e8xoXJzg',
720 'ext': 'mp4',
721 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
722 'description': 'md5:dc7872fb300e143831327f1bae3af010',
723 'upload_date': '20150721',
724 'uploader': 'Beer Games Beer',
725 'uploader_id': 'beergamesbeer',
726 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
727 'license': 'Standard YouTube License',
728 },
729 }, {
730 'info_dict': {
731 'id': 'PUOgX5z9xZw',
732 'ext': 'mp4',
733 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
734 'description': 'md5:dc7872fb300e143831327f1bae3af010',
735 'upload_date': '20150721',
736 'uploader': 'Beer Games Beer',
737 'uploader_id': 'beergamesbeer',
738 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
739 'license': 'Standard YouTube License',
740 },
741 }, {
742 'info_dict': {
743 'id': 'teuwxikvS5k',
744 'ext': 'mp4',
745 'title': 'teamPGP: Rocket League Noob Stream (zim)',
746 'description': 'md5:dc7872fb300e143831327f1bae3af010',
747 'upload_date': '20150721',
748 'uploader': 'Beer Games Beer',
749 'uploader_id': 'beergamesbeer',
750 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
751 'license': 'Standard YouTube License',
752 },
753 }],
754 'params': {
755 'skip_download': True,
756 },
757 },
758 {
759 # Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
760 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
761 'info_dict': {
762 'id': 'gVfLd0zydlo',
763 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
764 },
765 'playlist_count': 2,
766 'skip': 'Not multifeed anymore',
767 },
768 {
769 'url': 'http://vid.plus/FlRa-iH7PGw',
770 'only_matching': True,
771 },
772 {
773 'url': 'http://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
774 'only_matching': True,
775 },
776 {
777 # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
778 # Also tests cut-off URL expansion in video description (see
779 # https://github.com/rg3/youtube-dl/issues/1892,
780 # https://github.com/rg3/youtube-dl/issues/8164)
781 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
782 'info_dict': {
783 'id': 'lsguqyKfVQg',
784 'ext': 'mp4',
785 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
786 'alt_title': 'Dark Walk',
787 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
788 'upload_date': '20151119',
789 'uploader_id': 'IronSoulElf',
790 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
791 'uploader': 'IronSoulElf',
792 'license': 'Standard YouTube License',
793 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
794 },
795 'params': {
796 'skip_download': True,
797 },
798 },
799 {
800 # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
801 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
802 'only_matching': True,
803 },
804 {
805 # Video with yt:stretch=17:0
806 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
807 'info_dict': {
808 'id': 'Q39EVAstoRM',
809 'ext': 'mp4',
810 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
811 'description': 'md5:ee18a25c350637c8faff806845bddee9',
812 'upload_date': '20151107',
813 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
814 'uploader': 'CH GAMER DROID',
815 },
816 'params': {
817 'skip_download': True,
818 },
819 'skip': 'This video does not exist.',
820 },
821 {
822 # Video licensed under Creative Commons
823 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
824 'info_dict': {
825 'id': 'M4gD1WSo5mA',
826 'ext': 'mp4',
827 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
828 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
829 'upload_date': '20150127',
830 'uploader_id': 'BerkmanCenter',
831 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
832 'uploader': 'BerkmanCenter',
833 'license': 'Creative Commons Attribution license (reuse allowed)',
834 },
835 'params': {
836 'skip_download': True,
837 },
838 },
839 {
840 # Channel-like uploader_url
841 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
842 'info_dict': {
843 'id': 'eQcmzGIKrzg',
844 'ext': 'mp4',
845 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
846 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
847 'upload_date': '20151119',
848 'uploader': 'Bernie 2016',
849 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
850 'uploader_url': 're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
851 'license': 'Creative Commons Attribution license (reuse allowed)',
852 },
853 'params': {
854 'skip_download': True,
855 },
856 },
857 {
858 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
859 'only_matching': True,
860 },
861 {
862 # YouTube Red paid video (https://github.com/rg3/youtube-dl/issues/10059)
863 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
864 'only_matching': True,
865 }
866 ]
867
868 def __init__(self, *args, **kwargs):
869 super(YoutubeIE, self).__init__(*args, **kwargs)
870 self._player_cache = {}
871
872 def report_video_info_webpage_download(self, video_id):
873 """Report attempt to download video info webpage."""
874 self.to_screen('%s: Downloading video info webpage' % video_id)
875
876 def report_information_extraction(self, video_id):
877 """Report attempt to extract video information."""
878 self.to_screen('%s: Extracting video information' % video_id)
879
880 def report_unavailable_format(self, video_id, format):
881 """Report extracted video URL."""
882 self.to_screen('%s: Format %s not available' % (video_id, format))
883
884 def report_rtmp_download(self):
885 """Indicate the download will use the RTMP protocol."""
886 self.to_screen('RTMP download detected')
887
888 def _signature_cache_id(self, example_sig):
889 """ Return a string representation of a signature """
890 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
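# Illustrative example (not from the original source): a signature such as
# 'ABCDE.FGH.IJ' yields the cache id '5.3.2', i.e. the dot-joined lengths of
# its parts, so one cached function covers all signatures of that shape.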
891
892 def _extract_signature_function(self, video_id, player_url, example_sig):
893 id_m = re.match(
894 r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
895 player_url)
896 if not id_m:
897 raise ExtractorError('Cannot identify player %r' % player_url)
898 player_type = id_m.group('ext')
899 player_id = id_m.group('id')
900
901 # Read from filesystem cache
902 func_id = '%s_%s_%s' % (
903 player_type, player_id, self._signature_cache_id(example_sig))
904 assert os.path.basename(func_id) == func_id
905
906 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
907 if cache_spec is not None:
908 return lambda s: ''.join(s[i] for i in cache_spec)
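# The cached spec is just a list of source indices; with hypothetical values,
# cache_spec = [2, 0, 1] turns 'abc' into 'cab'.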
909
910 download_note = (
911 'Downloading player %s' % player_url
912 if self._downloader.params.get('verbose') else
913 'Downloading %s player %s' % (player_type, player_id)
914 )
915 if player_type == 'js':
916 code = self._download_webpage(
917 player_url, video_id,
918 note=download_note,
919 errnote='Download of %s failed' % player_url)
920 res = self._parse_sig_js(code)
921 elif player_type == 'swf':
922 urlh = self._request_webpage(
923 player_url, video_id,
924 note=download_note,
925 errnote='Download of %s failed' % player_url)
926 code = urlh.read()
927 res = self._parse_sig_swf(code)
928 else:
929 assert False, 'Invalid player type %r' % player_type
930
931 test_string = ''.join(map(compat_chr, range(len(example_sig))))
932 cache_res = res(test_string)
933 cache_spec = [ord(c) for c in cache_res]
934
935 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
936 return res
937
938 def _print_sig_code(self, func, example_sig):
939 def gen_sig_code(idxs):
940 def _genslice(start, end, step):
941 starts = '' if start == 0 else str(start)
942 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
943 steps = '' if step == 1 else (':%d' % step)
944 return 's[%s%s%s]' % (starts, ends, steps)
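# For instance, _genslice(3, 0, -1) yields 's[3::-1]' (end + step < 0, so the
# end bound is omitted) and _genslice(0, 4, 2) yields 's[:6:2]'.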
945
946 step = None
947 # Quell pyflakes warnings - start will be set when step is set
948 start = '(Never used)'
949 for i, prev in zip(idxs[1:], idxs[:-1]):
950 if step is not None:
951 if i - prev == step:
952 continue
953 yield _genslice(start, prev, step)
954 step = None
955 continue
956 if i - prev in [-1, 1]:
957 step = i - prev
958 start = prev
959 continue
960 else:
961 yield 's[%d]' % prev
962 if step is None:
963 yield 's[%d]' % i
964 else:
965 yield _genslice(start, i, step)
966
967 test_string = ''.join(map(compat_chr, range(len(example_sig))))
968 cache_res = func(test_string)
969 cache_spec = [ord(c) for c in cache_res]
970 expr_code = ' + '.join(gen_sig_code(cache_spec))
971 signature_id_tuple = '(%s)' % (
972 ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
973 code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
974 ' return %s\n') % (signature_id_tuple, expr_code)
975 self.to_screen('Extracted signature function:\n' + code)
976
977 def _parse_sig_js(self, jscode):
978 funcname = self._search_regex(
979 r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
980 'Initial JS player signature function name')
981
982 jsi = JSInterpreter(jscode)
983 initial_function = jsi.extract_function(funcname)
984 return lambda s: initial_function([s])
985
986 def _parse_sig_swf(self, file_contents):
987 swfi = SWFInterpreter(file_contents)
988 TARGET_CLASSNAME = 'SignatureDecipher'
989 searched_class = swfi.extract_class(TARGET_CLASSNAME)
990 initial_function = swfi.extract_function(searched_class, 'decipher')
991 return lambda s: initial_function([s])
992
993 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
994 """Turn the encrypted s field into a working signature"""
995
996 if player_url is None:
997 raise ExtractorError('Cannot decrypt signature without player_url')
998
999 if player_url.startswith('//'):
1000 player_url = 'https:' + player_url
1001 try:
1002 player_id = (player_url, self._signature_cache_id(s))
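# The in-memory cache is keyed by (player URL, signature length pattern), so a
# deciphering function extracted once is reused for every video served by the
# same player version.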
1003 if player_id not in self._player_cache:
1004 func = self._extract_signature_function(
1005 video_id, player_url, s
1006 )
1007 self._player_cache[player_id] = func
1008 func = self._player_cache[player_id]
1009 if self._downloader.params.get('youtube_print_sig_code'):
1010 self._print_sig_code(func, s)
1011 return func(s)
1012 except Exception as e:
1013 tb = traceback.format_exc()
1014 raise ExtractorError(
1015 'Signature extraction failed: ' + tb, cause=e)
1016
1017 def _get_subtitles(self, video_id, webpage):
1018 try:
1019 subs_doc = self._download_xml(
1020 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
1021 video_id, note=False)
1022 except ExtractorError as err:
1023 self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
1024 return {}
1025
1026 sub_lang_list = {}
1027 for track in subs_doc.findall('track'):
1028 lang = track.attrib['lang_code']
1029 if lang in sub_lang_list:
1030 continue
1031 sub_formats = []
1032 for ext in self._SUBTITLE_FORMATS:
1033 params = compat_urllib_parse_urlencode({
1034 'lang': lang,
1035 'v': video_id,
1036 'fmt': ext,
1037 'name': track.attrib['name'].encode('utf-8'),
1038 })
1039 sub_formats.append({
1040 'url': 'https://www.youtube.com/api/timedtext?' + params,
1041 'ext': ext,
1042 })
1043 sub_lang_list[lang] = sub_formats
1044 if not sub_lang_list:
1045 self._downloader.report_warning('video doesn\'t have subtitles')
1046 return {}
1047 return sub_lang_list
1048
1049 def _get_ytplayer_config(self, video_id, webpage):
1050 patterns = (
1051 # User data may contain arbitrary character sequences that break
1052 # regex-based JSON extraction, e.g. if the data contains '};' the second
1053 # regex will not capture the whole JSON object. Work around this by trying
1054 # the more specific regex first; proper quoted-string handling should
1055 # eventually replace this workaround (see
1056 # https://github.com/rg3/youtube-dl/issues/7468,
1057 # https://github.com/rg3/youtube-dl/pull/7599)
1058 r';ytplayer\.config\s*=\s*({.+?});ytplayer',
1059 r';ytplayer\.config\s*=\s*({.+?});',
1060 )
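# As an illustration (hypothetical snippet, not from YouTube): the first pattern
# matches markup like ';ytplayer.config = {"args": {...}};ytplayer.load(...)'
# and only stops at ';ytplayer', which keeps a stray '};' inside user-supplied
# strings from terminating the capture too early.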
1061 config = self._search_regex(
1062 patterns, webpage, 'ytplayer.config', default=None)
1063 if config:
1064 return self._parse_json(
1065 uppercase_escape(config), video_id, fatal=False)
1066
1067 def _get_automatic_captions(self, video_id, webpage):
1068 """We need the webpage for getting the captions url, pass it as an
1069 argument to speed up the process."""
1070 self.to_screen('%s: Looking for automatic captions' % video_id)
1071 player_config = self._get_ytplayer_config(video_id, webpage)
1072 err_msg = 'Couldn\'t find automatic captions for %s' % video_id
1073 if not player_config:
1074 self._downloader.report_warning(err_msg)
1075 return {}
1076 try:
1077 args = player_config['args']
1078 caption_url = args.get('ttsurl')
1079 if caption_url:
1080 timestamp = args['timestamp']
1081 # We get the available subtitles
1082 list_params = compat_urllib_parse_urlencode({
1083 'type': 'list',
1084 'tlangs': 1,
1085 'asrs': 1,
1086 })
1087 list_url = caption_url + '&' + list_params
1088 caption_list = self._download_xml(list_url, video_id)
1089 original_lang_node = caption_list.find('track')
1090 if original_lang_node is None:
1091 self._downloader.report_warning('Video doesn\'t have automatic captions')
1092 return {}
1093 original_lang = original_lang_node.attrib['lang_code']
1094 caption_kind = original_lang_node.attrib.get('kind', '')
1095
1096 sub_lang_list = {}
1097 for lang_node in caption_list.findall('target'):
1098 sub_lang = lang_node.attrib['lang_code']
1099 sub_formats = []
1100 for ext in self._SUBTITLE_FORMATS:
1101 params = compat_urllib_parse_urlencode({
1102 'lang': original_lang,
1103 'tlang': sub_lang,
1104 'fmt': ext,
1105 'ts': timestamp,
1106 'kind': caption_kind,
1107 })
1108 sub_formats.append({
1109 'url': caption_url + '&' + params,
1110 'ext': ext,
1111 })
1112 sub_lang_list[sub_lang] = sub_formats
1113 return sub_lang_list
1114
1115 # Some videos don't provide ttsurl but rather caption_tracks and
1116 # caption_translation_languages (e.g. 20LmZk1hakA)
1117 caption_tracks = args['caption_tracks']
1118 caption_translation_languages = args['caption_translation_languages']
1119 caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
1120 parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
1121 caption_qs = compat_parse_qs(parsed_caption_url.query)
1122
1123 sub_lang_list = {}
1124 for lang in caption_translation_languages.split(','):
1125 lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
1126 sub_lang = lang_qs.get('lc', [None])[0]
1127 if not sub_lang:
1128 continue
1129 sub_formats = []
1130 for ext in self._SUBTITLE_FORMATS:
1131 caption_qs.update({
1132 'tlang': [sub_lang],
1133 'fmt': [ext],
1134 })
1135 sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
1136 query=compat_urllib_parse_urlencode(caption_qs, True)))
1137 sub_formats.append({
1138 'url': sub_url,
1139 'ext': ext,
1140 })
1141 sub_lang_list[sub_lang] = sub_formats
1142 return sub_lang_list
1143 # An extractor error can be raised by the download process if there are
1144 # no automatic captions but there are subtitles
1145 except (KeyError, ExtractorError):
1146 self._downloader.report_warning(err_msg)
1147 return {}
1148
1149 def _mark_watched(self, video_id, video_info):
1150 playback_url = video_info.get('videostats_playback_base_url', [None])[0]
1151 if not playback_url:
1152 return
1153 parsed_playback_url = compat_urlparse.urlparse(playback_url)
1154 qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1155
1156 # cpn generation algorithm is reverse engineered from base.js.
1157 # In fact it works even with dummy cpn.
1158 CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
1159 cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
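# random.randint(0, 256) & 63 yields an index in [0, 63], so the cpn is 16
# characters drawn (slightly non-uniformly) from the 64-character alphabet above.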
1160
1161 qs.update({
1162 'ver': ['2'],
1163 'cpn': [cpn],
1164 })
1165 playback_url = compat_urlparse.urlunparse(
1166 parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1167
1168 self._download_webpage(
1169 playback_url, video_id, 'Marking watched',
1170 'Unable to mark watched', fatal=False)
1171
1172 @classmethod
1173 def extract_id(cls, url):
1174 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1175 if mobj is None:
1176 raise ExtractorError('Invalid URL: %s' % url)
1177 video_id = mobj.group(2)
1178 return video_id
1179
1180 def _extract_from_m3u8(self, manifest_url, video_id):
1181 url_map = {}
1182
1183 def _get_urls(_manifest):
1184 lines = _manifest.split('\n')
1185 urls = filter(lambda l: l and not l.startswith('#'),
1186 lines)
1187 return urls
1188 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
1189 formats_urls = _get_urls(manifest)
1190 for format_url in formats_urls:
1191 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
1192 url_map[itag] = format_url
1193 return url_map
1194
1195 def _extract_annotations(self, video_id):
1196 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
1197 return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
1198
1199 def _real_extract(self, url):
1200 url, smuggled_data = unsmuggle_url(url, {})
1201
1202 proto = (
1203 'http' if self._downloader.params.get('prefer_insecure', False)
1204 else 'https')
1205
1206 start_time = None
1207 end_time = None
1208 parsed_url = compat_urllib_parse_urlparse(url)
1209 for component in [parsed_url.fragment, parsed_url.query]:
1210 query = compat_parse_qs(component)
1211 if start_time is None and 't' in query:
1212 start_time = parse_duration(query['t'][0])
1213 if start_time is None and 'start' in query:
1214 start_time = parse_duration(query['start'][0])
1215 if end_time is None and 'end' in query:
1216 end_time = parse_duration(query['end'][0])
1217
1218 # Extract original video URL from URL with redirection, like age verification, using next_url parameter
1219 mobj = re.search(self._NEXT_URL_RE, url)
1220 if mobj:
1221 url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1222 video_id = self.extract_id(url)
1223
1224 # Get video webpage
1225 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1226 video_webpage = self._download_webpage(url, video_id)
1227
1228 # Attempt to extract SWF player URL
1229 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1230 if mobj is not None:
1231 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
1232 else:
1233 player_url = None
1234
1235 dash_mpds = []
1236
1237 def add_dash_mpd(video_info):
1238 dash_mpd = video_info.get('dashmpd')
1239 if dash_mpd and dash_mpd[0] not in dash_mpds:
1240 dash_mpds.append(dash_mpd[0])
1241
1242 # Get video info
1243 embed_webpage = None
1244 is_live = None
1245 if re.search(r'player-age-gate-content">', video_webpage) is not None:
1246 age_gate = True
1247 # We simulate access to the video from www.youtube.com/v/{video_id};
1248 # this page can be viewed without logging in to YouTube
1249 url = proto + '://www.youtube.com/embed/%s' % video_id
1250 embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1251 data = compat_urllib_parse_urlencode({
1252 'video_id': video_id,
1253 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1254 'sts': self._search_regex(
1255 r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1256 })
1257 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1258 video_info_webpage = self._download_webpage(
1259 video_info_url, video_id,
1260 note='Refetching age-gated info webpage',
1261 errnote='unable to download video info webpage')
1262 video_info = compat_parse_qs(video_info_webpage)
1263 add_dash_mpd(video_info)
1264 else:
1265 age_gate = False
1266 video_info = None
1267 # Try looking directly into the video webpage
1268 ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1269 if ytplayer_config:
1270 args = ytplayer_config['args']
1271 if args.get('url_encoded_fmt_stream_map'):
1272 # Convert to the same format returned by compat_parse_qs
1273 video_info = dict((k, [v]) for k, v in args.items())
1274 add_dash_mpd(video_info)
1275 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1276 is_live = True
1277 if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1278 # We also look in get_video_info since it may contain a different dashmpd
1279 # URL pointing to a DASH manifest with a possibly different itag set (some itags
1280 # are missing from the manifest referenced by the webpage's dashmpd, others from
1281 # the manifest referenced by get_video_info's dashmpd).
1282 # The general idea is to take the union of itags from both DASH manifests (for an
1283 # example of such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
1284 self.report_video_info_webpage_download(video_id)
1285 for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
1286 video_info_url = (
1287 '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
1288 % (proto, video_id, el_type))
1289 video_info_webpage = self._download_webpage(
1290 video_info_url,
1291 video_id, note=False,
1292 errnote='unable to download video info webpage')
1293 get_video_info = compat_parse_qs(video_info_webpage)
1294 if get_video_info.get('use_cipher_signature') != ['True']:
1295 add_dash_mpd(get_video_info)
1296 if not video_info:
1297 video_info = get_video_info
1298 if 'token' in get_video_info:
1299 # Different get_video_info requests may report different results, e.g.
1300 # some may report video unavailability, but some may serve it without
1301 # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
1302 # the original webpage as well as el=info and el=embedded get_video_info
1303 # requests report video unavailability due to geo restriction while
1304 # el=detailpage succeeds and returns valid data). This is probably
1305 # due to YouTube measures against IP ranges of hosting providers.
1306 # Work around this by preferring the first successful video_info that
1307 # contains a token, if no such video_info has been found yet.
1308 if 'token' not in video_info:
1309 video_info = get_video_info
1310 break
1311 if 'token' not in video_info:
1312 if 'reason' in video_info:
1313 if 'The uploader has not made this video available in your country.' in video_info['reason']:
1314 regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
1315 if regions_allowed:
1316 raise ExtractorError('YouTube said: This video is available in %s only' % (
1317 ', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
1318 expected=True)
1319 raise ExtractorError(
1320 'YouTube said: %s' % video_info['reason'][0],
1321 expected=True, video_id=video_id)
1322 else:
1323 raise ExtractorError(
1324 '"token" parameter not in video info for unknown reason',
1325 video_id=video_id)
1326
1327 # title
1328 if 'title' in video_info:
1329 video_title = video_info['title'][0]
1330 else:
1331 self._downloader.report_warning('Unable to extract video title')
1332 video_title = '_'
1333
1334 # description
1335 video_description = get_element_by_id("eow-description", video_webpage)
1336 if video_description:
1337 video_description = re.sub(r'''(?x)
1338 <a\s+
1339 (?:[a-zA-Z-]+="[^"]*"\s+)*?
1340 (?:title|href)="([^"]+)"\s+
1341 (?:[a-zA-Z-]+="[^"]*"\s+)*?
1342 class="[^"]*"[^>]*>
1343 [^<]+\.{3}\s*
1344 </a>
1345 ''', r'\1', video_description)
1346 video_description = clean_html(video_description)
1347 else:
1348 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
1349 if fd_mobj:
1350 video_description = unescapeHTML(fd_mobj.group(1))
1351 else:
1352 video_description = ''
1353
1354 if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
1355 if not self._downloader.params.get('noplaylist'):
1356 entries = []
1357 feed_ids = []
1358 multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
1359 for feed in multifeed_metadata_list.split(','):
1360 # Unquote should take place before split on comma (,) since textual
1361 # fields may contain comma as well (see
1362 # https://github.com/rg3/youtube-dl/issues/8536)
1363 feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
1364 entries.append({
1365 '_type': 'url_transparent',
1366 'ie_key': 'Youtube',
1367 'url': smuggle_url(
1368 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1369 {'force_singlefeed': True}),
1370 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1371 })
1372 feed_ids.append(feed_data['id'][0])
1373 self.to_screen(
1374 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1375 % (', '.join(feed_ids), video_id))
1376 return self.playlist_result(entries, video_id, video_title, video_description)
1377 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1378
1379 if 'view_count' in video_info:
1380 view_count = int(video_info['view_count'][0])
1381 else:
1382 view_count = None
1383
1384 # Check for "rental" videos
1385 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1386 raise ExtractorError('"rental" videos not supported')
1387
1388 # Start extracting information
1389 self.report_information_extraction(video_id)
1390
1391 # uploader
1392 if 'author' not in video_info:
1393 raise ExtractorError('Unable to extract uploader name')
1394 video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
1395
1396 # uploader_id
1397 video_uploader_id = None
1398 video_uploader_url = None
1399 mobj = re.search(
1400 r'<link itemprop="url" href="(?P<uploader_url>https?://www.youtube.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
1401 video_webpage)
1402 if mobj is not None:
1403 video_uploader_id = mobj.group('uploader_id')
1404 video_uploader_url = mobj.group('uploader_url')
1405 else:
1406 self._downloader.report_warning('unable to extract uploader nickname')
1407
1408 # thumbnail image
1409 # We try first to get a high quality image:
1410 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
1411 video_webpage, re.DOTALL)
1412 if m_thumb is not None:
1413 video_thumbnail = m_thumb.group(1)
1414 elif 'thumbnail_url' not in video_info:
1415 self._downloader.report_warning('unable to extract video thumbnail')
1416 video_thumbnail = None
1417 else: # don't panic if we can't find it
1418 video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
1419
1420 # upload date
1421 upload_date = self._html_search_meta(
1422 'datePublished', video_webpage, 'upload date', default=None)
1423 if not upload_date:
1424 upload_date = self._search_regex(
1425 [r'(?s)id="eow-date.*?>(.*?)</span>',
1426 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
1427 video_webpage, 'upload date', default=None)
1428 if upload_date:
1429 upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
1430 upload_date = unified_strdate(upload_date)
1431
1432 video_license = self._html_search_regex(
1433 r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
1434 video_webpage, 'license', default=None)
1435
1436 m_music = re.search(
1437 r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
1438 video_webpage)
1439 if m_music:
1440 video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
1441 video_creator = clean_html(m_music.group('creator'))
1442 else:
1443 video_alt_title = video_creator = None
1444
1445 m_cat_container = self._search_regex(
1446 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
1447 video_webpage, 'categories', default=None)
1448 if m_cat_container:
1449 category = self._html_search_regex(
1450 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
1451 default=None)
1452 video_categories = None if category is None else [category]
1453 else:
1454 video_categories = None
1455
1456 video_tags = [
1457 unescapeHTML(m.group('content'))
1458 for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
1459
1460 def _extract_count(count_name):
1461 return str_to_int(self._search_regex(
1462 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
1463 % re.escape(count_name),
1464 video_webpage, count_name, default=None))
1465
1466 like_count = _extract_count('like')
1467 dislike_count = _extract_count('dislike')
1468
1469 # subtitles
1470 video_subtitles = self.extract_subtitles(video_id, video_webpage)
1471 automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
1472
1473 if 'length_seconds' not in video_info:
1474 self._downloader.report_warning('unable to extract video duration')
1475 video_duration = None
1476 else:
1477 video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
1478
1479 # annotations
1480 video_annotations = None
1481 if self._downloader.params.get('writeannotations', False):
1482 video_annotations = self._extract_annotations(video_id)
1483
1484 def _map_to_format_list(urlmap):
1485 formats = []
1486 for itag, video_real_url in urlmap.items():
1487 dct = {
1488 'format_id': itag,
1489 'url': video_real_url,
1490 'player_url': player_url,
1491 }
1492 if itag in self._formats:
1493 dct.update(self._formats[itag])
1494 formats.append(dct)
1495 return formats
1496
1497 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1498 self.report_rtmp_download()
1499 formats = [{
1500 'format_id': '_rtmp',
1501 'protocol': 'rtmp',
1502 'url': video_info['conn'][0],
1503 'player_url': player_url,
1504 }]
1505 elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
1506 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1507 if 'rtmpe%3Dyes' in encoded_url_map:
1508 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
1509 formats_spec = {}
1510 fmt_list = video_info.get('fmt_list', [''])[0]
1511 if fmt_list:
1512 for fmt in fmt_list.split(','):
1513 spec = fmt.split('/')
1514 if len(spec) > 1:
1515 width_height = spec[1].split('x')
1516 if len(width_height) == 2:
1517 formats_spec[spec[0]] = {
1518 'resolution': spec[1],
1519 'width': int_or_none(width_height[0]),
1520 'height': int_or_none(width_height[1]),
1521 }
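# (Illustrative assumption: fmt_list entries look like '22/1280x720/9/0/115', i.e.
# an itag followed by a WxH resolution; only those two leading fields are used here.)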
1522 formats = []
1523 for url_data_str in encoded_url_map.split(','):
1524 url_data = compat_parse_qs(url_data_str)
1525 if 'itag' not in url_data or 'url' not in url_data:
1526 continue
1527 format_id = url_data['itag'][0]
1528 url = url_data['url'][0]
1529
1530 if 'sig' in url_data:
1531 url += '&signature=' + url_data['sig'][0]
1532 elif 's' in url_data:
1533 encrypted_sig = url_data['s'][0]
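# An 's' parameter means the signature is scrambled; the JS/SWF player referenced
# below is located and its signature routine applied via _decrypt_signature before
# the result is appended to the URL as the 'signature' query parameter.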
1534 ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
1535
1536 jsplayer_url_json = self._search_regex(
1537 ASSETS_RE,
1538 embed_webpage if age_gate else video_webpage,
1539 'JS player URL (1)', default=None)
1540 if not jsplayer_url_json and not age_gate:
1541 # We need the embed website after all
1542 if embed_webpage is None:
1543 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
1544 embed_webpage = self._download_webpage(
1545 embed_url, video_id, 'Downloading embed webpage')
1546 jsplayer_url_json = self._search_regex(
1547 ASSETS_RE, embed_webpage, 'JS player URL')
1548
1549 player_url = json.loads(jsplayer_url_json)
1550 if player_url is None:
1551 player_url_json = self._search_regex(
1552 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
1553 video_webpage, 'age gate player URL')
1554 player_url = json.loads(player_url_json)
1555
1556 if self._downloader.params.get('verbose'):
1557 if player_url is None:
1558 player_version = 'unknown'
1559 player_desc = 'unknown'
1560 else:
1561 if player_url.endswith('swf'):
1562 player_version = self._search_regex(
1563 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
1564 'flash player', fatal=False)
1565 player_desc = 'flash player %s' % player_version
1566 else:
1567 player_version = self._search_regex(
1568 [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
1569 player_url,
1570 'html5 player', fatal=False)
1571 player_desc = 'html5 player %s' % player_version
1572
1573 parts_sizes = self._signature_cache_id(encrypted_sig)
1574 self.to_screen('{%s} signature length %s, %s' %
1575 (format_id, parts_sizes, player_desc))
1576
1577 signature = self._decrypt_signature(
1578 encrypted_sig, video_id, player_url, age_gate)
1579 url += '&signature=' + signature
1580 if 'ratebypass' not in url:
1581 url += '&ratebypass=yes'
1582
1583 dct = {
1584 'format_id': format_id,
1585 'url': url,
1586 'player_url': player_url,
1587 }
1588 if format_id in self._formats:
1589 dct.update(self._formats[format_id])
1590 if format_id in formats_spec:
1591 dct.update(formats_spec[format_id])
1592
1593 # Some itags are not included in the DASH manifest, so the corresponding formats
1594 # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
1595 # Try to extract metadata from the url_encoded_fmt_stream_map entry instead.
1596 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
1597 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
1598
1599 more_fields = {
1600 'filesize': int_or_none(url_data.get('clen', [None])[0]),
1601 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
1602 'width': width,
1603 'height': height,
1604 'fps': int_or_none(url_data.get('fps', [None])[0]),
1605 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
1606 }
1607 for key, value in more_fields.items():
1608 if value:
1609 dct[key] = value
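# (Illustrative: a 'type' value such as 'video/mp4; codecs="avc1.42001E, mp4a.40.2"'
# is parsed below into the container ext ('mp4') and the video/audio codec strings.)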
1610 type_ = url_data.get('type', [None])[0]
1611 if type_:
1612 type_split = type_.split(';')
1613 kind_ext = type_split[0].split('/')
1614 if len(kind_ext) == 2:
1615 kind, _ = kind_ext
1616 dct['ext'] = mimetype2ext(type_split[0])
1617 if kind in ('audio', 'video'):
1618 codecs = None
1619 for mobj in re.finditer(
1620 r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
1621 if mobj.group('key') == 'codecs':
1622 codecs = mobj.group('val')
1623 break
1624 if codecs:
1625 codecs = codecs.split(',')
1626 if len(codecs) == 2:
1627 acodec, vcodec = codecs[1], codecs[0]
1628 else:
1629 acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
1630 dct.update({
1631 'acodec': acodec,
1632 'vcodec': vcodec,
1633 })
1634 formats.append(dct)
1635 elif video_info.get('hlsvp'):
1636 manifest_url = video_info['hlsvp'][0]
1637 url_map = self._extract_from_m3u8(manifest_url, video_id)
1638 formats = _map_to_format_list(url_map)
1639 # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
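# ('Youtubedl-no-compression' is youtube-dl's internal marker header: the HTTP
# handler drops Accept-Encoding from such requests instead of forwarding the
# marker to the server.)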
1640 for a_format in formats:
1641 a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
1642 else:
1643 unavailable_message = self._html_search_regex(
1644 r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
1645 video_webpage, 'unavailable message', default=None)
1646 if unavailable_message:
1647 raise ExtractorError(unavailable_message, expected=True)
1648 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
1649
1650 # Look for the DASH manifest
1651 if self._downloader.params.get('youtube_include_dash_manifest', True):
1652 dash_mpd_fatal = True
1653 for mpd_url in dash_mpds:
1654 dash_formats = {}
1655 try:
1656 def decrypt_sig(mobj):
1657 s = mobj.group(1)
1658 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
1659 return '/signature/%s' % dec_s
1660
1661 mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
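# (The substitution above handles DASH manifest URLs that carry a scrambled
# signature in an '/s/<sig>' path segment: it is descrambled with the same
# routine as the regular format URLs and re-inserted as '/signature/<value>'.)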
1662
1663 for df in self._extract_mpd_formats(
1664 mpd_url, video_id, fatal=dash_mpd_fatal,
1665 formats_dict=self._formats):
1666 # Do not overwrite a DASH format found in a previous DASH manifest
1667 if df['format_id'] not in dash_formats:
1668 dash_formats[df['format_id']] = df
1669 # Additional DASH manifests may end up in an HTTP Error 403, therefore
1670 # allow them to fail without a bug report message if some DASH manifest
1671 # already succeeded. This is a temporary workaround to reduce the burst
1672 # of bug reports until we figure out the reason and whether it can be
1673 # fixed at all.
1674 dash_mpd_fatal = False
1675 except (ExtractorError, KeyError) as e:
1676 self.report_warning(
1677 'Skipping DASH manifest: %r' % e, video_id)
1678 if dash_formats:
1679 # Remove the formats we found through non-DASH; they
1680 # contain less info and can be wrong, because we use
1681 # fixed values (for example the resolution). See
1682 # https://github.com/rg3/youtube-dl/issues/5774 for an
1683 # example.
1684 formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
1685 formats.extend(dash_formats.values())
1686
1687 # Check for malformed aspect ratio
1688 stretched_m = re.search(
1689 r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
1690 video_webpage)
1691 if stretched_m:
1692 w = float(stretched_m.group('w'))
1693 h = float(stretched_m.group('h'))
1694 # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
1695 # We will only process correct ratios.
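# (Illustrative: 'yt:stretch=16:9' sets stretched_ratio to 16/9 ≈ 1.78 on each
# video format, so that a later post-processing step can fix the display aspect
# ratio.)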
1696 if w > 0 and h > 0:
1697 ratio = w / h
1698 for f in formats:
1699 if f.get('vcodec') != 'none':
1700 f['stretched_ratio'] = ratio
1701
1702 self._sort_formats(formats)
1703
1704 self.mark_watched(video_id, video_info)
1705
1706 return {
1707 'id': video_id,
1708 'uploader': video_uploader,
1709 'uploader_id': video_uploader_id,
1710 'uploader_url': video_uploader_url,
1711 'upload_date': upload_date,
1712 'license': video_license,
1713 'creator': video_creator,
1714 'title': video_title,
1715 'alt_title': video_alt_title,
1716 'thumbnail': video_thumbnail,
1717 'description': video_description,
1718 'categories': video_categories,
1719 'tags': video_tags,
1720 'subtitles': video_subtitles,
1721 'automatic_captions': automatic_captions,
1722 'duration': video_duration,
1723 'age_limit': 18 if age_gate else 0,
1724 'annotations': video_annotations,
1725 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1726 'view_count': view_count,
1727 'like_count': like_count,
1728 'dislike_count': dislike_count,
1729 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
1730 'formats': formats,
1731 'is_live': is_live,
1732 'start_time': start_time,
1733 'end_time': end_time,
1734 }
1735
1736
1737 class YoutubeSharedVideoIE(InfoExtractor):
1738 _VALID_URL = r'(?:https?:)?//(?:www\.)?youtube\.com/shared\?.*\bci=(?P<id>[0-9A-Za-z_-]{11})'
1739 IE_NAME = 'youtube:shared'
1740
1741 _TEST = {
1742 'url': 'https://www.youtube.com/shared?ci=1nEzmT-M4fU',
1743 'info_dict': {
1744 'id': 'uPDB5I9wfp8',
1745 'ext': 'webm',
1746 'title': 'Pocoyo: 90 minutos de episódios completos Português para crianças - PARTE 3',
1747 'description': 'md5:d9e4d9346a2dfff4c7dc4c8cec0f546d',
1748 'upload_date': '20160219',
1749 'uploader': 'Pocoyo - Português (BR)',
1750 'uploader_id': 'PocoyoBrazil',
1751 },
1752 'add_ie': ['Youtube'],
1753 'params': {
1754 # There are already too many Youtube downloads
1755 'skip_download': True,
1756 },
1757 }
1758
1759 def _real_extract(self, url):
1760 video_id = self._match_id(url)
1761
1762 webpage = self._download_webpage(url, video_id)
1763
1764 real_video_id = self._html_search_meta(
1765 'videoId', webpage, 'YouTube video id', fatal=True)
1766
1767 return self.url_result(real_video_id, YoutubeIE.ie_key())
1768
1769
1770 class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
1771 IE_DESC = 'YouTube.com playlists'
1772 _VALID_URL = r"""(?x)(?:
1773 (?:https?://)?
1774 (?:\w+\.)?
1775 youtube\.com/
1776 (?:
1777 (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
1778 \? (?:.*?[&;])*? (?:p|a|list)=
1779 | p/
1780 )
1781 (
1782 (?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
1783 # Top tracks, they can also include dots
1784 |(?:MC)[\w\.]*
1785 )
1786 .*
1787 |
1788 ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
1789 )"""
1790 _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
1791 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
1792 IE_NAME = 'youtube:playlist'
1793 _TESTS = [{
1794 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1795 'info_dict': {
1796 'title': 'ytdl test PL',
1797 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1798 },
1799 'playlist_count': 3,
1800 }, {
1801 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1802 'info_dict': {
1803 'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1804 'title': 'YDL_Empty_List',
1805 },
1806 'playlist_count': 0,
1807 }, {
1808 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
1809 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1810 'info_dict': {
1811 'title': '29C3: Not my department',
1812 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1813 },
1814 'playlist_count': 95,
1815 }, {
1816 'note': 'issue #673',
1817 'url': 'PLBB231211A4F62143',
1818 'info_dict': {
1819 'title': '[OLD]Team Fortress 2 (Class-based LP)',
1820 'id': 'PLBB231211A4F62143',
1821 },
1822 'playlist_mincount': 26,
1823 }, {
1824 'note': 'Large playlist',
1825 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
1826 'info_dict': {
1827 'title': 'Uploads from Cauchemar',
1828 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
1829 },
1830 'playlist_mincount': 799,
1831 }, {
1832 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1833 'info_dict': {
1834 'title': 'YDL_safe_search',
1835 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1836 },
1837 'playlist_count': 2,
1838 }, {
1839 'note': 'embedded',
1840 'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1841 'playlist_count': 4,
1842 'info_dict': {
1843 'title': 'JODA15',
1844 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1845 }
1846 }, {
1847 'note': 'Embedded SWF player',
1848 'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
1849 'playlist_count': 4,
1850 'info_dict': {
1851 'title': 'JODA7',
1852 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
1853 }
1854 }, {
1855 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
1856 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
1857 'info_dict': {
1858 'title': 'Uploads from Interstellar Movie',
1859 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
1860 },
1861 'playlist_mincount': 21,
1862 }]
1863
1864 def _real_initialize(self):
1865 self._login()
1866
1867 def _extract_mix(self, playlist_id):
1868 # The mixes are generated from a single video;
1869 # the id of the playlist is just 'RD' + video_id.
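# A mix page only ever shows a window of the mix, so the watch URL is
# re-requested with the last id seen as the anchor video until no new ids
# turn up.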
1870 ids = []
1871 last_id = playlist_id[-11:]
1872 for n in itertools.count(1):
1873 url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
1874 webpage = self._download_webpage(
1875 url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
1876 new_ids = orderedSet(re.findall(
1877 r'''(?xs)data-video-username=".*?".*?
1878 href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
1879 webpage))
1880 # Fetch new pages until all the videos are repeated; it seems that
1881 # there are always 51 unique videos.
1882 new_ids = [_id for _id in new_ids if _id not in ids]
1883 if not new_ids:
1884 break
1885 ids.extend(new_ids)
1886 last_id = ids[-1]
1887
1888 url_results = self._ids_to_results(ids)
1889
1890 search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
1891 title_span = (
1892 search_title('playlist-title') or
1893 search_title('title long-title') or
1894 search_title('title'))
1895 title = clean_html(title_span)
1896
1897 return self.playlist_result(url_results, playlist_id, title)
1898
1899 def _extract_playlist(self, playlist_id):
1900 url = self._TEMPLATE_URL % playlist_id
1901 page = self._download_webpage(url, playlist_id)
1902
1903 for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
1904 match = match.strip()
1905 # Check if the playlist exists or is private
1906 if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
1907 raise ExtractorError(
1908 'The playlist doesn\'t exist or is private, use --username or '
1909 '--netrc to access it.',
1910 expected=True)
1911 elif re.match(r'[^<]*Invalid parameters[^<]*', match):
1912 raise ExtractorError(
1913 'Invalid parameters. Maybe URL is incorrect.',
1914 expected=True)
1915 elif re.match(r'[^<]*Choose your language[^<]*', match):
1916 continue
1917 else:
1918 self.report_warning('Youtube gives an alert message: ' + match)
1919
1920 playlist_title = self._html_search_regex(
1921 r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
1922 page, 'title')
1923
1924 return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
1925
1926 def _check_download_just_video(self, url, playlist_id):
1927 # Check if it's a video-specific URL
1928 query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
1929 if 'v' in query_dict:
1930 video_id = query_dict['v'][0]
1931 if self._downloader.params.get('noplaylist'):
1932 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1933 return self.url_result(video_id, 'Youtube', video_id=video_id)
1934 else:
1935 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
1936
1937 def _real_extract(self, url):
1938 # Extract playlist id
1939 mobj = re.match(self._VALID_URL, url)
1940 if mobj is None:
1941 raise ExtractorError('Invalid URL: %s' % url)
1942 playlist_id = mobj.group(1) or mobj.group(2)
1943
1944 video = self._check_download_just_video(url, playlist_id)
1945 if video:
1946 return video
1947
1948 if playlist_id.startswith(('RD', 'UL', 'PU')):
1949 # Mixes require a custom extraction process
1950 return self._extract_mix(playlist_id)
1951
1952 return self._extract_playlist(playlist_id)
1953
1954
1955 class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
1956 IE_DESC = 'YouTube.com channels'
1957 _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
1958 _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
1959 _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
1960 IE_NAME = 'youtube:channel'
1961 _TESTS = [{
1962 'note': 'paginated channel',
1963 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
1964 'playlist_mincount': 91,
1965 'info_dict': {
1966 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
1967 'title': 'Uploads from lex will',
1968 }
1969 }, {
1970 'note': 'Age restricted channel',
1971 # from https://www.youtube.com/user/DeusExOfficial
1972 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
1973 'playlist_mincount': 64,
1974 'info_dict': {
1975 'id': 'UUs0ifCMCm1icqRbqhUINa0w',
1976 'title': 'Uploads from Deus Ex',
1977 },
1978 }]
1979
1980 @classmethod
1981 def suitable(cls, url):
1982 return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
1983 else super(YoutubeChannelIE, cls).suitable(url))
1984
1985 def _build_template_url(self, url, channel_id):
1986 return self._TEMPLATE_URL % channel_id
1987
1988 def _real_extract(self, url):
1989 channel_id = self._match_id(url)
1990
1991 url = self._build_template_url(url, channel_id)
1992
1993 # Channel-by-page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778).
1994 # Work around this by extracting as a playlist if we managed to obtain the channel playlist URL,
1995 # otherwise fall back on channel-by-page extraction.
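# (A channel id of the form 'UCxxxx' maps to its uploads playlist 'UUxxxx'; the
# redirect below relies on that to sidestep the pagination cap.)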
1996 channel_page = self._download_webpage(
1997 url + '?view=57', channel_id,
1998 'Downloading channel page', fatal=False)
1999 if channel_page is False:
2000 channel_playlist_id = False
2001 else:
2002 channel_playlist_id = self._html_search_meta(
2003 'channelId', channel_page, 'channel id', default=None)
2004 if not channel_playlist_id:
2005 channel_url = self._html_search_meta(
2006 ('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
2007 channel_page, 'channel url', default=None)
2008 if channel_url:
2009 channel_playlist_id = self._search_regex(
2010 r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
2011 channel_url, 'channel id', default=None)
2012 if channel_playlist_id and channel_playlist_id.startswith('UC'):
2013 playlist_id = 'UU' + channel_playlist_id[2:]
2014 return self.url_result(
2015 compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
2016
2017 channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
2018 autogenerated = re.search(r'''(?x)
2019 class="[^"]*?(?:
2020 channel-header-autogenerated-label|
2021 yt-channel-title-autogenerated
2022 )[^"]*"''', channel_page) is not None
2023
2024 if autogenerated:
2025 # The videos are contained in a single page;
2026 # the ajax pages can't be used, they are empty.
2027 entries = [
2028 self.url_result(
2029 video_id, 'Youtube', video_id=video_id,
2030 video_title=video_title)
2031 for video_id, video_title in self.extract_videos_from_page(channel_page)]
2032 return self.playlist_result(entries, channel_id)
2033
2034 try:
2035 next(self._entries(channel_page, channel_id))
2036 except StopIteration:
2037 alert_message = self._html_search_regex(
2038 r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
2039 channel_page, 'alert', default=None, group='alert')
2040 if alert_message:
2041 raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
2042
2043 return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
2044
2045
2046 class YoutubeUserIE(YoutubeChannelIE):
2047 IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
2048 _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
2049 _TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
2050 IE_NAME = 'youtube:user'
2051
2052 _TESTS = [{
2053 'url': 'https://www.youtube.com/user/TheLinuxFoundation',
2054 'playlist_mincount': 320,
2055 'info_dict': {
2056 'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
2057 'title': 'Uploads from The Linux Foundation',
2058 }
2059 }, {
2060 # Only available via https://www.youtube.com/c/12minuteathlete/videos
2061 # but not https://www.youtube.com/user/12minuteathlete/videos
2062 'url': 'https://www.youtube.com/c/12minuteathlete/videos',
2063 'playlist_mincount': 249,
2064 'info_dict': {
2065 'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
2066 'title': 'Uploads from 12 Minute Athlete',
2067 }
2068 }, {
2069 'url': 'ytuser:phihag',
2070 'only_matching': True,
2071 }, {
2072 'url': 'https://www.youtube.com/c/gametrailers',
2073 'only_matching': True,
2074 }, {
2075 'url': 'https://www.youtube.com/gametrailers',
2076 'only_matching': True,
2077 }, {
2078 # This channel is not available.
2079 'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
2080 'only_matching': True,
2081 }]
2082
2083 @classmethod
2084 def suitable(cls, url):
2085 # Don't return True if the url can be extracted with another youtube
2086 # extractor; the regex is too permissive and it would match otherwise.
2087 other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
2088 if any(ie.suitable(url) for ie in other_yt_ies):
2089 return False
2090 else:
2091 return super(YoutubeUserIE, cls).suitable(url)
2092
2093 def _build_template_url(self, url, channel_id):
2094 mobj = re.match(self._VALID_URL, url)
2095 return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
2096
2097
2098 class YoutubeLiveIE(YoutubeBaseInfoExtractor):
2099 IE_DESC = 'YouTube.com live streams'
2100 _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+))/live'
2101 IE_NAME = 'youtube:live'
2102
2103 _TESTS = [{
2104 'url': 'http://www.youtube.com/user/TheYoungTurks/live',
2105 'info_dict': {
2106 'id': 'a48o2S1cPoo',
2107 'ext': 'mp4',
2108 'title': 'The Young Turks - Live Main Show',
2109 'uploader': 'The Young Turks',
2110 'uploader_id': 'TheYoungTurks',
2111 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
2112 'upload_date': '20150715',
2113 'license': 'Standard YouTube License',
2114 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
2115 'categories': ['News & Politics'],
2116 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
2117 'like_count': int,
2118 'dislike_count': int,
2119 },
2120 'params': {
2121 'skip_download': True,
2122 },
2123 }, {
2124 'url': 'http://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
2125 'only_matching': True,
2126 }]
2127
2128 def _real_extract(self, url):
2129 mobj = re.match(self._VALID_URL, url)
2130 channel_id = mobj.group('id')
2131 base_url = mobj.group('base_url')
2132 webpage = self._download_webpage(url, channel_id, fatal=False)
2133 if webpage:
2134 page_type = self._og_search_property(
2135 'type', webpage, 'page type', default=None)
2136 video_id = self._html_search_meta(
2137 'videoId', webpage, 'video id', default=None)
2138 if page_type == 'video' and video_id and re.match(r'^[0-9A-Za-z_-]{11}$', video_id):
2139 return self.url_result(video_id, YoutubeIE.ie_key())
2140 return self.url_result(base_url)
2141
2142
2143 class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
2144 IE_DESC = 'YouTube.com user/channel playlists'
2145 _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
2146 IE_NAME = 'youtube:playlists'
2147
2148 _TESTS = [{
2149 'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
2150 'playlist_mincount': 4,
2151 'info_dict': {
2152 'id': 'ThirstForScience',
2153 'title': 'Thirst for Science',
2154 },
2155 }, {
2156 # with "Load more" button
2157 'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
2158 'playlist_mincount': 70,
2159 'info_dict': {
2160 'id': 'igorkle1',
2161 'title': 'Игорь Клейнер',
2162 },
2163 }, {
2164 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
2165 'playlist_mincount': 17,
2166 'info_dict': {
2167 'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
2168 'title': 'Chem Player',
2169 },
2170 }]
2171
2172
2173 class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
2174 IE_DESC = 'YouTube.com searches'
2175 # There doesn't appear to be a real limit; for example, searching for
2176 # 'python' yields more than 8,000,000 results
2177 _MAX_RESULTS = float('inf')
2178 IE_NAME = 'youtube:search'
2179 _SEARCH_KEY = 'ytsearch'
2180 _EXTRA_QUERY_ARGS = {}
2181 _TESTS = []
2182
2183 def _get_n_results(self, query, n):
2184 """Get a specified number of results for a query"""
2185
2186 videos = []
2187 limit = n
2188
2189 for pagenum in itertools.count(1):
2190 url_query = {
2191 'search_query': query.encode('utf-8'),
2192 'page': pagenum,
2193 'spf': 'navigate',
2194 }
2195 url_query.update(self._EXTRA_QUERY_ARGS)
2196 result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
2197 data = self._download_json(
2198 result_url, video_id='query "%s"' % query,
2199 note='Downloading page %s' % pagenum,
2200 errnote='Unable to download API page')
2201 html_content = data[1]['body']['content']
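# (With spf=navigate the results endpoint answers with JSON; the rendered result
# markup sits under body/content of the second array element and is scraped like
# a regular page.)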
2202
2203 if 'class="search-message' in html_content:
2204 raise ExtractorError(
2205 '[youtube] No video results', expected=True)
2206
2207 new_videos = self._ids_to_results(orderedSet(re.findall(
2208 r'href="/watch\?v=(.{11})', html_content)))
2209 videos += new_videos
2210 if not new_videos or len(videos) > limit:
2211 break
2212
2213 if len(videos) > n:
2214 videos = videos[:n]
2215 return self.playlist_result(videos, query)
2216
2217
2218 class YoutubeSearchDateIE(YoutubeSearchIE):
2219 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
2220 _SEARCH_KEY = 'ytsearchdate'
2221 IE_DESC = 'YouTube.com searches, newest videos first'
2222 _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
2223
2224
2225 class YoutubeSearchURLIE(YoutubePlaylistBaseInfoExtractor):
2226 IE_DESC = 'YouTube.com search URLs'
2227 IE_NAME = 'youtube:search_url'
2228 _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
2229 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
2230 _TESTS = [{
2231 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
2232 'playlist_mincount': 5,
2233 'info_dict': {
2234 'title': 'youtube-dl test video',
2235 }
2236 }, {
2237 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
2238 'only_matching': True,
2239 }]
2240
2241 def _real_extract(self, url):
2242 mobj = re.match(self._VALID_URL, url)
2243 query = compat_urllib_parse_unquote_plus(mobj.group('query'))
2244 webpage = self._download_webpage(url, query)
2245 return self.playlist_result(self._process_page(webpage), playlist_title=query)
2246
2247
2248 class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
2249 IE_DESC = 'YouTube.com (multi-season) shows'
2250 _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
2251 IE_NAME = 'youtube:show'
2252 _TESTS = [{
2253 'url': 'https://www.youtube.com/show/airdisasters',
2254 'playlist_mincount': 5,
2255 'info_dict': {
2256 'id': 'airdisasters',
2257 'title': 'Air Disasters',
2258 }
2259 }]
2260
2261 def _real_extract(self, url):
2262 playlist_id = self._match_id(url)
2263 return super(YoutubeShowIE, self)._real_extract(
2264 'https://www.youtube.com/show/%s/playlists' % playlist_id)
2265
2266
2267 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
2268 """
2269 Base class for feed extractors
2270 Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
2271 """
2272 _LOGIN_REQUIRED = True
2273
2274 @property
2275 def IE_NAME(self):
2276 return 'youtube:%s' % self._FEED_NAME
2277
2278 def _real_initialize(self):
2279 self._login()
2280
2281 def _real_extract(self, url):
2282 page = self._download_webpage(
2283 'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
2284
2285 # The extraction process is the same as for playlists, but the regex
2286 # for the video ids doesn't contain an index
2287 ids = []
2288 more_widget_html = content_html = page
2289 for page_num in itertools.count(1):
2290 matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
2291
2292 # 'recommended' feed has infinite 'load more' and each new portion spins
2293 # the same videos in (sometimes) slightly different order, so we'll check
2294 # for uniqueness and break when a portion has no new videos
2295 new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))  # list(): filter() is lazy on Python 3
2296 if not new_ids:
2297 break
2298
2299 ids.extend(new_ids)
2300
2301 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
2302 if not mobj:
2303 break
2304
2305 more = self._download_json(
2306 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
2307 'Downloading page #%s' % page_num,
2308 transform_source=uppercase_escape)
2309 content_html = more['content_html']
2310 more_widget_html = more['load_more_widget_html']
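# (content_html above is the next chunk of rendered video markup; the
# load_more_widget_html part carries the following "load more" widget, whose
# data-uix-load-more-href points at the next continuation.)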
2311
2312 return self.playlist_result(
2313 self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
2314
2315
2316 class YoutubeWatchLaterIE(YoutubePlaylistIE):
2317 IE_NAME = 'youtube:watchlater'
2318 IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
2319 _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
2320
2321 _TESTS = [{
2322 'url': 'https://www.youtube.com/playlist?list=WL',
2323 'only_matching': True,
2324 }, {
2325 'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
2326 'only_matching': True,
2327 }]
2328
2329 def _real_extract(self, url):
2330 video = self._check_download_just_video(url, 'WL')
2331 if video:
2332 return video
2333 return self._extract_playlist('WL')
2334
2335
2336 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
2337 IE_NAME = 'youtube:favorites'
2338 IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
2339 _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
2340 _LOGIN_REQUIRED = True
2341
2342 def _real_extract(self, url):
2343 webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
2344 playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
2345 return self.url_result(playlist_id, 'YoutubePlaylist')
2346
2347
2348 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
2349 IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
2350 _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
2351 _FEED_NAME = 'recommended'
2352 _PLAYLIST_TITLE = 'Youtube Recommended videos'
2353
2354
2355 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
2356 IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
2357 _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
2358 _FEED_NAME = 'subscriptions'
2359 _PLAYLIST_TITLE = 'Youtube Subscriptions'
2360
2361
2362 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
2363 IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
2364 _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
2365 _FEED_NAME = 'history'
2366 _PLAYLIST_TITLE = 'Youtube History'
2367
2368
2369 class YoutubeTruncatedURLIE(InfoExtractor):
2370 IE_NAME = 'youtube:truncated_url'
2371 IE_DESC = False # Do not list
2372 _VALID_URL = r'''(?x)
2373 (?:https?://)?
2374 (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
2375 (?:watch\?(?:
2376 feature=[a-z_]+|
2377 annotation_id=annotation_[^&]+|
2378 x-yt-cl=[0-9]+|
2379 hl=[^&]*|
2380 t=[0-9]+
2381 )?
2382 |
2383 attribution_link\?a=[^&]+
2384 )
2385 $
2386 '''
2387
2388 _TESTS = [{
2389 'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
2390 'only_matching': True,
2391 }, {
2392 'url': 'http://www.youtube.com/watch?',
2393 'only_matching': True,
2394 }, {
2395 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
2396 'only_matching': True,
2397 }, {
2398 'url': 'https://www.youtube.com/watch?feature=foo',
2399 'only_matching': True,
2400 }, {
2401 'url': 'https://www.youtube.com/watch?hl=en-GB',
2402 'only_matching': True,
2403 }, {
2404 'url': 'https://www.youtube.com/watch?t=2372',
2405 'only_matching': True,
2406 }]
2407
2408 def _real_extract(self, url):
2409 raise ExtractorError(
2410 'Did you forget to quote the URL? Remember that & is a meta '
2411 'character in most shells, so you want to put the URL in quotes, '
2412 'like youtube-dl '
2413 '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
2414 ' or simply youtube-dl BaW_jenozKc .',
2415 expected=True)
2416
2417
2418 class YoutubeTruncatedIDIE(InfoExtractor):
2419 IE_NAME = 'youtube:truncated_id'
2420 IE_DESC = False # Do not list
2421 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
2422
2423 _TESTS = [{
2424 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
2425 'only_matching': True,
2426 }]
2427
2428 def _real_extract(self, url):
2429 video_id = self._match_id(url)
2430 raise ExtractorError(
2431 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
2432 expected=True)