1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import random
10 import re
11 import time
12 import traceback
13
14 from .common import InfoExtractor, SearchInfoExtractor
15 from ..jsinterp import JSInterpreter
16 from ..swfinterp import SWFInterpreter
17 from ..compat import (
18 compat_chr,
19 compat_parse_qs,
20 compat_urllib_parse_unquote,
21 compat_urllib_parse_unquote_plus,
22 compat_urllib_parse_urlencode,
23 compat_urllib_parse_urlparse,
24 compat_urlparse,
25 compat_str,
26 )
27 from ..utils import (
28 clean_html,
29 error_to_compat_str,
30 ExtractorError,
31 float_or_none,
32 get_element_by_attribute,
33 get_element_by_id,
34 int_or_none,
35 mimetype2ext,
36 orderedSet,
37 parse_codecs,
38 parse_duration,
39 remove_quotes,
40 remove_start,
41 sanitized_Request,
42 smuggle_url,
43 str_to_int,
44 try_get,
45 unescapeHTML,
46 unified_strdate,
47 unsmuggle_url,
48 uppercase_escape,
49 urlencode_postdata,
50 )
51
52
53 class YoutubeBaseInfoExtractor(InfoExtractor):
54 """Provide base functions for Youtube extractors"""
55 _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
56 _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
57 _PASSWORD_CHALLENGE_URL = 'https://accounts.google.com/signin/challenge/sl/password'
58 _NETRC_MACHINE = 'youtube'
59 # If True it will raise an error if no login info is provided
60 _LOGIN_REQUIRED = False
61
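# Set the PREF cookie so that YouTube serves pages with the English interface (hl=en).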
62 def _set_language(self):
63 self._set_cookie(
64 '.youtube.com', 'PREF', 'f1=50000000&hl=en',
65 # YouTube sets the expire time to about two months
66 expire_time=time.time() + 2 * 30 * 24 * 3600)
67
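# Wrap a list of video IDs into url_result entries that defer extraction to the Youtube IE.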
68 def _ids_to_results(self, ids):
69 return [
70 self.url_result(vid_id, 'Youtube', video_id=vid_id)
71 for vid_id in ids]
72
73 def _login(self):
74 """
75 Attempt to log in to YouTube.
76 True is returned if successful or skipped.
77 False is returned if login failed.
78
79 If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
80 """
81 (username, password) = self._get_login_info()
82 # No authentication to be performed
83 if username is None:
84 if self._LOGIN_REQUIRED:
85 raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
86 return True
87
88 login_page = self._download_webpage(
89 self._LOGIN_URL, None,
90 note='Downloading login page',
91 errnote='unable to fetch login page', fatal=False)
92 if login_page is False:
93 return
94
95 login_form = self._hidden_inputs(login_page)
96
97 login_form.update({
98 'checkConnection': 'youtube',
99 'Email': username,
100 'Passwd': password,
101 })
102
103 login_results = self._download_webpage(
104 self._PASSWORD_CHALLENGE_URL, None,
105 note='Logging in', errnote='unable to log in', fatal=False,
106 data=urlencode_postdata(login_form))
107 if login_results is False:
108 return False
109
110 error_msg = self._html_search_regex(
111 r'<[^>]+id="errormsg_0_Passwd"[^>]*>([^<]+)<',
112 login_results, 'error message', default=None)
113 if error_msg:
114 raise ExtractorError('Unable to login: %s' % error_msg, expected=True)
115
116 if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
117 raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
118
119 # Two-Factor
120 # TODO add SMS and phone call support - these require making a request and then prompting the user
121
122 if re.search(r'(?i)<form[^>]+id="challenge"', login_results) is not None:
123 tfa_code = self._get_tfa_info('2-step verification code')
124
125 if not tfa_code:
126 self._downloader.report_warning(
127 'Two-factor authentication required. Provide it either interactively or with --twofactor <code> '
128 '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
129 return False
130
131 tfa_code = remove_start(tfa_code, 'G-')
132
133 tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
134
135 tfa_form_strs.update({
136 'Pin': tfa_code,
137 'TrustDevice': 'on',
138 })
139
140 tfa_data = urlencode_postdata(tfa_form_strs)
141
142 tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
143 tfa_results = self._download_webpage(
144 tfa_req, None,
145 note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
146
147 if tfa_results is False:
148 return False
149
150 if re.search(r'(?i)<form[^>]+id="challenge"', tfa_results) is not None:
151 self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
152 return False
153 if re.search(r'(?i)<form[^>]+id="gaia_loginform"', tfa_results) is not None:
154 self._downloader.report_warning('unable to log in - did the page structure change?')
155 return False
156 if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
157 self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
158 return False
159
160 if re.search(r'(?i)<form[^>]+id="gaia_loginform"', login_results) is not None:
161 self._downloader.report_warning('unable to log in: bad username or password')
162 return False
163 return True
164
165 def _real_initialize(self):
166 if self._downloader is None:
167 return
168 self._set_language()
169 if not self._login():
170 return
171
172
173 class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
174 # Extract entries from page with "Load more" button
175 def _entries(self, page, playlist_id):
176 more_widget_html = content_html = page
177 for page_num in itertools.count(1):
178 for entry in self._process_page(content_html):
179 yield entry
180
181 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
182 if not mobj:
183 break
184
185 more = self._download_json(
186 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
187 'Downloading page #%s' % page_num,
188 transform_source=uppercase_escape)
189 content_html = more['content_html']
190 if not content_html.strip():
191 # Some webpages show a "Load more" button but they don't
192 # have more videos
193 break
194 more_widget_html = more['load_more_widget_html']
195
196
197 class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
198 def _process_page(self, content):
199 for video_id, video_title in self.extract_videos_from_page(content):
200 yield self.url_result(video_id, 'Youtube', video_id, video_title)
201
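# Collect (video_id, title) pairs from a playlist page, keeping the first
# non-empty title seen for each ID and skipping duplicate IDs.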
202 def extract_videos_from_page(self, page):
203 ids_in_page = []
204 titles_in_page = []
205 for mobj in re.finditer(self._VIDEO_RE, page):
206 # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
207 if 'index' in mobj.groupdict() and mobj.group('id') == '0':
208 continue
209 video_id = mobj.group('id')
210 video_title = unescapeHTML(mobj.group('title'))
211 if video_title:
212 video_title = video_title.strip()
213 try:
214 idx = ids_in_page.index(video_id)
215 if video_title and not titles_in_page[idx]:
216 titles_in_page[idx] = video_title
217 except ValueError:
218 ids_in_page.append(video_id)
219 titles_in_page.append(video_title)
220 return zip(ids_in_page, titles_in_page)
221
222
223 class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
224 def _process_page(self, content):
225 for playlist_id in orderedSet(re.findall(
226 r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
227 content)):
228 yield self.url_result(
229 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
230
231 def _real_extract(self, url):
232 playlist_id = self._match_id(url)
233 webpage = self._download_webpage(url, playlist_id)
234 title = self._og_search_title(webpage, fatal=False)
235 return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
236
237
238 class YoutubeIE(YoutubeBaseInfoExtractor):
239 IE_DESC = 'YouTube.com'
240 _VALID_URL = r"""(?x)^
241 (
242 (?:https?://|//) # http(s):// or protocol-independent URL
243 (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
244 (?:www\.)?deturl\.com/www\.youtube\.com/|
245 (?:www\.)?pwnyoutube\.com/|
246 (?:www\.)?yourepeat\.com/|
247 tube\.majestyc\.net/|
248 youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
249 (?:.*?\#/)? # handle anchor (#/) redirect urls
250 (?: # the various things that can precede the ID:
251 (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
252 |(?: # or the v= param in all its forms
253 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
254 (?:\?|\#!?) # the params delimiter ? or # or #!
255 (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
256 v=
257 )
258 ))
259 |(?:
260 youtu\.be| # just youtu.be/xxxx
261 vid\.plus| # or vid.plus/xxxx
262 zwearz\.com/watch| # or zwearz.com/watch/xxxx
263 )/
264 |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
265 )
266 )? # all until now is optional -> you can pass the naked ID
267 ([0-9A-Za-z_-]{11}) # here it is! the YouTube video ID
268 (?!.*?\blist=) # combined list/video URLs are handled by the playlist IE
269 (?(1).+)? # if we found the ID, everything can follow
270 $"""
271 _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
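# Static map of known itags to container/codec properties. Heights and bitrates
# are nominal and can differ per video (see the notes on itags 36, 138 and 272 below).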
272 _formats = {
273 '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
274 '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
275 '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
276 '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
277 '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
278 '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
279 '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
280 '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
281 # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
282 '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
283 '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
284 '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
285 '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
286 '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
287 '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
288 '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
289 '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
290 '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
291
292
293 # 3D videos
294 '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
295 '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
296 '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
297 '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
298 '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
299 '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
300 '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
301
302 # Apple HTTP Live Streaming
303 '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
304 '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
305 '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
306 '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
307 '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
308 '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
309 '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
310 '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
311
312 # DASH mp4 video
313 '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
314 '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
315 '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
316 '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
317 '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
318 '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
319 '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
320 '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
321 '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
322 '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
323 '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
324 '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
325
326 # Dash mp4 audio
327 '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
328 '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
329 '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
330 '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
331 '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
332 '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'preference': -50, 'container': 'm4a_dash'},
333 '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'preference': -50, 'container': 'm4a_dash'},
334
335 # Dash webm
336 '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
337 '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
338 '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
339 '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
340 '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
341 '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
342 '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9', 'preference': -40},
343 '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
344 '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
345 '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
346 '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
347 '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
348 '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
349 '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
350 '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
351 # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
352 '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
353 '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
354 '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
355 '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
356 '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
357 '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
358
359 # Dash webm audio
360 '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
361 '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
362
363 # Dash webm audio with opus inside
364 '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
365 '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
366 '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
367
368 # RTMP (unnamed)
369 '_rtmp': {'protocol': 'rtmp'},
370 }
371 _SUBTITLE_FORMATS = ('ttml', 'vtt')
372
373 _GEO_BYPASS = False
374
375 IE_NAME = 'youtube'
376 _TESTS = [
377 {
378 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
379 'info_dict': {
380 'id': 'BaW_jenozKc',
381 'ext': 'mp4',
382 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
383 'uploader': 'Philipp Hagemeister',
384 'uploader_id': 'phihag',
385 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
386 'upload_date': '20121002',
387 'license': 'Standard YouTube License',
388 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
389 'categories': ['Science & Technology'],
390 'tags': ['youtube-dl'],
391 'duration': 10,
392 'like_count': int,
393 'dislike_count': int,
394 'start_time': 1,
395 'end_time': 9,
396 }
397 },
398 {
399 'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
400 'note': 'Test generic use_cipher_signature video (#897)',
401 'info_dict': {
402 'id': 'UxxajLWwzqY',
403 'ext': 'mp4',
404 'upload_date': '20120506',
405 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
406 'alt_title': 'I Love It (feat. Charli XCX)',
407 'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
408 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
409 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
410 'iconic ep', 'iconic', 'love', 'it'],
411 'duration': 180,
412 'uploader': 'Icona Pop',
413 'uploader_id': 'IconaPop',
414 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
415 'license': 'Standard YouTube License',
416 'creator': 'Icona Pop',
417 }
418 },
419 {
420 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
421 'note': 'Test VEVO video with age protection (#956)',
422 'info_dict': {
423 'id': '07FYdnEawAQ',
424 'ext': 'mp4',
425 'upload_date': '20130703',
426 'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
427 'alt_title': 'Tunnel Vision',
428 'description': 'md5:64249768eec3bc4276236606ea996373',
429 'duration': 419,
430 'uploader': 'justintimberlakeVEVO',
431 'uploader_id': 'justintimberlakeVEVO',
432 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
433 'license': 'Standard YouTube License',
434 'creator': 'Justin Timberlake',
435 'age_limit': 18,
436 }
437 },
438 {
439 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
440 'note': 'Embed-only video (#1746)',
441 'info_dict': {
442 'id': 'yZIXLfi8CZQ',
443 'ext': 'mp4',
444 'upload_date': '20120608',
445 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
446 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
447 'uploader': 'SET India',
448 'uploader_id': 'setindia',
449 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
450 'license': 'Standard YouTube License',
451 'age_limit': 18,
452 }
453 },
454 {
455 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
456 'note': 'Use the first video ID in the URL',
457 'info_dict': {
458 'id': 'BaW_jenozKc',
459 'ext': 'mp4',
460 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
461 'uploader': 'Philipp Hagemeister',
462 'uploader_id': 'phihag',
463 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
464 'upload_date': '20121002',
465 'license': 'Standard YouTube License',
466 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
467 'categories': ['Science & Technology'],
468 'tags': ['youtube-dl'],
469 'duration': 10,
470 'like_count': int,
471 'dislike_count': int,
472 },
473 'params': {
474 'skip_download': True,
475 },
476 },
477 {
478 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
479 'note': '256k DASH audio (format 141) via DASH manifest',
480 'info_dict': {
481 'id': 'a9LDPn-MO4I',
482 'ext': 'm4a',
483 'upload_date': '20121002',
484 'uploader_id': '8KVIDEO',
485 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
486 'description': '',
487 'uploader': '8KVIDEO',
488 'license': 'Standard YouTube License',
489 'title': 'UHDTV TEST 8K VIDEO.mp4'
490 },
491 'params': {
492 'youtube_include_dash_manifest': True,
493 'format': '141',
494 },
495 'skip': 'format 141 not served anymore',
496 },
497 # DASH manifest with encrypted signature
498 {
499 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
500 'info_dict': {
501 'id': 'IB3lcPjvWLA',
502 'ext': 'm4a',
503 'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
504 'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
505 'duration': 244,
506 'uploader': 'AfrojackVEVO',
507 'uploader_id': 'AfrojackVEVO',
508 'upload_date': '20131011',
509 'license': 'Standard YouTube License',
510 },
511 'params': {
512 'youtube_include_dash_manifest': True,
513 'format': '141/bestaudio[ext=m4a]',
514 },
515 },
516 # JS player signature function name containing $
517 {
518 'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
519 'info_dict': {
520 'id': 'nfWlot6h_JM',
521 'ext': 'm4a',
522 'title': 'Taylor Swift - Shake It Off',
523 'alt_title': 'Shake It Off',
524 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
525 'duration': 242,
526 'uploader': 'TaylorSwiftVEVO',
527 'uploader_id': 'TaylorSwiftVEVO',
528 'upload_date': '20140818',
529 'license': 'Standard YouTube License',
530 'creator': 'Taylor Swift',
531 },
532 'params': {
533 'youtube_include_dash_manifest': True,
534 'format': '141/bestaudio[ext=m4a]',
535 },
536 },
537 # Controversy video
538 {
539 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
540 'info_dict': {
541 'id': 'T4XJQO3qol8',
542 'ext': 'mp4',
543 'duration': 219,
544 'upload_date': '20100909',
545 'uploader': 'The Amazing Atheist',
546 'uploader_id': 'TheAmazingAtheist',
547 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
548 'license': 'Standard YouTube License',
549 'title': 'Burning Everyone\'s Koran',
550 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
551 }
552 },
553 # Normal age-gate video (No vevo, embed allowed)
554 {
555 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
556 'info_dict': {
557 'id': 'HtVdAasjOgU',
558 'ext': 'mp4',
559 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
560 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
561 'duration': 142,
562 'uploader': 'The Witcher',
563 'uploader_id': 'WitcherGame',
564 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
565 'upload_date': '20140605',
566 'license': 'Standard YouTube License',
567 'age_limit': 18,
568 },
569 },
570 # Age-gate video with encrypted signature
571 {
572 'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
573 'info_dict': {
574 'id': '6kLq3WMV1nU',
575 'ext': 'mp4',
576 'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
577 'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
578 'duration': 247,
579 'uploader': 'LloydVEVO',
580 'uploader_id': 'LloydVEVO',
581 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
582 'upload_date': '20110629',
583 'license': 'Standard YouTube License',
584 'age_limit': 18,
585 },
586 },
587 # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
588 {
589 'url': '__2ABJjxzNo',
590 'info_dict': {
591 'id': '__2ABJjxzNo',
592 'ext': 'mp4',
593 'duration': 266,
594 'upload_date': '20100430',
595 'uploader_id': 'deadmau5',
596 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
597 'creator': 'deadmau5',
598 'description': 'md5:12c56784b8032162bb936a5f76d55360',
599 'uploader': 'deadmau5',
600 'license': 'Standard YouTube License',
601 'title': 'Deadmau5 - Some Chords (HD)',
602 'alt_title': 'Some Chords',
603 },
604 'expected_warnings': [
605 'DASH manifest missing',
606 ]
607 },
608 # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
609 {
610 'url': 'lqQg6PlCWgI',
611 'info_dict': {
612 'id': 'lqQg6PlCWgI',
613 'ext': 'mp4',
614 'duration': 6085,
615 'upload_date': '20150827',
616 'uploader_id': 'olympic',
617 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
618 'license': 'Standard YouTube License',
619 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
620 'uploader': 'Olympic',
621 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
622 },
623 'params': {
624 'skip_download': 'requires avconv',
625 }
626 },
627 # Non-square pixels
628 {
629 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
630 'info_dict': {
631 'id': '_b-2C3KPAM0',
632 'ext': 'mp4',
633 'stretched_ratio': 16 / 9.,
634 'duration': 85,
635 'upload_date': '20110310',
636 'uploader_id': 'AllenMeow',
637 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
638 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
639 'uploader': '孫艾倫',
640 'license': 'Standard YouTube License',
641 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
642 },
643 },
644 # url_encoded_fmt_stream_map is empty string
645 {
646 'url': 'qEJwOuvDf7I',
647 'info_dict': {
648 'id': 'qEJwOuvDf7I',
649 'ext': 'webm',
650 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
651 'description': '',
652 'upload_date': '20150404',
653 'uploader_id': 'spbelect',
654 'uploader': 'Наблюдатели Петербурга',
655 },
656 'params': {
657 'skip_download': 'requires avconv',
658 },
659 'skip': 'This live event has ended.',
660 },
661 # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
662 {
663 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
664 'info_dict': {
665 'id': 'FIl7x6_3R5Y',
666 'ext': 'mp4',
667 'title': 'md5:7b81415841e02ecd4313668cde88737a',
668 'description': 'md5:116377fd2963b81ec4ce64b542173306',
669 'duration': 220,
670 'upload_date': '20150625',
671 'uploader_id': 'dorappi2000',
672 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
673 'uploader': 'dorappi2000',
674 'license': 'Standard YouTube License',
675 'formats': 'mincount:32',
676 },
677 },
678 # DASH manifest with segment_list
679 {
680 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
681 'md5': '8ce563a1d667b599d21064e982ab9e31',
682 'info_dict': {
683 'id': 'CsmdDsKjzN8',
684 'ext': 'mp4',
685 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
686 'uploader': 'Airtek',
687 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
688 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
689 'license': 'Standard YouTube License',
690 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
691 },
692 'params': {
693 'youtube_include_dash_manifest': True,
694 'format': '135', # bestvideo
695 },
696 'skip': 'This live event has ended.',
697 },
698 {
699 # Multifeed videos (multiple cameras), URL is for Main Camera
700 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
701 'info_dict': {
702 'id': 'jqWvoWXjCVs',
703 'title': 'teamPGP: Rocket League Noob Stream',
704 'description': 'md5:dc7872fb300e143831327f1bae3af010',
705 },
706 'playlist': [{
707 'info_dict': {
708 'id': 'jqWvoWXjCVs',
709 'ext': 'mp4',
710 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
711 'description': 'md5:dc7872fb300e143831327f1bae3af010',
712 'duration': 7335,
713 'upload_date': '20150721',
714 'uploader': 'Beer Games Beer',
715 'uploader_id': 'beergamesbeer',
716 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
717 'license': 'Standard YouTube License',
718 },
719 }, {
720 'info_dict': {
721 'id': '6h8e8xoXJzg',
722 'ext': 'mp4',
723 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
724 'description': 'md5:dc7872fb300e143831327f1bae3af010',
725 'duration': 7337,
726 'upload_date': '20150721',
727 'uploader': 'Beer Games Beer',
728 'uploader_id': 'beergamesbeer',
729 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
730 'license': 'Standard YouTube License',
731 },
732 }, {
733 'info_dict': {
734 'id': 'PUOgX5z9xZw',
735 'ext': 'mp4',
736 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
737 'description': 'md5:dc7872fb300e143831327f1bae3af010',
738 'duration': 7337,
739 'upload_date': '20150721',
740 'uploader': 'Beer Games Beer',
741 'uploader_id': 'beergamesbeer',
742 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
743 'license': 'Standard YouTube License',
744 },
745 }, {
746 'info_dict': {
747 'id': 'teuwxikvS5k',
748 'ext': 'mp4',
749 'title': 'teamPGP: Rocket League Noob Stream (zim)',
750 'description': 'md5:dc7872fb300e143831327f1bae3af010',
751 'duration': 7334,
752 'upload_date': '20150721',
753 'uploader': 'Beer Games Beer',
754 'uploader_id': 'beergamesbeer',
755 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
756 'license': 'Standard YouTube License',
757 },
758 }],
759 'params': {
760 'skip_download': True,
761 },
762 },
763 {
764 # Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
765 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
766 'info_dict': {
767 'id': 'gVfLd0zydlo',
768 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
769 },
770 'playlist_count': 2,
771 'skip': 'Not multifeed anymore',
772 },
773 {
774 'url': 'https://vid.plus/FlRa-iH7PGw',
775 'only_matching': True,
776 },
777 {
778 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
779 'only_matching': True,
780 },
781 {
782 # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
783 # Also tests cut-off URL expansion in video description (see
784 # https://github.com/rg3/youtube-dl/issues/1892,
785 # https://github.com/rg3/youtube-dl/issues/8164)
786 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
787 'info_dict': {
788 'id': 'lsguqyKfVQg',
789 'ext': 'mp4',
790 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
791 'alt_title': 'Dark Walk',
792 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
793 'duration': 133,
794 'upload_date': '20151119',
795 'uploader_id': 'IronSoulElf',
796 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
797 'uploader': 'IronSoulElf',
798 'license': 'Standard YouTube License',
799 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
800 },
801 'params': {
802 'skip_download': True,
803 },
804 },
805 {
806 # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
807 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
808 'only_matching': True,
809 },
810 {
811 # Video with yt:stretch=17:0
812 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
813 'info_dict': {
814 'id': 'Q39EVAstoRM',
815 'ext': 'mp4',
816 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
817 'description': 'md5:ee18a25c350637c8faff806845bddee9',
818 'upload_date': '20151107',
819 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
820 'uploader': 'CH GAMER DROID',
821 },
822 'params': {
823 'skip_download': True,
824 },
825 'skip': 'This video does not exist.',
826 },
827 {
828 # Video licensed under Creative Commons
829 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
830 'info_dict': {
831 'id': 'M4gD1WSo5mA',
832 'ext': 'mp4',
833 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
834 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
835 'duration': 721,
836 'upload_date': '20150127',
837 'uploader_id': 'BerkmanCenter',
838 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
839 'uploader': 'The Berkman Klein Center for Internet & Society',
840 'license': 'Creative Commons Attribution license (reuse allowed)',
841 },
842 'params': {
843 'skip_download': True,
844 },
845 },
846 {
847 # Channel-like uploader_url
848 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
849 'info_dict': {
850 'id': 'eQcmzGIKrzg',
851 'ext': 'mp4',
852 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
853 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
854 'duration': 4060,
855 'upload_date': '20151119',
856 'uploader': 'Bernie 2016',
857 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
858 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
859 'license': 'Creative Commons Attribution license (reuse allowed)',
860 },
861 'params': {
862 'skip_download': True,
863 },
864 },
865 {
866 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
867 'only_matching': True,
868 },
869 {
870 # YouTube Red paid video (https://github.com/rg3/youtube-dl/issues/10059)
871 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
872 'only_matching': True,
873 },
874 {
875 # Rental video preview
876 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
877 'info_dict': {
878 'id': 'uGpuVWrhIzE',
879 'ext': 'mp4',
880 'title': 'Piku - Trailer',
881 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
882 'upload_date': '20150811',
883 'uploader': 'FlixMatrix',
884 'uploader_id': 'FlixMatrixKaravan',
885 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
886 'license': 'Standard YouTube License',
887 },
888 'params': {
889 'skip_download': True,
890 },
891 },
892 {
893 # YouTube Red video with episode data
894 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
895 'info_dict': {
896 'id': 'iqKdEhx-dD4',
897 'ext': 'mp4',
898 'title': 'Isolation - Mind Field (Ep 1)',
899 'description': 'md5:8013b7ddea787342608f63a13ddc9492',
900 'duration': 2085,
901 'upload_date': '20170118',
902 'uploader': 'Vsauce',
903 'uploader_id': 'Vsauce',
904 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
905 'license': 'Standard YouTube License',
906 'series': 'Mind Field',
907 'season_number': 1,
908 'episode_number': 1,
909 },
910 'params': {
911 'skip_download': True,
912 },
913 'expected_warnings': [
914 'Skipping DASH manifest',
915 ],
916 },
917 {
918 # itag 212
919 'url': '1t24XAntNCY',
920 'only_matching': True,
921 },
922 {
923 # geo restricted to JP
924 'url': 'sJL6WA-aGkQ',
925 'only_matching': True,
926 },
927 ]
928
929 def __init__(self, *args, **kwargs):
930 super(YoutubeIE, self).__init__(*args, **kwargs)
931 self._player_cache = {}
932
933 def report_video_info_webpage_download(self, video_id):
934 """Report attempt to download video info webpage."""
935 self.to_screen('%s: Downloading video info webpage' % video_id)
936
937 def report_information_extraction(self, video_id):
938 """Report attempt to extract video information."""
939 self.to_screen('%s: Extracting video information' % video_id)
940
941 def report_unavailable_format(self, video_id, format):
942 """Report extracted video URL."""
943 self.to_screen('%s: Format %s not available' % (video_id, format))
944
945 def report_rtmp_download(self):
946 """Indicate the download will use the RTMP protocol."""
947 self.to_screen('RTMP download detected')
948
949 def _signature_cache_id(self, example_sig):
950 """ Return a string representation of a signature """
951 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
952
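# Download the JS or SWF player referenced by player_url, extract its signature
# scrambling routine and cache the resulting character permutation on disk
# (one spec per player id and signature length) under 'youtube-sigfuncs'.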
953 def _extract_signature_function(self, video_id, player_url, example_sig):
954 id_m = re.match(
955 r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
956 player_url)
957 if not id_m:
958 raise ExtractorError('Cannot identify player %r' % player_url)
959 player_type = id_m.group('ext')
960 player_id = id_m.group('id')
961
962 # Read from filesystem cache
963 func_id = '%s_%s_%s' % (
964 player_type, player_id, self._signature_cache_id(example_sig))
965 assert os.path.basename(func_id) == func_id
966
967 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
968 if cache_spec is not None:
969 return lambda s: ''.join(s[i] for i in cache_spec)
970
971 download_note = (
972 'Downloading player %s' % player_url
973 if self._downloader.params.get('verbose') else
974 'Downloading %s player %s' % (player_type, player_id)
975 )
976 if player_type == 'js':
977 code = self._download_webpage(
978 player_url, video_id,
979 note=download_note,
980 errnote='Download of %s failed' % player_url)
981 res = self._parse_sig_js(code)
982 elif player_type == 'swf':
983 urlh = self._request_webpage(
984 player_url, video_id,
985 note=download_note,
986 errnote='Download of %s failed' % player_url)
987 code = urlh.read()
988 res = self._parse_sig_swf(code)
989 else:
990 assert False, 'Invalid player type %r' % player_type
991
992 test_string = ''.join(map(compat_chr, range(len(example_sig))))
993 cache_res = res(test_string)
994 cache_spec = [ord(c) for c in cache_res]
995
996 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
997 return res
998
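# Pretty-print the extracted signature routine as equivalent Python slicing code;
# triggered by the youtube_print_sig_code option (see _decrypt_signature).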
999 def _print_sig_code(self, func, example_sig):
1000 def gen_sig_code(idxs):
1001 def _genslice(start, end, step):
1002 starts = '' if start == 0 else str(start)
1003 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
1004 steps = '' if step == 1 else (':%d' % step)
1005 return 's[%s%s%s]' % (starts, ends, steps)
1006
1007 step = None
1008 # Quell pyflakes warnings - start will be set when step is set
1009 start = '(Never used)'
1010 for i, prev in zip(idxs[1:], idxs[:-1]):
1011 if step is not None:
1012 if i - prev == step:
1013 continue
1014 yield _genslice(start, prev, step)
1015 step = None
1016 continue
1017 if i - prev in [-1, 1]:
1018 step = i - prev
1019 start = prev
1020 continue
1021 else:
1022 yield 's[%d]' % prev
1023 if step is None:
1024 yield 's[%d]' % i
1025 else:
1026 yield _genslice(start, i, step)
1027
1028 test_string = ''.join(map(compat_chr, range(len(example_sig))))
1029 cache_res = func(test_string)
1030 cache_spec = [ord(c) for c in cache_res]
1031 expr_code = ' + '.join(gen_sig_code(cache_spec))
1032 signature_id_tuple = '(%s)' % (
1033 ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
1034 code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
1035 ' return %s\n') % (signature_id_tuple, expr_code)
1036 self.to_screen('Extracted signature function:\n' + code)
1037
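# Locate the signature function by name in the player JavaScript and wrap it
# with JSInterpreter so it can be called from Python.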
1038 def _parse_sig_js(self, jscode):
1039 funcname = self._search_regex(
1040 (r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1041 r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\('),
1042 jscode, 'Initial JS player signature function name', group='sig')
1043
1044 jsi = JSInterpreter(jscode)
1045 initial_function = jsi.extract_function(funcname)
1046 return lambda s: initial_function([s])
1047
1048 def _parse_sig_swf(self, file_contents):
1049 swfi = SWFInterpreter(file_contents)
1050 TARGET_CLASSNAME = 'SignatureDecipher'
1051 searched_class = swfi.extract_class(TARGET_CLASSNAME)
1052 initial_function = swfi.extract_function(searched_class, 'decipher')
1053 return lambda s: initial_function([s])
1054
1055 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
1056 """Turn the encrypted s field into a working signature"""
1057
1058 if player_url is None:
1059 raise ExtractorError('Cannot decrypt signature without player_url')
1060
1061 if player_url.startswith('//'):
1062 player_url = 'https:' + player_url
1063 elif not re.match(r'https?://', player_url):
1064 player_url = compat_urlparse.urljoin(
1065 'https://www.youtube.com', player_url)
1066 try:
1067 player_id = (player_url, self._signature_cache_id(s))
1068 if player_id not in self._player_cache:
1069 func = self._extract_signature_function(
1070 video_id, player_url, s
1071 )
1072 self._player_cache[player_id] = func
1073 func = self._player_cache[player_id]
1074 if self._downloader.params.get('youtube_print_sig_code'):
1075 self._print_sig_code(func, s)
1076 return func(s)
1077 except Exception as e:
1078 tb = traceback.format_exc()
1079 raise ExtractorError(
1080 'Signature extraction failed: ' + tb, cause=e)
1081
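# Manually created subtitles: query the timedtext track list and build one URL
# per language and per format in _SUBTITLE_FORMATS.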
1082 def _get_subtitles(self, video_id, webpage):
1083 try:
1084 subs_doc = self._download_xml(
1085 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
1086 video_id, note=False)
1087 except ExtractorError as err:
1088 self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
1089 return {}
1090
1091 sub_lang_list = {}
1092 for track in subs_doc.findall('track'):
1093 lang = track.attrib['lang_code']
1094 if lang in sub_lang_list:
1095 continue
1096 sub_formats = []
1097 for ext in self._SUBTITLE_FORMATS:
1098 params = compat_urllib_parse_urlencode({
1099 'lang': lang,
1100 'v': video_id,
1101 'fmt': ext,
1102 'name': track.attrib['name'].encode('utf-8'),
1103 })
1104 sub_formats.append({
1105 'url': 'https://www.youtube.com/api/timedtext?' + params,
1106 'ext': ext,
1107 })
1108 sub_lang_list[lang] = sub_formats
1109 if not sub_lang_list:
1110 self._downloader.report_warning('video doesn\'t have subtitles')
1111 return {}
1112 return sub_lang_list
1113
1114 def _get_ytplayer_config(self, video_id, webpage):
1115 patterns = (
1116 # User data may contain arbitrary character sequences that may affect
1117 # JSON extraction with regex, e.g. when '};' is contained the second
1118 # regex won't capture the whole JSON. Work around this by trying the more
1119 # concrete regex first; proper quoted-string handling, to be implemented in
1120 # the future, will replace this workaround (see
1121 # https://github.com/rg3/youtube-dl/issues/7468,
1122 # https://github.com/rg3/youtube-dl/pull/7599)
1123 r';ytplayer\.config\s*=\s*({.+?});ytplayer',
1124 r';ytplayer\.config\s*=\s*({.+?});',
1125 )
1126 config = self._search_regex(
1127 patterns, webpage, 'ytplayer.config', default=None)
1128 if config:
1129 return self._parse_json(
1130 uppercase_escape(config), video_id, fatal=False)
1131
1132 def _get_automatic_captions(self, video_id, webpage):
1133 """We need the webpage for getting the captions url, pass it as an
1134 argument to speed up the process."""
1135 self.to_screen('%s: Looking for automatic captions' % video_id)
1136 player_config = self._get_ytplayer_config(video_id, webpage)
1137 err_msg = 'Couldn\'t find automatic captions for %s' % video_id
1138 if not player_config:
1139 self._downloader.report_warning(err_msg)
1140 return {}
1141 try:
1142 args = player_config['args']
1143 caption_url = args.get('ttsurl')
1144 if caption_url:
1145 timestamp = args['timestamp']
1146 # We get the available subtitles
1147 list_params = compat_urllib_parse_urlencode({
1148 'type': 'list',
1149 'tlangs': 1,
1150 'asrs': 1,
1151 })
1152 list_url = caption_url + '&' + list_params
1153 caption_list = self._download_xml(list_url, video_id)
1154 original_lang_node = caption_list.find('track')
1155 if original_lang_node is None:
1156 self._downloader.report_warning('Video doesn\'t have automatic captions')
1157 return {}
1158 original_lang = original_lang_node.attrib['lang_code']
1159 caption_kind = original_lang_node.attrib.get('kind', '')
1160
1161 sub_lang_list = {}
1162 for lang_node in caption_list.findall('target'):
1163 sub_lang = lang_node.attrib['lang_code']
1164 sub_formats = []
1165 for ext in self._SUBTITLE_FORMATS:
1166 params = compat_urllib_parse_urlencode({
1167 'lang': original_lang,
1168 'tlang': sub_lang,
1169 'fmt': ext,
1170 'ts': timestamp,
1171 'kind': caption_kind,
1172 })
1173 sub_formats.append({
1174 'url': caption_url + '&' + params,
1175 'ext': ext,
1176 })
1177 sub_lang_list[sub_lang] = sub_formats
1178 return sub_lang_list
1179
1180 # Some videos don't provide ttsurl but rather caption_tracks and
1181 # caption_translation_languages (e.g. 20LmZk1hakA)
1182 caption_tracks = args['caption_tracks']
1183 caption_translation_languages = args['caption_translation_languages']
1184 caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
1185 parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
1186 caption_qs = compat_parse_qs(parsed_caption_url.query)
1187
1188 sub_lang_list = {}
1189 for lang in caption_translation_languages.split(','):
1190 lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
1191 sub_lang = lang_qs.get('lc', [None])[0]
1192 if not sub_lang:
1193 continue
1194 sub_formats = []
1195 for ext in self._SUBTITLE_FORMATS:
1196 caption_qs.update({
1197 'tlang': [sub_lang],
1198 'fmt': [ext],
1199 })
1200 sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
1201 query=compat_urllib_parse_urlencode(caption_qs, True)))
1202 sub_formats.append({
1203 'url': sub_url,
1204 'ext': ext,
1205 })
1206 sub_lang_list[sub_lang] = sub_formats
1207 return sub_lang_list
1208 # An extractor error can be raised by the download process if there are
1209 # no automatic captions but there are subtitles
1210 except (KeyError, ExtractorError):
1211 self._downloader.report_warning(err_msg)
1212 return {}
1213
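# Mark the video as watched by pinging the playback-stats URL with a freshly
# generated client playback nonce (cpn).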
1214 def _mark_watched(self, video_id, video_info):
1215 playback_url = video_info.get('videostats_playback_base_url', [None])[0]
1216 if not playback_url:
1217 return
1218 parsed_playback_url = compat_urlparse.urlparse(playback_url)
1219 qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1220
1221 # cpn generation algorithm is reverse engineered from base.js.
1222 # In fact it works even with dummy cpn.
1223 CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
1224 cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
1225
1226 qs.update({
1227 'ver': ['2'],
1228 'cpn': [cpn],
1229 })
1230 playback_url = compat_urlparse.urlunparse(
1231 parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1232
1233 self._download_webpage(
1234 playback_url, video_id, 'Marking watched',
1235 'Unable to mark watched', fatal=False)
1236
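# Return the bare 11-character video ID from any URL matched by _VALID_URL
# (capture group 2 of the pattern).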
1237 @classmethod
1238 def extract_id(cls, url):
1239 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1240 if mobj is None:
1241 raise ExtractorError('Invalid URL: %s' % url)
1242 video_id = mobj.group(2)
1243 return video_id
1244
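# Map itags to media URLs listed in an HLS (m3u8) manifest.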
1245 def _extract_from_m3u8(self, manifest_url, video_id):
1246 url_map = {}
1247
1248 def _get_urls(_manifest):
1249 lines = _manifest.split('\n')
1250 urls = filter(lambda l: l and not l.startswith('#'),
1251 lines)
1252 return urls
1253 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
1254 formats_urls = _get_urls(manifest)
1255 for format_url in formats_urls:
1256 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
1257 url_map[itag] = format_url
1258 return url_map
1259
1260 def _extract_annotations(self, video_id):
1261 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
1262 return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
1263
1264 def _real_extract(self, url):
1265 url, smuggled_data = unsmuggle_url(url, {})
1266
1267 proto = (
1268 'http' if self._downloader.params.get('prefer_insecure', False)
1269 else 'https')
1270
1271 start_time = None
1272 end_time = None
1273 parsed_url = compat_urllib_parse_urlparse(url)
1274 for component in [parsed_url.fragment, parsed_url.query]:
1275 query = compat_parse_qs(component)
1276 if start_time is None and 't' in query:
1277 start_time = parse_duration(query['t'][0])
1278 if start_time is None and 'start' in query:
1279 start_time = parse_duration(query['start'][0])
1280 if end_time is None and 'end' in query:
1281 end_time = parse_duration(query['end'][0])
1282
1283 # Extract original video URL from URL with redirection, like age verification, using next_url parameter
1284 mobj = re.search(self._NEXT_URL_RE, url)
1285 if mobj:
1286 url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1287 video_id = self.extract_id(url)
1288
1289 # Get video webpage
1290 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1291 video_webpage = self._download_webpage(url, video_id)
1292
1293 # Attempt to extract SWF player URL
1294 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1295 if mobj is not None:
1296 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
1297 else:
1298 player_url = None
1299
1300 dash_mpds = []
1301
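# Remember every distinct DASH manifest URL advertised by the different
# video_info sources so their formats can be merged later.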
1302 def add_dash_mpd(video_info):
1303 dash_mpd = video_info.get('dashmpd')
1304 if dash_mpd and dash_mpd[0] not in dash_mpds:
1305 dash_mpds.append(dash_mpd[0])
1306
1307 # Get video info
1308 embed_webpage = None
1309 is_live = None
1310 if re.search(r'player-age-gate-content">', video_webpage) is not None:
1311 age_gate = True
1312 # We simulate the access to the video from www.youtube.com/v/{video_id}
1313 # this can be viewed without logging in to YouTube
1314 url = proto + '://www.youtube.com/embed/%s' % video_id
1315 embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1316 data = compat_urllib_parse_urlencode({
1317 'video_id': video_id,
1318 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1319 'sts': self._search_regex(
1320 r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1321 })
1322 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1323 video_info_webpage = self._download_webpage(
1324 video_info_url, video_id,
1325 note='Refetching age-gated info webpage',
1326 errnote='unable to download video info webpage')
1327 video_info = compat_parse_qs(video_info_webpage)
1328 add_dash_mpd(video_info)
1329 else:
1330 age_gate = False
1331 video_info = None
1332 # Try looking directly into the video webpage
1333 ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1334 if ytplayer_config:
1335 args = ytplayer_config['args']
1336 if args.get('url_encoded_fmt_stream_map'):
1337 # Convert to the same format returned by compat_parse_qs
1338 video_info = dict((k, [v]) for k, v in args.items())
1339 add_dash_mpd(video_info)
1340 # Rental video is not rented but preview is available (e.g.
1341 # https://www.youtube.com/watch?v=yYr8q0y5Jfg,
1342 # https://github.com/rg3/youtube-dl/issues/10532)
1343 if not video_info and args.get('ypc_vid'):
1344 return self.url_result(
1345 args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
1346 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1347 is_live = True
1348 if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1349 # We also try looking in get_video_info since it may contain a different dashmpd
1350 # URL that points to a DASH manifest with a possibly different itag set (some itags
1351 # are missing from the DASH manifest pointed to by the webpage's dashmpd, some from
1352 # the DASH manifest pointed to by get_video_info's dashmpd).
1353 # The general idea is to take the union of itags of both DASH manifests (for an example
1354 # of a video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
1355 self.report_video_info_webpage_download(video_id)
1356 for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
1357 video_info_url = (
1358 '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
1359 % (proto, video_id, el_type))
1360 video_info_webpage = self._download_webpage(
1361 video_info_url,
1362 video_id, note=False,
1363 errnote='unable to download video info webpage')
1364 get_video_info = compat_parse_qs(video_info_webpage)
1365 if get_video_info.get('use_cipher_signature') != ['True']:
1366 add_dash_mpd(get_video_info)
1367 if not video_info:
1368 video_info = get_video_info
1369 if 'token' in get_video_info:
1370 # Different get_video_info requests may report different results, e.g.
1371 # some may report video unavailability, but some may serve it without
1372 # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
1373 # the original webpage as well as el=info and el=embedded get_video_info
1374 # requests report video unavailability due to geo restriction while
1375 # el=detailpage succeeds and returns valid data). This is probably
1376 # due to YouTube measures against IP ranges of hosting providers.
1377 # Work around this by preferring the first successful video_info containing
1378 # the token if no such video_info has been found yet.
1379 if 'token' not in video_info:
1380 video_info = get_video_info
1381 break
1382 if 'token' not in video_info:
1383 if 'reason' in video_info:
1384 if 'The uploader has not made this video available in your country.' in video_info['reason']:
1385 regions_allowed = self._html_search_meta(
1386 'regionsAllowed', video_webpage, default=None)
1387 countries = regions_allowed.split(',') if regions_allowed else None
1388 self.raise_geo_restricted(
1389 msg=video_info['reason'][0], countries=countries)
1390 raise ExtractorError(
1391 'YouTube said: %s' % video_info['reason'][0],
1392 expected=True, video_id=video_id)
1393 else:
1394 raise ExtractorError(
1395 '"token" parameter not in video info for unknown reason',
1396 video_id=video_id)
1397
1398 # title
1399 if 'title' in video_info:
1400 video_title = video_info['title'][0]
1401 else:
1402 self._downloader.report_warning('Unable to extract video title')
1403 video_title = '_'
1404
1405 # description
1406 video_description = get_element_by_id("eow-description", video_webpage)
1407 if video_description:
1408 video_description = re.sub(r'''(?x)
1409 <a\s+
1410 (?:[a-zA-Z-]+="[^"]*"\s+)*?
1411 (?:title|href)="([^"]+)"\s+
1412 (?:[a-zA-Z-]+="[^"]*"\s+)*?
1413 class="[^"]*"[^>]*>
1414 [^<]+\.{3}\s*
1415 </a>
1416 ''', r'\1', video_description)
1417 video_description = clean_html(video_description)
1418 else:
1419 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
1420 if fd_mobj:
1421 video_description = unescapeHTML(fd_mobj.group(1))
1422 else:
1423 video_description = ''
1424
1425 if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
1426 if not self._downloader.params.get('noplaylist'):
1427 entries = []
1428 feed_ids = []
1429 multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
1430 for feed in multifeed_metadata_list.split(','):
1431                     # Unquoting must happen before splitting on comma (,) since textual
1432                     # fields may contain commas as well (see
1433                     # https://github.com/rg3/youtube-dl/issues/8536)
1434 feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
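                    # Each feed entry is itself URL-encoded, e.g.
                    # 'id=VIDEO_ID&title=Camera+1' (illustrative), so feed_data['id'][0]
                    # and feed_data['title'][0] hold the per-feed video id and title.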
1435 entries.append({
1436 '_type': 'url_transparent',
1437 'ie_key': 'Youtube',
1438 'url': smuggle_url(
1439 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1440 {'force_singlefeed': True}),
1441 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1442 })
1443 feed_ids.append(feed_data['id'][0])
1444 self.to_screen(
1445 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1446 % (', '.join(feed_ids), video_id))
1447 return self.playlist_result(entries, video_id, video_title, video_description)
1448 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1449
1450 if 'view_count' in video_info:
1451 view_count = int(video_info['view_count'][0])
1452 else:
1453 view_count = None
1454
1455 # Check for "rental" videos
1456 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1457 raise ExtractorError('"rental" videos not supported. See https://github.com/rg3/youtube-dl/issues/359 for more information.', expected=True)
1458
1459 # Start extracting information
1460 self.report_information_extraction(video_id)
1461
1462 # uploader
1463 if 'author' not in video_info:
1464 raise ExtractorError('Unable to extract uploader name')
1465 video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
1466
1467 # uploader_id
1468 video_uploader_id = None
1469 video_uploader_url = None
1470 mobj = re.search(
1471 r'<link itemprop="url" href="(?P<uploader_url>https?://www.youtube.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
1472 video_webpage)
1473 if mobj is not None:
1474 video_uploader_id = mobj.group('uploader_id')
1475 video_uploader_url = mobj.group('uploader_url')
1476 else:
1477 self._downloader.report_warning('unable to extract uploader nickname')
1478
1479 # thumbnail image
1480         # We first try to get a high-quality image:
1481 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
1482 video_webpage, re.DOTALL)
1483 if m_thumb is not None:
1484 video_thumbnail = m_thumb.group(1)
1485 elif 'thumbnail_url' not in video_info:
1486 self._downloader.report_warning('unable to extract video thumbnail')
1487 video_thumbnail = None
1488 else: # don't panic if we can't find it
1489 video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
1490
1491 # upload date
1492 upload_date = self._html_search_meta(
1493 'datePublished', video_webpage, 'upload date', default=None)
1494 if not upload_date:
1495 upload_date = self._search_regex(
1496 [r'(?s)id="eow-date.*?>(.*?)</span>',
1497 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
1498 video_webpage, 'upload date', default=None)
1499 if upload_date:
1500                 upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
1501 upload_date = unified_strdate(upload_date)
1502
1503 video_license = self._html_search_regex(
1504 r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
1505 video_webpage, 'license', default=None)
1506
1507 m_music = re.search(
1508 r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
1509 video_webpage)
1510 if m_music:
1511 video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
1512 video_creator = clean_html(m_music.group('creator'))
1513 else:
1514 video_alt_title = video_creator = None
1515
1516 m_episode = re.search(
1517 r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*ā€¢\s*E(?P<episode>\d+)</span>',
1518 video_webpage)
1519 if m_episode:
1520 series = m_episode.group('series')
1521 season_number = int(m_episode.group('season'))
1522 episode_number = int(m_episode.group('episode'))
1523 else:
1524 series = season_number = episode_number = None
1525
1526 m_cat_container = self._search_regex(
1527 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
1528 video_webpage, 'categories', default=None)
1529 if m_cat_container:
1530 category = self._html_search_regex(
1531 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
1532 default=None)
1533 video_categories = None if category is None else [category]
1534 else:
1535 video_categories = None
1536
1537 video_tags = [
1538 unescapeHTML(m.group('content'))
1539 for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
1540
1541 def _extract_count(count_name):
1542 return str_to_int(self._search_regex(
1543 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
1544 % re.escape(count_name),
1545 video_webpage, count_name, default=None))
1546
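        # str_to_int strips thousands separators, so e.g. '1,234' becomes 1234;
        # when the button markup is absent, the count is simply None.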
1547 like_count = _extract_count('like')
1548 dislike_count = _extract_count('dislike')
1549
1550 # subtitles
1551 video_subtitles = self.extract_subtitles(video_id, video_webpage)
1552 automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
1553
1554 video_duration = try_get(
1555 video_info, lambda x: int_or_none(x['length_seconds'][0]))
1556 if not video_duration:
1557 video_duration = parse_duration(self._html_search_meta(
1558 'duration', video_webpage, 'video duration'))
1559
1560 # annotations
1561 video_annotations = None
1562 if self._downloader.params.get('writeannotations', False):
1563 video_annotations = self._extract_annotations(video_id)
1564
1565 def _map_to_format_list(urlmap):
1566 formats = []
1567 for itag, video_real_url in urlmap.items():
1568 dct = {
1569 'format_id': itag,
1570 'url': video_real_url,
1571 'player_url': player_url,
1572 }
1573 if itag in self._formats:
1574 dct.update(self._formats[itag])
1575 formats.append(dct)
1576 return formats
1577
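        # Three cases follow: plain RTMP streams ('conn'), regular/adaptive HTTP
        # formats (url_encoded_fmt_stream_map / adaptive_fmts) and HLS streams
        # ('hlsvp', used for live content); otherwise no usable formats exist.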
1578 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1579 self.report_rtmp_download()
1580 formats = [{
1581 'format_id': '_rtmp',
1582 'protocol': 'rtmp',
1583 'url': video_info['conn'][0],
1584 'player_url': player_url,
1585 }]
1586 elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
1587 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1588 if 'rtmpe%3Dyes' in encoded_url_map:
1589 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
1590 formats_spec = {}
1591 fmt_list = video_info.get('fmt_list', [''])[0]
1592 if fmt_list:
1593 for fmt in fmt_list.split(','):
1594 spec = fmt.split('/')
1595 if len(spec) > 1:
1596 width_height = spec[1].split('x')
1597 if len(width_height) == 2:
1598 formats_spec[spec[0]] = {
1599 'resolution': spec[1],
1600 'width': int_or_none(width_height[0]),
1601 'height': int_or_none(width_height[1]),
1602 }
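            # Illustrative example: an fmt_list entry such as '22/1280x720/9/0/115'
            # yields formats_spec['22'] = {'resolution': '1280x720', 'width': 1280,
            # 'height': 720}, which is merged into the matching itag's format below.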
1603 formats = []
1604 for url_data_str in encoded_url_map.split(','):
1605 url_data = compat_parse_qs(url_data_str)
1606 if 'itag' not in url_data or 'url' not in url_data:
1607 continue
1608 format_id = url_data['itag'][0]
1609 url = url_data['url'][0]
1610
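                # 'sig' carries a ready-to-use signature, while 's' carries an
                # encrypted signature that must first be decrypted with the JS/SWF
                # player code referenced by the page before it can be appended.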
1611 if 'sig' in url_data:
1612 url += '&signature=' + url_data['sig'][0]
1613 elif 's' in url_data:
1614 encrypted_sig = url_data['s'][0]
1615 ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
1616
1617 jsplayer_url_json = self._search_regex(
1618 ASSETS_RE,
1619 embed_webpage if age_gate else video_webpage,
1620 'JS player URL (1)', default=None)
1621 if not jsplayer_url_json and not age_gate:
1622 # We need the embed website after all
1623 if embed_webpage is None:
1624 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
1625 embed_webpage = self._download_webpage(
1626 embed_url, video_id, 'Downloading embed webpage')
1627 jsplayer_url_json = self._search_regex(
1628 ASSETS_RE, embed_webpage, 'JS player URL')
1629
1630 player_url = json.loads(jsplayer_url_json)
1631 if player_url is None:
1632 player_url_json = self._search_regex(
1633 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
1634 video_webpage, 'age gate player URL')
1635 player_url = json.loads(player_url_json)
1636
1637 if self._downloader.params.get('verbose'):
1638 if player_url is None:
1639 player_version = 'unknown'
1640 player_desc = 'unknown'
1641 else:
1642 if player_url.endswith('swf'):
1643 player_version = self._search_regex(
1644 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
1645 'flash player', fatal=False)
1646 player_desc = 'flash player %s' % player_version
1647 else:
1648 player_version = self._search_regex(
1649 [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
1650 player_url,
1651 'html5 player', fatal=False)
1652 player_desc = 'html5 player %s' % player_version
1653
1654 parts_sizes = self._signature_cache_id(encrypted_sig)
1655 self.to_screen('{%s} signature length %s, %s' %
1656 (format_id, parts_sizes, player_desc))
1657
1658 signature = self._decrypt_signature(
1659 encrypted_sig, video_id, player_url, age_gate)
1660 url += '&signature=' + signature
1661 if 'ratebypass' not in url:
1662 url += '&ratebypass=yes'
1663
1664 dct = {
1665 'format_id': format_id,
1666 'url': url,
1667 'player_url': player_url,
1668 }
1669 if format_id in self._formats:
1670 dct.update(self._formats[format_id])
1671 if format_id in formats_spec:
1672 dct.update(formats_spec[format_id])
1673
1674                 # Some itags are not included in the DASH manifest, so the corresponding
1675                 # formats lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
1676                 # Try to extract metadata from the url_encoded_fmt_stream_map entry instead.
1677 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
1678 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
1679
1680 more_fields = {
1681 'filesize': int_or_none(url_data.get('clen', [None])[0]),
1682 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
1683 'width': width,
1684 'height': height,
1685 'fps': int_or_none(url_data.get('fps', [None])[0]),
1686 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
1687 }
1688 for key, value in more_fields.items():
1689 if value:
1690 dct[key] = value
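                # 'type' is a MIME type with an optional codecs parameter, e.g.
                # 'video/mp4; codecs="avc1.4d401f, mp4a.40.2"' (illustrative);
                # parse_codecs() splits the codecs string into vcodec/acodec.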
1691 type_ = url_data.get('type', [None])[0]
1692 if type_:
1693 type_split = type_.split(';')
1694 kind_ext = type_split[0].split('/')
1695 if len(kind_ext) == 2:
1696 kind, _ = kind_ext
1697 dct['ext'] = mimetype2ext(type_split[0])
1698 if kind in ('audio', 'video'):
1699 codecs = None
1700 for mobj in re.finditer(
1701 r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
1702 if mobj.group('key') == 'codecs':
1703 codecs = mobj.group('val')
1704 break
1705 if codecs:
1706 dct.update(parse_codecs(codecs))
1707 formats.append(dct)
1708 elif video_info.get('hlsvp'):
1709 manifest_url = video_info['hlsvp'][0]
1710 url_map = self._extract_from_m3u8(manifest_url, video_id)
1711 formats = _map_to_format_list(url_map)
1712 # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
1713 for a_format in formats:
1714 a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
1715 else:
1716 unavailable_message = self._html_search_regex(
1717 r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
1718 video_webpage, 'unavailable message', default=None)
1719 if unavailable_message:
1720 raise ExtractorError(unavailable_message, expected=True)
1721 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
1722
1723 # Look for the DASH manifest
1724 if self._downloader.params.get('youtube_include_dash_manifest', True):
1725 dash_mpd_fatal = True
1726 for mpd_url in dash_mpds:
1727 dash_formats = {}
1728 try:
1729 def decrypt_sig(mobj):
1730 s = mobj.group(1)
1731 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
1732 return '/signature/%s' % dec_s
1733
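                    # DASH manifest URLs may embed an encrypted signature as
                    # '/s/<sig>'; rewrite it to '/signature/<decrypted sig>' using
                    # the same player-based decryption as above.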
1734 mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
1735
1736 for df in self._extract_mpd_formats(
1737 mpd_url, video_id, fatal=dash_mpd_fatal,
1738 formats_dict=self._formats):
1739                         # Do not overwrite a DASH format found in a previous DASH manifest
1740 if df['format_id'] not in dash_formats:
1741 dash_formats[df['format_id']] = df
1742                     # Additional DASH manifests may result in HTTP Error 403, therefore
1743                     # allow them to fail without a bug report message if some DASH manifest
1744                     # has already succeeded. This is a temporary workaround to reduce the
1745                     # burst of bug reports until we figure out the reason and whether it
1746                     # can be fixed at all.
1747 dash_mpd_fatal = False
1748 except (ExtractorError, KeyError) as e:
1749 self.report_warning(
1750 'Skipping DASH manifest: %r' % e, video_id)
1751 if dash_formats:
1752                 # Remove the formats we found through non-DASH means; they
1753                 # contain less info and can be wrong, because we use
1754                 # fixed values (for example the resolution). See
1755                 # https://github.com/rg3/youtube-dl/issues/5774 for an
1756                 # example.
1757 formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
1758 formats.extend(dash_formats.values())
1759
1760 # Check for malformed aspect ratio
1761 stretched_m = re.search(
1762 r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
1763 video_webpage)
1764 if stretched_m:
1765 w = float(stretched_m.group('w'))
1766 h = float(stretched_m.group('h'))
1767             # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM the ratio is 17:0).
1768             # We only process correct ratios.
1769 if w > 0 and h > 0:
1770 ratio = w / h
1771 for f in formats:
1772 if f.get('vcodec') != 'none':
1773 f['stretched_ratio'] = ratio
1774
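        # _sort_formats() applies youtube-dl's generic format preference (best
        # format last) before the final info dict below is assembled.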
1775 self._sort_formats(formats)
1776
1777 self.mark_watched(video_id, video_info)
1778
1779 return {
1780 'id': video_id,
1781 'uploader': video_uploader,
1782 'uploader_id': video_uploader_id,
1783 'uploader_url': video_uploader_url,
1784 'upload_date': upload_date,
1785 'license': video_license,
1786 'creator': video_creator,
1787 'title': video_title,
1788 'alt_title': video_alt_title,
1789 'thumbnail': video_thumbnail,
1790 'description': video_description,
1791 'categories': video_categories,
1792 'tags': video_tags,
1793 'subtitles': video_subtitles,
1794 'automatic_captions': automatic_captions,
1795 'duration': video_duration,
1796 'age_limit': 18 if age_gate else 0,
1797 'annotations': video_annotations,
1798 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1799 'view_count': view_count,
1800 'like_count': like_count,
1801 'dislike_count': dislike_count,
1802 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
1803 'formats': formats,
1804 'is_live': is_live,
1805 'start_time': start_time,
1806 'end_time': end_time,
1807 'series': series,
1808 'season_number': season_number,
1809 'episode_number': episode_number,
1810 }
1811
1812
1813 class YoutubeSharedVideoIE(InfoExtractor):
1814 _VALID_URL = r'(?:https?:)?//(?:www\.)?youtube\.com/shared\?.*\bci=(?P<id>[0-9A-Za-z_-]{11})'
1815 IE_NAME = 'youtube:shared'
1816
1817 _TEST = {
1818 'url': 'https://www.youtube.com/shared?ci=1nEzmT-M4fU',
1819 'info_dict': {
1820 'id': 'uPDB5I9wfp8',
1821 'ext': 'webm',
1822 'title': 'Pocoyo: 90 minutos de episĆ³dios completos PortuguĆŖs para crianƧas - PARTE 3',
1823 'description': 'md5:d9e4d9346a2dfff4c7dc4c8cec0f546d',
1824 'upload_date': '20160219',
1825 'uploader': 'Pocoyo - PortuguĆŖs (BR)',
1826 'uploader_id': 'PocoyoBrazil',
1827 },
1828 'add_ie': ['Youtube'],
1829 'params': {
1830 # There are already too many Youtube downloads
1831 'skip_download': True,
1832 },
1833 }
1834
1835 def _real_extract(self, url):
1836 video_id = self._match_id(url)
1837
1838 webpage = self._download_webpage(url, video_id)
1839
1840 real_video_id = self._html_search_meta(
1841 'videoId', webpage, 'YouTube video id', fatal=True)
1842
1843 return self.url_result(real_video_id, YoutubeIE.ie_key())
1844
1845
1846 class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
1847 IE_DESC = 'YouTube.com playlists'
1848 _VALID_URL = r"""(?x)(?:
1849 (?:https?://)?
1850 (?:\w+\.)?
1851 (?:
1852 youtube\.com/
1853 (?:
1854 (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
1855 \? (?:.*?[&;])*? (?:p|a|list)=
1856 | p/
1857 )|
1858 youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
1859 )
1860 (
1861 (?:PL|LL|EC|UU|FL|RD|UL|TL)?[0-9A-Za-z-_]{10,}
1862 # Top tracks, they can also include dots
1863 |(?:MC)[\w\.]*
1864 )
1865 .*
1866 |
1867 ((?:PL|LL|EC|UU|FL|RD|UL|TL)[0-9A-Za-z-_]{10,})
1868 )"""
1869 _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s&disable_polymer=true'
1870 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
1871 IE_NAME = 'youtube:playlist'
1872 _TESTS = [{
1873 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1874 'info_dict': {
1875 'title': 'ytdl test PL',
1876 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1877 },
1878 'playlist_count': 3,
1879 }, {
1880 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1881 'info_dict': {
1882 'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1883 'title': 'YDL_Empty_List',
1884 },
1885 'playlist_count': 0,
1886 'skip': 'This playlist is private',
1887 }, {
1888 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
1889 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1890 'info_dict': {
1891 'title': '29C3: Not my department',
1892 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1893 },
1894 'playlist_count': 95,
1895 }, {
1896 'note': 'issue #673',
1897 'url': 'PLBB231211A4F62143',
1898 'info_dict': {
1899 'title': '[OLD]Team Fortress 2 (Class-based LP)',
1900 'id': 'PLBB231211A4F62143',
1901 },
1902 'playlist_mincount': 26,
1903 }, {
1904 'note': 'Large playlist',
1905 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
1906 'info_dict': {
1907 'title': 'Uploads from Cauchemar',
1908 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
1909 },
1910 'playlist_mincount': 799,
1911 }, {
1912 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1913 'info_dict': {
1914 'title': 'YDL_safe_search',
1915 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1916 },
1917 'playlist_count': 2,
1918 'skip': 'This playlist is private',
1919 }, {
1920 'note': 'embedded',
1921 'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1922 'playlist_count': 4,
1923 'info_dict': {
1924 'title': 'JODA15',
1925 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1926 }
1927 }, {
1928 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
1929 'playlist_mincount': 485,
1930 'info_dict': {
1931 'title': '2017 čÆčŖžęœ€ę–°å–®ę›² (2/24ꛓꖰ)',
1932 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
1933 }
1934 }, {
1935 'note': 'Embedded SWF player',
1936 'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
1937 'playlist_count': 4,
1938 'info_dict': {
1939 'title': 'JODA7',
1940 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
1941 }
1942 }, {
1943 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
1944 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
1945 'info_dict': {
1946 'title': 'Uploads from Interstellar Movie',
1947 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
1948 },
1949 'playlist_mincount': 21,
1950 }, {
1951 # Playlist URL that does not actually serve a playlist
1952 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
1953 'info_dict': {
1954 'id': 'FqZTN594JQw',
1955 'ext': 'webm',
1956 'title': "Smiley's People 01 detective, Adventure Series, Action",
1957 'uploader': 'STREEM',
1958 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
1959 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
1960 'upload_date': '20150526',
1961 'license': 'Standard YouTube License',
1962 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
1963 'categories': ['People & Blogs'],
1964 'tags': list,
1965 'like_count': int,
1966 'dislike_count': int,
1967 },
1968 'params': {
1969 'skip_download': True,
1970 },
1971 'add_ie': [YoutubeIE.ie_key()],
1972 }, {
1973 'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
1974 'info_dict': {
1975 'id': 'yeWKywCrFtk',
1976 'ext': 'mp4',
1977 'title': 'Small Scale Baler and Braiding Rugs',
1978 'uploader': 'Backus-Page House Museum',
1979 'uploader_id': 'backuspagemuseum',
1980 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
1981 'upload_date': '20161008',
1982 'license': 'Standard YouTube License',
1983 'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
1984 'categories': ['Nonprofits & Activism'],
1985 'tags': list,
1986 'like_count': int,
1987 'dislike_count': int,
1988 },
1989 'params': {
1990 'noplaylist': True,
1991 'skip_download': True,
1992 },
1993 }, {
1994 'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
1995 'only_matching': True,
1996 }, {
1997 'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
1998 'only_matching': True,
1999 }]
2000
2001 def _real_initialize(self):
2002 self._login()
2003
2004 def _extract_mix(self, playlist_id):
2005         # Mixes are generated from a single video;
2006         # the id of the playlist is just 'RD' + video_id
2007 ids = []
2008 last_id = playlist_id[-11:]
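        # e.g. for a mix id like 'RDdQw4w9WgXcQ' (illustrative) the seed video id
        # is the trailing 11 characters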
2009 for n in itertools.count(1):
2010 url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
2011 webpage = self._download_webpage(
2012 url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
2013 new_ids = orderedSet(re.findall(
2014 r'''(?xs)data-video-username=".*?".*?
2015 href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
2016 webpage))
2017             # Fetch new pages until all the videos repeat; it seems that
2018             # there are always 51 unique videos.
2019 new_ids = [_id for _id in new_ids if _id not in ids]
2020 if not new_ids:
2021 break
2022 ids.extend(new_ids)
2023 last_id = ids[-1]
2024
2025 url_results = self._ids_to_results(ids)
2026
2027 search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
2028 title_span = (
2029 search_title('playlist-title') or
2030 search_title('title long-title') or
2031 search_title('title'))
2032 title = clean_html(title_span)
2033
2034 return self.playlist_result(url_results, playlist_id, title)
2035
2036 def _extract_playlist(self, playlist_id):
2037 url = self._TEMPLATE_URL % playlist_id
2038 page = self._download_webpage(url, playlist_id)
2039
2040 # the yt-alert-message now has tabindex attribute (see https://github.com/rg3/youtube-dl/issues/11604)
2041 for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
2042 match = match.strip()
2043 # Check if the playlist exists or is private
2044 mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
2045 if mobj:
2046 reason = mobj.group('reason')
2047 message = 'This playlist %s' % reason
2048 if 'private' in reason:
2049 message += ', use --username or --netrc to access it'
2050 message += '.'
2051 raise ExtractorError(message, expected=True)
2052 elif re.match(r'[^<]*Invalid parameters[^<]*', match):
2053 raise ExtractorError(
2054 'Invalid parameters. Maybe URL is incorrect.',
2055 expected=True)
2056 elif re.match(r'[^<]*Choose your language[^<]*', match):
2057 continue
2058 else:
2059 self.report_warning('Youtube gives an alert message: ' + match)
2060
2061 playlist_title = self._html_search_regex(
2062 r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
2063 page, 'title', default=None)
2064
2065 has_videos = True
2066
2067 if not playlist_title:
2068 try:
2069 # Some playlist URLs don't actually serve a playlist (e.g.
2070 # https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
2071 next(self._entries(page, playlist_id))
2072 except StopIteration:
2073 has_videos = False
2074
2075 return has_videos, self.playlist_result(
2076 self._entries(page, playlist_id), playlist_id, playlist_title)
2077
2078 def _check_download_just_video(self, url, playlist_id):
2079 # Check if it's a video-specific URL
2080 query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
2081 video_id = query_dict.get('v', [None])[0] or self._search_regex(
2082 r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
2083 'video id', default=None)
2084 if video_id:
2085 if self._downloader.params.get('noplaylist'):
2086 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
2087 return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
2088 else:
2089 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
2090 return video_id, None
2091 return None, None
2092
2093 def _real_extract(self, url):
2094 # Extract playlist id
2095 mobj = re.match(self._VALID_URL, url)
2096 if mobj is None:
2097 raise ExtractorError('Invalid URL: %s' % url)
2098 playlist_id = mobj.group(1) or mobj.group(2)
2099
2100 video_id, video = self._check_download_just_video(url, playlist_id)
2101 if video:
2102 return video
2103
2104 if playlist_id.startswith(('RD', 'UL', 'PU')):
2105 # Mixes require a custom extraction process
2106 return self._extract_mix(playlist_id)
2107
2108 has_videos, playlist = self._extract_playlist(playlist_id)
2109 if has_videos or not video_id:
2110 return playlist
2111
2112 # Some playlist URLs don't actually serve a playlist (see
2113 # https://github.com/rg3/youtube-dl/issues/10537).
2114 # Fallback to plain video extraction if there is a video id
2115 # along with playlist id.
2116 return self.url_result(video_id, 'Youtube', video_id=video_id)
2117
2118
2119 class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
2120 IE_DESC = 'YouTube.com channels'
2121 _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
2122 _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
2123 _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
2124 IE_NAME = 'youtube:channel'
2125 _TESTS = [{
2126 'note': 'paginated channel',
2127 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
2128 'playlist_mincount': 91,
2129 'info_dict': {
2130 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
2131 'title': 'Uploads from lex will',
2132 }
2133 }, {
2134 'note': 'Age restricted channel',
2135 # from https://www.youtube.com/user/DeusExOfficial
2136 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
2137 'playlist_mincount': 64,
2138 'info_dict': {
2139 'id': 'UUs0ifCMCm1icqRbqhUINa0w',
2140 'title': 'Uploads from Deus Ex',
2141 },
2142 }]
2143
2144 @classmethod
2145 def suitable(cls, url):
2146 return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
2147 else super(YoutubeChannelIE, cls).suitable(url))
2148
2149 def _build_template_url(self, url, channel_id):
2150 return self._TEMPLATE_URL % channel_id
2151
2152 def _real_extract(self, url):
2153 channel_id = self._match_id(url)
2154
2155 url = self._build_template_url(url, channel_id)
2156
2157         # Channel-by-page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778).
2158         # Work around this by extracting as a playlist if we managed to obtain the channel playlist URL;
2159         # otherwise fall back on channel-by-page extraction.
2160 channel_page = self._download_webpage(
2161 url + '?view=57', channel_id,
2162 'Downloading channel page', fatal=False)
2163 if channel_page is False:
2164 channel_playlist_id = False
2165 else:
2166 channel_playlist_id = self._html_search_meta(
2167 'channelId', channel_page, 'channel id', default=None)
2168 if not channel_playlist_id:
2169 channel_url = self._html_search_meta(
2170 ('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
2171 channel_page, 'channel url', default=None)
2172 if channel_url:
2173 channel_playlist_id = self._search_regex(
2174 r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
2175 channel_url, 'channel id', default=None)
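        # Channel ids start with 'UC'; the corresponding uploads playlist uses the
        # same id with a 'UU' prefix, so the channel can be handled as a playlist.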
2176 if channel_playlist_id and channel_playlist_id.startswith('UC'):
2177 playlist_id = 'UU' + channel_playlist_id[2:]
2178 return self.url_result(
2179 compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
2180
2181 channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
2182 autogenerated = re.search(r'''(?x)
2183 class="[^"]*?(?:
2184 channel-header-autogenerated-label|
2185 yt-channel-title-autogenerated
2186 )[^"]*"''', channel_page) is not None
2187
2188 if autogenerated:
2189             # The videos are contained in a single page;
2190             # the AJAX pages can't be used because they are empty
2191 entries = [
2192 self.url_result(
2193 video_id, 'Youtube', video_id=video_id,
2194 video_title=video_title)
2195 for video_id, video_title in self.extract_videos_from_page(channel_page)]
2196 return self.playlist_result(entries, channel_id)
2197
2198 try:
2199 next(self._entries(channel_page, channel_id))
2200 except StopIteration:
2201 alert_message = self._html_search_regex(
2202 r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
2203 channel_page, 'alert', default=None, group='alert')
2204 if alert_message:
2205 raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
2206
2207 return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
2208
2209
2210 class YoutubeUserIE(YoutubeChannelIE):
2211 IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
2212 _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
2213 _TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
2214 IE_NAME = 'youtube:user'
2215
2216 _TESTS = [{
2217 'url': 'https://www.youtube.com/user/TheLinuxFoundation',
2218 'playlist_mincount': 320,
2219 'info_dict': {
2220 'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
2221 'title': 'Uploads from The Linux Foundation',
2222 }
2223 }, {
2224 # Only available via https://www.youtube.com/c/12minuteathlete/videos
2225 # but not https://www.youtube.com/user/12minuteathlete/videos
2226 'url': 'https://www.youtube.com/c/12minuteathlete/videos',
2227 'playlist_mincount': 249,
2228 'info_dict': {
2229 'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
2230 'title': 'Uploads from 12 Minute Athlete',
2231 }
2232 }, {
2233 'url': 'ytuser:phihag',
2234 'only_matching': True,
2235 }, {
2236 'url': 'https://www.youtube.com/c/gametrailers',
2237 'only_matching': True,
2238 }, {
2239 'url': 'https://www.youtube.com/gametrailers',
2240 'only_matching': True,
2241 }, {
2242 # This channel is not available, geo restricted to JP
2243 'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
2244 'only_matching': True,
2245 }]
2246
2247 @classmethod
2248 def suitable(cls, url):
2249         # Don't return True if the url can be extracted with another youtube
2250         # extractor; this regex is too permissive and it would match.
2251 other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
2252 if any(ie.suitable(url) for ie in other_yt_ies):
2253 return False
2254 else:
2255 return super(YoutubeUserIE, cls).suitable(url)
2256
2257 def _build_template_url(self, url, channel_id):
2258 mobj = re.match(self._VALID_URL, url)
2259 return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
2260
2261
2262 class YoutubeLiveIE(YoutubeBaseInfoExtractor):
2263 IE_DESC = 'YouTube.com live streams'
2264 _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
2265 IE_NAME = 'youtube:live'
2266
2267 _TESTS = [{
2268 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
2269 'info_dict': {
2270 'id': 'a48o2S1cPoo',
2271 'ext': 'mp4',
2272 'title': 'The Young Turks - Live Main Show',
2273 'uploader': 'The Young Turks',
2274 'uploader_id': 'TheYoungTurks',
2275 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
2276 'upload_date': '20150715',
2277 'license': 'Standard YouTube License',
2278 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
2279 'categories': ['News & Politics'],
2280 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
2281 'like_count': int,
2282 'dislike_count': int,
2283 },
2284 'params': {
2285 'skip_download': True,
2286 },
2287 }, {
2288 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
2289 'only_matching': True,
2290 }, {
2291 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
2292 'only_matching': True,
2293 }, {
2294 'url': 'https://www.youtube.com/TheYoungTurks/live',
2295 'only_matching': True,
2296 }]
2297
2298 def _real_extract(self, url):
2299 mobj = re.match(self._VALID_URL, url)
2300 channel_id = mobj.group('id')
2301 base_url = mobj.group('base_url')
2302 webpage = self._download_webpage(url, channel_id, fatal=False)
2303 if webpage:
2304 page_type = self._og_search_property(
2305 'type', webpage, 'page type', default=None)
2306 video_id = self._html_search_meta(
2307 'videoId', webpage, 'video id', default=None)
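            # If the /live page resolves to a concrete video (og:type 'video' plus a
            # valid 11-character video id), extract it as a regular video; otherwise
            # fall back to the channel/user base URL.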
2308 if page_type == 'video' and video_id and re.match(r'^[0-9A-Za-z_-]{11}$', video_id):
2309 return self.url_result(video_id, YoutubeIE.ie_key())
2310 return self.url_result(base_url)
2311
2312
2313 class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
2314 IE_DESC = 'YouTube.com user/channel playlists'
2315 _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
2316 IE_NAME = 'youtube:playlists'
2317
2318 _TESTS = [{
2319 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
2320 'playlist_mincount': 4,
2321 'info_dict': {
2322 'id': 'ThirstForScience',
2323 'title': 'Thirst for Science',
2324 },
2325 }, {
2326 # with "Load more" button
2327 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
2328 'playlist_mincount': 70,
2329 'info_dict': {
2330 'id': 'igorkle1',
2331 'title': 'Š˜Š³Š¾Ń€ŃŒ ŠšŠ»ŠµŠ¹Š½ŠµŃ€',
2332 },
2333 }, {
2334 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
2335 'playlist_mincount': 17,
2336 'info_dict': {
2337 'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
2338 'title': 'Chem Player',
2339 },
2340 }]
2341
2342
2343 class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
2344 IE_DESC = 'YouTube.com searches'
2345     # There doesn't appear to be a real limit; for example, if you search for
2346     # 'python' you get more than 8,000,000 results
2347 _MAX_RESULTS = float('inf')
2348 IE_NAME = 'youtube:search'
2349 _SEARCH_KEY = 'ytsearch'
2350 _EXTRA_QUERY_ARGS = {}
2351 _TESTS = []
2352
2353 def _get_n_results(self, query, n):
2354 """Get a specified number of results for a query"""
2355
2356 videos = []
2357 limit = n
2358
2359 url_query = {
2360 'search_query': query.encode('utf-8'),
2361 }
2362 url_query.update(self._EXTRA_QUERY_ARGS)
2363 result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
2364
2365 for pagenum in itertools.count(1):
2366 data = self._download_json(
2367 result_url, video_id='query "%s"' % query,
2368 note='Downloading page %s' % pagenum,
2369 errnote='Unable to download API page',
2370 query={'spf': 'navigate'})
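            # With spf=navigate the results page is returned as JSON; the rendered
            # HTML lives in the second element under ['body']['content'] and is
            # scraped for /watch?v= links below.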
2371 html_content = data[1]['body']['content']
2372
2373 if 'class="search-message' in html_content:
2374 raise ExtractorError(
2375 '[youtube] No video results', expected=True)
2376
2377 new_videos = self._ids_to_results(orderedSet(re.findall(
2378 r'href="/watch\?v=(.{11})', html_content)))
2379 videos += new_videos
2380 if not new_videos or len(videos) > limit:
2381 break
2382 next_link = self._html_search_regex(
2383 r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
2384 html_content, 'next link', default=None)
2385 if next_link is None:
2386 break
2387 result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
2388
2389 if len(videos) > n:
2390 videos = videos[:n]
2391 return self.playlist_result(videos, query)
2392
2393
2394 class YoutubeSearchDateIE(YoutubeSearchIE):
2395 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
2396 _SEARCH_KEY = 'ytsearchdate'
2397 IE_DESC = 'YouTube.com searches, newest videos first'
2398 _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
2399
2400
2401 class YoutubeSearchURLIE(YoutubePlaylistBaseInfoExtractor):
2402 IE_DESC = 'YouTube.com search URLs'
2403 IE_NAME = 'youtube:search_url'
2404 _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
2405 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
2406 _TESTS = [{
2407 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
2408 'playlist_mincount': 5,
2409 'info_dict': {
2410 'title': 'youtube-dl test video',
2411 }
2412 }, {
2413 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
2414 'only_matching': True,
2415 }]
2416
2417 def _real_extract(self, url):
2418 mobj = re.match(self._VALID_URL, url)
2419 query = compat_urllib_parse_unquote_plus(mobj.group('query'))
2420 webpage = self._download_webpage(url, query)
2421 return self.playlist_result(self._process_page(webpage), playlist_title=query)
2422
2423
2424 class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
2425 IE_DESC = 'YouTube.com (multi-season) shows'
2426 _VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
2427 IE_NAME = 'youtube:show'
2428 _TESTS = [{
2429 'url': 'https://www.youtube.com/show/airdisasters',
2430 'playlist_mincount': 5,
2431 'info_dict': {
2432 'id': 'airdisasters',
2433 'title': 'Air Disasters',
2434 }
2435 }]
2436
2437 def _real_extract(self, url):
2438 playlist_id = self._match_id(url)
2439 return super(YoutubeShowIE, self)._real_extract(
2440 'https://www.youtube.com/show/%s/playlists' % playlist_id)
2441
2442
2443 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
2444 """
2445 Base class for feed extractors
2446 Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
2447 """
2448 _LOGIN_REQUIRED = True
2449
2450 @property
2451 def IE_NAME(self):
2452 return 'youtube:%s' % self._FEED_NAME
2453
2454 def _real_initialize(self):
2455 self._login()
2456
2457 def _real_extract(self, url):
2458 page = self._download_webpage(
2459 'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
2460
2461 # The extraction process is the same as for playlists, but the regex
2462 # for the video ids doesn't contain an index
2463 ids = []
2464 more_widget_html = content_html = page
2465 for page_num in itertools.count(1):
2466 matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
2467
2468             # The 'recommended' feed has an infinite 'load more' and each new portion serves
2469             # the same videos in a (sometimes) slightly different order, so we check
2470             # for uniqueness and break when a portion has no new videos
2471             new_ids = [video_id for video_id in orderedSet(matches) if video_id not in ids]
2472 if not new_ids:
2473 break
2474
2475 ids.extend(new_ids)
2476
2477 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
2478 if not mobj:
2479 break
2480
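            # The 'load more' widget exposes the next page path in its
            # data-uix-load-more-href attribute; the JSON response carries both the
            # new content_html and the next load_more_widget_html.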
2481 more = self._download_json(
2482 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
2483 'Downloading page #%s' % page_num,
2484 transform_source=uppercase_escape)
2485 content_html = more['content_html']
2486 more_widget_html = more['load_more_widget_html']
2487
2488 return self.playlist_result(
2489 self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
2490
2491
2492 class YoutubeWatchLaterIE(YoutubePlaylistIE):
2493 IE_NAME = 'youtube:watchlater'
2494 IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
2495 _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
2496
2497 _TESTS = [{
2498 'url': 'https://www.youtube.com/playlist?list=WL',
2499 'only_matching': True,
2500 }, {
2501 'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
2502 'only_matching': True,
2503 }]
2504
2505 def _real_extract(self, url):
2506 _, video = self._check_download_just_video(url, 'WL')
2507 if video:
2508 return video
2509 _, playlist = self._extract_playlist('WL')
2510 return playlist
2511
2512
2513 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
2514 IE_NAME = 'youtube:favorites'
2515 IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
2516 _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
2517 _LOGIN_REQUIRED = True
2518
2519 def _real_extract(self, url):
2520 webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
2521 playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
2522 return self.url_result(playlist_id, 'YoutubePlaylist')
2523
2524
2525 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
2526 IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
2527 _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
2528 _FEED_NAME = 'recommended'
2529 _PLAYLIST_TITLE = 'Youtube Recommended videos'
2530
2531
2532 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
2533 IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
2534 _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
2535 _FEED_NAME = 'subscriptions'
2536 _PLAYLIST_TITLE = 'Youtube Subscriptions'
2537
2538
2539 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
2540 IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
2541 _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
2542 _FEED_NAME = 'history'
2543 _PLAYLIST_TITLE = 'Youtube History'
2544
2545
2546 class YoutubeTruncatedURLIE(InfoExtractor):
2547 IE_NAME = 'youtube:truncated_url'
2548 IE_DESC = False # Do not list
2549 _VALID_URL = r'''(?x)
2550 (?:https?://)?
2551 (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
2552 (?:watch\?(?:
2553 feature=[a-z_]+|
2554 annotation_id=annotation_[^&]+|
2555 x-yt-cl=[0-9]+|
2556 hl=[^&]*|
2557 t=[0-9]+
2558 )?
2559 |
2560 attribution_link\?a=[^&]+
2561 )
2562 $
2563 '''
2564
2565 _TESTS = [{
2566 'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
2567 'only_matching': True,
2568 }, {
2569 'url': 'https://www.youtube.com/watch?',
2570 'only_matching': True,
2571 }, {
2572 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
2573 'only_matching': True,
2574 }, {
2575 'url': 'https://www.youtube.com/watch?feature=foo',
2576 'only_matching': True,
2577 }, {
2578 'url': 'https://www.youtube.com/watch?hl=en-GB',
2579 'only_matching': True,
2580 }, {
2581 'url': 'https://www.youtube.com/watch?t=2372',
2582 'only_matching': True,
2583 }]
2584
2585 def _real_extract(self, url):
2586 raise ExtractorError(
2587 'Did you forget to quote the URL? Remember that & is a meta '
2588 'character in most shells, so you want to put the URL in quotes, '
2589 'like youtube-dl '
2590 '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
2591 ' or simply youtube-dl BaW_jenozKc .',
2592 expected=True)
2593
2594
2595 class YoutubeTruncatedIDIE(InfoExtractor):
2596 IE_NAME = 'youtube:truncated_id'
2597 IE_DESC = False # Do not list
2598 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
2599
2600 _TESTS = [{
2601 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
2602 'only_matching': True,
2603 }]
2604
2605 def _real_extract(self, url):
2606 video_id = self._match_id(url)
2607 raise ExtractorError(
2608 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
2609 expected=True)