1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import random
10 import re
11 import time
12 import traceback
13
14 from .common import InfoExtractor, SearchInfoExtractor
15 from ..jsinterp import JSInterpreter
16 from ..swfinterp import SWFInterpreter
17 from ..compat import (
18 compat_chr,
19 compat_parse_qs,
20 compat_urllib_parse_unquote,
21 compat_urllib_parse_unquote_plus,
22 compat_urllib_parse_urlencode,
23 compat_urllib_parse_urlparse,
24 compat_urlparse,
25 compat_str,
26 )
27 from ..utils import (
28 clean_html,
29 error_to_compat_str,
30 ExtractorError,
31 float_or_none,
32 get_element_by_attribute,
33 get_element_by_id,
34 int_or_none,
35 mimetype2ext,
36 orderedSet,
37 parse_codecs,
38 parse_duration,
39 remove_quotes,
40 remove_start,
41 sanitized_Request,
42 smuggle_url,
43 str_to_int,
44 try_get,
45 unescapeHTML,
46 unified_strdate,
47 unsmuggle_url,
48 uppercase_escape,
49 urlencode_postdata,
50 )
51
52
53 class YoutubeBaseInfoExtractor(InfoExtractor):
54 """Provide base functions for Youtube extractors"""
55 _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
56 _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
57 _PASSWORD_CHALLENGE_URL = 'https://accounts.google.com/signin/challenge/sl/password'
58 _NETRC_MACHINE = 'youtube'
59 # If True it will raise an error if no login info is provided
60 _LOGIN_REQUIRED = False
61
62 _PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL)[0-9A-Za-z-_]{10,}'
63
64 def _set_language(self):
65 self._set_cookie(
66 '.youtube.com', 'PREF', 'f1=50000000&hl=en',
67 # YouTube sets the expire time to about two months
68 expire_time=time.time() + 2 * 30 * 24 * 3600)
69
70 def _ids_to_results(self, ids):
71 return [
72 self.url_result(vid_id, 'Youtube', video_id=vid_id)
73 for vid_id in ids]
74
75 def _login(self):
76 """
77 Attempt to log in to YouTube.
78 True is returned if successful or skipped.
79 False is returned if login failed.
80
81 If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
82 """
83 (username, password) = self._get_login_info()
84 # No authentication to be performed
85 if username is None:
86 if self._LOGIN_REQUIRED:
87 raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
88 return True
89
90 login_page = self._download_webpage(
91 self._LOGIN_URL, None,
92 note='Downloading login page',
93 errnote='unable to fetch login page', fatal=False)
94 if login_page is False:
95 return
96
97 login_form = self._hidden_inputs(login_page)
98
99 login_form.update({
100 'checkConnection': 'youtube',
101 'Email': username,
102 'Passwd': password,
103 })
104
105 login_results = self._download_webpage(
106 self._PASSWORD_CHALLENGE_URL, None,
107 note='Logging in', errnote='unable to log in', fatal=False,
108 data=urlencode_postdata(login_form))
109 if login_results is False:
110 return False
111
112 error_msg = self._html_search_regex(
113 r'<[^>]+id="errormsg_0_Passwd"[^>]*>([^<]+)<',
114 login_results, 'error message', default=None)
115 if error_msg:
116 raise ExtractorError('Unable to login: %s' % error_msg, expected=True)
117
118 if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
119 raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
120
121 # Two-Factor
122 # TODO add SMS and phone call support - these require making a request and then prompting the user
123
124 if re.search(r'(?i)<form[^>]+id="challenge"', login_results) is not None:
125 tfa_code = self._get_tfa_info('2-step verification code')
126
127 if not tfa_code:
128 self._downloader.report_warning(
129 'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
130 ' (Note that only TOTP (Google Authenticator App) codes work at this time.)')
131 return False
132
133 tfa_code = remove_start(tfa_code, 'G-')
134
135 tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
136
137 tfa_form_strs.update({
138 'Pin': tfa_code,
139 'TrustDevice': 'on',
140 })
141
142 tfa_data = urlencode_postdata(tfa_form_strs)
143
144 tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
145 tfa_results = self._download_webpage(
146 tfa_req, None,
147 note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
148
149 if tfa_results is False:
150 return False
151
152 if re.search(r'(?i)<form[^>]+id="challenge"', tfa_results) is not None:
153 self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
154 return False
155 if re.search(r'(?i)<form[^>]+id="gaia_loginform"', tfa_results) is not None:
156 self._downloader.report_warning('unable to log in - did the page structure change?')
157 return False
158 if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
159 self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
160 return False
161
162 if re.search(r'(?i)<form[^>]+id="gaia_loginform"', login_results) is not None:
163 self._downloader.report_warning('unable to log in: bad username or password')
164 return False
165 return True
166
167 def _real_initialize(self):
168 if self._downloader is None:
169 return
170 self._set_language()
171 if not self._login():
172 return
173
174
175 class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
176 # Extract entries from page with "Load more" button
177 def _entries(self, page, playlist_id):
178 more_widget_html = content_html = page
179 for page_num in itertools.count(1):
180 for entry in self._process_page(content_html):
181 yield entry
182
183 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
184 if not mobj:
185 break
186
187 more = self._download_json(
188 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
189 'Downloading page #%s' % page_num,
190 transform_source=uppercase_escape)
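# The continuation endpoint returns JSON with two HTML blobs: 'content_html'
# (the newly loaded entries) and 'load_more_widget_html' (the next "Load more"
# button, if any), which is re-scanned on the next loop iteration.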
191 content_html = more['content_html']
192 if not content_html.strip():
193 # Some webpages show a "Load more" button but they don't
194 # have more videos
195 break
196 more_widget_html = more['load_more_widget_html']
197
198
199 class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
200 def _process_page(self, content):
201 for video_id, video_title in self.extract_videos_from_page(content):
202 yield self.url_result(video_id, 'Youtube', video_id, video_title)
203
204 def extract_videos_from_page(self, page):
205 ids_in_page = []
206 titles_in_page = []
207 for mobj in re.finditer(self._VIDEO_RE, page):
208 # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
209 if 'index' in mobj.groupdict() and mobj.group('id') == '0':
210 continue
211 video_id = mobj.group('id')
212 video_title = unescapeHTML(mobj.group('title'))
213 if video_title:
214 video_title = video_title.strip()
215 try:
216 idx = ids_in_page.index(video_id)
217 if video_title and not titles_in_page[idx]:
218 titles_in_page[idx] = video_title
219 except ValueError:
220 ids_in_page.append(video_id)
221 titles_in_page.append(video_title)
222 return zip(ids_in_page, titles_in_page)
223
224
225 class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
226 def _process_page(self, content):
227 for playlist_id in orderedSet(re.findall(
228 r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
229 content)):
230 yield self.url_result(
231 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
232
233 def _real_extract(self, url):
234 playlist_id = self._match_id(url)
235 webpage = self._download_webpage(url, playlist_id)
236 title = self._og_search_title(webpage, fatal=False)
237 return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
238
239
240 class YoutubeIE(YoutubeBaseInfoExtractor):
241 IE_DESC = 'YouTube.com'
242 _VALID_URL = r"""(?x)^
243 (
244 (?:https?://|//) # http(s):// or protocol-independent URL
245 (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
246 (?:www\.)?deturl\.com/www\.youtube\.com/|
247 (?:www\.)?pwnyoutube\.com/|
248 (?:www\.)?yourepeat\.com/|
249 tube\.majestyc\.net/|
250 youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
251 (?:.*?\#/)? # handle anchor (#/) redirect urls
252 (?: # the various things that can precede the ID:
253 (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
254 |(?: # or the v= param in all its forms
255 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
256 (?:\?|\#!?) # the params delimiter ? or # or #!
257 (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
258 v=
259 )
260 ))
261 |(?:
262 youtu\.be| # just youtu.be/xxxx
263 vid\.plus| # or vid.plus/xxxx
264 zwearz\.com/watch| # or zwearz.com/watch/xxxx
265 )/
266 |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
267 )
268 )? # all until now is optional -> you can pass the naked ID
269 ([0-9A-Za-z_-]{11}) # here it is! the YouTube video ID
270 (?!.*?\blist=
271 (?:
272 %(playlist_id)s| # combined list/video URLs are handled by the playlist IE
273 WL # WL are handled by the watch later IE
274 )
275 )
276 (?(1).+)? # if we found the ID, everything can follow
277 $""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
278 _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
279 _formats = {
280 '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
281 '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
282 '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
283 '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
284 '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
285 '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
286 '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
287 '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
288 # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
289 '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
290 '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
291 '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
292 '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
293 '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
294 '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
295 '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
296 '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
297 '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
298
299
300 # 3D videos
301 '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
302 '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
303 '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
304 '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
305 '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
306 '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
307 '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
308
309 # Apple HTTP Live Streaming
310 '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
311 '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
312 '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
313 '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
314 '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
315 '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
316 '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
317 '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
318
319 # DASH mp4 video
320 '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
321 '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
322 '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
323 '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
324 '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
325 '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
326 '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
327 '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
328 '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
329 '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
330 '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
331 '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
332
333 # Dash mp4 audio
334 '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
335 '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
336 '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
337 '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
338 '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
339 '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'preference': -50, 'container': 'm4a_dash'},
340 '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'preference': -50, 'container': 'm4a_dash'},
341
342 # Dash webm
343 '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
344 '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
345 '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
346 '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
347 '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
348 '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
349 '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9', 'preference': -40},
350 '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
351 '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
352 '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
353 '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
354 '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
355 '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
356 '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
357 '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
358 # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
359 '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
360 '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
361 '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
362 '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
363 '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
364 '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
365
366 # Dash webm audio
367 '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
368 '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
369
370 # Dash webm audio with opus inside
371 '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
372 '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
373 '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
374
375 # RTMP (unnamed)
376 '_rtmp': {'protocol': 'rtmp'},
377 }
378 _SUBTITLE_FORMATS = ('ttml', 'vtt')
379
380 _GEO_BYPASS = False
381
382 IE_NAME = 'youtube'
383 _TESTS = [
384 {
385 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
386 'info_dict': {
387 'id': 'BaW_jenozKc',
388 'ext': 'mp4',
389 'title': 'youtube-dl test video "\'/\\Ƥā†­š•',
390 'uploader': 'Philipp Hagemeister',
391 'uploader_id': 'phihag',
392 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
393 'upload_date': '20121002',
394 'license': 'Standard YouTube License',
395 'description': 'test chars: "\'/\\Ƥā†­š•\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
396 'categories': ['Science & Technology'],
397 'tags': ['youtube-dl'],
398 'duration': 10,
399 'like_count': int,
400 'dislike_count': int,
401 'start_time': 1,
402 'end_time': 9,
403 }
404 },
405 {
406 'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
407 'note': 'Test generic use_cipher_signature video (#897)',
408 'info_dict': {
409 'id': 'UxxajLWwzqY',
410 'ext': 'mp4',
411 'upload_date': '20120506',
412 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
413 'alt_title': 'I Love It (feat. Charli XCX)',
414 'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
415 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
416 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
417 'iconic ep', 'iconic', 'love', 'it'],
418 'duration': 180,
419 'uploader': 'Icona Pop',
420 'uploader_id': 'IconaPop',
421 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
422 'license': 'Standard YouTube License',
423 'creator': 'Icona Pop',
424 }
425 },
426 {
427 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
428 'note': 'Test VEVO video with age protection (#956)',
429 'info_dict': {
430 'id': '07FYdnEawAQ',
431 'ext': 'mp4',
432 'upload_date': '20130703',
433 'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
434 'alt_title': 'Tunnel Vision',
435 'description': 'md5:64249768eec3bc4276236606ea996373',
436 'duration': 419,
437 'uploader': 'justintimberlakeVEVO',
438 'uploader_id': 'justintimberlakeVEVO',
439 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
440 'license': 'Standard YouTube License',
441 'creator': 'Justin Timberlake',
442 'age_limit': 18,
443 }
444 },
445 {
446 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
447 'note': 'Embed-only video (#1746)',
448 'info_dict': {
449 'id': 'yZIXLfi8CZQ',
450 'ext': 'mp4',
451 'upload_date': '20120608',
452 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
453 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
454 'uploader': 'SET India',
455 'uploader_id': 'setindia',
456 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
457 'license': 'Standard YouTube License',
458 'age_limit': 18,
459 }
460 },
461 {
462 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
463 'note': 'Use the first video ID in the URL',
464 'info_dict': {
465 'id': 'BaW_jenozKc',
466 'ext': 'mp4',
467 'title': 'youtube-dl test video "\'/\\Ƥā†­š•',
468 'uploader': 'Philipp Hagemeister',
469 'uploader_id': 'phihag',
470 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
471 'upload_date': '20121002',
472 'license': 'Standard YouTube License',
473 'description': 'test chars: "\'/\\Ƥā†­š•\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
474 'categories': ['Science & Technology'],
475 'tags': ['youtube-dl'],
476 'duration': 10,
477 'like_count': int,
478 'dislike_count': int,
479 },
480 'params': {
481 'skip_download': True,
482 },
483 },
484 {
485 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
486 'note': '256k DASH audio (format 141) via DASH manifest',
487 'info_dict': {
488 'id': 'a9LDPn-MO4I',
489 'ext': 'm4a',
490 'upload_date': '20121002',
491 'uploader_id': '8KVIDEO',
492 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
493 'description': '',
494 'uploader': '8KVIDEO',
495 'license': 'Standard YouTube License',
496 'title': 'UHDTV TEST 8K VIDEO.mp4'
497 },
498 'params': {
499 'youtube_include_dash_manifest': True,
500 'format': '141',
501 },
502 'skip': 'format 141 not served anymore',
503 },
504 # DASH manifest with encrypted signature
505 {
506 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
507 'info_dict': {
508 'id': 'IB3lcPjvWLA',
509 'ext': 'm4a',
510 'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
511 'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
512 'duration': 244,
513 'uploader': 'AfrojackVEVO',
514 'uploader_id': 'AfrojackVEVO',
515 'upload_date': '20131011',
516 'license': 'Standard YouTube License',
517 },
518 'params': {
519 'youtube_include_dash_manifest': True,
520 'format': '141/bestaudio[ext=m4a]',
521 },
522 },
523 # JS player signature function name containing $
524 {
525 'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
526 'info_dict': {
527 'id': 'nfWlot6h_JM',
528 'ext': 'm4a',
529 'title': 'Taylor Swift - Shake It Off',
530 'alt_title': 'Shake It Off',
531 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
532 'duration': 242,
533 'uploader': 'TaylorSwiftVEVO',
534 'uploader_id': 'TaylorSwiftVEVO',
535 'upload_date': '20140818',
536 'license': 'Standard YouTube License',
537 'creator': 'Taylor Swift',
538 },
539 'params': {
540 'youtube_include_dash_manifest': True,
541 'format': '141/bestaudio[ext=m4a]',
542 },
543 },
544 # Controversy video
545 {
546 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
547 'info_dict': {
548 'id': 'T4XJQO3qol8',
549 'ext': 'mp4',
550 'duration': 219,
551 'upload_date': '20100909',
552 'uploader': 'The Amazing Atheist',
553 'uploader_id': 'TheAmazingAtheist',
554 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
555 'license': 'Standard YouTube License',
556 'title': 'Burning Everyone\'s Koran',
557 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
558 }
559 },
560 # Normal age-gate video (No vevo, embed allowed)
561 {
562 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
563 'info_dict': {
564 'id': 'HtVdAasjOgU',
565 'ext': 'mp4',
566 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
567 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
568 'duration': 142,
569 'uploader': 'The Witcher',
570 'uploader_id': 'WitcherGame',
571 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
572 'upload_date': '20140605',
573 'license': 'Standard YouTube License',
574 'age_limit': 18,
575 },
576 },
577 # Age-gate video with encrypted signature
578 {
579 'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
580 'info_dict': {
581 'id': '6kLq3WMV1nU',
582 'ext': 'mp4',
583 'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
584 'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
585 'duration': 247,
586 'uploader': 'LloydVEVO',
587 'uploader_id': 'LloydVEVO',
588 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
589 'upload_date': '20110629',
590 'license': 'Standard YouTube License',
591 'age_limit': 18,
592 },
593 },
594 # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
595 {
596 'url': '__2ABJjxzNo',
597 'info_dict': {
598 'id': '__2ABJjxzNo',
599 'ext': 'mp4',
600 'duration': 266,
601 'upload_date': '20100430',
602 'uploader_id': 'deadmau5',
603 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
604 'creator': 'deadmau5',
605 'description': 'md5:12c56784b8032162bb936a5f76d55360',
606 'uploader': 'deadmau5',
607 'license': 'Standard YouTube License',
608 'title': 'Deadmau5 - Some Chords (HD)',
609 'alt_title': 'Some Chords',
610 },
611 'expected_warnings': [
612 'DASH manifest missing',
613 ]
614 },
615 # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
616 {
617 'url': 'lqQg6PlCWgI',
618 'info_dict': {
619 'id': 'lqQg6PlCWgI',
620 'ext': 'mp4',
621 'duration': 6085,
622 'upload_date': '20150827',
623 'uploader_id': 'olympic',
624 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
625 'license': 'Standard YouTube License',
626 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
627 'uploader': 'Olympic',
628 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
629 },
630 'params': {
631 'skip_download': 'requires avconv',
632 }
633 },
634 # Non-square pixels
635 {
636 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
637 'info_dict': {
638 'id': '_b-2C3KPAM0',
639 'ext': 'mp4',
640 'stretched_ratio': 16 / 9.,
641 'duration': 85,
642 'upload_date': '20110310',
643 'uploader_id': 'AllenMeow',
644 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
645 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
646 'uploader': '孫艾倫',
647 'license': 'Standard YouTube License',
648 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
649 },
650 },
651 # url_encoded_fmt_stream_map is empty string
652 {
653 'url': 'qEJwOuvDf7I',
654 'info_dict': {
655 'id': 'qEJwOuvDf7I',
656 'ext': 'webm',
657 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
658 'description': '',
659 'upload_date': '20150404',
660 'uploader_id': 'spbelect',
661 'uploader': 'Наблюдатели Петербурга',
662 },
663 'params': {
664 'skip_download': 'requires avconv',
665 },
666 'skip': 'This live event has ended.',
667 },
668 # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
669 {
670 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
671 'info_dict': {
672 'id': 'FIl7x6_3R5Y',
673 'ext': 'mp4',
674 'title': 'md5:7b81415841e02ecd4313668cde88737a',
675 'description': 'md5:116377fd2963b81ec4ce64b542173306',
676 'duration': 220,
677 'upload_date': '20150625',
678 'uploader_id': 'dorappi2000',
679 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
680 'uploader': 'dorappi2000',
681 'license': 'Standard YouTube License',
682 'formats': 'mincount:32',
683 },
684 },
685 # DASH manifest with segment_list
686 {
687 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
688 'md5': '8ce563a1d667b599d21064e982ab9e31',
689 'info_dict': {
690 'id': 'CsmdDsKjzN8',
691 'ext': 'mp4',
692 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
693 'uploader': 'Airtek',
694 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
695 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
696 'license': 'Standard YouTube License',
697 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
698 },
699 'params': {
700 'youtube_include_dash_manifest': True,
701 'format': '135', # bestvideo
702 },
703 'skip': 'This live event has ended.',
704 },
705 {
706 # Multifeed videos (multiple cameras), URL is for Main Camera
707 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
708 'info_dict': {
709 'id': 'jqWvoWXjCVs',
710 'title': 'teamPGP: Rocket League Noob Stream',
711 'description': 'md5:dc7872fb300e143831327f1bae3af010',
712 },
713 'playlist': [{
714 'info_dict': {
715 'id': 'jqWvoWXjCVs',
716 'ext': 'mp4',
717 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
718 'description': 'md5:dc7872fb300e143831327f1bae3af010',
719 'duration': 7335,
720 'upload_date': '20150721',
721 'uploader': 'Beer Games Beer',
722 'uploader_id': 'beergamesbeer',
723 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
724 'license': 'Standard YouTube License',
725 },
726 }, {
727 'info_dict': {
728 'id': '6h8e8xoXJzg',
729 'ext': 'mp4',
730 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
731 'description': 'md5:dc7872fb300e143831327f1bae3af010',
732 'duration': 7337,
733 'upload_date': '20150721',
734 'uploader': 'Beer Games Beer',
735 'uploader_id': 'beergamesbeer',
736 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
737 'license': 'Standard YouTube License',
738 },
739 }, {
740 'info_dict': {
741 'id': 'PUOgX5z9xZw',
742 'ext': 'mp4',
743 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
744 'description': 'md5:dc7872fb300e143831327f1bae3af010',
745 'duration': 7337,
746 'upload_date': '20150721',
747 'uploader': 'Beer Games Beer',
748 'uploader_id': 'beergamesbeer',
749 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
750 'license': 'Standard YouTube License',
751 },
752 }, {
753 'info_dict': {
754 'id': 'teuwxikvS5k',
755 'ext': 'mp4',
756 'title': 'teamPGP: Rocket League Noob Stream (zim)',
757 'description': 'md5:dc7872fb300e143831327f1bae3af010',
758 'duration': 7334,
759 'upload_date': '20150721',
760 'uploader': 'Beer Games Beer',
761 'uploader_id': 'beergamesbeer',
762 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
763 'license': 'Standard YouTube License',
764 },
765 }],
766 'params': {
767 'skip_download': True,
768 },
769 },
770 {
771 # Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
772 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
773 'info_dict': {
774 'id': 'gVfLd0zydlo',
775 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
776 },
777 'playlist_count': 2,
778 'skip': 'Not multifeed anymore',
779 },
780 {
781 'url': 'https://vid.plus/FlRa-iH7PGw',
782 'only_matching': True,
783 },
784 {
785 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
786 'only_matching': True,
787 },
788 {
789 # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
790 # Also tests cut-off URL expansion in video description (see
791 # https://github.com/rg3/youtube-dl/issues/1892,
792 # https://github.com/rg3/youtube-dl/issues/8164)
793 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
794 'info_dict': {
795 'id': 'lsguqyKfVQg',
796 'ext': 'mp4',
797 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
798 'alt_title': 'Dark Walk',
799 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
800 'duration': 133,
801 'upload_date': '20151119',
802 'uploader_id': 'IronSoulElf',
803 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
804 'uploader': 'IronSoulElf',
805 'license': 'Standard YouTube License',
806 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
807 },
808 'params': {
809 'skip_download': True,
810 },
811 },
812 {
813 # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
814 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
815 'only_matching': True,
816 },
817 {
818 # Video with yt:stretch=17:0
819 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
820 'info_dict': {
821 'id': 'Q39EVAstoRM',
822 'ext': 'mp4',
823 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
824 'description': 'md5:ee18a25c350637c8faff806845bddee9',
825 'upload_date': '20151107',
826 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
827 'uploader': 'CH GAMER DROID',
828 },
829 'params': {
830 'skip_download': True,
831 },
832 'skip': 'This video does not exist.',
833 },
834 {
835 # Video licensed under Creative Commons
836 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
837 'info_dict': {
838 'id': 'M4gD1WSo5mA',
839 'ext': 'mp4',
840 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
841 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
842 'duration': 721,
843 'upload_date': '20150127',
844 'uploader_id': 'BerkmanCenter',
845 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
846 'uploader': 'The Berkman Klein Center for Internet & Society',
847 'license': 'Creative Commons Attribution license (reuse allowed)',
848 },
849 'params': {
850 'skip_download': True,
851 },
852 },
853 {
854 # Channel-like uploader_url
855 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
856 'info_dict': {
857 'id': 'eQcmzGIKrzg',
858 'ext': 'mp4',
859 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
860 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
861 'duration': 4060,
862 'upload_date': '20151119',
863 'uploader': 'Bernie 2016',
864 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
865 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
866 'license': 'Creative Commons Attribution license (reuse allowed)',
867 },
868 'params': {
869 'skip_download': True,
870 },
871 },
872 {
873 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
874 'only_matching': True,
875 },
876 {
877 # YouTube Red paid video (https://github.com/rg3/youtube-dl/issues/10059)
878 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
879 'only_matching': True,
880 },
881 {
882 # Rental video preview
883 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
884 'info_dict': {
885 'id': 'uGpuVWrhIzE',
886 'ext': 'mp4',
887 'title': 'Piku - Trailer',
888 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
889 'upload_date': '20150811',
890 'uploader': 'FlixMatrix',
891 'uploader_id': 'FlixMatrixKaravan',
892 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
893 'license': 'Standard YouTube License',
894 },
895 'params': {
896 'skip_download': True,
897 },
898 },
899 {
900 # YouTube Red video with episode data
901 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
902 'info_dict': {
903 'id': 'iqKdEhx-dD4',
904 'ext': 'mp4',
905 'title': 'Isolation - Mind Field (Ep 1)',
906 'description': 'md5:8013b7ddea787342608f63a13ddc9492',
907 'duration': 2085,
908 'upload_date': '20170118',
909 'uploader': 'Vsauce',
910 'uploader_id': 'Vsauce',
911 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
912 'license': 'Standard YouTube License',
913 'series': 'Mind Field',
914 'season_number': 1,
915 'episode_number': 1,
916 },
917 'params': {
918 'skip_download': True,
919 },
920 'expected_warnings': [
921 'Skipping DASH manifest',
922 ],
923 },
924 {
925 # itag 212
926 'url': '1t24XAntNCY',
927 'only_matching': True,
928 },
929 {
930 # geo restricted to JP
931 'url': 'sJL6WA-aGkQ',
932 'only_matching': True,
933 },
934 {
935 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
936 'only_matching': True,
937 },
938 ]
939
940 def __init__(self, *args, **kwargs):
941 super(YoutubeIE, self).__init__(*args, **kwargs)
942 self._player_cache = {}
943
944 def report_video_info_webpage_download(self, video_id):
945 """Report attempt to download video info webpage."""
946 self.to_screen('%s: Downloading video info webpage' % video_id)
947
948 def report_information_extraction(self, video_id):
949 """Report attempt to extract video information."""
950 self.to_screen('%s: Extracting video information' % video_id)
951
952 def report_unavailable_format(self, video_id, format):
953 """Report extracted video URL."""
954 self.to_screen('%s: Format %s not available' % (video_id, format))
955
956 def report_rtmp_download(self):
957 """Indicate the download will use the RTMP protocol."""
958 self.to_screen('RTMP download detected')
959
960 def _signature_cache_id(self, example_sig):
961 """ Return a string representation of a signature """
962 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
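# The cache id encodes only the lengths of the dot-separated signature parts,
# e.g. a scrambled signature shaped like 'xxxxx.yyy' yields the id '5.3', so one
# cached spec can be reused for any signature with the same layout.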
963
964 def _extract_signature_function(self, video_id, player_url, example_sig):
965 id_m = re.match(
966 r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
967 player_url)
968 if not id_m:
969 raise ExtractorError('Cannot identify player %r' % player_url)
970 player_type = id_m.group('ext')
971 player_id = id_m.group('id')
972
973 # Read from filesystem cache
974 func_id = '%s_%s_%s' % (
975 player_type, player_id, self._signature_cache_id(example_sig))
976 assert os.path.basename(func_id) == func_id
977
978 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
979 if cache_spec is not None:
980 return lambda s: ''.join(s[i] for i in cache_spec)
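# cache_spec is a list of source-character indices, so applying it is a pure
# reordering/selection of the scrambled signature's characters, with no need to
# re-download or re-interpret the player.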
981
982 download_note = (
983 'Downloading player %s' % player_url
984 if self._downloader.params.get('verbose') else
985 'Downloading %s player %s' % (player_type, player_id)
986 )
987 if player_type == 'js':
988 code = self._download_webpage(
989 player_url, video_id,
990 note=download_note,
991 errnote='Download of %s failed' % player_url)
992 res = self._parse_sig_js(code)
993 elif player_type == 'swf':
994 urlh = self._request_webpage(
995 player_url, video_id,
996 note=download_note,
997 errnote='Download of %s failed' % player_url)
998 code = urlh.read()
999 res = self._parse_sig_swf(code)
1000 else:
1001 assert False, 'Invalid player type %r' % player_type
1002
1003 test_string = ''.join(map(compat_chr, range(len(example_sig))))
1004 cache_res = res(test_string)
1005 cache_spec = [ord(c) for c in cache_res]
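# test_string consists of distinct code points 0..len-1, so ord() of each output
# character reveals which input position it came from; that index list fully
# describes the deciphering function and is what gets cached below.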
1006
1007 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
1008 return res
1009
1010 def _print_sig_code(self, func, example_sig):
1011 def gen_sig_code(idxs):
1012 def _genslice(start, end, step):
1013 starts = '' if start == 0 else str(start)
1014 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
1015 steps = '' if step == 1 else (':%d' % step)
1016 return 's[%s%s%s]' % (starts, ends, steps)
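# The loop below collapses runs of consecutive indices (step +1 or -1) emitted
# by the cached spec into compact slice expressions via _genslice, and prints
# isolated indices as plain s[i] lookups.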
1017
1018 step = None
1019 # Quell pyflakes warnings - start will be set when step is set
1020 start = '(Never used)'
1021 for i, prev in zip(idxs[1:], idxs[:-1]):
1022 if step is not None:
1023 if i - prev == step:
1024 continue
1025 yield _genslice(start, prev, step)
1026 step = None
1027 continue
1028 if i - prev in [-1, 1]:
1029 step = i - prev
1030 start = prev
1031 continue
1032 else:
1033 yield 's[%d]' % prev
1034 if step is None:
1035 yield 's[%d]' % i
1036 else:
1037 yield _genslice(start, i, step)
1038
1039 test_string = ''.join(map(compat_chr, range(len(example_sig))))
1040 cache_res = func(test_string)
1041 cache_spec = [ord(c) for c in cache_res]
1042 expr_code = ' + '.join(gen_sig_code(cache_spec))
1043 signature_id_tuple = '(%s)' % (
1044 ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
1045 code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
1046 ' return %s\n') % (signature_id_tuple, expr_code)
1047 self.to_screen('Extracted signature function:\n' + code)
1048
1049 def _parse_sig_js(self, jscode):
1050 funcname = self._search_regex(
1051 (r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1052 r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\('),
1053 jscode, 'Initial JS player signature function name', group='sig')
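# The two patterns above cover the usual ways the player JS references the
# signature routine: a '"signature",<name>(' call argument and a
# '.sig||<name>(' fallback expression.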
1054
1055 jsi = JSInterpreter(jscode)
1056 initial_function = jsi.extract_function(funcname)
1057 return lambda s: initial_function([s])
1058
1059 def _parse_sig_swf(self, file_contents):
1060 swfi = SWFInterpreter(file_contents)
1061 TARGET_CLASSNAME = 'SignatureDecipher'
1062 searched_class = swfi.extract_class(TARGET_CLASSNAME)
1063 initial_function = swfi.extract_function(searched_class, 'decipher')
1064 return lambda s: initial_function([s])
1065
1066 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
1067 """Turn the encrypted s field into a working signature"""
1068
1069 if player_url is None:
1070 raise ExtractorError('Cannot decrypt signature without player_url')
1071
1072 if player_url.startswith('//'):
1073 player_url = 'https:' + player_url
1074 elif not re.match(r'https?://', player_url):
1075 player_url = compat_urlparse.urljoin(
1076 'https://www.youtube.com', player_url)
1077 try:
1078 player_id = (player_url, self._signature_cache_id(s))
1079 if player_id not in self._player_cache:
1080 func = self._extract_signature_function(
1081 video_id, player_url, s
1082 )
1083 self._player_cache[player_id] = func
1084 func = self._player_cache[player_id]
1085 if self._downloader.params.get('youtube_print_sig_code'):
1086 self._print_sig_code(func, s)
1087 return func(s)
1088 except Exception as e:
1089 tb = traceback.format_exc()
1090 raise ExtractorError(
1091 'Signature extraction failed: ' + tb, cause=e)
1092
1093 def _get_subtitles(self, video_id, webpage):
1094 try:
1095 subs_doc = self._download_xml(
1096 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
1097 video_id, note=False)
1098 except ExtractorError as err:
1099 self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
1100 return {}
1101
1102 sub_lang_list = {}
1103 for track in subs_doc.findall('track'):
1104 lang = track.attrib['lang_code']
1105 if lang in sub_lang_list:
1106 continue
1107 sub_formats = []
1108 for ext in self._SUBTITLE_FORMATS:
1109 params = compat_urllib_parse_urlencode({
1110 'lang': lang,
1111 'v': video_id,
1112 'fmt': ext,
1113 'name': track.attrib['name'].encode('utf-8'),
1114 })
1115 sub_formats.append({
1116 'url': 'https://www.youtube.com/api/timedtext?' + params,
1117 'ext': ext,
1118 })
1119 sub_lang_list[lang] = sub_formats
1120 if not sub_lang_list:
1121 self._downloader.report_warning('video doesn\'t have subtitles')
1122 return {}
1123 return sub_lang_list
1124
1125 def _get_ytplayer_config(self, video_id, webpage):
1126 patterns = (
1127 # User data may contain arbitrary character sequences that can break
1128 # regex-based JSON extraction, e.g. when '};' appears in the data the second
1129 # regex won't capture the whole JSON. Work around this by trying the more
1130 # specific regex first; proper quoted-string handling, to be implemented
1131 # in the future, will replace this workaround (see
1132 # https://github.com/rg3/youtube-dl/issues/7468,
1133 # https://github.com/rg3/youtube-dl/pull/7599)
1134 r';ytplayer\.config\s*=\s*({.+?});ytplayer',
1135 r';ytplayer\.config\s*=\s*({.+?});',
1136 )
1137 config = self._search_regex(
1138 patterns, webpage, 'ytplayer.config', default=None)
1139 if config:
1140 return self._parse_json(
1141 uppercase_escape(config), video_id, fatal=False)
1142
1143 def _get_automatic_captions(self, video_id, webpage):
1144 """We need the webpage for getting the captions url, pass it as an
1145 argument to speed up the process."""
1146 self.to_screen('%s: Looking for automatic captions' % video_id)
1147 player_config = self._get_ytplayer_config(video_id, webpage)
1148 err_msg = 'Couldn\'t find automatic captions for %s' % video_id
1149 if not player_config:
1150 self._downloader.report_warning(err_msg)
1151 return {}
1152 try:
1153 args = player_config['args']
1154 caption_url = args.get('ttsurl')
1155 if caption_url:
1156 timestamp = args['timestamp']
1157 # We get the available subtitles
1158 list_params = compat_urllib_parse_urlencode({
1159 'type': 'list',
1160 'tlangs': 1,
1161 'asrs': 1,
1162 })
1163 list_url = caption_url + '&' + list_params
1164 caption_list = self._download_xml(list_url, video_id)
1165 original_lang_node = caption_list.find('track')
1166 if original_lang_node is None:
1167 self._downloader.report_warning('Video doesn\'t have automatic captions')
1168 return {}
1169 original_lang = original_lang_node.attrib['lang_code']
1170 caption_kind = original_lang_node.attrib.get('kind', '')
1171
1172 sub_lang_list = {}
1173 for lang_node in caption_list.findall('target'):
1174 sub_lang = lang_node.attrib['lang_code']
1175 sub_formats = []
1176 for ext in self._SUBTITLE_FORMATS:
1177 params = compat_urllib_parse_urlencode({
1178 'lang': original_lang,
1179 'tlang': sub_lang,
1180 'fmt': ext,
1181 'ts': timestamp,
1182 'kind': caption_kind,
1183 })
1184 sub_formats.append({
1185 'url': caption_url + '&' + params,
1186 'ext': ext,
1187 })
1188 sub_lang_list[sub_lang] = sub_formats
1189 return sub_lang_list
1190
1191 # Some videos don't provide ttsurl but rather caption_tracks and
1192 # caption_translation_languages (e.g. 20LmZk1hakA)
1193 caption_tracks = args['caption_tracks']
1194 caption_translation_languages = args['caption_translation_languages']
1195 caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
1196 parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
1197 caption_qs = compat_parse_qs(parsed_caption_url.query)
1198
1199 sub_lang_list = {}
1200 for lang in caption_translation_languages.split(','):
1201 lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
1202 sub_lang = lang_qs.get('lc', [None])[0]
1203 if not sub_lang:
1204 continue
1205 sub_formats = []
1206 for ext in self._SUBTITLE_FORMATS:
1207 caption_qs.update({
1208 'tlang': [sub_lang],
1209 'fmt': [ext],
1210 })
1211 sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
1212 query=compat_urllib_parse_urlencode(caption_qs, True)))
1213 sub_formats.append({
1214 'url': sub_url,
1215 'ext': ext,
1216 })
1217 sub_lang_list[sub_lang] = sub_formats
1218 return sub_lang_list
1219 # An extractor error can be raised by the download process if there are
1220 # no automatic captions but there are subtitles
1221 except (KeyError, ExtractorError):
1222 self._downloader.report_warning(err_msg)
1223 return {}
1224
1225 def _mark_watched(self, video_id, video_info):
1226 playback_url = video_info.get('videostats_playback_base_url', [None])[0]
1227 if not playback_url:
1228 return
1229 parsed_playback_url = compat_urlparse.urlparse(playback_url)
1230 qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1231
1232 # cpn generation algorithm is reverse engineered from base.js.
1233 # In fact it works even with dummy cpn.
1234 CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
1235 cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
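# randint(0, 256) & 63 masks the value down to 0-63, i.e. an index into the
# 64-character base64url-style CPN_ALPHABET; 16 such characters form the cpn.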
1236
1237 qs.update({
1238 'ver': ['2'],
1239 'cpn': [cpn],
1240 })
1241 playback_url = compat_urlparse.urlunparse(
1242 parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1243
1244 self._download_webpage(
1245 playback_url, video_id, 'Marking watched',
1246 'Unable to mark watched', fatal=False)
1247
1248 @classmethod
1249 def extract_id(cls, url):
1250 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1251 if mobj is None:
1252 raise ExtractorError('Invalid URL: %s' % url)
1253 video_id = mobj.group(2)
1254 return video_id
1255
1256 def _extract_from_m3u8(self, manifest_url, video_id):
1257 url_map = {}
1258
1259 def _get_urls(_manifest):
1260 lines = _manifest.split('\n')
1261 urls = filter(lambda l: l and not l.startswith('#'),
1262 lines)
1263 return urls
1264 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
1265 formats_urls = _get_urls(manifest)
1266 for format_url in formats_urls:
1267 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
1268 url_map[itag] = format_url
1269 return url_map
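# Each non-comment line of the m3u8 manifest is a variant-stream URL whose itag
# is embedded as 'itag/<n>/'; the returned map keys those URLs by itag.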
1270
1271 def _extract_annotations(self, video_id):
1272 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
1273 return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
1274
1275 def _real_extract(self, url):
1276 url, smuggled_data = unsmuggle_url(url, {})
1277
1278 proto = (
1279 'http' if self._downloader.params.get('prefer_insecure', False)
1280 else 'https')
1281
1282 start_time = None
1283 end_time = None
1284 parsed_url = compat_urllib_parse_urlparse(url)
1285 for component in [parsed_url.fragment, parsed_url.query]:
1286 query = compat_parse_qs(component)
1287 if start_time is None and 't' in query:
1288 start_time = parse_duration(query['t'][0])
1289 if start_time is None and 'start' in query:
1290 start_time = parse_duration(query['start'][0])
1291 if end_time is None and 'end' in query:
1292 end_time = parse_duration(query['end'][0])
1293
1294 # Extract original video URL from URL with redirection, like age verification, using next_url parameter
1295 mobj = re.search(self._NEXT_URL_RE, url)
1296 if mobj:
1297 url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1298 video_id = self.extract_id(url)
1299
1300 # Get video webpage
1301 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1302 video_webpage = self._download_webpage(url, video_id)
1303
1304 # Attempt to extract SWF player URL
1305 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1306 if mobj is not None:
1307 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
1308 else:
1309 player_url = None
1310
1311 dash_mpds = []
1312
1313 def add_dash_mpd(video_info):
1314 dash_mpd = video_info.get('dashmpd')
1315 if dash_mpd and dash_mpd[0] not in dash_mpds:
1316 dash_mpds.append(dash_mpd[0])
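# Collect every distinct dashmpd URL seen in any video_info source so that
# itags from all of the DASH manifests can later be combined (see the
# 'union of the itags' note further below).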
1317
1318 # Get video info
1319 embed_webpage = None
1320 is_live = None
1321 if re.search(r'player-age-gate-content">', video_webpage) is not None:
1322 age_gate = True
1323 # We simulate access to the video from www.youtube.com/v/{video_id};
1324 # this can be viewed without logging in to YouTube
1325 url = proto + '://www.youtube.com/embed/%s' % video_id
1326 embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1327 data = compat_urllib_parse_urlencode({
1328 'video_id': video_id,
1329 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1330 'sts': self._search_regex(
1331 r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1332 })
1333 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1334 video_info_webpage = self._download_webpage(
1335 video_info_url, video_id,
1336 note='Refetching age-gated info webpage',
1337 errnote='unable to download video info webpage')
1338 video_info = compat_parse_qs(video_info_webpage)
1339 add_dash_mpd(video_info)
1340 else:
1341 age_gate = False
1342 video_info = None
1343 # Try looking directly into the video webpage
1344 ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1345 if ytplayer_config:
1346 args = ytplayer_config['args']
1347 if args.get('url_encoded_fmt_stream_map'):
1348 # Convert to the same format returned by compat_parse_qs
1349 video_info = dict((k, [v]) for k, v in args.items())
1350 add_dash_mpd(video_info)
1351 # Rental video is not rented but preview is available (e.g.
1352 # https://www.youtube.com/watch?v=yYr8q0y5Jfg,
1353 # https://github.com/rg3/youtube-dl/issues/10532)
1354 if not video_info and args.get('ypc_vid'):
1355 return self.url_result(
1356 args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
1357 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1358 is_live = True
1359 if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1360 # We also try looking in get_video_info since it may contain a different dashmpd
1361 # URL that points to a DASH manifest with a possibly different itag set (some itags
1362 # are missing from the DASH manifest pointed to by the webpage's dashmpd, others from
1363 # the DASH manifest pointed to by get_video_info's dashmpd).
1364 # The general idea is to take the union of the itags of both DASH manifests (for an
1365 # example of a video with such manifest behavior see https://github.com/rg3/youtube-dl/issues/6093)
1366 self.report_video_info_webpage_download(video_id)
1367 for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
1368 video_info_url = (
1369 '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
1370 % (proto, video_id, el_type))
1371 video_info_webpage = self._download_webpage(
1372 video_info_url,
1373 video_id, note=False,
1374 errnote='unable to download video info webpage')
1375 get_video_info = compat_parse_qs(video_info_webpage)
1376 if get_video_info.get('use_cipher_signature') != ['True']:
1377 add_dash_mpd(get_video_info)
1378 if not video_info:
1379 video_info = get_video_info
1380 if 'token' in get_video_info:
1381 # Different get_video_info requests may report different results, e.g.
1382 # some may report the video as unavailable while others serve it without
1383 # any complaint (see https://github.com/rg3/youtube-dl/issues/7362, where
1384 # the original webpage as well as the el=info and el=embedded get_video_info
1385 # requests report the video as unavailable due to geo restriction while
1386 # el=detailpage succeeds and returns valid data). This is probably
1387 # due to YouTube's measures against the IP ranges of hosting providers.
1388 # Work around this by preferring the first successful video_info that
1389 # contains the token, if none has been found yet.
1390 if 'token' not in video_info:
1391 video_info = get_video_info
1392 break
1393 if 'token' not in video_info:
1394 if 'reason' in video_info:
1395 if 'The uploader has not made this video available in your country.' in video_info['reason']:
1396 regions_allowed = self._html_search_meta(
1397 'regionsAllowed', video_webpage, default=None)
1398 countries = regions_allowed.split(',') if regions_allowed else None
1399 self.raise_geo_restricted(
1400 msg=video_info['reason'][0], countries=countries)
1401 raise ExtractorError(
1402 'YouTube said: %s' % video_info['reason'][0],
1403 expected=True, video_id=video_id)
1404 else:
1405 raise ExtractorError(
1406 '"token" parameter not in video info for unknown reason',
1407 video_id=video_id)
1408
1409 # title
1410 if 'title' in video_info:
1411 video_title = video_info['title'][0]
1412 else:
1413 self._downloader.report_warning('Unable to extract video title')
1414 video_title = '_'
1415
1416 # description
1417 video_description = get_element_by_id("eow-description", video_webpage)
1418 if video_description:
1419 video_description = re.sub(r'''(?x)
1420 <a\s+
1421 (?:[a-zA-Z-]+="[^"]*"\s+)*?
1422 (?:title|href)="([^"]+)"\s+
1423 (?:[a-zA-Z-]+="[^"]*"\s+)*?
1424 class="[^"]*"[^>]*>
1425 [^<]+\.{3}\s*
1426 </a>
1427 ''', r'\1', video_description)
1428 video_description = clean_html(video_description)
1429 else:
1430 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
1431 if fd_mobj:
1432 video_description = unescapeHTML(fd_mobj.group(1))
1433 else:
1434 video_description = ''
1435
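        # Videos with multiple feeds (e.g. multi-camera events) list the individual feeds in
        # multifeed_metadata_list; unless --no-playlist is given, each feed becomes its own entry.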
1436 if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
1437 if not self._downloader.params.get('noplaylist'):
1438 entries = []
1439 feed_ids = []
1440 multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
1441 for feed in multifeed_metadata_list.split(','):
1442                    # Unquoting must happen before splitting on commas (,) since textual
1443                    # fields may contain commas as well (see
1444                    # https://github.com/rg3/youtube-dl/issues/8536)
1445 feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
1446 entries.append({
1447 '_type': 'url_transparent',
1448 'ie_key': 'Youtube',
1449 'url': smuggle_url(
1450 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1451 {'force_singlefeed': True}),
1452 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1453 })
1454 feed_ids.append(feed_data['id'][0])
1455 self.to_screen(
1456 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1457 % (', '.join(feed_ids), video_id))
1458 return self.playlist_result(entries, video_id, video_title, video_description)
1459 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1460
1461 if 'view_count' in video_info:
1462 view_count = int(video_info['view_count'][0])
1463 else:
1464 view_count = None
1465
1466 # Check for "rental" videos
1467 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1468 raise ExtractorError('"rental" videos not supported. See https://github.com/rg3/youtube-dl/issues/359 for more information.', expected=True)
1469
1470 # Start extracting information
1471 self.report_information_extraction(video_id)
1472
1473 # uploader
1474 if 'author' not in video_info:
1475 raise ExtractorError('Unable to extract uploader name')
1476 video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
1477
1478 # uploader_id
1479 video_uploader_id = None
1480 video_uploader_url = None
1481 mobj = re.search(
1482 r'<link itemprop="url" href="(?P<uploader_url>https?://www.youtube.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
1483 video_webpage)
1484 if mobj is not None:
1485 video_uploader_id = mobj.group('uploader_id')
1486 video_uploader_url = mobj.group('uploader_url')
1487 else:
1488 self._downloader.report_warning('unable to extract uploader nickname')
1489
1490        # thumbnail image
1491        # We first try to get a high-quality image:
1492 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
1493 video_webpage, re.DOTALL)
1494 if m_thumb is not None:
1495 video_thumbnail = m_thumb.group(1)
1496 elif 'thumbnail_url' not in video_info:
1497 self._downloader.report_warning('unable to extract video thumbnail')
1498 video_thumbnail = None
1499 else: # don't panic if we can't find it
1500 video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
1501
1502 # upload date
1503 upload_date = self._html_search_meta(
1504 'datePublished', video_webpage, 'upload date', default=None)
1505 if not upload_date:
1506 upload_date = self._search_regex(
1507 [r'(?s)id="eow-date.*?>(.*?)</span>',
1508 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
1509 video_webpage, 'upload date', default=None)
1510 if upload_date:
1511            upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
1512 upload_date = unified_strdate(upload_date)
1513
1514 video_license = self._html_search_regex(
1515 r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
1516 video_webpage, 'license', default=None)
1517
1518 m_music = re.search(
1519 r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
1520 video_webpage)
1521 if m_music:
1522 video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
1523 video_creator = clean_html(m_music.group('creator'))
1524 else:
1525 video_alt_title = video_creator = None
1526
1527 m_episode = re.search(
1528 r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*ā€¢\s*E(?P<episode>\d+)</span>',
1529 video_webpage)
1530 if m_episode:
1531 series = m_episode.group('series')
1532 season_number = int(m_episode.group('season'))
1533 episode_number = int(m_episode.group('episode'))
1534 else:
1535 series = season_number = episode_number = None
1536
1537 m_cat_container = self._search_regex(
1538 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
1539 video_webpage, 'categories', default=None)
1540 if m_cat_container:
1541 category = self._html_search_regex(
1542 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
1543 default=None)
1544 video_categories = None if category is None else [category]
1545 else:
1546 video_categories = None
1547
1548 video_tags = [
1549 unescapeHTML(m.group('content'))
1550 for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
1551
1552 def _extract_count(count_name):
1553 return str_to_int(self._search_regex(
1554 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
1555 % re.escape(count_name),
1556 video_webpage, count_name, default=None))
1557
1558 like_count = _extract_count('like')
1559 dislike_count = _extract_count('dislike')
1560
1561 # subtitles
1562 video_subtitles = self.extract_subtitles(video_id, video_webpage)
1563 automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
1564
1565 video_duration = try_get(
1566 video_info, lambda x: int_or_none(x['length_seconds'][0]))
1567 if not video_duration:
1568 video_duration = parse_duration(self._html_search_meta(
1569 'duration', video_webpage, 'video duration'))
1570
1571 # annotations
1572 video_annotations = None
1573 if self._downloader.params.get('writeannotations', False):
1574 video_annotations = self._extract_annotations(video_id)
1575
1576 def _map_to_format_list(urlmap):
1577 formats = []
1578 for itag, video_real_url in urlmap.items():
1579 dct = {
1580 'format_id': itag,
1581 'url': video_real_url,
1582 'player_url': player_url,
1583 }
1584 if itag in self._formats:
1585 dct.update(self._formats[itag])
1586 formats.append(dct)
1587 return formats
1588
1589 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1590 self.report_rtmp_download()
1591 formats = [{
1592 'format_id': '_rtmp',
1593 'protocol': 'rtmp',
1594 'url': video_info['conn'][0],
1595 'player_url': player_url,
1596 }]
1597 elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
1598 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1599 if 'rtmpe%3Dyes' in encoded_url_map:
1600 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
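            # fmt_list entries look like 'itag/WIDTHxHEIGHT/...'; collect per-itag resolution hints.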
1601 formats_spec = {}
1602 fmt_list = video_info.get('fmt_list', [''])[0]
1603 if fmt_list:
1604 for fmt in fmt_list.split(','):
1605 spec = fmt.split('/')
1606 if len(spec) > 1:
1607 width_height = spec[1].split('x')
1608 if len(width_height) == 2:
1609 formats_spec[spec[0]] = {
1610 'resolution': spec[1],
1611 'width': int_or_none(width_height[0]),
1612 'height': int_or_none(width_height[1]),
1613 }
1614 formats = []
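            # Each comma-separated chunk is itself a URL-encoded query string describing one
            # format (itag, url and optional signature data, size, bitrate, ...).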
1615 for url_data_str in encoded_url_map.split(','):
1616 url_data = compat_parse_qs(url_data_str)
1617 if 'itag' not in url_data or 'url' not in url_data:
1618 continue
1619 format_id = url_data['itag'][0]
1620 url = url_data['url'][0]
1621
1622 if 'sig' in url_data:
1623 url += '&signature=' + url_data['sig'][0]
1624 elif 's' in url_data:
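                    # 's' holds an encrypted signature that has to be decrypted with the player
                    # code before it can be appended to the URL as 'signature'.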
1625 encrypted_sig = url_data['s'][0]
1626 ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
1627
1628 jsplayer_url_json = self._search_regex(
1629 ASSETS_RE,
1630 embed_webpage if age_gate else video_webpage,
1631 'JS player URL (1)', default=None)
1632 if not jsplayer_url_json and not age_gate:
1633                        # We need the embed webpage after all
1634 if embed_webpage is None:
1635 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
1636 embed_webpage = self._download_webpage(
1637 embed_url, video_id, 'Downloading embed webpage')
1638 jsplayer_url_json = self._search_regex(
1639 ASSETS_RE, embed_webpage, 'JS player URL')
1640
1641 player_url = json.loads(jsplayer_url_json)
1642 if player_url is None:
1643 player_url_json = self._search_regex(
1644 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
1645 video_webpage, 'age gate player URL')
1646 player_url = json.loads(player_url_json)
1647
1648 if self._downloader.params.get('verbose'):
1649 if player_url is None:
1650 player_version = 'unknown'
1651 player_desc = 'unknown'
1652 else:
1653 if player_url.endswith('swf'):
1654 player_version = self._search_regex(
1655 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
1656 'flash player', fatal=False)
1657 player_desc = 'flash player %s' % player_version
1658 else:
1659 player_version = self._search_regex(
1660 [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
1661 player_url,
1662 'html5 player', fatal=False)
1663 player_desc = 'html5 player %s' % player_version
1664
1665 parts_sizes = self._signature_cache_id(encrypted_sig)
1666 self.to_screen('{%s} signature length %s, %s' %
1667 (format_id, parts_sizes, player_desc))
1668
1669 signature = self._decrypt_signature(
1670 encrypted_sig, video_id, player_url, age_gate)
1671 url += '&signature=' + signature
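                    # ratebypass=yes reportedly prevents the server from throttling the download
                    # to playback speed.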
1672 if 'ratebypass' not in url:
1673 url += '&ratebypass=yes'
1674
1675 dct = {
1676 'format_id': format_id,
1677 'url': url,
1678 'player_url': player_url,
1679 }
1680 if format_id in self._formats:
1681 dct.update(self._formats[format_id])
1682 if format_id in formats_spec:
1683 dct.update(formats_spec[format_id])
1684
1685                # Some itags are not included in the DASH manifest, so the corresponding formats
1686                # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
1687                # Try to extract the metadata from the url_encoded_fmt_stream_map entry instead.
1688 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
1689 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
1690
1691 more_fields = {
1692 'filesize': int_or_none(url_data.get('clen', [None])[0]),
1693 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
1694 'width': width,
1695 'height': height,
1696 'fps': int_or_none(url_data.get('fps', [None])[0]),
1697 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
1698 }
1699 for key, value in more_fields.items():
1700 if value:
1701 dct[key] = value
1702 type_ = url_data.get('type', [None])[0]
1703 if type_:
1704 type_split = type_.split(';')
1705 kind_ext = type_split[0].split('/')
1706 if len(kind_ext) == 2:
1707 kind, _ = kind_ext
1708 dct['ext'] = mimetype2ext(type_split[0])
1709 if kind in ('audio', 'video'):
1710 codecs = None
1711 for mobj in re.finditer(
1712 r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
1713 if mobj.group('key') == 'codecs':
1714 codecs = mobj.group('val')
1715 break
1716 if codecs:
1717 dct.update(parse_codecs(codecs))
1718 formats.append(dct)
1719 elif video_info.get('hlsvp'):
1720 manifest_url = video_info['hlsvp'][0]
1721 url_map = self._extract_from_m3u8(manifest_url, video_id)
1722 formats = _map_to_format_list(url_map)
1723            # The Accept-Encoding header causes failures in live streams on YouTube and YouTube Gaming
1724 for a_format in formats:
1725 a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
1726 else:
1727 unavailable_message = self._html_search_regex(
1728 r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
1729 video_webpage, 'unavailable message', default=None)
1730 if unavailable_message:
1731 raise ExtractorError(unavailable_message, expected=True)
1732 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
1733
1734 # Look for the DASH manifest
1735 if self._downloader.params.get('youtube_include_dash_manifest', True):
1736 dash_mpd_fatal = True
1737 for mpd_url in dash_mpds:
1738 dash_formats = {}
1739 try:
1740 def decrypt_sig(mobj):
1741 s = mobj.group(1)
1742 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
1743 return '/signature/%s' % dec_s
1744
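                    # DASH manifest URLs may embed an encrypted signature as an /s/<sig> path
                    # component; rewrite it to the decrypted /signature/<sig> form.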
1745 mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
1746
1747 for df in self._extract_mpd_formats(
1748 mpd_url, video_id, fatal=dash_mpd_fatal,
1749 formats_dict=self._formats):
1750                        # Do not overwrite a DASH format found in a previous DASH manifest
1751 if df['format_id'] not in dash_formats:
1752 dash_formats[df['format_id']] = df
1753                        # Additional DASH manifests may result in HTTP Error 403, so allow
1754                        # them to fail without a bug report message if at least one DASH
1755                        # manifest has already succeeded. This is a temporary workaround to
1756                        # reduce the burst of bug reports until we figure out the reason and
1757                        # whether it can be fixed at all.
1758 dash_mpd_fatal = False
1759 except (ExtractorError, KeyError) as e:
1760 self.report_warning(
1761 'Skipping DASH manifest: %r' % e, video_id)
1762 if dash_formats:
1763                # Remove the formats found through non-DASH sources: they
1764                # contain less info, which may also be wrong because we use
1765                # fixed values (for example the resolution). See
1766                # https://github.com/rg3/youtube-dl/issues/5774 for an
1767                # example.
1768 formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
1769 formats.extend(dash_formats.values())
1770
1771 # Check for malformed aspect ratio
1772 stretched_m = re.search(
1773 r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
1774 video_webpage)
1775 if stretched_m:
1776 w = float(stretched_m.group('w'))
1777 h = float(stretched_m.group('h'))
1778            # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM the ratio is 17:0).
1779            # Only process valid ratios.
1780 if w > 0 and h > 0:
1781 ratio = w / h
1782 for f in formats:
1783 if f.get('vcodec') != 'none':
1784 f['stretched_ratio'] = ratio
1785
1786 self._sort_formats(formats)
1787
1788 self.mark_watched(video_id, video_info)
1789
1790 return {
1791 'id': video_id,
1792 'uploader': video_uploader,
1793 'uploader_id': video_uploader_id,
1794 'uploader_url': video_uploader_url,
1795 'upload_date': upload_date,
1796 'license': video_license,
1797 'creator': video_creator,
1798 'title': video_title,
1799 'alt_title': video_alt_title,
1800 'thumbnail': video_thumbnail,
1801 'description': video_description,
1802 'categories': video_categories,
1803 'tags': video_tags,
1804 'subtitles': video_subtitles,
1805 'automatic_captions': automatic_captions,
1806 'duration': video_duration,
1807 'age_limit': 18 if age_gate else 0,
1808 'annotations': video_annotations,
1809 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1810 'view_count': view_count,
1811 'like_count': like_count,
1812 'dislike_count': dislike_count,
1813 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
1814 'formats': formats,
1815 'is_live': is_live,
1816 'start_time': start_time,
1817 'end_time': end_time,
1818 'series': series,
1819 'season_number': season_number,
1820 'episode_number': episode_number,
1821 }
1822
1823
1824 class YoutubeSharedVideoIE(InfoExtractor):
1825 _VALID_URL = r'(?:https?:)?//(?:www\.)?youtube\.com/shared\?.*\bci=(?P<id>[0-9A-Za-z_-]{11})'
1826 IE_NAME = 'youtube:shared'
1827
1828 _TEST = {
1829 'url': 'https://www.youtube.com/shared?ci=1nEzmT-M4fU',
1830 'info_dict': {
1831 'id': 'uPDB5I9wfp8',
1832 'ext': 'webm',
1833 'title': 'Pocoyo: 90 minutos de episĆ³dios completos PortuguĆŖs para crianƧas - PARTE 3',
1834 'description': 'md5:d9e4d9346a2dfff4c7dc4c8cec0f546d',
1835 'upload_date': '20160219',
1836 'uploader': 'Pocoyo - PortuguĆŖs (BR)',
1837 'uploader_id': 'PocoyoBrazil',
1838 },
1839 'add_ie': ['Youtube'],
1840 'params': {
1841 # There are already too many Youtube downloads
1842 'skip_download': True,
1843 },
1844 }
1845
1846 def _real_extract(self, url):
1847 video_id = self._match_id(url)
1848
1849 webpage = self._download_webpage(url, video_id)
1850
1851 real_video_id = self._html_search_meta(
1852 'videoId', webpage, 'YouTube video id', fatal=True)
1853
1854 return self.url_result(real_video_id, YoutubeIE.ie_key())
1855
1856
1857 class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
1858 IE_DESC = 'YouTube.com playlists'
1859 _VALID_URL = r"""(?x)(?:
1860 (?:https?://)?
1861 (?:\w+\.)?
1862 (?:
1863 youtube\.com/
1864 (?:
1865 (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
1866 \? (?:.*?[&;])*? (?:p|a|list)=
1867 | p/
1868 )|
1869 youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
1870 )
1871 (
1872 (?:PL|LL|EC|UU|FL|RD|UL|TL)?[0-9A-Za-z-_]{10,}
1873 # Top tracks, they can also include dots
1874 |(?:MC)[\w\.]*
1875 )
1876 .*
1877 |
1878 (%(playlist_id)s)
1879 )""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
1880 _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s&disable_polymer=true'
1881 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
1882 IE_NAME = 'youtube:playlist'
1883 _TESTS = [{
1884 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1885 'info_dict': {
1886 'title': 'ytdl test PL',
1887 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1888 },
1889 'playlist_count': 3,
1890 }, {
1891 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1892 'info_dict': {
1893 'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1894 'title': 'YDL_Empty_List',
1895 },
1896 'playlist_count': 0,
1897 'skip': 'This playlist is private',
1898 }, {
1899 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
1900 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1901 'info_dict': {
1902 'title': '29C3: Not my department',
1903 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1904 },
1905 'playlist_count': 95,
1906 }, {
1907 'note': 'issue #673',
1908 'url': 'PLBB231211A4F62143',
1909 'info_dict': {
1910 'title': '[OLD]Team Fortress 2 (Class-based LP)',
1911 'id': 'PLBB231211A4F62143',
1912 },
1913 'playlist_mincount': 26,
1914 }, {
1915 'note': 'Large playlist',
1916 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
1917 'info_dict': {
1918 'title': 'Uploads from Cauchemar',
1919 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
1920 },
1921 'playlist_mincount': 799,
1922 }, {
1923 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1924 'info_dict': {
1925 'title': 'YDL_safe_search',
1926 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1927 },
1928 'playlist_count': 2,
1929 'skip': 'This playlist is private',
1930 }, {
1931 'note': 'embedded',
1932 'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1933 'playlist_count': 4,
1934 'info_dict': {
1935 'title': 'JODA15',
1936 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1937 }
1938 }, {
1939 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
1940 'playlist_mincount': 485,
1941 'info_dict': {
1942 'title': '2017 čÆčŖžęœ€ę–°å–®ę›² (2/24ꛓꖰ)',
1943 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
1944 }
1945 }, {
1946 'note': 'Embedded SWF player',
1947 'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
1948 'playlist_count': 4,
1949 'info_dict': {
1950 'title': 'JODA7',
1951 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
1952 }
1953 }, {
1954 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
1955 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
1956 'info_dict': {
1957 'title': 'Uploads from Interstellar Movie',
1958 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
1959 },
1960 'playlist_mincount': 21,
1961 }, {
1962 # Playlist URL that does not actually serve a playlist
1963 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
1964 'info_dict': {
1965 'id': 'FqZTN594JQw',
1966 'ext': 'webm',
1967 'title': "Smiley's People 01 detective, Adventure Series, Action",
1968 'uploader': 'STREEM',
1969 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
1970 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
1971 'upload_date': '20150526',
1972 'license': 'Standard YouTube License',
1973 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
1974 'categories': ['People & Blogs'],
1975 'tags': list,
1976 'like_count': int,
1977 'dislike_count': int,
1978 },
1979 'params': {
1980 'skip_download': True,
1981 },
1982 'add_ie': [YoutubeIE.ie_key()],
1983 }, {
1984 'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
1985 'info_dict': {
1986 'id': 'yeWKywCrFtk',
1987 'ext': 'mp4',
1988 'title': 'Small Scale Baler and Braiding Rugs',
1989 'uploader': 'Backus-Page House Museum',
1990 'uploader_id': 'backuspagemuseum',
1991 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
1992 'upload_date': '20161008',
1993 'license': 'Standard YouTube License',
1994 'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
1995 'categories': ['Nonprofits & Activism'],
1996 'tags': list,
1997 'like_count': int,
1998 'dislike_count': int,
1999 },
2000 'params': {
2001 'noplaylist': True,
2002 'skip_download': True,
2003 },
2004 }, {
2005 'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
2006 'only_matching': True,
2007 }, {
2008 'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
2009 'only_matching': True,
2010 }]
2011
2012 def _real_initialize(self):
2013 self._login()
2014
2015 def _extract_mix(self, playlist_id):
2016        # Mixes are generated from a single video;
2017        # the playlist id is just 'RD' + video_id
2018 ids = []
2019 last_id = playlist_id[-11:]
2020 for n in itertools.count(1):
2021 url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
2022 webpage = self._download_webpage(
2023 url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
2024 new_ids = orderedSet(re.findall(
2025 r'''(?xs)data-video-username=".*?".*?
2026 href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
2027 webpage))
2028            # Fetch new pages until all the videos start repeating; it seems that
2029            # there are always 51 unique videos.
2030 new_ids = [_id for _id in new_ids if _id not in ids]
2031 if not new_ids:
2032 break
2033 ids.extend(new_ids)
2034 last_id = ids[-1]
2035
2036 url_results = self._ids_to_results(ids)
2037
2038 search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
2039 title_span = (
2040 search_title('playlist-title') or
2041 search_title('title long-title') or
2042 search_title('title'))
2043 title = clean_html(title_span)
2044
2045 return self.playlist_result(url_results, playlist_id, title)
2046
2047 def _extract_playlist(self, playlist_id):
2048 url = self._TEMPLATE_URL % playlist_id
2049 page = self._download_webpage(url, playlist_id)
2050
2051        # the yt-alert-message now has a tabindex attribute (see https://github.com/rg3/youtube-dl/issues/11604)
2052 for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
2053 match = match.strip()
2054 # Check if the playlist exists or is private
2055 mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
2056 if mobj:
2057 reason = mobj.group('reason')
2058 message = 'This playlist %s' % reason
2059 if 'private' in reason:
2060 message += ', use --username or --netrc to access it'
2061 message += '.'
2062 raise ExtractorError(message, expected=True)
2063 elif re.match(r'[^<]*Invalid parameters[^<]*', match):
2064 raise ExtractorError(
2065 'Invalid parameters. Maybe URL is incorrect.',
2066 expected=True)
2067 elif re.match(r'[^<]*Choose your language[^<]*', match):
2068 continue
2069 else:
2070 self.report_warning('Youtube gives an alert message: ' + match)
2071
2072 playlist_title = self._html_search_regex(
2073 r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
2074 page, 'title', default=None)
2075
2076 has_videos = True
2077
2078 if not playlist_title:
2079 try:
2080 # Some playlist URLs don't actually serve a playlist (e.g.
2081 # https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
2082 next(self._entries(page, playlist_id))
2083 except StopIteration:
2084 has_videos = False
2085
2086 return has_videos, self.playlist_result(
2087 self._entries(page, playlist_id), playlist_id, playlist_title)
2088
2089 def _check_download_just_video(self, url, playlist_id):
2090 # Check if it's a video-specific URL
2091 query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
2092 video_id = query_dict.get('v', [None])[0] or self._search_regex(
2093 r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
2094 'video id', default=None)
2095 if video_id:
2096 if self._downloader.params.get('noplaylist'):
2097 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
2098 return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
2099 else:
2100 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
2101 return video_id, None
2102 return None, None
2103
2104 def _real_extract(self, url):
2105 # Extract playlist id
2106 mobj = re.match(self._VALID_URL, url)
2107 if mobj is None:
2108 raise ExtractorError('Invalid URL: %s' % url)
2109 playlist_id = mobj.group(1) or mobj.group(2)
2110
2111 video_id, video = self._check_download_just_video(url, playlist_id)
2112 if video:
2113 return video
2114
2115 if playlist_id.startswith(('RD', 'UL', 'PU')):
2116 # Mixes require a custom extraction process
2117 return self._extract_mix(playlist_id)
2118
2119 has_videos, playlist = self._extract_playlist(playlist_id)
2120 if has_videos or not video_id:
2121 return playlist
2122
2123        # Some playlist URLs don't actually serve a playlist (see
2124        # https://github.com/rg3/youtube-dl/issues/10537).
2125        # Fall back to plain video extraction if there is a video id
2126        # along with the playlist id.
2127 return self.url_result(video_id, 'Youtube', video_id=video_id)
2128
2129
2130 class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
2131 IE_DESC = 'YouTube.com channels'
2132 _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
2133 _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
2134 _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
2135 IE_NAME = 'youtube:channel'
2136 _TESTS = [{
2137 'note': 'paginated channel',
2138 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
2139 'playlist_mincount': 91,
2140 'info_dict': {
2141 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
2142 'title': 'Uploads from lex will',
2143 }
2144 }, {
2145 'note': 'Age restricted channel',
2146 # from https://www.youtube.com/user/DeusExOfficial
2147 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
2148 'playlist_mincount': 64,
2149 'info_dict': {
2150 'id': 'UUs0ifCMCm1icqRbqhUINa0w',
2151 'title': 'Uploads from Deus Ex',
2152 },
2153 }]
2154
2155 @classmethod
2156 def suitable(cls, url):
2157 return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
2158 else super(YoutubeChannelIE, cls).suitable(url))
2159
2160 def _build_template_url(self, url, channel_id):
2161 return self._TEMPLATE_URL % channel_id
2162
2163 def _real_extract(self, url):
2164 channel_id = self._match_id(url)
2165
2166 url = self._build_template_url(url, channel_id)
2167
2168        # Channel listing by page is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778).
2169        # Work around this by extracting as a playlist if we manage to obtain the channel playlist URL,
2170        # otherwise fall back to channel-by-page extraction.
2171 channel_page = self._download_webpage(
2172 url + '?view=57', channel_id,
2173 'Downloading channel page', fatal=False)
2174 if channel_page is False:
2175 channel_playlist_id = False
2176 else:
2177 channel_playlist_id = self._html_search_meta(
2178 'channelId', channel_page, 'channel id', default=None)
2179 if not channel_playlist_id:
2180 channel_url = self._html_search_meta(
2181 ('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
2182 channel_page, 'channel url', default=None)
2183 if channel_url:
2184 channel_playlist_id = self._search_regex(
2185 r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
2186 channel_url, 'channel id', default=None)
2187 if channel_playlist_id and channel_playlist_id.startswith('UC'):
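            # A channel id of the form UCxxxx has a corresponding uploads playlist UUxxxx;
            # prefer extracting that playlist.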
2188 playlist_id = 'UU' + channel_playlist_id[2:]
2189 return self.url_result(
2190 compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
2191
2192 channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
2193 autogenerated = re.search(r'''(?x)
2194 class="[^"]*?(?:
2195 channel-header-autogenerated-label|
2196 yt-channel-title-autogenerated
2197 )[^"]*"''', channel_page) is not None
2198
2199 if autogenerated:
2200            # The videos are contained in a single page;
2201            # the ajax pages can't be used since they are empty
2202 entries = [
2203 self.url_result(
2204 video_id, 'Youtube', video_id=video_id,
2205 video_title=video_title)
2206 for video_id, video_title in self.extract_videos_from_page(channel_page)]
2207 return self.playlist_result(entries, channel_id)
2208
2209 try:
2210 next(self._entries(channel_page, channel_id))
2211 except StopIteration:
2212 alert_message = self._html_search_regex(
2213 r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
2214 channel_page, 'alert', default=None, group='alert')
2215 if alert_message:
2216 raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
2217
2218 return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
2219
2220
2221 class YoutubeUserIE(YoutubeChannelIE):
2222 IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
2223 _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
2224 _TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
2225 IE_NAME = 'youtube:user'
2226
2227 _TESTS = [{
2228 'url': 'https://www.youtube.com/user/TheLinuxFoundation',
2229 'playlist_mincount': 320,
2230 'info_dict': {
2231 'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
2232 'title': 'Uploads from The Linux Foundation',
2233 }
2234 }, {
2235 # Only available via https://www.youtube.com/c/12minuteathlete/videos
2236 # but not https://www.youtube.com/user/12minuteathlete/videos
2237 'url': 'https://www.youtube.com/c/12minuteathlete/videos',
2238 'playlist_mincount': 249,
2239 'info_dict': {
2240 'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
2241 'title': 'Uploads from 12 Minute Athlete',
2242 }
2243 }, {
2244 'url': 'ytuser:phihag',
2245 'only_matching': True,
2246 }, {
2247 'url': 'https://www.youtube.com/c/gametrailers',
2248 'only_matching': True,
2249 }, {
2250 'url': 'https://www.youtube.com/gametrailers',
2251 'only_matching': True,
2252 }, {
2253 # This channel is not available, geo restricted to JP
2254 'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
2255 'only_matching': True,
2256 }]
2257
2258 @classmethod
2259 def suitable(cls, url):
2260        # Don't return True if the URL can be extracted by another YouTube
2261        # extractor; this regex is too permissive and would match otherwise.
2262 other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
2263 if any(ie.suitable(url) for ie in other_yt_ies):
2264 return False
2265 else:
2266 return super(YoutubeUserIE, cls).suitable(url)
2267
2268 def _build_template_url(self, url, channel_id):
2269 mobj = re.match(self._VALID_URL, url)
2270 return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
2271
2272
2273 class YoutubeLiveIE(YoutubeBaseInfoExtractor):
2274 IE_DESC = 'YouTube.com live streams'
2275 _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
2276 IE_NAME = 'youtube:live'
2277
2278 _TESTS = [{
2279 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
2280 'info_dict': {
2281 'id': 'a48o2S1cPoo',
2282 'ext': 'mp4',
2283 'title': 'The Young Turks - Live Main Show',
2284 'uploader': 'The Young Turks',
2285 'uploader_id': 'TheYoungTurks',
2286 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
2287 'upload_date': '20150715',
2288 'license': 'Standard YouTube License',
2289 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
2290 'categories': ['News & Politics'],
2291 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
2292 'like_count': int,
2293 'dislike_count': int,
2294 },
2295 'params': {
2296 'skip_download': True,
2297 },
2298 }, {
2299 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
2300 'only_matching': True,
2301 }, {
2302 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
2303 'only_matching': True,
2304 }, {
2305 'url': 'https://www.youtube.com/TheYoungTurks/live',
2306 'only_matching': True,
2307 }]
2308
2309 def _real_extract(self, url):
2310 mobj = re.match(self._VALID_URL, url)
2311 channel_id = mobj.group('id')
2312 base_url = mobj.group('base_url')
2313 webpage = self._download_webpage(url, channel_id, fatal=False)
2314 if webpage:
2315 page_type = self._og_search_property(
2316 'type', webpage, 'page type', default=None)
2317 video_id = self._html_search_meta(
2318 'videoId', webpage, 'video id', default=None)
2319 if page_type == 'video' and video_id and re.match(r'^[0-9A-Za-z_-]{11}$', video_id):
2320 return self.url_result(video_id, YoutubeIE.ie_key())
2321 return self.url_result(base_url)
2322
2323
2324 class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
2325 IE_DESC = 'YouTube.com user/channel playlists'
2326 _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
2327 IE_NAME = 'youtube:playlists'
2328
2329 _TESTS = [{
2330 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
2331 'playlist_mincount': 4,
2332 'info_dict': {
2333 'id': 'ThirstForScience',
2334 'title': 'Thirst for Science',
2335 },
2336 }, {
2337 # with "Load more" button
2338 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
2339 'playlist_mincount': 70,
2340 'info_dict': {
2341 'id': 'igorkle1',
2342 'title': 'Š˜Š³Š¾Ń€ŃŒ ŠšŠ»ŠµŠ¹Š½ŠµŃ€',
2343 },
2344 }, {
2345 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
2346 'playlist_mincount': 17,
2347 'info_dict': {
2348 'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
2349 'title': 'Chem Player',
2350 },
2351 }]
2352
2353
2354 class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
2355 IE_DESC = 'YouTube.com searches'
2356    # There doesn't appear to be a real limit; for example, if you search for
2357    # 'python' you get more than 8,000,000 results
2358 _MAX_RESULTS = float('inf')
2359 IE_NAME = 'youtube:search'
2360 _SEARCH_KEY = 'ytsearch'
2361 _EXTRA_QUERY_ARGS = {}
2362 _TESTS = []
2363
2364 def _get_n_results(self, query, n):
2365 """Get a specified number of results for a query"""
2366
2367 videos = []
2368 limit = n
2369
2370 url_query = {
2371 'search_query': query.encode('utf-8'),
2372 }
2373 url_query.update(self._EXTRA_QUERY_ARGS)
2374 result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
2375
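        # With spf=navigate the results page is returned as JSON; the rendered HTML
        # lives under data[1]['body']['content'].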
2376 for pagenum in itertools.count(1):
2377 data = self._download_json(
2378 result_url, video_id='query "%s"' % query,
2379 note='Downloading page %s' % pagenum,
2380 errnote='Unable to download API page',
2381 query={'spf': 'navigate'})
2382 html_content = data[1]['body']['content']
2383
2384 if 'class="search-message' in html_content:
2385 raise ExtractorError(
2386 '[youtube] No video results', expected=True)
2387
2388 new_videos = self._ids_to_results(orderedSet(re.findall(
2389 r'href="/watch\?v=(.{11})', html_content)))
2390 videos += new_videos
2391 if not new_videos or len(videos) > limit:
2392 break
2393 next_link = self._html_search_regex(
2394 r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
2395 html_content, 'next link', default=None)
2396 if next_link is None:
2397 break
2398 result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
2399
2400 if len(videos) > n:
2401 videos = videos[:n]
2402 return self.playlist_result(videos, query)
2403
2404
2405 class YoutubeSearchDateIE(YoutubeSearchIE):
2406 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
2407 _SEARCH_KEY = 'ytsearchdate'
2408 IE_DESC = 'YouTube.com searches, newest videos first'
2409 _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
2410
2411
2412 class YoutubeSearchURLIE(YoutubePlaylistBaseInfoExtractor):
2413 IE_DESC = 'YouTube.com search URLs'
2414 IE_NAME = 'youtube:search_url'
2415 _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
2416 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
2417 _TESTS = [{
2418 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
2419 'playlist_mincount': 5,
2420 'info_dict': {
2421 'title': 'youtube-dl test video',
2422 }
2423 }, {
2424 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
2425 'only_matching': True,
2426 }]
2427
2428 def _real_extract(self, url):
2429 mobj = re.match(self._VALID_URL, url)
2430 query = compat_urllib_parse_unquote_plus(mobj.group('query'))
2431 webpage = self._download_webpage(url, query)
2432 return self.playlist_result(self._process_page(webpage), playlist_title=query)
2433
2434
2435 class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
2436 IE_DESC = 'YouTube.com (multi-season) shows'
2437 _VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
2438 IE_NAME = 'youtube:show'
2439 _TESTS = [{
2440 'url': 'https://www.youtube.com/show/airdisasters',
2441 'playlist_mincount': 5,
2442 'info_dict': {
2443 'id': 'airdisasters',
2444 'title': 'Air Disasters',
2445 }
2446 }]
2447
2448 def _real_extract(self, url):
2449 playlist_id = self._match_id(url)
2450 return super(YoutubeShowIE, self)._real_extract(
2451 'https://www.youtube.com/show/%s/playlists' % playlist_id)
2452
2453
2454 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
2455 """
2456 Base class for feed extractors
2457 Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
2458 """
2459 _LOGIN_REQUIRED = True
2460
2461 @property
2462 def IE_NAME(self):
2463 return 'youtube:%s' % self._FEED_NAME
2464
2465 def _real_initialize(self):
2466 self._login()
2467
2468 def _real_extract(self, url):
2469 page = self._download_webpage(
2470 'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
2471
2472 # The extraction process is the same as for playlists, but the regex
2473 # for the video ids doesn't contain an index
2474 ids = []
2475 more_widget_html = content_html = page
2476 for page_num in itertools.count(1):
2477 matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
2478
2479            # The 'recommended' feed has an infinite 'load more' and each new portion returns
2480            # the same videos in a (sometimes) slightly different order, so we check
2481            # for uniqueness and break when a portion has no new videos
2482 new_ids = filter(lambda video_id: video_id not in ids, orderedSet(matches))
2483 if not new_ids:
2484 break
2485
2486 ids.extend(new_ids)
2487
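            # Feed pages expose a 'load more' widget; its data-uix-load-more-href attribute
            # points to the next AJAX portion.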
2488 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
2489 if not mobj:
2490 break
2491
2492 more = self._download_json(
2493 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
2494 'Downloading page #%s' % page_num,
2495 transform_source=uppercase_escape)
2496 content_html = more['content_html']
2497 more_widget_html = more['load_more_widget_html']
2498
2499 return self.playlist_result(
2500 self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
2501
2502
2503 class YoutubeWatchLaterIE(YoutubePlaylistIE):
2504 IE_NAME = 'youtube:watchlater'
2505 IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
2506 _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
2507
2508 _TESTS = [{
2509 'url': 'https://www.youtube.com/playlist?list=WL',
2510 'only_matching': True,
2511 }, {
2512 'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
2513 'only_matching': True,
2514 }]
2515
2516 def _real_extract(self, url):
2517 _, video = self._check_download_just_video(url, 'WL')
2518 if video:
2519 return video
2520 _, playlist = self._extract_playlist('WL')
2521 return playlist
2522
2523
2524 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
2525 IE_NAME = 'youtube:favorites'
2526 IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
2527 _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
2528 _LOGIN_REQUIRED = True
2529
2530 def _real_extract(self, url):
2531 webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
2532 playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
2533 return self.url_result(playlist_id, 'YoutubePlaylist')
2534
2535
2536 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
2537 IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
2538 _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
2539 _FEED_NAME = 'recommended'
2540 _PLAYLIST_TITLE = 'Youtube Recommended videos'
2541
2542
2543 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
2544 IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
2545 _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
2546 _FEED_NAME = 'subscriptions'
2547 _PLAYLIST_TITLE = 'Youtube Subscriptions'
2548
2549
2550 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
2551 IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
2552 _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
2553 _FEED_NAME = 'history'
2554 _PLAYLIST_TITLE = 'Youtube History'
2555
2556
2557 class YoutubeTruncatedURLIE(InfoExtractor):
2558 IE_NAME = 'youtube:truncated_url'
2559 IE_DESC = False # Do not list
2560 _VALID_URL = r'''(?x)
2561 (?:https?://)?
2562 (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
2563 (?:watch\?(?:
2564 feature=[a-z_]+|
2565 annotation_id=annotation_[^&]+|
2566 x-yt-cl=[0-9]+|
2567 hl=[^&]*|
2568 t=[0-9]+
2569 )?
2570 |
2571 attribution_link\?a=[^&]+
2572 )
2573 $
2574 '''
2575
2576 _TESTS = [{
2577 'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
2578 'only_matching': True,
2579 }, {
2580 'url': 'https://www.youtube.com/watch?',
2581 'only_matching': True,
2582 }, {
2583 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
2584 'only_matching': True,
2585 }, {
2586 'url': 'https://www.youtube.com/watch?feature=foo',
2587 'only_matching': True,
2588 }, {
2589 'url': 'https://www.youtube.com/watch?hl=en-GB',
2590 'only_matching': True,
2591 }, {
2592 'url': 'https://www.youtube.com/watch?t=2372',
2593 'only_matching': True,
2594 }]
2595
2596 def _real_extract(self, url):
2597 raise ExtractorError(
2598 'Did you forget to quote the URL? Remember that & is a meta '
2599 'character in most shells, so you want to put the URL in quotes, '
2600 'like youtube-dl '
2601 '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
2602 ' or simply youtube-dl BaW_jenozKc .',
2603 expected=True)
2604
2605
2606 class YoutubeTruncatedIDIE(InfoExtractor):
2607 IE_NAME = 'youtube:truncated_id'
2608 IE_DESC = False # Do not list
2609 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
2610
2611 _TESTS = [{
2612 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
2613 'only_matching': True,
2614 }]
2615
2616 def _real_extract(self, url):
2617 video_id = self._match_id(url)
2618 raise ExtractorError(
2619 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
2620 expected=True)