import itertools
import json
import re
import socket

from .common import InfoExtractor, SearchInfoExtractor
from .utils import (
    compat_http_client,
    compat_parse_qs,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,

    clean_html,
    get_element_by_id,
    ExtractorError,
    unescapeHTML,
    unified_strdate,
    orderedSet,
)


class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NETRC_MACHINE = 'youtube'
    # If True, it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    def report_lang(self):
        """Report attempt to set language."""
        self.to_screen(u'Setting language')

    def _set_language(self):
        request = compat_urllib_request.Request(self._LANG_URL)
        try:
            self.report_lang()
            compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
            return False
        return True

    def _login(self):
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError(u'No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return False

        request = compat_urllib_request.Request(self._LOGIN_URL)
        try:
            login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
            return False

        galx = None
        dsh = None
        match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
        if match:
            galx = match.group(1)
        match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
        if match:
            dsh = match.group(1)

        login_form_strs = {
                u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
                u'PersistentCookie': u'yes',
                u'bgresponse': u'js_disabled',
                u'checkConnection': u'',
                u'checkedDomains': u'youtube',
                # (the username/password fields and the GALX/dsh tokens extracted above are sent here as well)
                u'signIn': u'Sign in',
                u'service': u'youtube',
        }
        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # cannot handle unicode input.
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
        request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
        try:
            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
                self._downloader.report_warning(u'unable to log in: bad username or password')
                return False
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
            return False
        return True

    def _confirm_age(self):
        age_form = {
                'next_url':         '/',
                'action_confirm':   'Confirm',
                }
        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
        try:
            self.report_age_confirmation()
            compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
        return True

    def _real_initialize(self):
        if self._downloader is None:
            return
        if not self._set_language():
            return
        if not self._login():
            return
        self._confirm_age()


class YoutubeIE(YoutubeBaseInfoExtractor):
    IE_DESC = u'YouTube.com'
    _VALID_URL = r"""^
                     (
                         (?:https?://)?                                       # http(s):// (optional)
                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
                            tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                         (?:                                                  # the various things that can precede the ID:
                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
                             |(?:                                             # or the v= param in all its forms
                                 (?:watch|movie(?:_popup)?(?:\.php)?)?        # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         )?                                                   # optional -> youtube.com/xxxx is OK
                     )?                                                       # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]+)                                         # here it is! the YouTube video ID
                     (?(1).+)?                                                # if we found the ID, everything can follow
                     $"""
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    # Listed in order of quality
    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13',
                          '95', '94', '93', '92', '132', '151',
                          '85', '84', '102', '83', '101', '82', '100',
                          ]
    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13',
                                      '95', '94', '93', '92', '132', '151',
                                      '85', '102', '84', '101', '83', '100', '82',
                                      ]
    _video_extensions = {
        # (itag -> file extension mappings)
        # ...
        # videos that use m3u8
        # ...
    }
    _video_dimensions = {
        # (itag -> displayed resolution strings)
        # ...
    }
    _3d_itags = ['85', '84', '102', '83', '101', '82', '100']
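    # The two lists above are walked best-first by _get_video_url_list below; the
    # "prefer_free" variant appears to rank the free (WebM) itags ahead of the
    # comparable MP4/FLV ones at each quality tier, and _3d_itags only affects
    # how a chosen format is labelled (" (3D)" suffix).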
    _TESTS = [
        {
            u"url":  u"http://www.youtube.com/watch?v=BaW_jenozKc",
            u"file":  u"BaW_jenozKc.mp4",
            u"info_dict": {
                u"title": u"youtube-dl test video \"'/\\ä↭𝕐",
                u"uploader": u"Philipp Hagemeister",
                u"uploader_id": u"phihag",
                u"upload_date": u"20121002",
                u"description": u"test chars:  \"'/\\ä↭𝕐\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
            }
        },
        {
            u"url":  u"http://www.youtube.com/watch?v=1ltcDfZMA3U",
            u"file":  u"1ltcDfZMA3U.flv",
            u"note": u"Test VEVO video (#897)",
            u"info_dict": {
                u"upload_date": u"20070518",
                u"title": u"Maps - It Will Find You",
                u"description": u"Music video by Maps performing It Will Find You.",
                u"uploader": u"MuteUSA",
                u"uploader_id": u"MuteUSA"
            }
        },
        {
            u"url":  u"http://www.youtube.com/watch?v=UxxajLWwzqY",
            u"file":  u"UxxajLWwzqY.mp4",
            u"note": u"Test generic use_cipher_signature video (#897)",
            u"info_dict": {
                u"upload_date": u"20120506",
                u"title": u"Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]",
                u"description": u"md5:b085c9804f5ab69f4adea963a2dceb3c",
                u"uploader": u"IconaPop",
                u"uploader_id": u"IconaPop"
            }
        },
        {
            u"url":  u"https://www.youtube.com/watch?v=07FYdnEawAQ",
            u"file":  u"07FYdnEawAQ.mp4",
            u"note": u"Test VEVO video with age protection (#956)",
            u"info_dict": {
                u"upload_date": u"20130703",
                u"title": u"Justin Timberlake - Tunnel Vision (Explicit)",
                u"description": u"md5:64249768eec3bc4276236606ea996373",
                u"uploader": u"justintimberlakeVEVO",
                u"uploader_id": u"justintimberlakeVEVO"
            }
        },
        {
            u'url': u'https://www.youtube.com/watch?v=TGi3HqYrWHE',
            u'file': u'TGi3HqYrWHE.mp4',
            u'note': u'm3u8 video',
            u'info_dict': {
                u'title': u'Triathlon - Men - London 2012 Olympic Games',
                u'description': u'- Men -  TR02 - Triathlon - 07 August 2012 - London 2012 Olympic Games',
                u'uploader': u'olympic',
                u'upload_date': u'20120807',
                u'uploader_id': u'olympic',
            },
            u'params': {
                u'skip_download': True,
            },
        },
    ]

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        if YoutubePlaylistIE.suitable(url) or YoutubeSubscriptionsIE.suitable(url):
            return False
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
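    # Note on suitable(): the permissive _VALID_URL above would also match
    # playlist and subscription-feed URLs, so those more specific extractors are
    # given the first chance to claim a URL before this one accepts it.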

    def report_video_webpage_download(self, video_id):
        """Report attempt to download video webpage."""
        self.to_screen(u'%s: Downloading video webpage' % video_id)

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self.to_screen(u'%s: Downloading video info webpage' % video_id)

    def report_video_subtitles_download(self, video_id):
        """Report attempt to check for available video subtitles."""
        self.to_screen(u'%s: Checking available subtitles' % video_id)

    def report_video_subtitles_request(self, video_id, sub_lang, format):
        """Report attempt to download video subtitles in a given language and format."""
        self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))

    def report_video_subtitles_available(self, video_id, sub_lang_list):
        """Report available subtitles."""
        sub_lang = ",".join(list(sub_lang_list.keys()))
        self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang))

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self.to_screen(u'%s: Extracting video information' % video_id)

    def report_unavailable_format(self, video_id, format):
        """Report that the requested format is not available."""
        self.to_screen(u'%s: Format %s not available' % (video_id, format))

    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self.to_screen(u'RTMP download detected')

    def _decrypt_signature(self, s):
        """Turn the encrypted s field into a working signature"""

        if len(s) == 92:
            return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + s[80:83]
        elif len(s) == 90:
            return s[25] + s[3:25] + s[2] + s[26:40] + s[77] + s[41:77] + s[89] + s[78:81]
        elif len(s) == 89:
            return s[84:78:-1] + s[87] + s[77:60:-1] + s[0] + s[59:3:-1]
        elif len(s) == 88:
            return s[48] + s[81:67:-1] + s[82] + s[66:62:-1] + s[85] + s[61:48:-1] + s[67] + s[47:12:-1] + s[3] + s[11:3:-1] + s[2] + s[12]
        elif len(s) == 87:
            return s[6:27] + s[4] + s[28:39] + s[27] + s[40:59] + s[2] + s[60:]
        elif len(s) == 86:
            return s[5:20] + s[2] + s[21:]
        elif len(s) == 85:
            return s[83:34:-1] + s[0] + s[33:27:-1] + s[3] + s[26:19:-1] + s[34] + s[18:3:-1] + s[27]
        elif len(s) == 84:
            return s[83:27:-1] + s[0] + s[26:5:-1] + s[2:0:-1] + s[27]
        elif len(s) == 83:
            return s[81:64:-1] + s[82] + s[63:52:-1] + s[45] + s[51:45:-1] + s[1] + s[44:1:-1] + s[0]
        elif len(s) == 82:
            return s[36] + s[79:67:-1] + s[81] + s[66:40:-1] + s[33] + s[39:36:-1] + s[40] + s[35] + s[0] + s[67] + s[32:0:-1] + s[34]
        elif len(s) == 81:
            return s[56] + s[79:56:-1] + s[41] + s[55:41:-1] + s[80] + s[40:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
        elif len(s) == 79:
            return s[54] + s[77:54:-1] + s[39] + s[53:39:-1] + s[78] + s[38:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
        else:
            raise ExtractorError(u'Unable to decrypt signature, key length %d not supported; retrying might work' % (len(s)))
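    # How _decrypt_signature works: for every signature length that the player
    # has been seen to emit, the characters of the encrypted "s" field are
    # rearranged by a fixed permutation expressed with string slicing. As a
    # purely illustrative (hypothetical) example, a 4-character scramble could
    # look like:
    #     s = "abcd"
    #     s[3] + s[1:3] + s[0]  ->  "dbca"
    # The permutations above were reverse-engineered from the player and have to
    # be updated whenever YouTube changes them, hence the "retrying might work"
    # hint in the error message.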

    def _decrypt_signature_age_gate(self, s):
        # The videos with age protection use another player, so the algorithms
        # can be different.
        if len(s) == 86:
            return s[2:63] + s[82] + s[64:82] + s[63]
        else:
            # Fallback to the other algorithms
            return self._decrypt_signature(s)

    def _get_available_subtitles(self, video_id):
        self.report_video_subtitles_download(video_id)
        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
        try:
            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            return (u'unable to download video subtitles: %s' % compat_str(err), None)
        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
        if not sub_lang_list:
            return (u'video doesn\'t have subtitles', None)
        return sub_lang_list

    def _list_available_subtitles(self, video_id):
        sub_lang_list = self._get_available_subtitles(video_id)
        self.report_video_subtitles_available(video_id, sub_lang_list)

    def _request_subtitle(self, sub_lang, sub_name, video_id, format):
        """
        Return tuple:
        (error_message, sub_lang, sub)
        """
        self.report_video_subtitles_request(video_id, sub_lang, format)
        params = compat_urllib_parse.urlencode({
            'lang': sub_lang,
            'name': sub_name,
            'v': video_id,
            'fmt': format,
        })
        url = 'http://www.youtube.com/api/timedtext?' + params
        try:
            sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
        if not sub:
            return (u'Did not fetch video subtitles', None, None)
        return (None, sub_lang, sub)

    def _request_automatic_caption(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
           argument to speed up the process."""
        sub_lang = self._downloader.params.get('subtitleslang') or 'en'
        sub_format = self._downloader.params.get('subtitlesformat')
        self.to_screen(u'%s: Looking for automatic captions' % video_id)
        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
        err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang
        if mobj is None:
            return [(err_msg, None, None)]
        player_config = json.loads(mobj.group(1))
        try:
            args = player_config[u'args']
            caption_url = args[u'ttsurl']
            timestamp = args[u'timestamp']
            params = compat_urllib_parse.urlencode({
                'lang': 'en',
                'tlang': sub_lang,
                'fmt': sub_format,
                'ts': timestamp,
                'kind': 'asr',
            })
            subtitles_url = caption_url + '&' + params
            sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions')
            return [(None, sub_lang, sub)]
        except KeyError:
            return [(err_msg, None, None)]

    def _extract_subtitle(self, video_id):
        """
        Return a list with a tuple:
        [(error_message, sub_lang, sub)]
        """
        sub_lang_list = self._get_available_subtitles(video_id)
        sub_format = self._downloader.params.get('subtitlesformat')
        if isinstance(sub_lang_list, tuple):  # There was some error, it didn't get the available subtitles
            return [(sub_lang_list[0], None, None)]
        if self._downloader.params.get('subtitleslang', False):
            sub_lang = self._downloader.params.get('subtitleslang')
        elif 'en' in sub_lang_list:
            sub_lang = 'en'
        else:
            sub_lang = list(sub_lang_list.keys())[0]
        if sub_lang not in sub_lang_list:
            return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]

        subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
        return [subtitle]

    def _extract_all_subtitles(self, video_id):
        sub_lang_list = self._get_available_subtitles(video_id)
        sub_format = self._downloader.params.get('subtitlesformat')
        if isinstance(sub_lang_list, tuple):  # There was some error, it didn't get the available subtitles
            return [(sub_lang_list[0], None, None)]
        subtitles = []
        for sub_lang in sub_lang_list:
            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
            subtitles.append(subtitle)
        return subtitles

    def _print_formats(self, formats):
        print('Available formats:')
        for x in formats:
            print('%s\t:\t%s\t[%s]%s' % (x, self._video_extensions.get(x, 'flv'),
                                         self._video_dimensions.get(x, '???'),
                                         ' (3D)' if x in self._3d_itags else ''))

    def _extract_id(self, url):
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group(2)
        return video_id

    def _get_video_url_list(self, url_map):
        """
        Transform a dictionary in the format {itag:url} to a list of (itag, url)
        with the requested formats.
        """
        req_format = self._downloader.params.get('format', None)
        format_limit = self._downloader.params.get('format_limit', None)
        available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
        if format_limit is not None and format_limit in available_formats:
            format_list = available_formats[available_formats.index(format_limit):]
        else:
            format_list = available_formats
        existing_formats = [x for x in format_list if x in url_map]
        if len(existing_formats) == 0:
            raise ExtractorError(u'no known formats available for video')
        if self._downloader.params.get('listformats', None):
            self._print_formats(existing_formats)
            return
        if req_format is None or req_format == 'best':
            video_url_list = [(existing_formats[0], url_map[existing_formats[0]])]  # Best quality
        elif req_format == 'worst':
            video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])]  # worst quality
        elif req_format in ('-1', 'all'):
            video_url_list = [(f, url_map[f]) for f in existing_formats]  # All formats
        else:
            # Specific formats. We pick the first in a slash-delimited sequence.
            # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
            req_formats = req_format.split('/')
            video_url_list = None
            for rf in req_formats:
                if rf in url_map:
                    video_url_list = [(rf, url_map[rf])]
                    break
            if video_url_list is None:
                raise ExtractorError(u'requested format not available')
        return video_url_list
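    # Example of the selection above: with --format '22/35/18' the first itag in
    # that slash-separated list that is actually present in url_map wins, so if
    # only 35 and 18 are offered the 35 stream is chosen; 'best', 'worst' and
    # 'all' instead pick from the quality-ordered _available_formats list
    # restricted to what url_map contains.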
 
    def _extract_from_m3u8(self, manifest_url, video_id):
        url_map = {}

        def _get_urls(_manifest):
            lines = _manifest.split('\n')
            urls = filter(lambda l: l and not l.startswith('#'),
                          lines)
            return urls

        manifest = self._download_webpage(manifest_url, video_id, u'Downloading formats manifest')
        formats_urls = _get_urls(manifest)
        for format_url in formats_urls:
            itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
            url_map[itag] = format_url
        return url_map
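    # An m3u8 manifest is a plain-text playlist: metadata lines start with '#'
    # and every other non-empty line is a variant-stream URL. YouTube encodes
    # the format id in the URL path (e.g. a hypothetical ".../itag/93/..."
    # segment), which is what the regex above pulls out so the resulting
    # url_map can be fed to _get_video_url_list like a normal stream map.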
 
    def _real_extract(self, url):
        if re.match(r'(?:https?://)?[^/]+/watch\?feature=[a-z_]+$', url):
            self._downloader.report_warning(u'Did you forget to quote the URL? Remember that & is a meta-character in most shells, so you want to put the URL in quotes, like  youtube-dl \'http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc\' (or simply  youtube-dl BaW_jenozKc  ).')

        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
        mobj = re.search(self._NEXT_URL_RE, url)
        if mobj:
            url = 'https://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
        video_id = self._extract_id(url)

        self.report_video_webpage_download(video_id)
        url = 'https://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
        request = compat_urllib_request.Request(url)
        try:
            video_webpage_bytes = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))

        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')

        # Attempt to extract SWF player URL
        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
        if mobj is not None:
            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
        else:
            player_url = None

        self.report_video_info_webpage_download(video_id)
        if re.search(r'player-age-gate-content">', video_webpage) is not None:
            self.report_age_confirmation()
            age_gate = True
            # We simulate the access to the video from www.youtube.com/v/{video_id}
            # this can be viewed without logging into Youtube
            data = compat_urllib_parse.urlencode({'video_id': video_id,
                                                  # ...
                                                  'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                                                  })
            video_info_url = 'https://www.youtube.com/get_video_info?' + data
            video_info_webpage = self._download_webpage(video_info_url, video_id,
                                    note=False,
                                    errnote='unable to download video info webpage')
            video_info = compat_parse_qs(video_info_webpage)
        else:
            age_gate = False
            for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
                video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                        % (video_id, el_type))
                video_info_webpage = self._download_webpage(video_info_url, video_id,
                                        note=False,
                                        errnote='unable to download video info webpage')
                video_info = compat_parse_qs(video_info_webpage)
                if 'token' in video_info:
                    break
        if 'token' not in video_info:
            if 'reason' in video_info:
                raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0], expected=True)
            else:
                raise ExtractorError(u'"token" parameter not in video info for unknown reason')

        # Check for "rental" videos
        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
            raise ExtractorError(u'"rental" videos not supported')

        # Start extracting information
        self.report_information_extraction(video_id)

        if 'author' not in video_info:
            raise ExtractorError(u'Unable to extract uploader name')
        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])

        video_uploader_id = None
        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
        if mobj is not None:
            video_uploader_id = mobj.group(1)
        else:
            self._downloader.report_warning(u'unable to extract uploader nickname')

        if 'title' not in video_info:
            raise ExtractorError(u'Unable to extract video title')
        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
 627         m_thumb 
= re
.search(r
'<span itemprop="thumbnail".*?href="(.*?)">', 
 628                             video_webpage
, re
.DOTALL
) 
 629         if m_thumb 
is not None: 
 630             video_thumbnail 
= m_thumb
.group(1) 
 631         elif 'thumbnail_url' not in video_info
: 
 632             self
._downloader
.report_warning(u
'unable to extract video thumbnail') 
 634         else:   # don't panic if we can't find it 
 635             video_thumbnail 
= compat_urllib_parse
.unquote_plus(video_info
['thumbnail_url'][0]) 

        upload_date = None
        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
        if mobj is not None:
            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
            upload_date = unified_strdate(upload_date)

        video_description = get_element_by_id("eow-description", video_webpage)
        if video_description:
            video_description = clean_html(video_description)
        else:
            fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
            if fd_mobj:
                video_description = unescapeHTML(fd_mobj.group(1))
            else:
                video_description = u''

        video_subtitles = None

        if self._downloader.params.get('writesubtitles', False):
            video_subtitles = self._extract_subtitle(video_id)
            if video_subtitles:
                (sub_error, sub_lang, sub) = video_subtitles[0]
                if sub_error:
                    self._downloader.report_warning(sub_error)

        if self._downloader.params.get('writeautomaticsub', False):
            video_subtitles = self._request_automatic_caption(video_id, video_webpage)
            (sub_error, sub_lang, sub) = video_subtitles[0]
            if sub_error:
                self._downloader.report_warning(sub_error)

        if self._downloader.params.get('allsubtitles', False):
            video_subtitles = self._extract_all_subtitles(video_id)
            for video_subtitle in video_subtitles:
                (sub_error, sub_lang, sub) = video_subtitle
                if sub_error:
                    self._downloader.report_warning(sub_error)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id)
            return

        if 'length_seconds' not in video_info:
            self._downloader.report_warning(u'unable to extract video duration')
            video_duration = ''
        else:
            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])

        # Decide which formats to download
        try:
            mobj = re.search(r';ytplayer.config = ({.*?});', video_webpage)
            if not mobj:
                raise ValueError('Could not find vevo ID')
            info = json.loads(mobj.group(1))
            args = info['args']
            # Easy way to know if the 's' value is in url_encoded_fmt_stream_map
            # these signatures are encrypted
            m_s = re.search(r'[&,]s=', args['url_encoded_fmt_stream_map'])
            if m_s is not None:
                self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
                video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
        except ValueError:
            pass

        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
            self.report_rtmp_download()
            video_url_list = [(None, video_info['conn'][0])]
        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
            if 'rtmpe%3Dyes' in video_info['url_encoded_fmt_stream_map'][0]:
                raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
            url_map = {}
            for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
                url_data = compat_parse_qs(url_data_str)
                if 'itag' in url_data and 'url' in url_data:
                    url = url_data['url'][0]
                    if 'sig' in url_data:
                        url += '&signature=' + url_data['sig'][0]
                    elif 's' in url_data:
                        if self._downloader.params.get('verbose'):
                            s = url_data['s'][0]
                            if age_gate:
                                player_version = self._search_regex(r'ad3-(.+?)\.swf',
                                    video_info['ad3_module'][0] if 'ad3_module' in video_info else 'NOT FOUND',
                                    'flash player', fatal=False)
                                player = 'flash player %s' % player_version
                            else:
                                player = u'html5 player %s' % self._search_regex(r'html5player-(.+?)\.js', video_webpage,
                                    'html5 player', fatal=False)
                            parts_sizes = u'.'.join(compat_str(len(part)) for part in s.split('.'))
                            self.to_screen(u'encrypted signature length %d (%s), itag %s, %s' %
                                (len(s), parts_sizes, url_data['itag'][0], player))
                        encrypted_sig = url_data['s'][0]
                        if age_gate:
                            signature = self._decrypt_signature_age_gate(encrypted_sig)
                        else:
                            signature = self._decrypt_signature(encrypted_sig)
                        url += '&signature=' + signature
                    if 'ratebypass' not in url:
                        url += '&ratebypass=yes'
                    url_map[url_data['itag'][0]] = url
            video_url_list = self._get_video_url_list(url_map)
            if not video_url_list:
                return
        elif video_info.get('hlsvp'):
            manifest_url = video_info['hlsvp'][0]
            url_map = self._extract_from_m3u8(manifest_url, video_id)
            video_url_list = self._get_video_url_list(url_map)
            if not video_url_list:
                return
        else:
            raise ExtractorError(u'no conn or url_encoded_fmt_stream_map information found in video info')

        results = []
        for format_param, video_real_url in video_url_list:
            video_extension = self._video_extensions.get(format_param, 'flv')

            video_format = '{0} - {1}{2}'.format(format_param if format_param else video_extension,
                                              self._video_dimensions.get(format_param, '???'),
                                              ' (3D)' if format_param in self._3d_itags else '')

            results.append({
                'id':       video_id,
                'url':      video_real_url,
                'uploader': video_uploader,
                'uploader_id': video_uploader_id,
                'upload_date':  upload_date,
                'title':    video_title,
                'ext':      video_extension,
                'format':   video_format,
                'thumbnail':    video_thumbnail,
                'description':  video_description,
                'player_url':   player_url,
                'subtitles':    video_subtitles,
                'duration':     video_duration
            })
        return results


class YoutubePlaylistIE(InfoExtractor):
    IE_DESC = u'YouTube.com playlists'
    _VALID_URL = r"""(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        youtube\.com/
                        (?:
                           (?:course|view_play_list|my_playlists|artist|playlist|watch)
                           \? (?:.*?&)*? (?:p|a|list)=
                        |  p/
                        )
                        ((?:PL|EC|UU|FL)?[0-9A-Za-z-_]{10,})
                        .*
                     |
                        ((?:PL|EC|UU|FL)[0-9A-Za-z-_]{10,})
                     )"""
    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json&safeSearch=none'
    _MAX_RESULTS = 50
    IE_NAME = u'youtube:playlist'
, url
): 
 802         """Receives a URL and returns True if suitable for this IE.""" 
 803         return re
.match(cls
._VALID
_URL
, url
, re
.VERBOSE
) is not None 
 805     def _real_extract(self
, url
): 
 806         # Extract playlist id 
 807         mobj 
= re
.match(self
._VALID
_URL
, url
, re
.VERBOSE
) 
 809             raise ExtractorError(u
'Invalid URL: %s' % url
) 
 811         # Download playlist videos from API 
 812         playlist_id 
= mobj
.group(1) or mobj
.group(2) 
 815         for page_num 
in itertools
.count(1): 
 816             start_index 
= self
._MAX
_RESULTS 
* (page_num 
- 1) + 1 
 817             if start_index 
>= 1000: 
 818                 self
._downloader
.report_warning(u
'Max number of results reached') 
 820             url 
= self
._TEMPLATE
_URL 
% (playlist_id
, self
._MAX
_RESULTS
, start_index
) 
 821             page 
= self
._download
_webpage
(url
, playlist_id
, u
'Downloading page #%s' % page_num
) 
 824                 response 
= json
.loads(page
) 
 825             except ValueError as err
: 
 826                 raise ExtractorError(u
'Invalid JSON in API response: ' + compat_str(err
)) 
 828             if 'feed' not in response
: 
 829                 raise ExtractorError(u
'Got a malformed response from YouTube API') 
 830             playlist_title 
= response
['feed']['title']['$t'] 
 831             if 'entry' not in response
['feed']: 
 832                 # Number of videos is a multiple of self._MAX_RESULTS 
 835             for entry 
in response
['feed']['entry']: 
 836                 index 
= entry
['yt$position']['$t'] 
 837                 if 'media$group' in entry 
and 'media$player' in entry
['media$group']: 
 838                     videos
.append((index
, entry
['media$group']['media$player']['url'])) 
 840         videos 
= [v
[1] for v 
in sorted(videos
)] 
 842         url_results 
= [self
.url_result(vurl
, 'Youtube') for vurl 
in videos
] 
 843         return [self
.playlist_result(url_results
, playlist_id
, playlist_title
)] 


class YoutubeChannelIE(InfoExtractor):
    IE_DESC = u'YouTube.com channels'
    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
    _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
    _MORE_PAGES_URL = 'http://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
    IE_NAME = u'youtube:channel'

    def extract_videos_from_page(self, page):
        ids_in_page = []
        for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
            if mobj.group(1) not in ids_in_page:
                ids_in_page.append(mobj.group(1))
        return ids_in_page

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # Download channel page
        channel_id = mobj.group(1)
        video_ids = []
        pagenum = 1

        url = self._TEMPLATE_URL % (channel_id, pagenum)
        page = self._download_webpage(url, channel_id,
                                      u'Downloading page #%s' % pagenum)

        # Extract video identifiers
        ids_in_page = self.extract_videos_from_page(page)
        video_ids.extend(ids_in_page)

        # Download any subsequent channel pages using the json-based channel_ajax query
        if self._MORE_PAGES_INDICATOR in page:
            for pagenum in itertools.count(1):
                url = self._MORE_PAGES_URL % (pagenum, channel_id)
                page = self._download_webpage(url, channel_id,
                                              u'Downloading page #%s' % pagenum)

                page = json.loads(page)

                ids_in_page = self.extract_videos_from_page(page['content_html'])
                video_ids.extend(ids_in_page)

                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
                    break

        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))

        urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
        url_entries = [self.url_result(eurl, 'Youtube') for eurl in urls]
        return [self.playlist_result(url_entries, channel_id)]


class YoutubeUserIE(InfoExtractor):
    IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
    _GDATA_PAGE_SIZE = 50
    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
    IE_NAME = u'youtube:user'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        # Download video ids using YouTube Data API. Result size per
        # query is limited (currently to 50 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        video_ids = []

        for pagenum in itertools.count(0):
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1

            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
            page = self._download_webpage(gdata_url, username,
                                          u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE))

            # Extract video identifiers
            ids_in_page = []

            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(mobj.group(1))

            video_ids.extend(ids_in_page)

            # A little optimization - if current page is not
            # "full", i.e. does not contain PAGE_SIZE video ids then
            # we can assume that this page is the last one - there
            # are no more ids on further pages - no need to query
            # again.

            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
                break

        urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
        url_results = [self.url_result(rurl, 'Youtube') for rurl in urls]
        return [self.playlist_result(url_results, playlist_title=username)]


class YoutubeSearchIE(SearchInfoExtractor):
    IE_DESC = u'YouTube.com searches'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    IE_NAME = u'youtube:search'
    _SEARCH_KEY = 'ytsearch'
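    # SearchInfoExtractor turns _SEARCH_KEY into the "ytsearch" pseudo-URL
    # scheme, so e.g. "ytsearch3:some query" asks _get_n_results() below for the
    # first three matches of that query.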

    def report_download_page(self, query, pagenum):
        """Report attempt to download search page with given number."""
        self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        video_ids = []
        pagenum = 0
        limit = n

        while (50 * pagenum) < limit:
            self.report_download_page(query, pagenum+1)
            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
            request = compat_urllib_request.Request(result_url)
            try:
                data = compat_urllib_request.urlopen(request).read().decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to download API page: %s' % compat_str(err))
            api_response = json.loads(data)['data']

            if 'items' not in api_response:
                raise ExtractorError(u'[youtube] No video results')

            new_ids = list(video['id'] for video in api_response['items'])
            video_ids += new_ids

            limit = min(n, api_response['totalItems'])
            pagenum += 1

        if len(video_ids) > n:
            video_ids = video_ids[:n]
        videos = [self.url_result('http://www.youtube.com/watch?v=%s' % id, 'Youtube') for id in video_ids]
        return self.playlist_result(videos, query)


class YoutubeShowIE(InfoExtractor):
    IE_DESC = u'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(.*)'
    IE_NAME = u'youtube:show'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        show_name = mobj.group(1)
        webpage = self._download_webpage(url, show_name, u'Downloading show webpage')
        # There's one playlist for each season of the show
        m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
        self.to_screen(u'%s: Found %s seasons' % (show_name, len(m_seasons)))
        return [self.url_result('https://www.youtube.com' + season.group(1), 'YoutubePlaylist') for season in m_seasons]


class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for extractors that fetch info from
    http://www.youtube.com/feed_ajax
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    _LOGIN_REQUIRED = True
    _PAGING_STEP = 30
    # use action_load_personal_feed instead of action_load_system_feed
    _PERSONAL_FEED = False

    @property
    def _FEED_TEMPLATE(self):
        action = 'action_load_system_feed'
        if self._PERSONAL_FEED:
            action = 'action_load_personal_feed'
        return 'http://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME)

    @property
    def IE_NAME(self):
        return u'youtube:%s' % self._FEED_NAME
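    # For example, YoutubeRecommendedIE below (feed name 'recommended', system
    # feed) ends up polling
    #     http://www.youtube.com/feed_ajax?action_load_system_feed=1&feed_name=recommended&paging=<n>
    # page by page in _real_extract.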
 
    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        feed_entries = []
        # The step argument is available only in 2.7 or higher
        for i in itertools.count(0):
            paging = i * self._PAGING_STEP
            info = self._download_webpage(self._FEED_TEMPLATE % paging,
                                          u'%s feed' % self._FEED_NAME,
                                          u'Downloading page %s' % i)
            info = json.loads(info)
            feed_html = info['feed_html']
            m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
            ids = orderedSet(m.group(1) for m in m_ids)
            feed_entries.extend(self.url_result(id, 'Youtube') for id in ids)
            if info['paging'] is None:
                break
        return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)


class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _PLAYLIST_TITLE = u'Youtube Subscriptions'


class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = u'Youtube Recommended videos'


class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
    _FEED_NAME = 'watch_later'
    _PLAYLIST_TITLE = u'Youtube Watch Later'
    _PERSONAL_FEED = True


class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    IE_NAME = u'youtube:favorites'
    IE_DESC = u'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:o?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, u'favourites playlist id')
        return self.url_result(playlist_id, 'YoutubePlaylist')