import itertools
import json
import re
import socket

from .common import InfoExtractor, SearchInfoExtractor
from ..utils import (
    compat_http_client,
    compat_parse_qs,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,

    clean_html,
    get_element_by_id,
    ExtractorError,
    unescapeHTML,
    unified_strdate,
    orderedSet,
)

class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    def report_lang(self):
        """Report attempt to set language."""
        self.to_screen(u'Setting language')
    def _set_language(self):
        request = compat_urllib_request.Request(self._LANG_URL)
        try:
            self.report_lang()
            compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
            return False
        return True

    def _login(self):
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError(u'No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return False

        request = compat_urllib_request.Request(self._LOGIN_URL)
        try:
            login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
            return False

        galx = None
        dsh = None
        match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
        if match:
            galx = match.group(1)
        match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
        if match:
            dsh = match.group(1)

        # Log in
        login_form_strs = {
                u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
                u'Email': username,
                u'GALX': galx,
                u'Passwd': password,
                u'PersistentCookie': u'yes',
                u'bgresponse': u'js_disabled',
                u'checkConnection': u'',
                u'checkedDomains': u'youtube',
                u'dsh': dsh,
                u'signIn': u'Sign in',
                u'service': u'youtube',
        }
        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode inputs.
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
        request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
        try:
            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
                self._downloader.report_warning(u'unable to log in: bad username or password')
                return False
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
            return False
        return True
    def _confirm_age(self):
        age_form = {
            'next_url': '/',
            'action_confirm': 'Confirm',
        }
        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
        try:
            self.report_age_confirmation()
            compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
        return True

    def _real_initialize(self):
        if self._downloader is None:
            return
        if not self._set_language():
            return
        if not self._login():
            return
        self._confirm_age()

class YoutubeIE(YoutubeBaseInfoExtractor):
    IE_DESC = u'YouTube.com'
    _VALID_URL = r"""^
                     (
                         (?:https?://)?                                       # http(s):// (optional)
                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
                            tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                         (?:                                                  # the various things that can precede the ID:
                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
                             |(?:                                             # or the v= param in all its forms
                                 (?:watch|movie(?:_popup)?(?:\.php)?)?        # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         )?                                                   # optional -> youtube.com/xxxx is OK
                     )?                                                       # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]+)                                         # here it is! the YouTube video ID
                     (?(1).+)?                                                # if we found the ID, everything can follow
                     $"""
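    # Illustrative URL shapes accepted by _VALID_URL (the ID below is the one
    # used in _TESTS; treat these as examples, not an exhaustive list):
    #
    #   http://www.youtube.com/watch?v=BaW_jenozKc
    #   https://youtu.be/BaW_jenozKc
    #   http://www.youtube.com/embed/BaW_jenozKc
    #   http://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc
    #   BaW_jenozKc                      (the naked ID matches as well)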
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    # Listed in order of quality
    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13',
                          '95', '94', '93', '92', '132', '151',
                          '85', '84', '102', '83', '101', '82', '100',
                          ]
    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13',
                                      '95', '94', '93', '92', '132', '151',
                                      '85', '102', '84', '101', '83', '100', '82',
                                      ]
    _video_extensions = {
        # itag -> container extension; anything not listed falls back to 'flv'
        # videos that use m3u8
    }
    _video_dimensions = {
        # itag -> display resolution label, used for the format description
    }
    _3d_itags = ['85', '84', '102', '83', '101', '82', '100']
    _TESTS = [
        {
            u"url":  u"http://www.youtube.com/watch?v=BaW_jenozKc",
            u"file":  u"BaW_jenozKc.mp4",
            u"info_dict": {
                u"title": u"youtube-dl test video \"'/\\ä↭𝕐",
                u"uploader": u"Philipp Hagemeister",
                u"uploader_id": u"phihag",
                u"upload_date": u"20121002",
                u"description": u"test chars:  \"'/\\ä↭𝕐\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
            }
        },
        {
            u"url":  u"http://www.youtube.com/watch?v=1ltcDfZMA3U",
            u"file":  u"1ltcDfZMA3U.flv",
            u"note": u"Test VEVO video (#897)",
            u"info_dict": {
                u"upload_date": u"20070518",
                u"title": u"Maps - It Will Find You",
                u"description": u"Music video by Maps performing It Will Find You.",
                u"uploader": u"MuteUSA",
                u"uploader_id": u"MuteUSA"
            }
        },
        {
            u"url":  u"http://www.youtube.com/watch?v=UxxajLWwzqY",
            u"file":  u"UxxajLWwzqY.mp4",
            u"note": u"Test generic use_cipher_signature video (#897)",
            u"info_dict": {
                u"upload_date": u"20120506",
                u"title": u"Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]",
                u"description": u"md5:b085c9804f5ab69f4adea963a2dceb3c",
                u"uploader": u"IconaPop",
                u"uploader_id": u"IconaPop"
            }
        },
        {
            u"url":  u"https://www.youtube.com/watch?v=07FYdnEawAQ",
            u"file":  u"07FYdnEawAQ.mp4",
            u"note": u"Test VEVO video with age protection (#956)",
            u"info_dict": {
                u"upload_date": u"20130703",
                u"title": u"Justin Timberlake - Tunnel Vision (Explicit)",
                u"description": u"md5:64249768eec3bc4276236606ea996373",
                u"uploader": u"justintimberlakeVEVO",
                u"uploader_id": u"justintimberlakeVEVO"
            }
        },
        {
            u'url': u'https://www.youtube.com/watch?v=TGi3HqYrWHE',
            u'file': u'TGi3HqYrWHE.mp4',
            u'note': u'm3u8 video',
            u'info_dict': {
                u'title': u'Triathlon - Men - London 2012 Olympic Games',
                u'description': u'- Men -  TR02 - Triathlon - 07 August 2012 - London 2012 Olympic Games',
                u'uploader': u'olympic',
                u'upload_date': u'20120807',
                u'uploader_id': u'olympic',
            },
            u'params': {
                u'skip_download': True,
            },
        },
    ]
    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        if YoutubePlaylistIE.suitable(url) or YoutubeSubscriptionsIE.suitable(url): return False
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
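    # Minimal usage sketch (illustrative only; the playlist ID is a placeholder):
    # suitable() is a classmethod, so it can be queried without instantiating the
    # extractor, and playlist/subscription URLs are deliberately handed off to
    # the more specific IEs:
    #
    #   YoutubeIE.suitable('http://www.youtube.com/watch?v=BaW_jenozKc')
    #       -> True
    #   YoutubeIE.suitable('https://www.youtube.com/playlist?list=PL0123456789')
    #       -> False (YoutubePlaylistIE claims it instead)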
    def report_video_webpage_download(self, video_id):
        """Report attempt to download video webpage."""
        self.to_screen(u'%s: Downloading video webpage' % video_id)

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self.to_screen(u'%s: Downloading video info webpage' % video_id)

    def report_video_subtitles_download(self, video_id):
        """Report attempt to check for available video subtitles."""
        self.to_screen(u'%s: Checking available subtitles' % video_id)

    def report_video_subtitles_request(self, video_id, sub_lang, format):
        """Report attempt to download video subtitles."""
        self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))

    def report_video_subtitles_available(self, video_id, sub_lang_list):
        """Report available subtitles."""
        sub_lang = ",".join(list(sub_lang_list.keys()))
        self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang))

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self.to_screen(u'%s: Extracting video information' % video_id)

    def report_unavailable_format(self, video_id, format):
        """Report that the requested format is not available."""
        self.to_screen(u'%s: Format %s not available' % (video_id, format))

    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self.to_screen(u'RTMP download detected')
    def _decrypt_signature(self, s):
        """Turn the encrypted s field into a working signature"""

        if len(s) == 92:
            return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + s[80:83]
        elif len(s) == 90:
            return s[25] + s[3:25] + s[2] + s[26:40] + s[77] + s[41:77] + s[89] + s[78:81]
        elif len(s) == 88:
            return s[48] + s[81:67:-1] + s[82] + s[66:62:-1] + s[85] + s[61:48:-1] + s[67] + s[47:12:-1] + s[3] + s[11:3:-1] + s[2] + s[12]
        elif len(s) == 87:
            return s[83:53:-1] + s[3] + s[52:40:-1] + s[86] + s[39:10:-1] + s[0] + s[9:3:-1] + s[53]
        elif len(s) == 86:
            return s[83:85] + s[26] + s[79:46:-1] + s[85] + s[45:36:-1] + s[30] + s[35:30:-1] + s[46] + s[29:26:-1] + s[82] + s[25:1:-1]
        elif len(s) == 85:
            return s[2:8] + s[0] + s[9:21] + s[65] + s[22:65] + s[84] + s[66:82] + s[21]
        elif len(s) == 84:
            return s[83:27:-1] + s[0] + s[26:5:-1] + s[2:0:-1] + s[27]
        elif len(s) == 83:
            return s[:15] + s[80] + s[16:80] + s[15]
        elif len(s) == 82:
            return s[36] + s[79:67:-1] + s[81] + s[66:40:-1] + s[33] + s[39:36:-1] + s[40] + s[35] + s[0] + s[67] + s[32:0:-1] + s[34]
        elif len(s) == 81:
            return s[56] + s[79:56:-1] + s[41] + s[55:41:-1] + s[80] + s[40:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
        elif len(s) == 79:
            return s[54] + s[77:54:-1] + s[39] + s[53:39:-1] + s[78] + s[38:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
        else:
            raise ExtractorError(u'Unable to decrypt signature, key length %d not supported; retrying might work' % (len(s)))
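    # Note on the branches above (descriptive only, not part of the original
    # routine): each one is a fixed rearrangement of the scrambled signature,
    # keyed on its length.  For instance the len(s) == 83 branch
    #
    #   s[:15] + s[80] + s[16:80] + s[15]
    #
    # swaps the characters at positions 15 and 80 and drops everything after
    # position 80, so supporting a new player usually means adding one branch.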
    def _decrypt_signature_age_gate(self, s):
        # The videos with age protection use another player, so the algorithms
        # can be different.
        if len(s) == 86:
            return s[2:63] + s[82] + s[64:82] + s[63]
        else:
            # Fallback to the other algorithms
            return self._decrypt_signature(s)
    def _get_available_subtitles(self, video_id):
        self.report_video_subtitles_download(video_id)
        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
        try:
            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            return (u'unable to download video subtitles: %s' % compat_str(err), None)
        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
        if not sub_lang_list:
            return (u'video doesn\'t have subtitles', None)
        return sub_lang_list
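    # Shape of the data handled above (illustrative, abbreviated): the
    # timedtext list endpoint replies with XML containing one <track> element
    # per caption language, e.g.
    #
    #   <track id="0" name="" lang_code="en" ... />
    #   <track id="1" name="" lang_code="de" ... />
    #
    # which the regex in _get_available_subtitles() turns into a
    # {lang_code: name} dict such as {'en': '', 'de': ''}.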
    def _list_available_subtitles(self, video_id):
        sub_lang_list = self._get_available_subtitles(video_id)
        self.report_video_subtitles_available(video_id, sub_lang_list)
    def _request_subtitle(self, sub_lang, sub_name, video_id, format):
        """
        Return the subtitle as a tuple:
        (error_message, sub_lang, sub)
        """
        self.report_video_subtitles_request(video_id, sub_lang, format)
        params = compat_urllib_parse.urlencode({
            'lang': sub_lang,
            'name': sub_name,
            'v': video_id,
            'fmt': format,
        })
        url = 'http://www.youtube.com/api/timedtext?' + params
        try:
            sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
        if not sub:
            return (u'Did not fetch video subtitles', None, None)
        return (None, sub_lang, sub)
    def _request_automatic_caption(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
           argument to speed up the process."""
        sub_lang = self._downloader.params.get('subtitleslang') or 'en'
        sub_format = self._downloader.params.get('subtitlesformat')
        self.to_screen(u'%s: Looking for automatic captions' % video_id)
        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
        err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang
        if mobj is None:
            return [(err_msg, None, None)]
        player_config = json.loads(mobj.group(1))
        try:
            args = player_config[u'args']
            caption_url = args[u'ttsurl']
            timestamp = args[u'timestamp']
            params = compat_urllib_parse.urlencode({
                'lang': 'en',
                'tlang': sub_lang,
                'fmt': sub_format,
                'ts': timestamp,
                'kind': 'asr',
            })
            subtitles_url = caption_url + '&' + params
            sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions')
            return [(None, sub_lang, sub)]
        except KeyError:
            return [(err_msg, None, None)]
    def _extract_subtitle(self, video_id):
        """
        Return a list with a tuple:
        [(error_message, sub_lang, sub)]
        """
        sub_lang_list = self._get_available_subtitles(video_id)
        sub_format = self._downloader.params.get('subtitlesformat')
        if isinstance(sub_lang_list, tuple):  # There was some error, it didn't get the available subtitles
            return [(sub_lang_list[0], None, None)]
        if self._downloader.params.get('subtitleslang', False):
            sub_lang = self._downloader.params.get('subtitleslang')
        elif 'en' in sub_lang_list:
            sub_lang = 'en'
        else:
            sub_lang = list(sub_lang_list.keys())[0]
        if sub_lang not in sub_lang_list:
            return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]

        subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
        return [subtitle]
    def _extract_all_subtitles(self, video_id):
        sub_lang_list = self._get_available_subtitles(video_id)
        sub_format = self._downloader.params.get('subtitlesformat')
        if isinstance(sub_lang_list, tuple):  # There was some error, it didn't get the available subtitles
            return [(sub_lang_list[0], None, None)]
        subtitles = []
        for sub_lang in sub_lang_list:
            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
            subtitles.append(subtitle)
        return subtitles
    def _print_formats(self, formats):
        print('Available formats:')
        for x in formats:
            print('%s\t:\t%s\t[%s]%s' % (x, self._video_extensions.get(x, 'flv'),
                                         self._video_dimensions.get(x, '???'),
                                         ' (3D)' if x in self._3d_itags else ''))
    def _extract_id(self, url):
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group(2)
        return video_id
    def _get_video_url_list(self, url_map):
        """
        Transform a dictionary in the format {itag:url} to a list of (itag, url)
        with the requested formats.
        """
        req_format = self._downloader.params.get('format', None)
        format_limit = self._downloader.params.get('format_limit', None)
        available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
        if format_limit is not None and format_limit in available_formats:
            format_list = available_formats[available_formats.index(format_limit):]
        else:
            format_list = available_formats
        existing_formats = [x for x in format_list if x in url_map]
        if len(existing_formats) == 0:
            raise ExtractorError(u'no known formats available for video')
        if self._downloader.params.get('listformats', None):
            self._print_formats(existing_formats)
            return
        if req_format is None or req_format == 'best':
            video_url_list = [(existing_formats[0], url_map[existing_formats[0]])]  # Best quality
        elif req_format == 'worst':
            video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])]  # worst quality
        elif req_format in ('-1', 'all'):
            video_url_list = [(f, url_map[f]) for f in existing_formats]  # All formats
        else:
            # Specific formats. We pick the first in a slash-delimited sequence.
            # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
            req_formats = req_format.split('/')
            video_url_list = None
            for rf in req_formats:
                if rf in url_map:
                    video_url_list = [(rf, url_map[rf])]
                    break
            if video_url_list is None:
                raise ExtractorError(u'requested format not available')
        return video_url_list
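    # Worked example (illustrative values only): with
    #
    #   url_map = {'22': 'http://host/itag22', '18': 'http://host/itag18'}
    #
    # and no --format option, the first entry of _available_formats present in
    # url_map wins, so the result is [('22', 'http://host/itag22')].  With
    # --format '38/22/18' the first slash-separated itag found in url_map is
    # picked, which is again '22' here.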
 
    def _extract_from_m3u8(self, manifest_url, video_id):
        url_map = {}
        def _get_urls(_manifest):
            lines = _manifest.split('\n')
            urls = filter(lambda l: l and not l.startswith('#'),
                          lines)
            return urls
        manifest = self._download_webpage(manifest_url, video_id, u'Downloading formats manifest')
        formats_urls = _get_urls(manifest)
        for format_url in formats_urls:
            itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
            url_map[itag] = format_url
        return url_map
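    # Illustrative manifest excerpt (URL is made up): an HLS master playlist
    # interleaves tag lines (starting with '#') with variant-stream URLs, and
    # the itag is recovered from the URL path, e.g.
    #
    #   #EXTM3U
    #   #EXT-X-STREAM-INF:BANDWIDTH=...
    #   http://manifest.googlevideo.com/api/manifest/hls_variant/.../itag/93/...
    #
    # Only the URL lines survive _get_urls(), and '93' becomes a url_map key.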
 
    def _real_extract(self, url):
        if re.match(r'(?:https?://)?[^/]+/watch\?feature=[a-z_]+$', url):
            self._downloader.report_warning(u'Did you forget to quote the URL? Remember that & is a meta-character in most shells, so you want to put the URL in quotes, like  youtube-dl \'http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc\' (or simply  youtube-dl BaW_jenozKc  ).')

        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
        mobj = re.search(self._NEXT_URL_RE, url)
        if mobj:
            url = 'https://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
        video_id = self._extract_id(url)

        # Get video webpage
        self.report_video_webpage_download(video_id)
        url = 'https://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
        request = compat_urllib_request.Request(url)
        try:
            video_webpage_bytes = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))

        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')

        # Attempt to extract SWF player URL
        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
        if mobj is not None:
            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
        else:
            player_url = None

        # Get video info
        self.report_video_info_webpage_download(video_id)
        if re.search(r'player-age-gate-content">', video_webpage) is not None:
            self.report_age_confirmation()
            age_gate = True
            # We simulate the access to the video from www.youtube.com/v/{video_id}
            # this can be viewed without logging into Youtube
            data = compat_urllib_parse.urlencode({'video_id': video_id,
                                                  'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                                                  })
            video_info_url = 'https://www.youtube.com/get_video_info?' + data
            video_info_webpage = self._download_webpage(video_info_url, video_id,
                                    note=False,
                                    errnote='unable to download video info webpage')
            video_info = compat_parse_qs(video_info_webpage)
        else:
            age_gate = False
            for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
                video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                        % (video_id, el_type))
                video_info_webpage = self._download_webpage(video_info_url, video_id,
                                        note=False,
                                        errnote='unable to download video info webpage')
                video_info = compat_parse_qs(video_info_webpage)
                if 'token' in video_info:
                    break
        if 'token' not in video_info:
            if 'reason' in video_info:
                raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0], expected=True)
            else:
                raise ExtractorError(u'"token" parameter not in video info for unknown reason')

        # Check for "rental" videos
        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
            raise ExtractorError(u'"rental" videos not supported')

        # Start extracting information
        self.report_information_extraction(video_id)

        # uploader
        if 'author' not in video_info:
            raise ExtractorError(u'Unable to extract uploader name')
        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])

        # uploader_id
        video_uploader_id = None
        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
        if mobj is not None:
            video_uploader_id = mobj.group(1)
        else:
            self._downloader.report_warning(u'unable to extract uploader nickname')

        # title
        if 'title' not in video_info:
            raise ExtractorError(u'Unable to extract video title')
        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])

        # thumbnail image
        # We try first to get a high quality image:
        m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
                            video_webpage, re.DOTALL)
        if m_thumb is not None:
            video_thumbnail = m_thumb.group(1)
        elif 'thumbnail_url' not in video_info:
            self._downloader.report_warning(u'unable to extract video thumbnail')
            video_thumbnail = ''
        else:   # don't panic if we can't find it
            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
        # upload date
        upload_date = None
        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
        if mobj is not None:
            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
            upload_date = unified_strdate(upload_date)

        # description
        video_description = get_element_by_id("eow-description", video_webpage)
        if video_description:
            video_description = clean_html(video_description)
        else:
            fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
            if fd_mobj is not None:
                video_description = unescapeHTML(fd_mobj.group(1))
            else:
                video_description = u''

        # closed captions
        video_subtitles = None

        if self._downloader.params.get('writesubtitles', False):
            video_subtitles = self._extract_subtitle(video_id)
            if video_subtitles:
                (sub_error, sub_lang, sub) = video_subtitles[0]
                if sub_error:
                    self._downloader.report_warning(sub_error)

        if self._downloader.params.get('writeautomaticsub', False):
            video_subtitles = self._request_automatic_caption(video_id, video_webpage)
            (sub_error, sub_lang, sub) = video_subtitles[0]
            if sub_error:
                self._downloader.report_warning(sub_error)

        if self._downloader.params.get('allsubtitles', False):
            video_subtitles = self._extract_all_subtitles(video_id)
            for video_subtitle in video_subtitles:
                (sub_error, sub_lang, sub) = video_subtitle
                if sub_error:
                    self._downloader.report_warning(sub_error)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id)
            return
        if 'length_seconds' not in video_info:
            self._downloader.report_warning(u'unable to extract video duration')
            video_duration = ''
        else:
            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])

        # Decide which formats to download
        try:
            mobj = re.search(r';ytplayer.config = ({.*?});', video_webpage)
            if not mobj:
                raise ValueError('Could not find vevo ID')
            info = json.loads(mobj.group(1))
            args = info['args']
            # Easy way to know if the 's' value is in url_encoded_fmt_stream_map
            # these signatures are encrypted
            m_s = re.search(r'[&,]s=', args['url_encoded_fmt_stream_map'])
            if m_s is not None:
                self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
                video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
        except ValueError:
            pass

        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
            self.report_rtmp_download()
            video_url_list = [(None, video_info['conn'][0])]
        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
            if 'rtmpe%3Dyes' in video_info['url_encoded_fmt_stream_map'][0]:
                raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
            url_map = {}
            for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
                url_data = compat_parse_qs(url_data_str)
                if 'itag' in url_data and 'url' in url_data:
                    url = url_data['url'][0]
                    if 'sig' in url_data:
                        url += '&signature=' + url_data['sig'][0]
                    elif 's' in url_data:
                        if self._downloader.params.get('verbose'):
                            s = url_data['s'][0]
                            if age_gate:
                                player_version = self._search_regex(r'ad3-(.+?)\.swf',
                                    video_info['ad3_module'][0] if 'ad3_module' in video_info else 'NOT FOUND',
                                    'flash player', fatal=False)
                                player = 'flash player %s' % player_version
                            else:
                                player = u'html5 player %s' % self._search_regex(r'html5player-(.+?)\.js', video_webpage,
                                    'html5 player', fatal=False)
                            parts_sizes = u'.'.join(compat_str(len(part)) for part in s.split('.'))
                            self.to_screen(u'encrypted signature length %d (%s), itag %s, %s' %
                                (len(s), parts_sizes, url_data['itag'][0], player))
                        encrypted_sig = url_data['s'][0]
                        if age_gate:
                            signature = self._decrypt_signature_age_gate(encrypted_sig)
                        else:
                            signature = self._decrypt_signature(encrypted_sig)
                        url += '&signature=' + signature
                    if 'ratebypass' not in url:
                        url += '&ratebypass=yes'
                    url_map[url_data['itag'][0]] = url
            video_url_list = self._get_video_url_list(url_map)
            if not video_url_list:
                return
        elif video_info.get('hlsvp'):
            manifest_url = video_info['hlsvp'][0]
            url_map = self._extract_from_m3u8(manifest_url, video_id)
            video_url_list = self._get_video_url_list(url_map)
            if not video_url_list:
                return
        else:
            raise ExtractorError(u'no conn or url_encoded_fmt_stream_map information found in video info')

        results = []
        for format_param, video_real_url in video_url_list:
            # Extension
            video_extension = self._video_extensions.get(format_param, 'flv')

            video_format = '{0} - {1}{2}'.format(format_param if format_param else video_extension,
                                              self._video_dimensions.get(format_param, '???'),
                                              ' (3D)' if format_param in self._3d_itags else '')

            results.append({
                'id':       video_id,
                'url':      video_real_url,
                'uploader': video_uploader,
                'uploader_id': video_uploader_id,
                'upload_date':  upload_date,
                'title':    video_title,
                'ext':      video_extension,
                'format':   video_format,
                'thumbnail':    video_thumbnail,
                'description':  video_description,
                'player_url':   player_url,
                'subtitles':    video_subtitles,
                'duration':     video_duration
            })
        return results

class YoutubePlaylistIE(InfoExtractor):
    IE_DESC = u'YouTube.com playlists'
    _VALID_URL = r"""(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        youtube\.com/
                        (?:
                           (?:course|view_play_list|my_playlists|artist|playlist|watch)
                           \? (?:.*?&)*? (?:p|a|list)=
                        )
                        ((?:PL|EC|UU|FL)?[0-9A-Za-z-_]{10,})
                        .*
                     |
                        ((?:PL|EC|UU|FL)[0-9A-Za-z-_]{10,})
                     )"""
    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json&safeSearch=none'
    _MAX_RESULTS = 50
    IE_NAME = u'youtube:playlist'
    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
    def _real_extract(self, url):
        # Extract playlist id
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # Download playlist videos from API
        playlist_id = mobj.group(1) or mobj.group(2)
        videos = []

        for page_num in itertools.count(1):
            start_index = self._MAX_RESULTS * (page_num - 1) + 1
            if start_index >= 1000:
                self._downloader.report_warning(u'Max number of results reached')
                break
            url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, start_index)
            page = self._download_webpage(url, playlist_id, u'Downloading page #%s' % page_num)

            try:
                response = json.loads(page)
            except ValueError as err:
                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))

            if 'feed' not in response:
                raise ExtractorError(u'Got a malformed response from YouTube API')
            playlist_title = response['feed']['title']['$t']
            if 'entry' not in response['feed']:
                # Number of videos is a multiple of self._MAX_RESULTS
                break

            for entry in response['feed']['entry']:
                index = entry['yt$position']['$t']
                if 'media$group' in entry and 'media$player' in entry['media$group']:
                    videos.append((index, entry['media$group']['media$player']['url']))

        videos = [v[1] for v in sorted(videos)]

        url_results = [self.url_result(vurl, 'Youtube') for vurl in videos]
        return [self.playlist_result(url_results, playlist_id, playlist_title)]

class YoutubeChannelIE(InfoExtractor):
    IE_DESC = u'YouTube.com channels'
    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
    _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
    _MORE_PAGES_URL = 'http://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
    IE_NAME = u'youtube:channel'
, page
): 
 854         for mobj 
in re
.finditer(r
'href="/watch\?v=([0-9A-Za-z_-]+)&?', page
): 
 855             if mobj
.group(1) not in ids_in_page
: 
 856                 ids_in_page
.append(mobj
.group(1)) 
    def _real_extract(self, url):
        # Extract channel id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # Download channel page
        channel_id = mobj.group(1)
        video_ids = []
        pagenum = 1

        url = self._TEMPLATE_URL % (channel_id, pagenum)
        page = self._download_webpage(url, channel_id,
                                      u'Downloading page #%s' % pagenum)

        # Extract video identifiers
        ids_in_page = self.extract_videos_from_page(page)
        video_ids.extend(ids_in_page)

        # Download any subsequent channel pages using the json-based channel_ajax query
        if self._MORE_PAGES_INDICATOR in page:
            for pagenum in itertools.count(1):
                url = self._MORE_PAGES_URL % (pagenum, channel_id)
                page = self._download_webpage(url, channel_id,
                                              u'Downloading page #%s' % pagenum)

                page = json.loads(page)

                ids_in_page = self.extract_videos_from_page(page['content_html'])
                video_ids.extend(ids_in_page)

                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
                    break

        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))

        urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
        url_entries = [self.url_result(eurl, 'Youtube') for eurl in urls]
        return [self.playlist_result(url_entries, channel_id)]

class YoutubeUserIE(InfoExtractor):
    IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
    _GDATA_PAGE_SIZE = 50
    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
    IE_NAME = u'youtube:user'
    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        # Download video ids using YouTube Data API. Result size per
        # query is limited (currently to 50 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        video_ids = []

        for pagenum in itertools.count(0):
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1

            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
            page = self._download_webpage(gdata_url, username,
                                          u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE))

            # Extract video identifiers
            ids_in_page = []

            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(mobj.group(1))

            video_ids.extend(ids_in_page)

            # A little optimization - if current page is not
            # "full", ie. does not contain PAGE_SIZE video ids then
            # we can assume that this page is the last one - there
            # are no more ids on further pages - no need to query
            # again.

            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
                break

        urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
        url_results = [self.url_result(rurl, 'Youtube') for rurl in urls]
        return [self.playlist_result(url_results, playlist_title=username)]

class YoutubeSearchIE(SearchInfoExtractor):
    IE_DESC = u'YouTube.com searches'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    IE_NAME = u'youtube:search'
    _SEARCH_KEY = 'ytsearch'
    def report_download_page(self, query, pagenum):
        """Report attempt to download search page with given number."""
        self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        video_ids = []
        pagenum = 0
        limit = n

        while (50 * pagenum) < limit:
            self.report_download_page(query, pagenum + 1)
            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50 * pagenum) + 1)
            request = compat_urllib_request.Request(result_url)
            try:
                data = compat_urllib_request.urlopen(request).read().decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to download API page: %s' % compat_str(err))
            api_response = json.loads(data)['data']

            if 'items' not in api_response:
                raise ExtractorError(u'[youtube] No video results')

            new_ids = list(video['id'] for video in api_response['items'])
            video_ids += new_ids

            limit = min(n, api_response['totalItems'])
            pagenum += 1

        if len(video_ids) > n:
            video_ids = video_ids[:n]
        videos = [self.url_result('http://www.youtube.com/watch?v=%s' % id, 'Youtube') for id in video_ids]
        return self.playlist_result(videos, query)

class YoutubeShowIE(InfoExtractor):
    IE_DESC = u'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(.*)'
    IE_NAME = u'youtube:show'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        show_name = mobj.group(1)
        webpage = self._download_webpage(url, show_name, u'Downloading show webpage')
        # There's one playlist for each season of the show
        m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
        self.to_screen(u'%s: Found %s seasons' % (show_name, len(m_seasons)))
        return [self.url_result('https://www.youtube.com' + season.group(1), 'YoutubePlaylist') for season in m_seasons]

class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for extractors that fetch info from
    http://www.youtube.com/feed_ajax
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    _LOGIN_REQUIRED = True
    # use action_load_personal_feed instead of action_load_system_feed
    _PERSONAL_FEED = False

    @property
    def _FEED_TEMPLATE(self):
        action = 'action_load_system_feed'
        if self._PERSONAL_FEED:
            action = 'action_load_personal_feed'
        return 'http://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME)
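    # Illustrative expansion: for the watch-later feed (_FEED_NAME =
    # 'watch_later', _PERSONAL_FEED = True, see YoutubeWatchLaterIE below) the
    # template above becomes
    #
    #   http://www.youtube.com/feed_ajax?action_load_personal_feed=1&feed_name=watch_later&paging=%s
    #
    # and _real_extract() fills in the trailing paging value page by page.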
    @property
    def IE_NAME(self):
        return u'youtube:%s' % self._FEED_NAME
    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        feed_entries = []
        # The step argument is available only in 2.7 or higher
        for i in itertools.count(0):
            paging = i * self._PAGING_STEP
            info = self._download_webpage(self._FEED_TEMPLATE % paging,
                                          u'%s feed' % self._FEED_NAME,
                                          u'Downloading page %s' % i)
            info = json.loads(info)
            feed_html = info['feed_html']
            m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
            ids = orderedSet(m.group(1) for m in m_ids)
            feed_entries.extend(self.url_result(id, 'Youtube') for id in ids)
            if info['paging'] is None:
                break
        return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)

class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _PLAYLIST_TITLE = u'Youtube Subscriptions'

class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = u'Youtube Recommended videos'

class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
    IE_DESC = u'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
    _FEED_NAME = 'watch_later'
    _PLAYLIST_TITLE = u'Youtube Watch Later'
    _PERSONAL_FEED = True

class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    IE_NAME = u'youtube:favorites'
    IE_DESC = u'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:o?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, u'favourites playlist id')
        return self.url_result(playlist_id, 'YoutubePlaylist')