2 # -*- coding: utf-8 -*- 
   4 from __future__ 
import unicode_literals
 
  33 import xml
.etree
.ElementTree
 
  39     compat_etree_fromstring
, 
  44     compat_socket_create_connection
, 
  48     compat_urllib_parse_urlparse
, 
  49     compat_urllib_request
, 
  55 # This is not clearly defined otherwise 
  56 compiled_regex_type 
= type(re
.compile('')) 
  59     'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)', 
  60     'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 
  61     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 
  62     'Accept-Encoding': 'gzip, deflate', 
  63     'Accept-Language': 'en-us,en;q=0.5', 
  69 ENGLISH_MONTH_NAMES 
= [ 
  70     'January', 'February', 'March', 'April', 'May', 'June', 
  71     'July', 'August', 'September', 'October', 'November', 'December'] 
  74 def preferredencoding(): 
  75     """Get preferred encoding. 
  77     Returns the best encoding scheme for the system, based on 
  78     locale.getpreferredencoding() and some further tweaks. 
  81         pref 
= locale
.getpreferredencoding() 
  89 def write_json_file(obj
, fn
): 
  90     """ Encode obj as JSON and write it to fn, atomically if possible """ 
  92     fn 
= encodeFilename(fn
) 
  93     if sys
.version_info 
< (3, 0) and sys
.platform 
!= 'win32': 
  94         encoding 
= get_filesystem_encoding() 
  95         # os.path.basename returns a bytes object, but NamedTemporaryFile 
  96         # will fail if the filename contains non ascii characters unless we 
  97         # use a unicode object 
  98         path_basename 
= lambda f
: os
.path
.basename(fn
).decode(encoding
) 
  99         # the same for os.path.dirname 
 100         path_dirname 
= lambda f
: os
.path
.dirname(fn
).decode(encoding
) 
 102         path_basename 
= os
.path
.basename
 
 103         path_dirname 
= os
.path
.dirname
 
 107         'prefix': path_basename(fn
) + '.', 
 108         'dir': path_dirname(fn
), 
 112     # In Python 2.x, json.dump expects a bytestream. 
 113     # In Python 3.x, it writes to a character stream 
 114     if sys
.version_info 
< (3, 0): 
 122     tf 
= tempfile
.NamedTemporaryFile(**compat_kwargs(args
)) 
 127         if sys
.platform 
== 'win32': 
 128             # Need to remove existing file on Windows, else os.rename raises 
 129             # WindowsError or FileExistsError. 
 134         os
.rename(tf
.name
, fn
) 
 143 if sys
.version_info 
>= (2, 7): 
 144     def find_xpath_attr(node
, xpath
, key
, val
=None): 
 145         """ Find the xpath xpath[@key=val] """ 
 146         assert re
.match(r
'^[a-zA-Z_-]+$', key
) 
 148             assert re
.match(r
'^[a-zA-Z0-9@\s:._-]*$', val
) 
 149         expr 
= xpath 
+ ('[@%s]' % key 
if val 
is None else "[@%s='%s']" % (key
, val
)) 
 150         return node
.find(expr
) 
 152     def find_xpath_attr(node
, xpath
, key
, val
=None): 
 153         # Here comes the crazy part: In 2.6, if the xpath is a unicode, 
 154         # .//node does not match if a node is a direct child of . ! 
 155         if isinstance(xpath
, compat_str
): 
 156             xpath 
= xpath
.encode('ascii') 
 158         for f 
in node
.findall(xpath
): 
 159             if key 
not in f
.attrib
: 
 161             if val 
is None or f
.attrib
.get(key
) == val
: 
 165 # On python2.6 the xml.etree.ElementTree.Element methods don't support 
 166 # the namespace parameter 
 169 def xpath_with_ns(path
, ns_map
): 
 170     components 
= [c
.split(':') for c 
in path
.split('/')] 
 174             replaced
.append(c
[0]) 
 177             replaced
.append('{%s}%s' % (ns_map
[ns
], tag
)) 
 178     return '/'.join(replaced
) 
 181 def xpath_element(node
, xpath
, name
=None, fatal
=False, default
=NO_DEFAULT
): 
 182     def _find_xpath(xpath
): 
 183         if sys
.version_info 
< (2, 7):  # Crazy 2.6 
 184             xpath 
= xpath
.encode('ascii') 
 185         return node
.find(xpath
) 
 187     if isinstance(xpath
, (str, compat_str
)): 
 188         n 
= _find_xpath(xpath
) 
 196         if default 
is not NO_DEFAULT
: 
 199             name 
= xpath 
if name 
is None else name
 
 200             raise ExtractorError('Could not find XML element %s' % name
) 
 206 def xpath_text(node
, xpath
, name
=None, fatal
=False, default
=NO_DEFAULT
): 
 207     n 
= xpath_element(node
, xpath
, name
, fatal
=fatal
, default
=default
) 
 208     if n 
is None or n 
== default
: 
 211         if default 
is not NO_DEFAULT
: 
 214             name 
= xpath 
if name 
is None else name
 
 215             raise ExtractorError('Could not find XML element\'s text %s' % name
) 
 221 def xpath_attr(node
, xpath
, key
, name
=None, fatal
=False, default
=NO_DEFAULT
): 
 222     n 
= find_xpath_attr(node
, xpath
, key
) 
 224         if default 
is not NO_DEFAULT
: 
 227             name 
= '%s[@%s]' % (xpath
, key
) if name 
is None else name
 
 228             raise ExtractorError('Could not find XML attribute %s' % name
) 
 234 def get_element_by_id(id, html
): 
 235     """Return the content of the tag with the specified ID in the passed HTML document""" 
 236     return get_element_by_attribute("id", id, html
) 
 239 def get_element_by_attribute(attribute
, value
, html
): 
 240     """Return the content of the tag with the specified attribute in the passed HTML document""" 
 242     m 
= re
.search(r
'''(?xs) 
 244          (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? 
 246          (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? 
 250     ''' % (re
.escape(attribute
), re
.escape(value
)), html
) 
 254     res 
= m
.group('content') 
 256     if res
.startswith('"') or res
.startswith("'"): 
 259     return unescapeHTML(res
) 
 262 def clean_html(html
): 
 263     """Clean an HTML snippet into a readable string""" 
 265     if html 
is None:  # Convenience for sanitizing descriptions etc. 
 269     html 
= html
.replace('\n', ' ') 
 270     html 
= re
.sub(r
'\s*<\s*br\s*/?\s*>\s*', '\n', html
) 
 271     html 
= re
.sub(r
'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html
) 
 273     html 
= re
.sub('<.*?>', '', html
) 
 274     # Replace html entities 
 275     html 
= unescapeHTML(html
) 
 279 def sanitize_open(filename
, open_mode
): 
 280     """Try to open the given filename, and slightly tweak it if this fails. 
 282     Attempts to open the given filename. If this fails, it tries to change 
 283     the filename slightly, step by step, until it's either able to open it 
 284     or it fails and raises a final exception, like the standard open() 
 287     It returns the tuple (stream, definitive_file_name). 
 291             if sys
.platform 
== 'win32': 
 293                 msvcrt
.setmode(sys
.stdout
.fileno(), os
.O_BINARY
) 
 294             return (sys
.stdout
.buffer if hasattr(sys
.stdout
, 'buffer') else sys
.stdout
, filename
) 
 295         stream 
= open(encodeFilename(filename
), open_mode
) 
 296         return (stream
, filename
) 
 297     except (IOError, OSError) as err
: 
 298         if err
.errno 
in (errno
.EACCES
,): 
 301         # In case of error, try to remove win32 forbidden chars 
 302         alt_filename 
= sanitize_path(filename
) 
 303         if alt_filename 
== filename
: 
 306             # An exception here should be caught in the caller 
 307             stream 
= open(encodeFilename(alt_filename
), open_mode
) 
 308             return (stream
, alt_filename
) 
 311 def timeconvert(timestr
): 
 312     """Convert RFC 2822 defined time string into system timestamp""" 
 314     timetuple 
= email
.utils
.parsedate_tz(timestr
) 
 315     if timetuple 
is not None: 
 316         timestamp 
= email
.utils
.mktime_tz(timetuple
) 
 320 def sanitize_filename(s
, restricted
=False, is_id
=False): 
 321     """Sanitizes a string so it could be used as part of a filename. 
 322     If restricted is set, use a stricter subset of allowed characters. 
 323     Set is_id if this is not an arbitrary string, but an ID that should be kept if possible 
 325     def replace_insane(char
): 
 326         if char 
== '?' or ord(char
) < 32 or ord(char
) == 127: 
 329             return '' if restricted 
else '\'' 
 331             return '_-' if restricted 
else ' -' 
 332         elif char 
in '\\/|*<>': 
 334         if restricted 
and (char 
in '!&\'()[]{}$;`^,#' or char
.isspace()): 
 336         if restricted 
and ord(char
) > 127: 
 341     s 
= re
.sub(r
'[0-9]+(?::[0-9]+)+', lambda m
: m
.group(0).replace(':', '_'), s
) 
 342     result 
= ''.join(map(replace_insane
, s
)) 
 344         while '__' in result
: 
 345             result 
= result
.replace('__', '_') 
 346         result 
= result
.strip('_') 
 347         # Common case of "Foreign band name - English song title" 
 348         if restricted 
and result
.startswith('-_'): 
 350         if result
.startswith('-'): 
 351             result 
= '_' + result
[len('-'):] 
 352         result 
= result
.lstrip('.') 
 358 def sanitize_path(s
): 
 359     """Sanitizes and normalizes path on Windows""" 
 360     if sys
.platform 
!= 'win32': 
 362     drive_or_unc
, _ 
= os
.path
.splitdrive(s
) 
 363     if sys
.version_info 
< (2, 7) and not drive_or_unc
: 
 364         drive_or_unc
, _ 
= os
.path
.splitunc(s
) 
 365     norm_path 
= os
.path
.normpath(remove_start(s
, drive_or_unc
)).split(os
.path
.sep
) 
 369         path_part 
if path_part 
in ['.', '..'] else re
.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part
) 
 370         for path_part 
in norm_path
] 
 372         sanitized_path
.insert(0, drive_or_unc 
+ os
.path
.sep
) 
 373     return os
.path
.join(*sanitized_path
) 
 376 # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of 
 377 # unwanted failures due to missing protocol 
 378 def sanitized_Request(url
, *args
, **kwargs
): 
 379     return compat_urllib_request
.Request( 
 380         'http:%s' % url 
if url
.startswith('//') else url
, *args
, **kwargs
) 
 383 def orderedSet(iterable
): 
 384     """ Remove all duplicates from the input iterable """ 
 392 def _htmlentity_transform(entity
): 
 393     """Transforms an HTML entity to a character.""" 
 394     # Known non-numeric HTML entity 
 395     if entity 
in compat_html_entities
.name2codepoint
: 
 396         return compat_chr(compat_html_entities
.name2codepoint
[entity
]) 
 398     mobj 
= re
.match(r
'#(x[0-9a-fA-F]+|[0-9]+)', entity
) 
 400         numstr 
= mobj
.group(1) 
 401         if numstr
.startswith('x'): 
 403             numstr 
= '0%s' % numstr
 
 406         # See https://github.com/rg3/youtube-dl/issues/7518 
 408             return compat_chr(int(numstr
, base
)) 
 412     # Unknown entity in name, return its literal representation 
 413     return '&%s;' % entity
 
 419     assert type(s
) == compat_str
 
 422         r
'&([^;]+);', lambda m
: _htmlentity_transform(m
.group(1)), s
) 
 425 def get_subprocess_encoding(): 
 426     if sys
.platform 
== 'win32' and sys
.getwindowsversion()[0] >= 5: 
 427         # For subprocess calls, encode with locale encoding 
 428         # Refer to http://stackoverflow.com/a/9951851/35070 
 429         encoding 
= preferredencoding() 
 431         encoding 
= sys
.getfilesystemencoding() 
 437 def encodeFilename(s
, for_subprocess
=False): 
 439     @param s The name of the file 
 442     assert type(s
) == compat_str
 
 444     # Python 3 has a Unicode API 
 445     if sys
.version_info 
>= (3, 0): 
 448     # Pass '' directly to use Unicode APIs on Windows 2000 and up 
 449     # (Detecting Windows NT 4 is tricky because 'major >= 4' would 
 450     # match Windows 9x series as well. Besides, NT 4 is obsolete.) 
 451     if not for_subprocess 
and sys
.platform 
== 'win32' and sys
.getwindowsversion()[0] >= 5: 
 454     return s
.encode(get_subprocess_encoding(), 'ignore') 
 457 def decodeFilename(b
, for_subprocess
=False): 
 459     if sys
.version_info 
>= (3, 0): 
 462     if not isinstance(b
, bytes): 
 465     return b
.decode(get_subprocess_encoding(), 'ignore') 
 468 def encodeArgument(s
): 
 469     if not isinstance(s
, compat_str
): 
 470         # Legacy code that uses byte strings 
 471         # Uncomment the following line after fixing all post processors 
 472         # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) 
 473         s 
= s
.decode('ascii') 
 474     return encodeFilename(s
, True) 
 477 def decodeArgument(b
): 
 478     return decodeFilename(b
, True) 
 481 def decodeOption(optval
): 
 484     if isinstance(optval
, bytes): 
 485         optval 
= optval
.decode(preferredencoding()) 
 487     assert isinstance(optval
, compat_str
) 
 491 def formatSeconds(secs
): 
 493         return '%d:%02d:%02d' % (secs 
// 3600, (secs 
% 3600) // 60, secs 
% 60) 
 495         return '%d:%02d' % (secs 
// 60, secs 
% 60) 
 500 def make_HTTPS_handler(params
, **kwargs
): 
 501     opts_no_check_certificate 
= params
.get('nocheckcertificate', False) 
 502     if hasattr(ssl
, 'create_default_context'):  # Python >= 3.4 or 2.7.9 
 503         context 
= ssl
.create_default_context(ssl
.Purpose
.SERVER_AUTH
) 
 504         if opts_no_check_certificate
: 
 505             context
.check_hostname 
= False 
 506             context
.verify_mode 
= ssl
.CERT_NONE
 
 508             return YoutubeDLHTTPSHandler(params
, context
=context
, **kwargs
) 
 511             # (create_default_context present but HTTPSHandler has no context=) 
 514     if sys
.version_info 
< (3, 2): 
 515         return YoutubeDLHTTPSHandler(params
, **kwargs
) 
 517         context 
= ssl
.SSLContext(ssl
.PROTOCOL_TLSv1
) 
 518         context
.verify_mode 
= (ssl
.CERT_NONE
 
 519                                if opts_no_check_certificate
 
 520                                else ssl
.CERT_REQUIRED
) 
 521         context
.set_default_verify_paths() 
 522         return YoutubeDLHTTPSHandler(params
, context
=context
, **kwargs
) 
 525 def bug_reports_message(): 
 526     if ytdl_is_updateable(): 
 527         update_cmd 
= 'type  youtube-dl -U  to update' 
 529         update_cmd 
= 'see  https://yt-dl.org/update  on how to update' 
 530     msg 
= '; please report this issue on https://yt-dl.org/bug .' 
 531     msg 
+= ' Make sure you are using the latest version; %s.' % update_cmd
 
 532     msg 
+= ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' 
 536 class ExtractorError(Exception): 
 537     """Error during info extraction.""" 
 539     def __init__(self
, msg
, tb
=None, expected
=False, cause
=None, video_id
=None): 
 540         """ tb, if given, is the original traceback (so that it can be printed out). 
 541         If expected is set, this is a normal error message and most likely not a bug in youtube-dl. 
 544         if sys
.exc_info()[0] in (compat_urllib_error
.URLError
, socket
.timeout
, UnavailableVideoError
): 
 546         if video_id 
is not None: 
 547             msg 
= video_id 
+ ': ' + msg
 
 549             msg 
+= ' (caused by %r)' % cause
 
 551             msg 
+= bug_reports_message() 
 552         super(ExtractorError
, self
).__init
__(msg
) 
 555         self
.exc_info 
= sys
.exc_info()  # preserve original exception 
 557         self
.video_id 
= video_id
 
 559     def format_traceback(self
): 
 560         if self
.traceback 
is None: 
 562         return ''.join(traceback
.format_tb(self
.traceback
)) 
 565 class UnsupportedError(ExtractorError
): 
 566     def __init__(self
, url
): 
 567         super(UnsupportedError
, self
).__init
__( 
 568             'Unsupported URL: %s' % url
, expected
=True) 
class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    # Subclass of ExtractorError; raised when a mandatory regular-expression
    # search over a downloaded page yields no match.
 577 class DownloadError(Exception): 
 578     """Download Error exception. 
 580     This exception may be thrown by FileDownloader objects if they are not 
 581     configured to continue on errors. They will contain the appropriate 
 585     def __init__(self
, msg
, exc_info
=None): 
 586         """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ 
 587         super(DownloadError
, self
).__init
__(msg
) 
 588         self
.exc_info 
= exc_info
 
 591 class SameFileError(Exception): 
 592     """Same File exception. 
 594     This exception will be thrown by FileDownloader objects if they detect 
 595     multiple files would have to be downloaded to the same file on disk. 
 600 class PostProcessingError(Exception): 
 601     """Post Processing exception. 
 603     This exception may be raised by PostProcessor's .run() method to 
 604     indicate an error in the postprocessing task. 
 607     def __init__(self
, msg
): 
class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    # Control-flow exception: raised once the count of completed downloads
    # reaches the user-supplied --max-downloads limit, to stop processing.
 616 class UnavailableVideoError(Exception): 
 617     """Unavailable Format exception. 
 619     This exception will be thrown when a video is requested 
 620     in a format that is not available for that video. 
 625 class ContentTooShortError(Exception): 
 626     """Content Too Short exception. 
 628     This exception may be raised by FileDownloader objects when a file they 
 629     download is too small for what the server announced first, indicating 
 630     the connection was probably interrupted. 
 633     def __init__(self
, downloaded
, expected
): 
 635         self
.downloaded 
= downloaded
 
 636         self
.expected 
= expected
 
 639 def _create_http_connection(ydl_handler
, http_class
, is_https
, *args
, **kwargs
): 
 640     # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting 
 641     # expected HTTP responses to meet HTTP/1.0 or later (see also 
 642     # https://github.com/rg3/youtube-dl/issues/6727) 
 643     if sys
.version_info 
< (3, 0): 
 644         kwargs
[b
'strict'] = True 
 645     hc 
= http_class(*args
, **kwargs
) 
 646     source_address 
= ydl_handler
._params
.get('source_address') 
 647     if source_address 
is not None: 
 648         sa 
= (source_address
, 0) 
 649         if hasattr(hc
, 'source_address'):  # Python 2.7+ 
 650             hc
.source_address 
= sa
 
 652             def _hc_connect(self
, *args
, **kwargs
): 
 653                 sock 
= compat_socket_create_connection( 
 654                     (self
.host
, self
.port
), self
.timeout
, sa
) 
 656                     self
.sock 
= ssl
.wrap_socket( 
 657                         sock
, self
.key_file
, self
.cert_file
, 
 658                         ssl_version
=ssl
.PROTOCOL_TLSv1
) 
 661             hc
.connect 
= functools
.partial(_hc_connect
, hc
) 
 666 class YoutubeDLHandler(compat_urllib_request
.HTTPHandler
): 
 667     """Handler for HTTP requests and responses. 
 669     This class, when installed with an OpenerDirector, automatically adds 
 670     the standard headers to every HTTP request and handles gzipped and 
 671     deflated responses from web servers. If compression is to be avoided in 
 672     a particular request, the original request in the program code only has 
 673     to include the HTTP header "Youtubedl-No-Compression", which will be 
 674     removed before making the real request. 
 676     Part of this code was copied from: 
 678     http://techknack.net/python-urllib2-handlers/ 
 680     Andrew Rowls, the author of that code, agreed to release it to the 
 684     def __init__(self
, params
, *args
, **kwargs
): 
 685         compat_urllib_request
.HTTPHandler
.__init
__(self
, *args
, **kwargs
) 
 686         self
._params 
= params
 
 688     def http_open(self
, req
): 
 689         return self
.do_open(functools
.partial( 
 690             _create_http_connection
, self
, compat_http_client
.HTTPConnection
, False), 
 696             return zlib
.decompress(data
, -zlib
.MAX_WBITS
) 
 698             return zlib
.decompress(data
) 
 701     def addinfourl_wrapper(stream
, headers
, url
, code
): 
 702         if hasattr(compat_urllib_request
.addinfourl
, 'getcode'): 
 703             return compat_urllib_request
.addinfourl(stream
, headers
, url
, code
) 
 704         ret 
= compat_urllib_request
.addinfourl(stream
, headers
, url
) 
 708     def http_request(self
, req
): 
 709         # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not 
 710         # always respected by websites, some tend to give out URLs with non percent-encoded 
 711         # non-ASCII characters (see telemb.py, ard.py [#3412]) 
 712         # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) 
 713         # To work around aforementioned issue we will replace request's original URL with 
 714         # percent-encoded one 
 715         # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09) 
 716         # the code of this workaround has been moved here from YoutubeDL.urlopen() 
 717         url 
= req
.get_full_url() 
 718         url_escaped 
= escape_url(url
) 
 720         # Substitute URL if any change after escaping 
 721         if url 
!= url_escaped
: 
 722             req_type 
= HEADRequest 
if req
.get_method() == 'HEAD' else compat_urllib_request
.Request
 
 724                 url_escaped
, data
=req
.data
, headers
=req
.headers
, 
 725                 origin_req_host
=req
.origin_req_host
, unverifiable
=req
.unverifiable
) 
 726             new_req
.timeout 
= req
.timeout
 
 729         for h
, v 
in std_headers
.items(): 
 730             # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 
 731             # The dict keys are capitalized because of this bug by urllib 
 732             if h
.capitalize() not in req
.headers
: 
 734         if 'Youtubedl-no-compression' in req
.headers
: 
 735             if 'Accept-encoding' in req
.headers
: 
 736                 del req
.headers
['Accept-encoding'] 
 737             del req
.headers
['Youtubedl-no-compression'] 
 739         if sys
.version_info 
< (2, 7) and '#' in req
.get_full_url(): 
 740             # Python 2.6 is brain-dead when it comes to fragments 
 741             req
._Request
__original 
= req
._Request
__original
.partition('#')[0] 
 742             req
._Request
__r
_type 
= req
._Request
__r
_type
.partition('#')[0] 
 746     def http_response(self
, req
, resp
): 
 749         if resp
.headers
.get('Content-encoding', '') == 'gzip': 
 750             content 
= resp
.read() 
 751             gz 
= gzip
.GzipFile(fileobj
=io
.BytesIO(content
), mode
='rb') 
 753                 uncompressed 
= io
.BytesIO(gz
.read()) 
 754             except IOError as original_ioerror
: 
 755                 # There may be junk add the end of the file 
 756                 # See http://stackoverflow.com/q/4928560/35070 for details 
 757                 for i 
in range(1, 1024): 
 759                         gz 
= gzip
.GzipFile(fileobj
=io
.BytesIO(content
[:-i
]), mode
='rb') 
 760                         uncompressed 
= io
.BytesIO(gz
.read()) 
 765                     raise original_ioerror
 
 766             resp 
= self
.addinfourl_wrapper(uncompressed
, old_resp
.headers
, old_resp
.url
, old_resp
.code
) 
 767             resp
.msg 
= old_resp
.msg
 
 769         if resp
.headers
.get('Content-encoding', '') == 'deflate': 
 770             gz 
= io
.BytesIO(self
.deflate(resp
.read())) 
 771             resp 
= self
.addinfourl_wrapper(gz
, old_resp
.headers
, old_resp
.url
, old_resp
.code
) 
 772             resp
.msg 
= old_resp
.msg
 
 773         # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see 
 774         # https://github.com/rg3/youtube-dl/issues/6457). 
 775         if 300 <= resp
.code 
< 400: 
 776             location 
= resp
.headers
.get('Location') 
 778                 # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 
 779                 if sys
.version_info 
>= (3, 0): 
 780                     location 
= location
.encode('iso-8859-1').decode('utf-8') 
 781                 location_escaped 
= escape_url(location
) 
 782                 if location 
!= location_escaped
: 
 783                     del resp
.headers
['Location'] 
 784                     resp
.headers
['Location'] = location_escaped
 
 787     https_request 
= http_request
 
 788     https_response 
= http_response
 
 791 class YoutubeDLHTTPSHandler(compat_urllib_request
.HTTPSHandler
): 
 792     def __init__(self
, params
, https_conn_class
=None, *args
, **kwargs
): 
 793         compat_urllib_request
.HTTPSHandler
.__init
__(self
, *args
, **kwargs
) 
 794         self
._https
_conn
_class 
= https_conn_class 
or compat_http_client
.HTTPSConnection
 
 795         self
._params 
= params
 
 797     def https_open(self
, req
): 
 799         if hasattr(self
, '_context'):  # python > 2.6 
 800             kwargs
['context'] = self
._context
 
 801         if hasattr(self
, '_check_hostname'):  # python 3.x 
 802             kwargs
['check_hostname'] = self
._check
_hostname
 
 803         return self
.do_open(functools
.partial( 
 804             _create_http_connection
, self
, self
._https
_conn
_class
, True), 
 808 class YoutubeDLCookieProcessor(compat_urllib_request
.HTTPCookieProcessor
): 
 809     def __init__(self
, cookiejar
=None): 
 810         compat_urllib_request
.HTTPCookieProcessor
.__init
__(self
, cookiejar
) 
 812     def http_response(self
, request
, response
): 
 813         # Python 2 will choke on next HTTP request in row if there are non-ASCII 
 814         # characters in Set-Cookie HTTP header of last response (see 
 815         # https://github.com/rg3/youtube-dl/issues/6769). 
 816         # In order to at least prevent crashing we will percent encode Set-Cookie 
 817         # header before HTTPCookieProcessor starts processing it. 
 818         # if sys.version_info < (3, 0) and response.headers: 
 819         #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): 
 820         #         set_cookie = response.headers.get(set_cookie_header) 
 822         #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") 
 823         #             if set_cookie != set_cookie_escaped: 
 824         #                 del response.headers[set_cookie_header] 
 825         #                 response.headers[set_cookie_header] = set_cookie_escaped 
 826         return compat_urllib_request
.HTTPCookieProcessor
.http_response(self
, request
, response
) 
 828     https_request 
= compat_urllib_request
.HTTPCookieProcessor
.http_request
 
 829     https_response 
= http_response
 
 832 def parse_iso8601(date_str
, delimiter
='T', timezone
=None): 
 833     """ Return a UNIX timestamp from the given date """ 
 838     date_str 
= re
.sub(r
'\.[0-9]+', '', date_str
) 
 842             r
'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', 
 845             timezone 
= datetime
.timedelta() 
 847             date_str 
= date_str
[:-len(m
.group(0))] 
 848             if not m
.group('sign'): 
 849                 timezone 
= datetime
.timedelta() 
 851                 sign 
= 1 if m
.group('sign') == '+' else -1 
 852                 timezone 
= datetime
.timedelta( 
 853                     hours
=sign 
* int(m
.group('hours')), 
 854                     minutes
=sign 
* int(m
.group('minutes'))) 
 856         date_format 
= '%Y-%m-%d{0}%H:%M:%S'.format(delimiter
) 
 857         dt 
= datetime
.datetime
.strptime(date_str
, date_format
) - timezone
 
 858         return calendar
.timegm(dt
.timetuple()) 
 863 def unified_strdate(date_str
, day_first
=True): 
 864     """Return a string with the date in the format YYYYMMDD""" 
 870     date_str 
= date_str
.replace(',', ' ') 
 871     # %z (UTC offset) is only supported in python>=3.2 
 872     if not re
.match(r
'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str
): 
 873         date_str 
= re
.sub(r
' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str
) 
 874     # Remove AM/PM + timezone 
 875     date_str 
= re
.sub(r
'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str
) 
 877     format_expressions 
= [ 
 882         '%b %dst %Y %I:%M%p', 
 883         '%b %dnd %Y %I:%M%p', 
 884         '%b %dth %Y %I:%M%p', 
 890         '%Y-%m-%d %H:%M:%S.%f', 
 893         '%Y-%m-%dT%H:%M:%SZ', 
 894         '%Y-%m-%dT%H:%M:%S.%fZ', 
 895         '%Y-%m-%dT%H:%M:%S.%f0Z', 
 897         '%Y-%m-%dT%H:%M:%S.%f', 
 901         format_expressions
.extend([ 
 909         format_expressions
.extend([ 
 916     for expression 
in format_expressions
: 
 918             upload_date 
= datetime
.datetime
.strptime(date_str
, expression
).strftime('%Y%m%d') 
 921     if upload_date 
is None: 
 922         timetuple 
= email
.utils
.parsedate_tz(date_str
) 
 924             upload_date 
= datetime
.datetime(*timetuple
[:6]).strftime('%Y%m%d') 
 925     if upload_date 
is not None: 
 926         return compat_str(upload_date
) 
 929 def determine_ext(url
, default_ext
='unknown_video'): 
 932     guess 
= url
.partition('?')[0].rpartition('.')[2] 
 933     if re
.match(r
'^[A-Za-z0-9]+$', guess
): 
 935     elif guess
.rstrip('/') in ( 
 936             'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', 
 937             'flv', 'f4v', 'f4a', 'f4b', 
 938             'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', 
 939             'mkv', 'mka', 'mk3d', 
 948             'f4f', 'f4m', 'm3u8', 'smil'): 
 949         return guess
.rstrip('/') 
 954 def subtitles_filename(filename
, sub_lang
, sub_format
): 
 955     return filename
.rsplit('.', 1)[0] + '.' + sub_lang 
+ '.' + sub_format
 
 958 def date_from_str(date_str
): 
 960     Return a datetime object from a string in the format YYYYMMDD or 
 961     (now|today)[+-][0-9](day|week|month|year)(s)?""" 
 962     today 
= datetime
.date
.today() 
 963     if date_str 
in ('now', 'today'): 
 965     if date_str 
== 'yesterday': 
 966         return today 
- datetime
.timedelta(days
=1) 
 967     match 
= re
.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str
) 
 968     if match 
is not None: 
 969         sign 
= match
.group('sign') 
 970         time 
= int(match
.group('time')) 
 973         unit 
= match
.group('unit') 
 974         # A bad aproximation? 
 982         delta 
= datetime
.timedelta(**{unit
: time
}) 
 984     return datetime
.datetime
.strptime(date_str
, "%Y%m%d").date() 
 987 def hyphenate_date(date_str
): 
 989     Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" 
 990     match 
= re
.match(r
'^(\d\d\d\d)(\d\d)(\d\d)$', date_str
) 
 991     if match 
is not None: 
 992         return '-'.join(match
.groups()) 
 997 class DateRange(object): 
 998     """Represents a time interval between two dates""" 
1000     def __init__(self
, start
=None, end
=None): 
1001         """start and end must be strings in the format accepted by date""" 
1002         if start 
is not None: 
1003             self
.start 
= date_from_str(start
) 
1005             self
.start 
= datetime
.datetime
.min.date() 
1007             self
.end 
= date_from_str(end
) 
1009             self
.end 
= datetime
.datetime
.max.date() 
1010         if self
.start 
> self
.end
: 
1011             raise ValueError('Date range: "%s" , the start date must be before the end date' % self
) 
1015         """Returns a range that only contains the given day""" 
1016         return cls(day
, day
) 
1018     def __contains__(self
, date
): 
1019         """Check if the date is in the range""" 
1020         if not isinstance(date
, datetime
.date
): 
1021             date 
= date_from_str(date
) 
1022         return self
.start 
<= date 
<= self
.end
 
1025         return '%s - %s' % (self
.start
.isoformat(), self
.end
.isoformat()) 
def platform_name():
    """ Returns the platform name as a compat_str """
    name = platform.platform()
    # On Python 2 platform.platform() may hand back bytes.
    if isinstance(name, bytes):
        name = name.decode(preferredencoding())

    assert isinstance(name, compat_str)
    return name
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,  # STD_OUTPUT_HANDLE
        2: -12,  # STD_ERROR_HANDLE
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b"GetStdHandle", ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b"GetConsoleMode", ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        # A handle is only a real console if it is a character device
        # and GetConsoleMode succeeds on it.
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        # Index of the first character outside the Basic Multilingual Plane.
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True
def write_string(s, out=None, encoding=None):
    """Write the text s to out (default: sys.stderr), coping with consoles and byte streams."""
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()
def bytes_to_intlist(bs):
    """Turn a byte string into a list of integer byte values."""
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]
def intlist_to_bytes(xs):
    """Inverse of bytes_to_intlist: pack a list of byte values into bytes."""
    if not xs:
        return b''
    return struct_pack('%dB' % len(xs), *xs)
# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        # 0x2 is LOCKFILE_EXCLUSIVE_LOCK
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    import fcntl

    def _lock_file(f, exclusive):
        fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

    def _unlock_file(f):
        fcntl.flock(f, fcntl.LOCK_UN)
class locked_file(object):
    """A file wrapper that holds an advisory lock for the duration of a with-block."""

    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        # Readers share the lock; writers/appenders take it exclusively.
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)
def get_filesystem_encoding():
    """Return the filesystem encoding, defaulting to utf-8 when unknown."""
    enc = sys.getfilesystemencoding()
    if enc is None:
        return 'utf-8'
    return enc
def shell_quote(args):
    """Quote a list of arguments for display as one shell command line."""
    quoted = []
    fs_encoding = get_filesystem_encoding()
    for arg in args:
        if isinstance(arg, bytes):
            # We may get a filename encoded with 'encodeFilename'
            arg = arg.decode(fs_encoding)
        quoted.append(pipes.quote(arg))
    return ' '.join(quoted)
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """
    payload = compat_urllib_parse.urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + payload
def unsmuggle_url(smug_url, default=None):
    """Inverse of smuggle_url: return (url, data) or (url, default) if nothing smuggled."""
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data
def format_bytes(bytes):
    """Format a byte count as a human-readable string with binary suffixes."""
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)
def parse_filesize(s):
    """Parse a human-readable file size like '5.6 MiB' into a byte count, or None."""
    if s is None:
        return None

    # The lower-case forms are of course incorrect and inofficial,
    # but we support those too
    _UNIT_TABLE = {'B': 1, 'b': 1}
    for exp, letter in enumerate('KMGTPEZY', start=1):
        _UNIT_TABLE[letter + 'iB'] = 1024 ** exp   # e.g. KiB
        _UNIT_TABLE[letter + 'B'] = 1000 ** exp    # e.g. KB
        _UNIT_TABLE[letter.lower() + 'B'] = 1024 ** exp  # e.g. kB (informal binary)
        _UNIT_TABLE[letter + 'b'] = 1000 ** exp    # e.g. Kb

    units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s)
    if not m:
        return None

    num_str = m.group('num').replace(',', '.')
    mult = _UNIT_TABLE[m.group('unit')]
    return int(float(num_str) * mult)
def month_by_name(name):
    """ Return the number of a month by (locale-independently) English name """
    try:
        return ENGLISH_MONTH_NAMES.index(name) + 1
    except ValueError:
        return None
def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
    abbreviation (e.g. 'Jan') """
    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML, leaving existing entities intact."""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)
def setproctitle(title):
    """Set the process name via glibc prctl(PR_SET_NAME); silently no-op elsewhere."""
    assert isinstance(title, compat_str)
    try:
        libc = ctypes.cdll.LoadLibrary("libc.so.6")
    except OSError:
        return  # Not a glibc system
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        # 15 is PR_SET_NAME
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this
def remove_start(s, start):
    """Strip the prefix start from s if present."""
    return s[len(start):] if s.startswith(start) else s
def remove_end(s, end):
    """Strip the suffix end from s if present."""
    return s[:-len(end)] if s.endswith(end) else s
def url_basename(url):
    """Return the last path component of a URL (query and fragment excluded)."""
    parsed_path = compat_urlparse.urlparse(url).path
    return parsed_path.strip('/').split('/')[-1]
class HEADRequest(compat_urllib_request.Request):
    """A Request subclass that always issues HTTP HEAD."""

    def get_method(self):
        return 'HEAD'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    """Coerce v to int (optionally via attribute get_attr, scaled), or default."""
    if get_attr:
        if v is not None:
            v = getattr(v, get_attr, None)
    if v == '':
        v = None
    if v is None:
        return default
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError):
        return default
def str_or_none(v, default=None):
    """Coerce v to compat_str, or return default when v is None."""
    if v is None:
        return default
    return compat_str(v)
def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if int_str is None:
        return None
    # Drop thousands separators and a stray '+' before converting.
    int_str = re.sub(r'[,\.\+]', '', int_str)
    return int(int_str)
def float_or_none(v, scale=1, invscale=1, default=None):
    """Coerce v to float (scaled), or default on None/unparseable input."""
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except (ValueError, TypeError):
        return default
def parse_duration(s):
    """Parse a free-form duration string ('1h30m', '01:02:03', '5 min') to seconds."""
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    m = re.match(
        r'''(?ix)^T?
        (?:
            (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*|
            (?P<only_hours>[0-9.]+)\s*(?:hours?)|

            \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*|
            (?:
                (?:
                    (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)?
                    (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*
                )?
                (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
            )?
            (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?
        )$''', s)
    if not m:
        return None
    res = 0
    if m.group('only_mins'):
        return float_or_none(m.group('only_mins'), invscale=60)
    if m.group('only_hours'):
        return float_or_none(m.group('only_hours'), invscale=60 * 60)
    if m.group('secs'):
        res += int(m.group('secs'))
    if m.group('mins_reversed'):
        res += int(m.group('mins_reversed')) * 60
    if m.group('mins'):
        res += int(m.group('mins')) * 60
    if m.group('hours'):
        res += int(m.group('hours')) * 60 * 60
    if m.group('hours_reversed'):
        res += int(m.group('hours_reversed')) * 60 * 60
    if m.group('days'):
        res += int(m.group('days')) * 24 * 60 * 60
    if m.group('ms'):
        res += float(m.group('ms'))
    return res
def prepend_extension(filename, ext, expected_real_ext=None):
    """Insert ext before the real extension ('a.mp4' -> 'a.ext.mp4');
    append it instead when the real extension differs from expected_real_ext."""
    name, real_ext = os.path.splitext(filename)
    if not expected_real_ext or real_ext[1:] == expected_real_ext:
        return '{0}.{1}{2}'.format(name, ext, real_ext)
    return '{0}.{1}'.format(filename, ext)
def replace_extension(filename, ext, expected_real_ext=None):
    """Replace the extension with ext; keep the old one in place when it
    differs from expected_real_ext."""
    name, real_ext = os.path.splitext(filename)
    base = name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename
    return '{0}.{1}'.format(base, ext)
def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe
def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        out, _ = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)
def detect_exe_version(output, version_re=None, unrecognized='present'):
    """Extract a version number from a tool's --version output, or unrecognized."""
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    return unrecognized
class PagedList(object):
    """Base class for lazily paged result lists; subclasses implement getslice()."""

    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())
class OnDemandPagedList(PagedList):
    """PagedList that fetches pages one by one while slicing."""

    def __init__(self, pagefunc, pagesize):
        self._pagefunc = pagefunc
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            page_results = list(self._pagefunc(pagenum))

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)

            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res
class InAdvancePagedList(PagedList):
    """PagedList whose total page count is known in advance."""

    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
            if skip_elems:
                page = page[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                else:
                    page = page[:only_more]
                    res.extend(page)
                    break
            res.extend(page)
        return res
def uppercase_escape(s):
    """Decode \\UXXXXXXXX escape sequences in s, leaving everything else alone."""
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)
def lowercase_escape(s):
    """Decode \\uXXXX escape sequences in s, leaving everything else alone."""
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()
try:
    struct.pack('!I', 0)
except TypeError:
    # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
    def struct_pack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.pack(spec, *args)

    def struct_unpack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.unpack(spec, *args)
else:
    struct_pack = struct.pack
    struct_unpack = struct.unpack
def read_batch_urls(batch_fd):
    """Read URLs from an open batch file, skipping comment lines and a UTF-8 BOM."""
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = '\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        url = url.strip()
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
    """URL-encode POST data and return it as ASCII bytes."""
    return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')
def encode_dict(d, encoding='utf-8'):
    """Encode every string key and value of d to bytes in the given encoding."""
    def encode(v):
        return v.encode(encoding) if isinstance(v, compat_basestring) else v
    return {encode(k): encode(v) for k, v in d.items()}
def parse_age_limit(s):
    """Parse an age limit like '18' or '18+' to an int, else look it up in US_RATINGS."""
    if s is None:
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    return int(m.group('age')) if m else US_RATINGS.get(s, None)
def strip_jsonp(code):
    """Strip a JSONP callback wrapper, leaving only the JSON payload."""
    return re.sub(
        r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
def js_to_json(code):
    """Convert a JavaScript-ish object literal into valid JSON text."""
    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        if v.startswith('"'):
            v = re.sub(r"\\'", "'", v[1:-1])
        elif v.startswith("'"):
            v = v[1:-1]
            v = re.sub(r"\\\\|\\'|\"", lambda m: {
                '\\\\': '\\\\',
                "\\'": "'",
                '"': '\\"',
            }[m.group(0)], v)
        return '"%s"' % v

    res = re.sub(r'''(?x)
        "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'|
        [a-zA-Z_][.a-zA-Z_0-9]*
        ''', fix_kv, code)
    # Drop trailing commas before closing brackets/braces.
    res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res)
    return res
def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q
1754 DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' 
def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s
def version_tuple(v):
    """Split a dotted/dashed version string into a tuple of ints."""
    return tuple(int(piece) for piece in re.split(r'[-.]', v))
def is_outdated_version(version, limit, assume_new=True):
    """Compare version strings; unparseable input falls back on assume_new."""
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new
def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter

    # Only a zipped or frozen distribution can be self-updated.
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(map(shlex_quote, args))
def mimetype2ext(mt):
    """Map a MIME type to a file extension, defaulting to the raw subtype."""
    _, _, res = mt.rpartition('/')

    return {
        'ttml+xml': 'ttml',
        'x-mp4-fragmented': 'mp4',
        'x-ms-wmv': 'wmv',
    }.get(res, res)
def urlhandle_detect_ext(url_handle):
    """Guess a file extension from a URL handle's Content-Disposition or Content-Type."""
    try:
        url_handle.headers
        getheader = lambda h: url_handle.headers[h]
    except AttributeError:  # Python < 3
        getheader = url_handle.info().getheader

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))
def encode_data_uri(data, mime_type):
    """Build an RFC 2397 data: URI from raw bytes and a MIME type."""
    b64 = base64.b64encode(data).decode('ascii')
    return 'data:%s;base64,%s' % (mime_type, b64)
def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit
def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """
    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
            break
    else:
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
    """Work out the download protocol for an info dict, falling back to the URL scheme."""
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = info_dict['url']
    if url.startswith('rtmp'):
        return 'rtmp'
    elif url.startswith('mms'):
        return 'mms'
    elif url.startswith('rtsp'):
        return 'rtsp'

    ext = determine_ext(url)
    if ext == 'm3u8':
        return 'm3u8'
    elif ext == 'f4m':
        return 'f4m'

    return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data):
    """ Render a list of rows, each as a list of values """
    rows = [header_row] + data
    widths = [max(len(compat_str(cell)) for cell in column) for column in zip(*rows)]
    fmt = ' '.join('%-' + compat_str(w + 1) + 's' for w in widths[:-1]) + '%s'
    return '\n'.join(fmt % tuple(row) for row in rows)
def _match_one(filter_part, dct):
    """Evaluate one filter expression (e.g. 'duration > 30', '!is_live') against dct."""
    COMPARISON_OPERATORS = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '=': operator.eq,
        '!=': operator.ne,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = COMPARISON_OPERATORS[m.group('op')]
        if m.group('strval') is not None:
            if m.group('op') not in ('=', '!='):
                raise ValueError(
                    'Operator %s does not support string values!' % m.group('op'))
            comparison_value = m.group('strval')
        else:
            try:
                comparison_value = int(m.group('intval'))
            except ValueError:
                # Allow suffixed sizes such as 100KiB.
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        actual_value = dct.get(m.group('key'))
        if actual_value is None:
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)

    UNARY_OPERATORS = {
        '': lambda v: v is not None,
        '!': lambda v: v is None,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
    return all(
        _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
def match_filter_func(filter_str):
    """Build a match-filter callable: None means keep, a string is the skip reason."""
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        else:
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func
def parse_dfxp_time_expr(time_expr):
    """Parse a DFXP/TTML time expression ('12.3s' or 'HH:MM:SS.mmm') to seconds."""
    if not time_expr:
        return

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3))
def srt_subtitles_timecode(seconds):
    """Format a number of seconds as an SRT timecode: HH:MM:SS,mmm."""
    return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
def dfxp2srt(dfxp_data):
    """Convert DFXP/TTML subtitle markup into SRT text."""
    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
    })

    def parse_node(node):
        str_or_empty = functools.partial(str_or_none, default='')

        out = str_or_empty(node.text)

        for child in node:
            if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
                out += '\n' + str_or_empty(child.tail)
            elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'):
                out += str_or_empty(parse_node(child))
            else:
                out += str_or_empty(xml.etree.ElementTree.tostring(child))

        return out

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib['begin'])
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        if not end_time:
            # No explicit end: derive it from the duration attribute.
            end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur'])
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)
def cli_option(params, command_option, param):
    """Return [command_option, value] when params[param] is set, else []."""
    value = params.get(param)
    if value is None:
        return []
    return [command_option, value]
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    """Render a boolean param as CLI arguments, optionally joined with separator."""
    param = params.get(param)
    assert isinstance(param, bool)
    rendered = true_value if param else false_value
    if separator:
        return [command_option + separator + rendered]
    return [command_option, rendered]
def cli_valueless_option(params, command_option, param, expected_value=True):
    """Return [command_option] when params[param] equals expected_value, else []."""
    if params.get(param) == expected_value:
        return [command_option]
    return []
def cli_configuration_args(params, param, default=[]):
    """Return the extra-args list stored under param, or default when absent."""
    ex_args = params.get(param)
    if ex_args is None:
        return default
    assert isinstance(ex_args, list)
    return ex_args
2045 class ISO639Utils(object): 
2046     # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt 
2235     def short2long(cls, code): 
2236         """Convert language code from ISO 639-1 to ISO 639-2/T""" 
2237         return cls._lang_map.get(code[:2]) 
2240     def long2short(cls, code): 
2241         """Convert language code from ISO 639-2/T to ISO 639-1""" 
2242         for short_name, long_name in cls._lang_map.items(): 
2243             if long_name == code: 
2247 class ISO3166Utils(object): 
2248     # From http://data.okfn.org/data/core/country-list 
2250         'AF': 'Afghanistan', 
2251         'AX': 'Åland Islands', 
2254         'AS': 'American Samoa', 
2259         'AG': 'Antigua and Barbuda', 
2276         'BO': 'Bolivia, Plurinational State of', 
2277         'BQ': 'Bonaire, Sint Eustatius and Saba', 
2278         'BA': 'Bosnia and Herzegovina', 
2280         'BV': 'Bouvet Island', 
2282         'IO': 'British Indian Ocean Territory', 
2283         'BN': 'Brunei Darussalam', 
2285         'BF': 'Burkina Faso', 
2291         'KY': 'Cayman Islands', 
2292         'CF': 'Central African Republic', 
2296         'CX': 'Christmas Island', 
2297         'CC': 'Cocos (Keeling) Islands', 
2301         'CD': 'Congo, the Democratic Republic of the', 
2302         'CK': 'Cook Islands', 
2304         'CI': 'Côte d\'Ivoire', 
2309         'CZ': 'Czech Republic', 
2313         'DO': 'Dominican Republic', 
2316         'SV': 'El Salvador', 
2317         'GQ': 'Equatorial Guinea', 
2321         'FK': 'Falkland Islands (Malvinas)', 
2322         'FO': 'Faroe Islands', 
2326         'GF': 'French Guiana', 
2327         'PF': 'French Polynesia', 
2328         'TF': 'French Southern Territories', 
2343         'GW': 'Guinea-Bissau', 
2346         'HM': 'Heard Island and McDonald Islands', 
2347         'VA': 'Holy See (Vatican City State)', 
2354         'IR': 'Iran, Islamic Republic of', 
2357         'IM': 'Isle of Man', 
2367         'KP': 'Korea, Democratic People\'s Republic of', 
2368         'KR': 'Korea, Republic of', 
2371         'LA': 'Lao People\'s Democratic Republic', 
2377         'LI': 'Liechtenstein', 
2381         'MK': 'Macedonia, the Former Yugoslav Republic of', 
2388         'MH': 'Marshall Islands', 
2394         'FM': 'Micronesia, Federated States of', 
2395         'MD': 'Moldova, Republic of', 
2406         'NL': 'Netherlands', 
2407         'NC': 'New Caledonia', 
2408         'NZ': 'New Zealand', 
2413         'NF': 'Norfolk Island', 
2414         'MP': 'Northern Mariana Islands', 
2419         'PS': 'Palestine, State of', 
2421         'PG': 'Papua New Guinea', 
2424         'PH': 'Philippines', 
2428         'PR': 'Puerto Rico', 
2432         'RU': 'Russian Federation', 
2434         'BL': 'Saint Barthélemy', 
2435         'SH': 'Saint Helena, Ascension and Tristan da Cunha', 
2436         'KN': 'Saint Kitts and Nevis', 
2437         'LC': 'Saint Lucia', 
2438         'MF': 'Saint Martin (French part)', 
2439         'PM': 'Saint Pierre and Miquelon', 
2440         'VC': 'Saint Vincent and the Grenadines', 
2443         'ST': 'Sao Tome and Principe', 
2444         'SA': 'Saudi Arabia', 
2448         'SL': 'Sierra Leone', 
2450         'SX': 'Sint Maarten (Dutch part)', 
2453         'SB': 'Solomon Islands', 
2455         'ZA': 'South Africa', 
2456         'GS': 'South Georgia and the South Sandwich Islands', 
2457         'SS': 'South Sudan', 
2462         'SJ': 'Svalbard and Jan Mayen', 
2465         'CH': 'Switzerland', 
2466         'SY': 'Syrian Arab Republic', 
2467         'TW': 'Taiwan, Province of China', 
2469         'TZ': 'Tanzania, United Republic of', 
2471         'TL': 'Timor-Leste', 
2475         'TT': 'Trinidad and Tobago', 
2478         'TM': 'Turkmenistan', 
2479         'TC': 'Turks and Caicos Islands', 
2483         'AE': 'United Arab Emirates', 
2484         'GB': 'United Kingdom', 
2485         'US': 'United States', 
2486         'UM': 'United States Minor Outlying Islands', 
2490         'VE': 'Venezuela, Bolivarian Republic of', 
2492         'VG': 'Virgin Islands, British', 
2493         'VI': 'Virgin Islands, U.S.', 
2494         'WF': 'Wallis and Futuna', 
2495         'EH': 'Western Sahara', 
2502     def short2full(cls, code): 
2503         """Convert an ISO 3166-2 country code to the corresponding full name""" 
2504         return cls._country_map.get(code.upper()) 
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    """ProxyHandler honoring a per-request 'Ytdl-request-proxy' header override."""

    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)