from __future__ import unicode_literals

import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import math
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib

from .compat import (
    compat_HTMLParseError,
    compat_HTMLParser,
    compat_basestring,
    compat_chr,
    compat_ctypes_WINFUNCTYPE,
    compat_etree_fromstring,
    compat_expanduser,
    compat_html_entities,
    compat_html_entities_html5,
    compat_http_client,
    compat_kwargs,
    compat_parse_qs,
    compat_shlex_quote,
    compat_str,
    compat_struct_pack,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
    compat_urlparse,
    compat_xpath,
)

from .socks import (
    ProxyType,
    sockssocket,
)

def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()


ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))
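
# Illustrative example (not part of the original source): in restricted mode,
# sanitize_filename() below uses this table to transliterate accented
# characters, e.g.:
#
#   >>> ''.join(ACCENT_CHARS.get(c, c) for c in 'Ångström')
#   'Angstrom'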

DATE_FORMATS = (
    '%Y-%m-%d',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M:%S',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'

def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref

def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise

if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)

def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]
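
# Illustrative example (not part of the original source): the xpath helpers
# wrap ElementTree lookups with default/fatal handling, e.g.:
#
#   >>> doc = compat_etree_fromstring('<root><a href="x">text</a></root>')
#   >>> xpath_text(doc, './a')
#   'text'
#   >>> xpath_attr(doc, './a', 'href')
#   'x'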

def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    value = re.escape(value) if escape_value else value

    retlist = []
    for m in re.finditer(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html):
        res = m.group('content')

        if res.startswith('"') or res.startswith("'"):
            res = res[1:-1]

        retlist.append(unescapeHTML(res))

    return retlist


class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)

def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    try:
        parser.feed(html_element)
        parser.close()
    # Older Python may throw HTMLParseError in case of malformed HTML
    except compat_HTMLParseError:
        pass
    return parser.attrs
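
# Illustrative example (not part of the original source):
#
#   >>> extract_attributes('<a href="page.html" class=nav>')
#   {'href': 'page.html', 'class': 'nav'}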

def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()

def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)

def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp

def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
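
# Illustrative example (not part of the original source):
#
#   >>> sanitize_filename('Foo: Bar/Baz?')
#   'Foo - Bar_Baz'
#   >>> sanitize_filename('Foo: Bar/Baz?', restricted=True)
#   'Foo_-_Bar_Baz'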

def sanitize_path(s):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform != 'win32':
        return s
    drive_or_unc, _ = os.path.splitdrive(s)
    if sys.version_info < (2, 7) and not drive_or_unc:
        drive_or_unc, _ = os.path.splitunc(s)
    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    return os.path.join(*sanitized_path)

def sanitize_url(url):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url.startswith('//'):
        return 'http:%s' % url
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/rg3/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url


def sanitized_Request(url, *args, **kwargs):
    return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)
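
# Illustrative example (not part of the original source):
#
#   >>> sanitize_url('//example.com/playlist')
#   'http://example.com/playlist'
#   >>> sanitize_url('httpss://example.com/watch')
#   'https://example.com/watch'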

def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res

def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/rg3/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
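
# Illustrative example (not part of the original source):
#
#   >>> unescapeHTML('M&amp;M &#38; caf&eacute;')
#   'M&M & café'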

def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):
    if sys.version_info >= (3, 0):
        return b
    if not isinstance(b, bytes):
        return b
    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


def formatSeconds(secs):
    if secs > 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs > 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs
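
# Illustrative example (not part of the original source):
#
#   >>> formatSeconds(3661)
#   '1:01:01'
#   >>> formatSeconds(61)
#   '1:01'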

def make_HTTPS_handler(params, **kwargs):
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)

def bug_reports_message():
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg

class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    pass


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """
    def __init__(self, msg, countries=None):
        super(GeoRestrictedError, self).__init__(msg, expected=True)
        self.countries = countries


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        super(PostProcessingError, self).__init__(msg)
        self.msg = msg


class MaxDownloadsReached(YoutubeDLError):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT) or
                'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass

def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/rg3/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise socket.error(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except socket.error as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise socket.error('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = _create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc

def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers
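
# Illustrative example (not part of the original source):
#
#   >>> handle_youtubedl_headers({'User-Agent': 'UA', 'Accept-Encoding': 'gzip',
#   ...                           'Youtubedl-no-compression': 'True'})
#   {'User-Agent': 'UA'}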

class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response

def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection

class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)

class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/rg3/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response

def extract_timezone(date_str):
    m = re.search(
        r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
        date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str

def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass
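
# Illustrative example (not part of the original source):
#
#   >>> parse_iso8601('1970-01-01T00:00:00Z')
#   0
#   >>> parse_iso8601('1970-01-01T01:00:00+01:00')
#   0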

def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST

def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    if upload_date is not None:
        return compat_str(upload_date)
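
# Illustrative example (not part of the original source; assumes the
# DATE_FORMATS tables reconstructed above):
#
#   >>> unified_strdate('2017-12-21')
#   '20171221'
#   >>> unified_strdate('21/12/2017', day_first=True)
#   '20171221'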

def unified_timestamp(date_str, day_first=True):
    if date_str is None:
        return None

    date_str = re.sub(r'[,|]', '', date_str)

    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601 alike timestamps
    m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if m:
        date_str = date_str[:-len(m.group('tz'))]

    # Python only supports microseconds, so remove nanoseconds
    m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if m:
        date_str = m.group(1)

    for expression in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())
        except ValueError:
            pass
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600

def determine_ext(url, default_ext='unknown_video'):
    if url is None or '.' not in url:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext
, sub_lang
, sub_format
):
1282 return filename
.rsplit('.', 1)[0] + '.' + sub_lang
+ '.' + sub_format

def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, '%Y%m%d').date()


def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str


class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
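
# Illustrative example (not part of the original source):
#
#   >>> '20180215' in DateRange('20180101', '20181231')
#   True
#   >>> date_from_str('now-1week') == datetime.date.today() - datetime.timedelta(days=7)
#   True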

def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res


def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        ('GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        ('GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True


def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)

    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)

# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    # Some platforms, such as Jython, are missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(compat_shlex_quote(a))
    return ' '.join(quoted_args)

def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    sdata = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data
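
# Illustrative example (not part of the original source):
#
#   >>> url = smuggle_url('http://example.com/video', {'referer': 'http://a.example'})
#   >>> unsmuggle_url(url)
#   ('http://example.com/video', {'referer': 'http://a.example'})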

def format_bytes(bytes):
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)
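
# Illustrative example (not part of the original source):
#
#   >>> format_bytes(1536)
#   '1.50KiB'
#   >>> format_bytes(1024 ** 3)
#   '1.00GiB'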

def lookup_unit_table(unit_table, s):
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if not m:
        return None
    num_str = m.group('num').replace(',', '.')
    mult = unit_table[m.group('unit')]
    return int(float(num_str) * mult)

def parse_filesize(s):
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1, 'b': 1, 'bytes': 1,
        'KiB': 1024, 'KB': 1000, 'kB': 1024, 'Kb': 1000, 'kb': 1000,
        'kilobytes': 1000, 'kibibytes': 1024,
        'MiB': 1024 ** 2, 'MB': 1000 ** 2, 'mB': 1024 ** 2, 'Mb': 1000 ** 2, 'mb': 1000 ** 2,
        'megabytes': 1000 ** 2, 'mebibytes': 1024 ** 2,
        'GiB': 1024 ** 3, 'GB': 1000 ** 3, 'gB': 1024 ** 3, 'Gb': 1000 ** 3, 'gb': 1000 ** 3,
        'gigabytes': 1000 ** 3, 'gibibytes': 1024 ** 3,
        'TiB': 1024 ** 4, 'TB': 1000 ** 4, 'tB': 1024 ** 4, 'Tb': 1000 ** 4, 'tb': 1000 ** 4,
        'terabytes': 1000 ** 4, 'tebibytes': 1024 ** 4,
        'PiB': 1024 ** 5, 'PB': 1000 ** 5, 'pB': 1024 ** 5, 'Pb': 1000 ** 5, 'pb': 1000 ** 5,
        'petabytes': 1000 ** 5, 'pebibytes': 1024 ** 5,
        'EiB': 1024 ** 6, 'EB': 1000 ** 6, 'eB': 1024 ** 6, 'Eb': 1000 ** 6, 'eb': 1000 ** 6,
        'exabytes': 1000 ** 6, 'exbibytes': 1024 ** 6,
        'ZiB': 1024 ** 7, 'ZB': 1000 ** 7, 'zB': 1024 ** 7, 'Zb': 1000 ** 7, 'zb': 1000 ** 7,
        'zettabytes': 1000 ** 7, 'zebibytes': 1024 ** 7,
        'YiB': 1024 ** 8, 'YB': 1000 ** 8, 'yB': 1024 ** 8, 'Yb': 1000 ** 8, 'yb': 1000 ** 8,
        'yottabytes': 1000 ** 8, 'yobibytes': 1024 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)


def parse_count(s):
    if s is None:
        return None

    s = s.strip()

    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    _UNIT_TABLE = {
        'k': 1000,
        'K': 1000,
        'm': 1000 ** 2,
        'M': 1000 ** 2,
        'kk': 1000 ** 2,
        'KK': 1000 ** 2,
    }

    return lookup_unit_table(_UNIT_TABLE, s)
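
# Illustrative example (not part of the original source; assumes the unit
# tables reconstructed above):
#
#   >>> parse_filesize('1.5 megabytes')
#   1500000
#   >>> parse_count('1.2M')
#   1200000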

def parse_resolution(s):
    if s is None:
        return {}

    mobj = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s)
    if mobj:
        return {
            'width': int(mobj.group('w')),
            'height': int(mobj.group('h')),
        }

    mobj = re.search(r'\b(\d+)[pPiI]\b', s)
    if mobj:
        return {'height': int(mobj.group(1))}

    mobj = re.search(r'\b([48])[kK]\b', s)
    if mobj:
        return {'height': int(mobj.group(1)) * 540}

    return {}
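
# Illustrative example (not part of the original source):
#
#   >>> parse_resolution('1920x1080')
#   {'width': 1920, 'height': 1080}
#   >>> parse_resolution('720p')
#   {'height': 720}
#   >>> parse_resolution('4K')
#   {'height': 2160}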

def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])

    try:
        return month_names.index(name) + 1
    except ValueError:
        return None


def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviations """

    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None


def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)


def setproctitle(title):
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only expects
        # a bytestring, but since unicode_literals turns
        # every string into a unicode string, it fails.
        return
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this


def remove_start(s, start):
    return s[len(start):] if s is not None and s.startswith(start) else s


def remove_end(s, end):
    return s[:-len(end)] if s is not None and s.endswith(end) else s


def remove_quotes(s):
    if s is None or len(s) < 2:
        return s
    for quote in ('"', "'", ):
        if s[0] == quote and s[-1] == quote:
            return s[1:-1]
    return s


def url_basename(url):
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').split('/')[-1]


def base_url(url):
    return re.match(r'https?://[^?#&]+/', url).group()

def urljoin(base, path):
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not isinstance(path, compat_str) or not path:
        return None
    if re.match(r'^(?:https?:)?//', path):
        return path
    if isinstance(base, bytes):
        base = base.decode('utf-8')
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return compat_urlparse.urljoin(base, path)
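
# Illustrative example (not part of the original source):
#
#   >>> urljoin('http://example.com/a/', 'b.mp4')
#   'http://example.com/a/b.mp4'
#   >>> urljoin('http://example.com/a/', '//cdn.example.com/b.mp4')
#   '//cdn.example.com/b.mp4'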

class HEADRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'HEAD'


class PUTRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'PUT'


def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr:
        if v is not None:
            v = getattr(v, get_attr, None)
    if v == '':
        v = None
    if v is None:
        return default
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError):
        return default


def str_or_none(v, default=None):
    return default if v is None else compat_str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if int_str is None:
        return None
    int_str = re.sub(r'[,\.\+]', '', int_str)
    return int(int_str)


def float_or_none(v, scale=1, invscale=1, default=None):
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except (ValueError, TypeError):
        return default


def bool_or_none(v, default=None):
    return v if isinstance(v, bool) else default


def strip_or_none(v):
    return None if v is None else v.strip()


def url_or_none(url):
    if not url or not isinstance(url, compat_str):
        return None
    url = url.strip()
    return url if re.match(r'^(?:[a-zA-Z][\da-zA-Z.+-]*:)?//', url) else None

def parse_duration(s):
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    days, hours, mins, secs, ms = [None] * 5
    m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
    if m:
        days, hours, mins, secs, ms = m.groups()
    else:
        m = re.match(
            r'''(?ix)(?:P?
                (?:
                    [0-9]+\s*y(?:ears?)?\s*
                )?
                (?:
                    [0-9]+\s*m(?:onths?)?\s*
                )?
                (?:
                    [0-9]+\s*w(?:eeks?)?\s*
                )?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
                )?
                T)?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()

    duration = 0
    if secs:
        duration += float(secs)
    if mins:
        duration += float(mins) * 60
    if hours:
        duration += float(hours) * 60 * 60
    if days:
        duration += float(days) * 24 * 60 * 60
    if ms:
        duration += float(ms)
    return duration
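
# Illustrative example (not part of the original source):
#
#   >>> parse_duration('1:30')
#   90.0
#   >>> parse_duration('PT1H30M')
#   5400.0
#   >>> parse_duration('2.5 min')
#   150.0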

def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)

    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)
, args
=[]):
1987 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
1988 args can be a list of arguments for a short output (like -version) """
1990 subprocess
.Popen([exe
] + args
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.PIPE
).communicate()
1996 def get_exe_version(exe
, args
=['--version'],
1997 version_re
=None, unrecognized
='present'):
1998 """ Returns the version of the specified executable,
1999 or False if the executable is not present """
2001 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
2002 # SIGTTOU if youtube-dl is run in the background.
2003 # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
2004 out
, _
= subprocess
.Popen(
2005 [encodeArgument(exe
)] + args
,
2006 stdin
=subprocess
.PIPE
,
2007 stdout
=subprocess
.PIPE
, stderr
=subprocess
.STDOUT
).communicate()
2010 if isinstance(out
, bytes): # Python 2.x
2011 out
= out
.decode('ascii', 'ignore')
2012 return detect_exe_version(out
, version_re
, unrecognized
)
2015 def detect_exe_version(output
, version_re
=None, unrecognized
='present'):
2016 assert isinstance(output
, compat_str
)
2017 if version_re
is None:
2018 version_re
= r
'version\s+([-0-9._a-zA-Z]+)'
2019 m
= re
.search(version_re
, output
)
2026 class PagedList(object):
2028 # This is only useful for tests
2029 return len(self
.getslice())
2032 class OnDemandPagedList(PagedList
):
2033 def __init__(self
, pagefunc
, pagesize
, use_cache
=True):
2034 self
._pagefunc
= pagefunc
2035 self
._pagesize
= pagesize
2036 self
._use
_cache
= use_cache
2040 def getslice(self
, start
=0, end
=None):
2042 for pagenum
in itertools
.count(start
// self
._pagesize
):
2043 firstid
= pagenum
* self
._pagesize
2044 nextfirstid
= pagenum
* self
._pagesize
+ self
._pagesize
2045 if start
>= nextfirstid
:
2050 page_results
= self
._cache
.get(pagenum
)
2051 if page_results
is None:
2052 page_results
= list(self
._pagefunc
(pagenum
))
2054 self
._cache
[pagenum
] = page_results
2057 start
% self
._pagesize
2058 if firstid
<= start
< nextfirstid
2062 ((end
- 1) % self
._pagesize
) + 1
2063 if (end
is not None and firstid
<= end
<= nextfirstid
)
2066 if startv
!= 0 or endv
is not None:
2067 page_results
= page_results
[startv
:endv
]
2068 res
.extend(page_results
)
2070 # A little optimization - if current page is not "full", ie. does
2071 # not contain page_size videos then we can assume that this page
2072 # is the last one - there are no more ids on further pages -
2073 # i.e. no need to query again.
2074 if len(page_results
) + startv
< self
._pagesize
:
2077 # If we got the whole page, but the next page is not interesting,
2078 # break out early as well
2079 if end
== nextfirstid
:
2084 class InAdvancePagedList(PagedList
):
2085 def __init__(self
, pagefunc
, pagecount
, pagesize
):
2086 self
._pagefunc
= pagefunc
2087 self
._pagecount
= pagecount
2088 self
._pagesize
= pagesize
2090 def getslice(self
, start
=0, end
=None):
2092 start_page
= start
// self
._pagesize
2094 self
._pagecount
if end
is None else (end
// self
._pagesize
+ 1))
2095 skip_elems
= start
- start_page
* self
._pagesize
2096 only_more
= None if end
is None else end
- start
2097 for pagenum
in range(start_page
, end_page
):
2098 page
= list(self
._pagefunc
(pagenum
))
2100 page
= page
[skip_elems
:]
2102 if only_more
is not None:
2103 if len(page
) < only_more
:
2104 only_more
-= len(page
)
2106 page
= page
[:only_more
]
2113 def uppercase_escape(s
):
2114 unicode_escape
= codecs
.getdecoder('unicode_escape')
2116 r
'\\U[0-9a-fA-F]{8}',
2117 lambda m
: unicode_escape(m
.group(0))[0],
2121 def lowercase_escape(s
):
2122 unicode_escape
= codecs
.getdecoder('unicode_escape')
2124 r
'\\u[0-9a-fA-F]{4}',
2125 lambda m
: unicode_escape(m
.group(0))[0],
2129 def escape_rfc3986(s
):
2130 """Escape non-ASCII characters as suggested by RFC 3986"""
2131 if sys
.version_info
< (3, 0) and isinstance(s
, compat_str
):
2132 s
= s
.encode('utf-8')
2133 return compat_urllib_parse
.quote(s
, b
"%/;:@&=+$,!~*'()?#[]")
2136 def escape_url(url
):
2137 """Escape URL as suggested by RFC 3986"""
2138 url_parsed
= compat_urllib_parse_urlparse(url
)
2139 return url_parsed
._replace
(
2140 netloc
=url_parsed
.netloc
.encode('idna').decode('ascii'),
2141 path
=escape_rfc3986(url_parsed
.path
),
2142 params
=escape_rfc3986(url_parsed
.params
),
2143 query
=escape_rfc3986(url_parsed
.query
),
2144 fragment
=escape_rfc3986(url_parsed
.fragment
)
2148 def read_batch_urls(batch_fd
):
2150 if not isinstance(url
, compat_str
):
2151 url
= url
.decode('utf-8', 'replace')
2152 BOM_UTF8
= '\xef\xbb\xbf'
2153 if url
.startswith(BOM_UTF8
):
2154 url
= url
[len(BOM_UTF8
):]
2156 if url
.startswith(('#', ';', ']')):
2160 with contextlib
.closing(batch_fd
) as fd
:
2161 return [url
for url
in map(fixup
, fd
) if url
]

def urlencode_postdata(*args, **kargs):
    return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')


def update_url_query(url, query):
    if not query:
        return url
    parsed_url = compat_urlparse.urlparse(url)
    qs = compat_parse_qs(parsed_url.query)
    qs.update(query)
    return compat_urlparse.urlunparse(parsed_url._replace(
        query=compat_urllib_parse_urlencode(qs, True)))
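
# Illustrative example (not part of the original source); key order in the
# result follows dict insertion order on Python 3.7+:
#
#   >>> update_url_query('http://example.com/path?a=1', {'b': '2'})
#   'http://example.com/path?a=1&b=2'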
2178 def update_Request(req
, url
=None, data
=None, headers
={}, query
={}):
2179 req_headers
= req
.headers
.copy()
2180 req_headers
.update(headers
)
2181 req_data
= data
or req
.data
2182 req_url
= update_url_query(url
or req
.get_full_url(), query
)
2183 req_get_method
= req
.get_method()
2184 if req_get_method
== 'HEAD':
2185 req_type
= HEADRequest
2186 elif req_get_method
== 'PUT':
2187 req_type
= PUTRequest
2189 req_type
= compat_urllib_request
.Request
2191 req_url
, data
=req_data
, headers
=req_headers
,
2192 origin_req_host
=req
.origin_req_host
, unverifiable
=req
.unverifiable
)
2193 if hasattr(req
, 'timeout'):
2194 new_req
.timeout
= req
.timeout
2198 def _multipart_encode_impl(data
, boundary
):
2199 content_type
= 'multipart/form-data; boundary=%s' % boundary
2202 for k
, v
in data
.items():
2203 out
+= b
'--' + boundary
.encode('ascii') + b
'\r\n'
2204 if isinstance(k
, compat_str
):
2205 k
= k
.encode('utf-8')
2206 if isinstance(v
, compat_str
):
2207 v
= v
.encode('utf-8')
2208 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
2209 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
2210 content
= b
'Content-Disposition: form-data; name="' + k
+ b
'"\r\n\r\n' + v
+ b
'\r\n'
2211 if boundary
.encode('ascii') in content
:
2212 raise ValueError('Boundary overlaps with data')
2215 out
+= b
'--' + boundary
.encode('ascii') + b
'--\r\n'
2217 return out
, content_type


def multipart_encode(data, boundary=None):
    '''
    Encode a dict to RFC 7578-compliant form-data

    data:
        A dict where keys and values can be either Unicode or bytes-like
        objects.
    boundary:
        If specified, it must be a Unicode object and is used as the
        boundary. Otherwise a random boundary is generated.

    Reference: https://tools.ietf.org/html/rfc7578
    '''
    has_specified_boundary = boundary is not None

    while True:
        if boundary is None:
            boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))

        try:
            out, content_type = _multipart_encode_impl(data, boundary)
            break
        except ValueError:
            if has_specified_boundary:
                raise
            boundary = None

    return out, content_type
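# Usage sketch (illustrative, not part of the original source):
#   body, ctype = multipart_encode({'field': 'value'}, boundary='xxx')
#   # ctype == 'multipart/form-data; boundary=xxx'
#   # body  == b'--xxx\r\nContent-Disposition: form-data; name="field"'
#   #          b'\r\n\r\nvalue\r\n--xxx--\r\n'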


def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    if isinstance(key_or_keys, (list, tuple)):
        for key in key_or_keys:
            if key not in d or d[key] is None or skip_false_values and not d[key]:
                continue
            return d[key]
        return default
    return d.get(key_or_keys, default)
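# Usage sketch (illustrative, not part of the original source):
#   d = {'a': None, 'b': '', 'c': 'x'}
#   dict_get(d, ('a', 'b', 'c'))                      # -> 'x'
#   dict_get(d, ('a', 'b'), skip_false_values=False)  # -> ''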


def try_get(src, getter, expected_type=None):
    if not isinstance(getter, (list, tuple)):
        getter = [getter]
    for get in getter:
        try:
            v = get(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            pass
        else:
            if expected_type is None or isinstance(v, expected_type):
                return v
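# Usage sketch (illustrative, not part of the original source): safe nested
# lookups without try/except at the call site:
#   data = {'items': [{'id': 42}]}
#   try_get(data, lambda x: x['items'][0]['id'], int)  # -> 42
#   try_get(data, lambda x: x['missing'][0], int)      # -> None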


def merge_dicts(*dicts):
    merged = {}
    for a_dict in dicts:
        for k, v in a_dict.items():
            if v is None:
                continue
            if (k not in merged or
                    (isinstance(v, compat_str) and v and
                        isinstance(merged[k], compat_str) and
                        not merged[k])):
                merged[k] = v
    return merged
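# Usage sketch (illustrative, not part of the original source): later dicts
# only fill keys that are so far missing or empty strings:
#   merge_dicts({'title': ''}, {'title': 'Real title', 'id': '1'})
#   # -> {'title': 'Real title', 'id': '1'}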


def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)


US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC17': 18,
}


TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}


def parse_age_limit(s):
    if type(s) == int:
        return s if 0 <= s <= 21 else None
    if not isinstance(s, compat_basestring):
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if m:
        return int(m.group('age'))
    if s in US_RATINGS:
        return US_RATINGS[s]
    m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
    if m:
        return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
    return None
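# Usage sketch (illustrative, not part of the original source):
#   parse_age_limit(18)       # -> 18
#   parse_age_limit('18+')    # -> 18
#   parse_age_limit('PG-13')  # -> 13
#   parse_age_limit('TV-MA')  # -> 17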


def strip_jsonp(code):
    return re.sub(
        r'''(?sx)^
            (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
            (?:\s*&&\s*(?P=func_name))?
            \s*\(\s*(?P<callback_data>.*)\);?
            \s*?(?://[^\n]*)*$''',
        r'\g<callback_data>', code)


def js_to_json(code):
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v.startswith('/*') or v.startswith('//') or v == ',':
            return ''

        if v[0] in ("'", '"'):
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])

        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
            if im:
                i = int(im.group(1), base)
                return '"%d":' % i if v.endswith(':') else '%d' % i

        return '"%s"' % v

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        (?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
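# Usage sketch (illustrative, not part of the original source): relaxed
# JavaScript object notation becomes strict JSON:
#   js_to_json("{'key': true, count: 0x10,}")
#   # -> '{"key": true, "count": 16}'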


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q
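# Usage sketch (illustrative, not part of the original source):
#   q = qualities(['240p', '360p', '720p'])
#   q('720p')  # -> 2
#   q('144p')  # -> -1 (unknown values sort lowest)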


DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Return whether youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(compat_shlex_quote(a) for a in args)


def error_to_compat_str(err):
    err_str = str(err)
    # On Python 2, error byte strings must be decoded with the proper
    # encoding rather than ASCII
    if sys.version_info[0] < 3:
        err_str = err_str.decode(preferredencoding())
    return err_str


def mimetype2ext(mt):
    if mt is None:
        return None

    ext = {
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
    }.get(mt)
    if ext is not None:
        return ext

    _, _, res = mt.rpartition('/')
    res = res.split(';')[0].strip().lower()

    return {
        'smptett+xml': 'tt',
        'x-mp4-fragmented': 'mp4',
        'x-ms-sami': 'sami',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'vnd.ms-sstr+xml': 'ism',
    }.get(res, res)
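# Usage sketch (illustrative, not part of the original source):
#   mimetype2ext('audio/mpeg')                     # -> 'mp3'
#   mimetype2ext('application/vnd.apple.mpegurl')  # -> 'm3u8'
#   mimetype2ext('video/mp4; codecs="avc1"')       # -> 'mp4' (fallthrough)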


def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    split_codecs = list(filter(None, map(
        lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
    vcodec, acodec = None, None
    for full_codec in split_codecs:
        codec = full_codec.split('.')[0]
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01'):
            if not vcodec:
                vcodec = full_codec
        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            if not acodec:
                acodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        if len(split_codecs) == 2:
            return {
                'vcodec': split_codecs[0],
                'acodec': split_codecs[1],
            }
        elif len(split_codecs) == 1:
            return {
                'vcodec': 'none',
                'acodec': split_codecs[0],
            }
    else:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    return {}
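# Usage sketch (illustrative, not part of the original source):
#   parse_codecs('avc1.64001f, mp4a.40.2')
#   # -> {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2'}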


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """

    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
            break
    else:
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)


def determine_protocol(info_dict):
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = info_dict['url']
    if url.startswith('rtmp'):
        return 'rtmp'
    elif url.startswith('mms'):
        return 'mms'
    elif url.startswith('rtsp'):
        return 'rtsp'

    ext = determine_ext(url)
    if ext == 'm3u8':
        return 'm3u8'
    elif ext == 'f4m':
        return 'f4m'

    return compat_urllib_parse_urlparse(url).scheme


def render_table(header_row, data):
    """ Render a list of rows, each as a list of values """
    table = [header_row] + data
    max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
    format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
    return '\n'.join(format_str % tuple(row) for row in table)
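# Usage sketch (illustrative, not part of the original source): columns are
# left-aligned and padded to the longest cell:
#   render_table(['format', 'note'], [['mp4', 'best'], ['webm', '']])
#   # -> 'format note\nmp4    best\nwebm   '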


def _match_one(filter_part, dct):
    COMPARISON_OPERATORS = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '=': operator.eq,
        '!=': operator.ne,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
            (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = COMPARISON_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        if (m.group('quotedstrval') is not None or
            m.group('strval') is not None or
                # If the original field is a string and the matching comparison
                # value is a number we should respect the origin of the original
                # field and process the comparison value as a string (see
                # https://github.com/rg3/youtube-dl/issues/11082).
                actual_value is not None and m.group('intval') is not None and
                isinstance(actual_value, compat_str)):
            if m.group('op') not in ('=', '!='):
                raise ValueError(
                    'Operator %s does not support string values!' % m.group('op'))
            comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
            quote = m.group('quote')
            if quote is not None:
                comparison_value = comparison_value.replace(r'\%s' % quote, quote)
        else:
            try:
                comparison_value = int(m.group('intval'))
            except ValueError:
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        if actual_value is None:
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)

    UNARY_OPERATORS = {
        '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
        '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)


def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or False """

    return all(
        _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
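# Usage sketch (illustrative, not part of the original source):
#   match_str('height >= 720 & !is_live',
#             {'height': 1080, 'is_live': False})  # -> True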


def match_filter_func(filter_str):
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        else:
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func


def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))


def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
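# Usage sketch (illustrative, not part of the original source):
#   srt_subtitles_timecode(3661.5)  # -> '01:01:01,500'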


def dfxp2srt(dfxp_data):
    '''
    @param dfxp_data A bytes-like object containing DFXP data
    @returns A unicode object containing converted SRT data
    '''
    LEGACY_NAMESPACES = (
        (b'http://www.w3.org/ns/ttml', [
            b'http://www.w3.org/2004/11/ttaf1',
            b'http://www.w3.org/2006/04/ttaf1',
            b'http://www.w3.org/2006/10/ttaf1',
        ]),
        (b'http://www.w3.org/ns/ttml#styling', [
            b'http://www.w3.org/ns/ttml#style',
        ]),
    )

    SUPPORTED_STYLING = [
        'color',
        'fontFamily',
        'fontSize',
        'fontStyle',
        'fontWeight',
        'textDecoration'
    ]

    _x = functools.partial(xpath_with_ns, ns_map={
        'xml': 'http://www.w3.org/XML/1998/namespace',
        'ttml': 'http://www.w3.org/ns/ttml',
        'tts': 'http://www.w3.org/ns/ttml#styling',
    })

    styles = {}
    default_style = {}

    class TTMLPElementParser(object):
        _out = ''
        _unclosed_elements = []
        _applied_styles = []

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), 'br'):
                self._out += '\n'
            else:
                unclosed_elements = []
                style = {}
                element_style_id = attrib.get('style')
                if default_style:
                    style.update(default_style)
                if element_style_id:
                    style.update(styles.get(element_style_id, {}))
                for prop in SUPPORTED_STYLING:
                    prop_val = attrib.get(_x('tts:' + prop))
                    if prop_val:
                        style[prop] = prop_val
                if style:
                    font = ''
                    for k, v in sorted(style.items()):
                        if self._applied_styles and self._applied_styles[-1].get(k) == v:
                            continue
                        if k == 'color':
                            font += ' color="%s"' % v
                        elif k == 'fontSize':
                            font += ' size="%s"' % v
                        elif k == 'fontFamily':
                            font += ' face="%s"' % v
                        elif k == 'fontWeight' and v == 'bold':
                            self._out += '<b>'
                            unclosed_elements.append('b')
                        elif k == 'fontStyle' and v == 'italic':
                            self._out += '<i>'
                            unclosed_elements.append('i')
                        elif k == 'textDecoration' and v == 'underline':
                            self._out += '<u>'
                            unclosed_elements.append('u')
                    if font:
                        self._out += '<font' + font + '>'
                        unclosed_elements.append('font')
                    applied_style = {}
                    if self._applied_styles:
                        applied_style.update(self._applied_styles[-1])
                    applied_style.update(style)
                    self._applied_styles.append(applied_style)
                self._unclosed_elements.append(unclosed_elements)

        def end(self, tag):
            if tag not in (_x('ttml:br'), 'br'):
                unclosed_elements = self._unclosed_elements.pop()
                for element in reversed(unclosed_elements):
                    self._out += '</%s>' % element
                if unclosed_elements and self._applied_styles:
                    self._applied_styles.pop()

        def data(self, data):
            self._out += data

        def close(self):
            return self._out.strip()

    def parse_node(node):
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    for k, v in LEGACY_NAMESPACES:
        for ns in v:
            dfxp_data = dfxp_data.replace(ns, k)

    dfxp = compat_etree_fromstring(dfxp_data)
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    repeat = False
    while True:
        for style in dfxp.findall(_x('.//ttml:style')):
            style_id = style.get('id') or style.get(_x('xml:id'))
            if not style_id:
                continue
            parent_style_id = style.get('style')
            if parent_style_id:
                if parent_style_id not in styles:
                    repeat = True
                    continue
                styles[style_id] = styles[parent_style_id].copy()
            for prop in SUPPORTED_STYLING:
                prop_val = style.get(_x('tts:' + prop))
                if prop_val:
                    styles.setdefault(style_id, {})[prop] = prop_val
        if repeat:
            repeat = False
        else:
            break

    for p in ('body', 'div'):
        ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
        if ele is None:
            continue
        style = styles.get(ele.get('style'))
        if not style:
            continue
        default_style.update(style)

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)


def cli_option(params, command_option, param):
    param = params.get(param)
    if param:
        param = compat_str(param)
    return [command_option, param] if param is not None else []


def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    param = params.get(param)
    if param is None:
        return []
    assert isinstance(param, bool)
    if separator:
        return [command_option + separator + (true_value if param else false_value)]
    return [command_option, true_value if param else false_value]
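# Usage sketch (illustrative, not part of the original source):
#   cli_bool_option({'nocheckcertificate': True},
#                   '--no-check-certificate', 'nocheckcertificate')
#   # -> ['--no-check-certificate', 'true']
#   cli_bool_option({'nocheckcertificate': False}, '--check-certificate',
#                   'nocheckcertificate', 'false', 'true', '=')
#   # -> ['--check-certificate=true']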


def cli_valueless_option(params, command_option, param, expected_value=True):
    param = params.get(param)
    return [command_option] if param == expected_value else []


def cli_configuration_args(params, param, default=[]):
    ex_args = params.get(param)
    if ex_args is None:
        return default
    assert isinstance(ex_args, list)
    return ex_args


class ISO639Utils(object):
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        # ... full ISO 639-1 -> ISO 639-2/T table elided in this excerpt ...
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name


class ISO3166Utils(object):
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan',
        'AX': 'Åland Islands',
        'AS': 'American Samoa',
        'AG': 'Antigua and Barbuda',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BV': 'Bouvet Island',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BF': 'Burkina Faso',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CI': 'Côte d\'Ivoire',
        'CZ': 'Czech Republic',
        'DO': 'Dominican Republic',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GW': 'Guinea-Bissau',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'IR': 'Iran, Islamic Republic of',
        'IM': 'Isle of Man',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'LA': 'Lao People\'s Democratic Republic',
        'LI': 'Liechtenstein',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MH': 'Marshall Islands',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'PS': 'Palestine, State of',
        'PG': 'Papua New Guinea',
        'PH': 'Philippines',
        'PR': 'Puerto Rico',
        'RU': 'Russian Federation',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SL': 'Sierra Leone',
        'SX': 'Sint Maarten (Dutch part)',
        'SB': 'Solomon Islands',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'SJ': 'Svalbard and Jan Mayen',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TZ': 'Tanzania, United Republic of',
        'TL': 'Timor-Leste',
        'TT': 'Trinidad and Tobago',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-1 alpha-2 country code to the corresponding full name"""
        return cls._country_map.get(code.upper())


class GeoUtils(object):
    # Major IPv4 address blocks per country
    _country_ip_map = {
        'AD': '85.94.160.0/19',
        'AE': '94.200.0.0/13',
        'AF': '149.54.0.0/17',
        'AG': '209.59.64.0/18',
        'AI': '204.14.248.0/21',
        'AL': '46.99.0.0/16',
        'AM': '46.70.0.0/15',
        'AO': '105.168.0.0/13',
        'AP': '159.117.192.0/21',
        'AR': '181.0.0.0/12',
        'AS': '202.70.112.0/20',
        'AT': '84.112.0.0/13',
        'AU': '1.128.0.0/11',
        'AW': '181.41.0.0/18',
        'AZ': '5.191.0.0/16',
        'BA': '31.176.128.0/17',
        'BB': '65.48.128.0/17',
        'BD': '114.130.0.0/16',
        'BF': '129.45.128.0/17',
        'BG': '95.42.0.0/15',
        'BH': '37.131.0.0/17',
        'BI': '154.117.192.0/18',
        'BJ': '137.255.0.0/16',
        'BL': '192.131.134.0/24',
        'BM': '196.12.64.0/18',
        'BN': '156.31.0.0/16',
        'BO': '161.56.0.0/16',
        'BQ': '161.0.80.0/20',
        'BR': '152.240.0.0/12',
        'BS': '24.51.64.0/18',
        'BT': '119.2.96.0/19',
        'BW': '168.167.0.0/16',
        'BY': '178.120.0.0/13',
        'BZ': '179.42.192.0/18',
        'CA': '99.224.0.0/11',
        'CD': '41.243.0.0/16',
        'CF': '196.32.200.0/21',
        'CG': '197.214.128.0/17',
        'CH': '85.0.0.0/13',
        'CI': '154.232.0.0/14',
        'CK': '202.65.32.0/19',
        'CL': '152.172.0.0/14',
        'CM': '165.210.0.0/15',
        'CN': '36.128.0.0/10',
        'CO': '181.240.0.0/12',
        'CR': '201.192.0.0/12',
        'CU': '152.206.0.0/15',
        'CV': '165.90.96.0/19',
        'CW': '190.88.128.0/17',
        'CY': '46.198.0.0/15',
        'CZ': '88.100.0.0/14',
        'DJ': '197.241.0.0/17',
        'DK': '87.48.0.0/12',
        'DM': '192.243.48.0/20',
        'DO': '152.166.0.0/15',
        'DZ': '41.96.0.0/12',
        'EC': '186.68.0.0/15',
        'EE': '90.190.0.0/15',
        'EG': '156.160.0.0/11',
        'ER': '196.200.96.0/20',
        'ES': '88.0.0.0/11',
        'ET': '196.188.0.0/14',
        'EU': '2.16.0.0/13',
        'FI': '91.152.0.0/13',
        'FJ': '144.120.0.0/16',
        'FM': '119.252.112.0/20',
        'FO': '88.85.32.0/19',
        'GA': '41.158.0.0/15',
        'GD': '74.122.88.0/21',
        'GE': '31.146.0.0/16',
        'GF': '161.22.64.0/18',
        'GG': '62.68.160.0/19',
        'GH': '45.208.0.0/14',
        'GI': '85.115.128.0/19',
        'GL': '88.83.0.0/19',
        'GM': '160.182.0.0/15',
        'GN': '197.149.192.0/18',
        'GP': '104.250.0.0/19',
        'GQ': '105.235.224.0/20',
        'GR': '94.64.0.0/13',
        'GT': '168.234.0.0/16',
        'GU': '168.123.0.0/16',
        'GW': '197.214.80.0/20',
        'GY': '181.41.64.0/18',
        'HK': '113.252.0.0/14',
        'HN': '181.210.0.0/16',
        'HR': '93.136.0.0/13',
        'HT': '148.102.128.0/17',
        'HU': '84.0.0.0/14',
        'ID': '39.192.0.0/10',
        'IE': '87.32.0.0/12',
        'IL': '79.176.0.0/13',
        'IM': '5.62.80.0/20',
        'IN': '117.192.0.0/10',
        'IO': '203.83.48.0/21',
        'IQ': '37.236.0.0/14',
        'IR': '2.176.0.0/12',
        'IS': '82.221.0.0/16',
        'IT': '79.0.0.0/10',
        'JE': '87.244.64.0/18',
        'JM': '72.27.0.0/17',
        'JO': '176.29.0.0/16',
        'JP': '126.0.0.0/8',
        'KE': '105.48.0.0/12',
        'KG': '158.181.128.0/17',
        'KH': '36.37.128.0/17',
        'KI': '103.25.140.0/22',
        'KM': '197.255.224.0/20',
        'KN': '198.32.32.0/19',
        'KP': '175.45.176.0/22',
        'KR': '175.192.0.0/10',
        'KW': '37.36.0.0/14',
        'KY': '64.96.0.0/15',
        'KZ': '2.72.0.0/13',
        'LA': '115.84.64.0/18',
        'LB': '178.135.0.0/16',
        'LC': '192.147.231.0/24',
        'LI': '82.117.0.0/19',
        'LK': '112.134.0.0/15',
        'LR': '41.86.0.0/19',
        'LS': '129.232.0.0/17',
        'LT': '78.56.0.0/13',
        'LU': '188.42.0.0/16',
        'LV': '46.109.0.0/16',
        'LY': '41.252.0.0/14',
        'MA': '105.128.0.0/11',
        'MC': '88.209.64.0/18',
        'MD': '37.246.0.0/16',
        'ME': '178.175.0.0/17',
        'MF': '74.112.232.0/21',
        'MG': '154.126.0.0/17',
        'MH': '117.103.88.0/21',
        'MK': '77.28.0.0/15',
        'ML': '154.118.128.0/18',
        'MM': '37.111.0.0/17',
        'MN': '49.0.128.0/17',
        'MO': '60.246.0.0/16',
        'MP': '202.88.64.0/20',
        'MQ': '109.203.224.0/19',
        'MR': '41.188.64.0/18',
        'MS': '208.90.112.0/22',
        'MT': '46.11.0.0/16',
        'MU': '105.16.0.0/12',
        'MV': '27.114.128.0/18',
        'MW': '105.234.0.0/16',
        'MX': '187.192.0.0/11',
        'MY': '175.136.0.0/13',
        'MZ': '197.218.0.0/15',
        'NA': '41.182.0.0/16',
        'NC': '101.101.0.0/18',
        'NE': '197.214.0.0/18',
        'NF': '203.17.240.0/22',
        'NG': '105.112.0.0/12',
        'NI': '186.76.0.0/15',
        'NL': '145.96.0.0/11',
        'NO': '84.208.0.0/13',
        'NP': '36.252.0.0/15',
        'NR': '203.98.224.0/19',
        'NU': '49.156.48.0/22',
        'NZ': '49.224.0.0/14',
        'OM': '5.36.0.0/15',
        'PA': '186.72.0.0/15',
        'PE': '186.160.0.0/14',
        'PF': '123.50.64.0/18',
        'PG': '124.240.192.0/19',
        'PH': '49.144.0.0/13',
        'PK': '39.32.0.0/11',
        'PL': '83.0.0.0/11',
        'PM': '70.36.0.0/20',
        'PR': '66.50.0.0/16',
        'PS': '188.161.0.0/16',
        'PT': '85.240.0.0/13',
        'PW': '202.124.224.0/20',
        'PY': '181.120.0.0/14',
        'QA': '37.210.0.0/15',
        'RE': '139.26.0.0/16',
        'RO': '79.112.0.0/13',
        'RS': '178.220.0.0/14',
        'RU': '5.136.0.0/13',
        'RW': '105.178.0.0/15',
        'SA': '188.48.0.0/13',
        'SB': '202.1.160.0/19',
        'SC': '154.192.0.0/11',
        'SD': '154.96.0.0/13',
        'SE': '78.64.0.0/12',
        'SG': '152.56.0.0/14',
        'SI': '188.196.0.0/14',
        'SK': '78.98.0.0/15',
        'SL': '197.215.0.0/17',
        'SM': '89.186.32.0/19',
        'SN': '41.82.0.0/15',
        'SO': '197.220.64.0/19',
        'SR': '186.179.128.0/17',
        'SS': '105.235.208.0/21',
        'ST': '197.159.160.0/19',
        'SV': '168.243.0.0/16',
        'SX': '190.102.0.0/20',
        'SZ': '41.84.224.0/19',
        'TC': '65.255.48.0/20',
        'TD': '154.68.128.0/19',
        'TG': '196.168.0.0/14',
        'TH': '171.96.0.0/13',
        'TJ': '85.9.128.0/18',
        'TK': '27.96.24.0/21',
        'TL': '180.189.160.0/20',
        'TM': '95.85.96.0/19',
        'TN': '197.0.0.0/11',
        'TO': '175.176.144.0/21',
        'TR': '78.160.0.0/11',
        'TT': '186.44.0.0/15',
        'TV': '202.2.96.0/19',
        'TW': '120.96.0.0/11',
        'TZ': '156.156.0.0/14',
        'UA': '93.72.0.0/13',
        'UG': '154.224.0.0/13',
        'UY': '167.56.0.0/13',
        'UZ': '82.215.64.0/18',
        'VA': '212.77.0.0/19',
        'VC': '24.92.144.0/20',
        'VE': '186.88.0.0/13',
        'VG': '172.103.64.0/18',
        'VI': '146.226.0.0/16',
        'VN': '14.160.0.0/11',
        'VU': '202.80.32.0/20',
        'WF': '117.20.32.0/21',
        'WS': '202.4.32.0/19',
        'YE': '134.35.0.0/16',
        'YT': '41.242.116.0/22',
        'ZA': '41.0.0.0/11',
        'ZM': '165.56.0.0/13',
        'ZW': '41.85.192.0/19',
    }

    @classmethod
    def random_ipv4(cls, code_or_block):
        if len(code_or_block) == 2:
            block = cls._country_ip_map.get(code_or_block.upper())
            if not block:
                return None
        else:
            block = code_or_block
        addr, preflen = block.split('/')
        addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
        addr_max = addr_min | (0xffffffff >> int(preflen))
        return compat_str(socket.inet_ntoa(
            compat_struct_pack('!L', random.randint(addr_min, addr_max))))
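# Usage sketch (illustrative, not part of the original source):
#   GeoUtils.random_ipv4('NL')          # -> some address inside 145.96.0.0/11
#   GeoUtils.random_ipv4('10.0.0.0/8')  # CIDR blocks can be passed directly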


class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
            req.add_header('Ytdl-socks-proxy', proxy)
            # youtube-dl's http/https handlers do the actual wrapping of the
            # socket with SOCKS
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)


# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387

def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    while n > 0:
        s = compat_struct_pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s


def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
    return acc
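# Usage sketch (illustrative, not part of the original source): the two
# functions are inverses of each other:
#   bytes_to_long(long_to_bytes(0xdeadbeef))  # -> 3735928559 (== 0xdeadbeef)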


def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''

    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted


def pkcs1pad(data, length):
    """
    Padding input data with PKCS#1 scheme

    @param {int[]} data        input data
    @param {int}   length      target length
    @returns {int[]}           padded data
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data


def encode_base_n(num, n, table=None):
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret
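# Usage sketch (illustrative, not part of the original source):
#   encode_base_n(255, 16)  # -> 'ff'
#   encode_base_n(36, 36)   # -> '10'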


def decode_packed_codes(code):
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    while count:
        count -= 1
        base_n_count = encode_base_n(count, base)
        symbol_table[base_n_count] = symbols[count] or base_n_count

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfuscated_code)


def parse_m3u8_attributes(attrib):
    info = {}
    for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info
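# Usage sketch (illustrative, not part of the original source):
#   parse_m3u8_attributes('BANDWIDTH=800000,CODECS="avc1.4d401e,mp4a.40.2"')
#   # -> {'BANDWIDTH': '800000', 'CODECS': 'avc1.4d401e,mp4a.40.2'}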


def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n
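# Usage sketch (illustrative, not part of the original source): emulates
# JavaScript's unsigned right shift (>>>) on 32-bit values:
#   urshift(-1, 28)  # -> 15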


# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/rg3/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []

    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    idat = b''

    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    stride = width * 3
    pixels = []

    def _get_pixel(idx):
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    for y in range(height):
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []

        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c

                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels


def write_xattr(path, key, value):
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        if hasattr(xattr, 'set'):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/rg3/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'youtube-dl requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ':' not in key
            assert os.path.exists(path)

            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:

                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)] +
                       [encodeArgument(o) for o in opts] +
                       [encodeFilename(path, True)])

                try:
                    p = subprocess.Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = p.communicate()
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)

            else:
                # On Unix, and can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool).")
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary.")


def random_birthday(year_field, month_field, day_field):
    return {
        year_field: str(random.randint(1950, 1995)),
        month_field: str(random.randint(1, 12)),
        day_field: str(random.randint(1, 31)),
    }