X-Git-Url: https://git.rapsys.eu/youtubedl/blobdiff_plain/415fdb62500dca2e22067a05008dfbf87c75b662..6ec58e925503be14adcd2282d20834b97a8a7e06:/youtube_dl/utils.py diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index b97e62a..4d3cbac 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -1,6 +1,8 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +from __future__ import unicode_literals + import calendar import codecs import contextlib @@ -8,7 +10,6 @@ import ctypes import datetime import email.utils import errno -import getpass import gzip import itertools import io @@ -24,169 +25,25 @@ import socket import struct import subprocess import sys +import tempfile import traceback import xml.etree.ElementTree import zlib -try: - import urllib.request as compat_urllib_request -except ImportError: # Python 2 - import urllib2 as compat_urllib_request - -try: - import urllib.error as compat_urllib_error -except ImportError: # Python 2 - import urllib2 as compat_urllib_error - -try: - import urllib.parse as compat_urllib_parse -except ImportError: # Python 2 - import urllib as compat_urllib_parse - -try: - from urllib.parse import urlparse as compat_urllib_parse_urlparse -except ImportError: # Python 2 - from urlparse import urlparse as compat_urllib_parse_urlparse - -try: - import urllib.parse as compat_urlparse -except ImportError: # Python 2 - import urlparse as compat_urlparse - -try: - import http.cookiejar as compat_cookiejar -except ImportError: # Python 2 - import cookielib as compat_cookiejar - -try: - import html.entities as compat_html_entities -except ImportError: # Python 2 - import htmlentitydefs as compat_html_entities - -try: - import html.parser as compat_html_parser -except ImportError: # Python 2 - import HTMLParser as compat_html_parser - -try: - import http.client as compat_http_client -except ImportError: # Python 2 - import httplib as compat_http_client - -try: - from urllib.error import HTTPError as compat_HTTPError -except ImportError: # Python 2 - from urllib2 import HTTPError as compat_HTTPError - -try: - from urllib.request import urlretrieve as compat_urlretrieve -except ImportError: # Python 2 - from urllib import urlretrieve as compat_urlretrieve - - -try: - from subprocess import DEVNULL - compat_subprocess_get_DEVNULL = lambda: DEVNULL -except ImportError: - compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') - -try: - from urllib.parse import parse_qs as compat_parse_qs -except ImportError: # Python 2 - # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. - # Python 2's version is apparently totally broken - def _unquote(string, encoding='utf-8', errors='replace'): - if string == '': - return string - res = string.split('%') - if len(res) == 1: - return string - if encoding is None: - encoding = 'utf-8' - if errors is None: - errors = 'replace' - # pct_sequence: contiguous sequence of percent-encoded bytes, decoded - pct_sequence = b'' - string = res[0] - for item in res[1:]: - try: - if not item: - raise ValueError - pct_sequence += item[:2].decode('hex') - rest = item[2:] - if not rest: - # This segment was just a single percent-encoded character. - # May be part of a sequence of code units, so delay decoding. - # (Stored in pct_sequence). - continue - except ValueError: - rest = '%' + item - # Encountered non-percent-encoded characters. Flush the current - # pct_sequence. 
- string += pct_sequence.decode(encoding, errors) + rest - pct_sequence = b'' - if pct_sequence: - # Flush the final pct_sequence - string += pct_sequence.decode(encoding, errors) - return string - - def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, - encoding='utf-8', errors='replace'): - qs, _coerce_result = qs, unicode - pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] - r = [] - for name_value in pairs: - if not name_value and not strict_parsing: - continue - nv = name_value.split('=', 1) - if len(nv) != 2: - if strict_parsing: - raise ValueError("bad query field: %r" % (name_value,)) - # Handle case of a control-name with no equal sign - if keep_blank_values: - nv.append('') - else: - continue - if len(nv[1]) or keep_blank_values: - name = nv[0].replace('+', ' ') - name = _unquote(name, encoding=encoding, errors=errors) - name = _coerce_result(name) - value = nv[1].replace('+', ' ') - value = _unquote(value, encoding=encoding, errors=errors) - value = _coerce_result(value) - r.append((name, value)) - return r - - def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, - encoding='utf-8', errors='replace'): - parsed_result = {} - pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, - encoding=encoding, errors=errors) - for name, value in pairs: - if name in parsed_result: - parsed_result[name].append(value) - else: - parsed_result[name] = [value] - return parsed_result - -try: - compat_str = unicode # Python 2 -except NameError: - compat_str = str - -try: - compat_chr = unichr # Python 2 -except NameError: - compat_chr = chr - -try: - from xml.etree.ElementTree import ParseError as compat_xml_parse_error -except ImportError: # Python 2.6 - from xml.parsers.expat import ExpatError as compat_xml_parse_error +from .compat import ( + compat_chr, + compat_getenv, + compat_html_entities, + compat_parse_qs, + compat_str, + compat_urllib_error, + compat_urllib_parse, + compat_urllib_parse_urlparse, + compat_urllib_request, + compat_urlparse, + shlex_quote, +) -def compat_ord(c): - if type(c) is int: return c - else: return ord(c) # This is not clearly defined otherwise compiled_regex_type = type(re.compile('')) @@ -199,6 +56,7 @@ std_headers = { 'Accept-Language': 'en-us,en;q=0.5', } + def preferredencoding(): """Get preferred encoding. @@ -207,40 +65,81 @@ def preferredencoding(): """ try: pref = locale.getpreferredencoding() - u'TEST'.encode(pref) + 'TEST'.encode(pref) except: pref = 'UTF-8' return pref -if sys.version_info < (3,0): - def compat_print(s): - print(s.encode(preferredencoding(), 'xmlcharrefreplace')) -else: - def compat_print(s): - assert type(s) == type(u'') - print(s) - -# In Python 2.x, json.dump expects a bytestream. 
-# In Python 3.x, it writes to a character stream -if sys.version_info < (3,0): - def write_json_file(obj, fn): - with open(fn, 'wb') as f: - json.dump(obj, f) -else: - def write_json_file(obj, fn): - with open(fn, 'w', encoding='utf-8') as f: - json.dump(obj, f) -if sys.version_info >= (2,7): +def write_json_file(obj, fn): + """ Encode obj as JSON and write it to fn, atomically if possible """ + + fn = encodeFilename(fn) + if sys.version_info < (3, 0) and sys.platform != 'win32': + encoding = get_filesystem_encoding() + # os.path.basename returns a bytes object, but NamedTemporaryFile + # will fail if the filename contains non ascii characters unless we + # use a unicode object + path_basename = lambda f: os.path.basename(fn).decode(encoding) + # the same for os.path.dirname + path_dirname = lambda f: os.path.dirname(fn).decode(encoding) + else: + path_basename = os.path.basename + path_dirname = os.path.dirname + + args = { + 'suffix': '.tmp', + 'prefix': path_basename(fn) + '.', + 'dir': path_dirname(fn), + 'delete': False, + } + + # In Python 2.x, json.dump expects a bytestream. + # In Python 3.x, it writes to a character stream + if sys.version_info < (3, 0): + args['mode'] = 'wb' + else: + args.update({ + 'mode': 'w', + 'encoding': 'utf-8', + }) + + tf = tempfile.NamedTemporaryFile(**args) + + try: + with tf: + json.dump(obj, tf) + if sys.platform == 'win32': + # Need to remove existing file on Windows, else os.rename raises + # WindowsError or FileExistsError. + try: + os.unlink(fn) + except OSError: + pass + os.rename(tf.name, fn) + except: + try: + os.remove(tf.name) + except OSError: + pass + raise + + +if sys.version_info >= (2, 7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ - assert re.match(r'^[a-zA-Z]+$', key) - assert re.match(r'^[a-zA-Z0-9@\s:._]*$', val) - expr = xpath + u"[@%s='%s']" % (key, val) + assert re.match(r'^[a-zA-Z-]+$', key) + assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val) + expr = xpath + "[@%s='%s']" % (key, val) return node.find(expr) else: def find_xpath_attr(node, xpath, key, val): + # Here comes the crazy part: In 2.6, if the xpath is a unicode, + # .//node does not match if a node is a direct child of . ! + if isinstance(xpath, unicode): + xpath = xpath.encode('ascii') + for f in node.findall(xpath): if f.attrib.get(key) == val: return f @@ -248,6 +147,8 @@ else: # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter + + def xpath_with_ns(path, ns_map): components = [c.split(':') for c in path.split('/')] replaced = [] @@ -259,152 +160,47 @@ def xpath_with_ns(path, ns_map): replaced.append('{%s}%s' % (ns_map[ns], tag)) return '/'.join(replaced) -def htmlentity_transform(matchobj): - """Transforms an HTML entity to a character. - This function receives a match object and is intended to be used with - the re.sub() function. 
- """ - entity = matchobj.group(1) - - # Known non-numeric HTML entity - if entity in compat_html_entities.name2codepoint: - return compat_chr(compat_html_entities.name2codepoint[entity]) +def xpath_text(node, xpath, name=None, fatal=False): + if sys.version_info < (2, 7): # Crazy 2.6 + xpath = xpath.encode('ascii') - mobj = re.match(u'(?u)#(x?\\d+)', entity) - if mobj is not None: - numstr = mobj.group(1) - if numstr.startswith(u'x'): - base = 16 - numstr = u'0%s' % numstr + n = node.find(xpath) + if n is None: + if fatal: + name = xpath if name is None else name + raise ExtractorError('Could not find XML element %s' % name) else: - base = 10 - return compat_chr(int(numstr, base)) - - # Unknown entity in name, return its literal representation - return (u'&%s;' % entity) - -compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix -class BaseHTMLParser(compat_html_parser.HTMLParser): - def __init(self): - compat_html_parser.HTMLParser.__init__(self) - self.html = None - - def loads(self, html): - self.html = html - self.feed(html) - self.close() - -class AttrParser(BaseHTMLParser): - """Modified HTMLParser that isolates a tag with the specified attribute""" - def __init__(self, attribute, value): - self.attribute = attribute - self.value = value - self.result = None - self.started = False - self.depth = {} - self.watch_startpos = False - self.error_count = 0 - BaseHTMLParser.__init__(self) - - def error(self, message): - if self.error_count > 10 or self.started: - raise compat_html_parser.HTMLParseError(message, self.getpos()) - self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line - self.error_count += 1 - self.goahead(1) - - def handle_starttag(self, tag, attrs): - attrs = dict(attrs) - if self.started: - self.find_startpos(None) - if self.attribute in attrs and attrs[self.attribute] == self.value: - self.result = [tag] - self.started = True - self.watch_startpos = True - if self.started: - if not tag in self.depth: self.depth[tag] = 0 - self.depth[tag] += 1 - - def handle_endtag(self, tag): - if self.started: - if tag in self.depth: self.depth[tag] -= 1 - if self.depth[self.result[0]] == 0: - self.started = False - self.result.append(self.getpos()) - - def find_startpos(self, x): - """Needed to put the start position of the result (self.result[1]) - after the opening tag with the requested id""" - if self.watch_startpos: - self.watch_startpos = False - self.result.append(self.getpos()) - handle_entityref = handle_charref = handle_data = handle_comment = \ - handle_decl = handle_pi = unknown_decl = find_startpos - - def get_result(self): - if self.result is None: - return None - if len(self.result) != 3: return None - lines = self.html.split('\n') - lines = lines[self.result[1][0]-1:self.result[2][0]] - lines[0] = lines[0][self.result[1][1]:] - if len(lines) == 1: - lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]] - lines[-1] = lines[-1][:self.result[2][1]] - return '\n'.join(lines).strip() -# Hack for https://github.com/rg3/youtube-dl/issues/662 -if sys.version_info < (2, 7, 3): - AttrParser.parse_endtag = (lambda self, i: - i + len("") - if self.rawdata[i:].startswith("") - else compat_html_parser.HTMLParser.parse_endtag(self, i)) + return n.text + def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return 
get_element_by_attribute("id", id, html) + def get_element_by_attribute(attribute, value, html): """Return the content of the tag with the specified attribute in the passed HTML document""" - parser = AttrParser(attribute, value) - try: - parser.loads(html) - except compat_html_parser.HTMLParseError: - pass - return parser.get_result() -class MetaParser(BaseHTMLParser): - """ - Modified HTMLParser that isolates a meta tag with the specified name - attribute. - """ - def __init__(self, name): - BaseHTMLParser.__init__(self) - self.name = name - self.content = None - self.result = None - - def handle_starttag(self, tag, attrs): - if tag != 'meta': - return - attrs = dict(attrs) - if attrs.get('name') == self.name: - self.result = attrs.get('content') + m = re.search(r'''(?xs) + <([a-zA-Z0-9:._-]+) + (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? + \s+%s=['"]?%s['"]? + (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*? + \s*> + (?P.*?) + + ''' % (re.escape(attribute), re.escape(value)), html) - def get_result(self): - return self.result + if not m: + return None + res = m.group('content') -def get_meta_content(name, html): - """ - Return the content attribute from the meta tag with the given name attribute. - """ - parser = MetaParser(name) - try: - parser.loads(html) - except compat_html_parser.HTMLParseError: - pass - return parser.get_result() + if res.startswith('"') or res.startswith("'"): + res = res[1:-1] + + return unescapeHTML(res) def clean_html(html): @@ -431,7 +227,7 @@ def sanitize_open(filename, open_mode): It returns the tuple (stream, definitive_file_name). """ try: - if filename == u'-': + if filename == '-': if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) @@ -444,9 +240,9 @@ def sanitize_open(filename, open_mode): # In case of error, try to remove win32 forbidden chars alt_filename = os.path.join( - re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part) - for path_part in os.path.split(filename) - ) + re.sub('[/<>:"\\|\\\\?\\*]', '#', path_part) + for path_part in os.path.split(filename) + ) if alt_filename == filename: raise else: @@ -463,6 +259,7 @@ def timeconvert(timestr): timestamp = email.utils.mktime_tz(timetuple) return timestamp + def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. 
@@ -483,7 +280,7 @@ def sanitize_filename(s, restricted=False, is_id=False): return '_' return char - result = u''.join(map(replace_insane, s)) + result = ''.join(map(replace_insane, s)) if not is_id: while '__' in result: result = result.replace('__', '_') @@ -495,6 +292,7 @@ def sanitize_filename(s, restricted=False, is_id=False): result = '_' return result + def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] @@ -504,13 +302,33 @@ def orderedSet(iterable): return res +def _htmlentity_transform(entity): + """Transforms an HTML entity to a character.""" + # Known non-numeric HTML entity + if entity in compat_html_entities.name2codepoint: + return compat_chr(compat_html_entities.name2codepoint[entity]) + + mobj = re.match(r'#(x?[0-9]+)', entity) + if mobj is not None: + numstr = mobj.group(1) + if numstr.startswith('x'): + base = 16 + numstr = '0%s' % numstr + else: + base = 10 + return compat_chr(int(numstr, base)) + + # Unknown entity in name, return its literal representation + return ('&%s;' % entity) + + def unescapeHTML(s): if s is None: return None assert type(s) == compat_str - result = re.sub(r'(?u)&(.+?);', htmlentity_transform, s) - return result + return re.sub( + r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s) def encodeFilename(s, for_subprocess=False): @@ -525,7 +343,7 @@ def encodeFilename(s, for_subprocess=False): return s if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: - # Pass u'' directly to use Unicode APIs on Windows 2000 and up + # Pass '' directly to use Unicode APIs on Windows 2000 and up # (Detecting Windows NT 4 is tricky because 'major >= 4' would # match Windows 9x series as well. Besides, NT 4 is obsolete.) if not for_subprocess: @@ -559,6 +377,7 @@ def decodeOption(optval): assert isinstance(optval, compat_str) return optval + def formatSeconds(secs): if secs > 3600: return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60) @@ -582,7 +401,7 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs): self.sock = sock self._tunnel() try: - self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv3) + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) except ssl.SSLError: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23) @@ -590,8 +409,14 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs): def https_open(self, req): return self.do_open(HTTPSConnectionV3, req) return HTTPSHandlerV3(**kwargs) - else: - context = ssl.SSLContext(ssl.PROTOCOL_SSLv3) + elif hasattr(ssl, 'create_default_context'): # Python >= 3.4 + context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) + context.options &= ~ssl.OP_NO_SSLv3 # Allow older, not-as-secure SSLv3 + if opts_no_check_certificate: + context.verify_mode = ssl.CERT_NONE + return compat_urllib_request.HTTPSHandler(context=context, **kwargs) + else: # Python < 3.4 + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.verify_mode = (ssl.CERT_NONE if opts_no_check_certificate else ssl.CERT_REQUIRED) @@ -602,8 +427,10 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs): pass # Python < 3.4 return compat_urllib_request.HTTPSHandler(context=context, **kwargs) + class ExtractorError(Exception): """Error during info extraction.""" + def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): """ tb, if given, is the original traceback (so that it can be printed out). 
If expected is set, this is a normal error message and most likely not a bug in youtube-dl. @@ -613,8 +440,16 @@ class ExtractorError(Exception): expected = True if video_id is not None: msg = video_id + ': ' + msg + if cause: + msg += ' (caused by %r)' % cause if not expected: - msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.' + if ytdl_is_updateable(): + update_cmd = 'type youtube-dl -U to update' + else: + update_cmd = 'see https://yt-dl.org/update on how to update' + msg += '; please report this issue on https://yt-dl.org/bug .' + msg += ' Make sure you are using the latest version; %s.' % update_cmd + msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' super(ExtractorError, self).__init__(msg) self.traceback = tb @@ -625,7 +460,7 @@ class ExtractorError(Exception): def format_traceback(self): if self.traceback is None: return None - return u''.join(traceback.format_tb(self.traceback)) + return ''.join(traceback.format_tb(self.traceback)) class RegexNotFoundError(ExtractorError): @@ -640,6 +475,7 @@ class DownloadError(Exception): configured to continue on errors. They will contain the appropriate error message. """ + def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ super(DownloadError, self).__init__(msg) @@ -661,9 +497,11 @@ class PostProcessingError(Exception): This exception may be raised by PostProcessor's .run() method to indicate an error in the postprocessing task. """ + def __init__(self, msg): self.msg = msg + class MaxDownloadsReached(Exception): """ --max-downloads limit has been reached. """ pass @@ -693,6 +531,7 @@ class ContentTooShortError(Exception): self.downloaded = downloaded self.expected = expected + class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. 
@@ -727,10 +566,9 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
         return ret
 
     def http_request(self, req):
-        for h,v in std_headers.items():
-            if h in req.headers:
-                del req.headers[h]
-            req.add_header(h, v)
+        for h, v in std_headers.items():
+            if h not in req.headers:
+                req.add_header(h, v)
         if 'Youtubedl-no-compression' in req.headers:
             if 'Accept-encoding' in req.headers:
                 del req.headers['Accept-encoding']
@@ -740,6 +578,12 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
                 del req.headers['User-agent']
             req.headers['User-agent'] = req.headers['Youtubedl-user-agent']
             del req.headers['Youtubedl-user-agent']
+
+        if sys.version_info < (2, 7) and '#' in req.get_full_url():
+            # Python 2.6 is brain-dead when it comes to fragments
+            req._Request__original = req._Request__original.partition('#')[0]
+            req._Request__r_type = req._Request__r_type.partition('#')[0]
+
         return req
 
     def http_response(self, req, resp):
@@ -775,14 +619,14 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
     https_response = http_response
 
 
-def parse_iso8601(date_str):
+def parse_iso8601(date_str, delimiter='T'):
     """ Return a UNIX timestamp from the given date """
 
     if date_str is None:
         return None
 
     m = re.search(
-        r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$',
+        r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
         date_str)
     if not m:
         timezone = datetime.timedelta()
@@ -795,8 +639,8 @@ def parse_iso8601(date_str):
         timezone = datetime.timedelta(
             hours=sign * int(m.group('hours')),
             minutes=sign * int(m.group('minutes')))
-
-    dt = datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S') - timezone
+    date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
+    dt = datetime.datetime.strptime(date_str, date_format) - timezone
     return calendar.timegm(dt.timetuple())
 
 
@@ -807,7 +651,7 @@ def unified_strdate(date_str):
         return None
 
     upload_date = None
-    #Replace commas
+    # Replace commas
     date_str = date_str.replace(',', ' ')
     # %z (UTC offset) is only supported in python>=3.2
     date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
@@ -816,11 +660,18 @@ def unified_strdate(date_str):
         '%d %b %Y',
         '%B %d %Y',
         '%b %d %Y',
+        '%b %dst %Y %I:%M%p',
+        '%b %dnd %Y %I:%M%p',
+        '%b %dth %Y %I:%M%p',
         '%Y-%m-%d',
+        '%Y/%m/%d',
         '%d.%m.%Y',
         '%d/%m/%Y',
+        '%d/%m/%y',
         '%Y/%m/%d %H:%M:%S',
+        '%d/%m/%Y %H:%M:%S',
         '%Y-%m-%d %H:%M:%S',
+        '%Y-%m-%d %H:%M:%S.%f',
         '%d.%m.%Y %H:%M',
         '%d.%m.%Y %H.%M',
         '%Y-%m-%dT%H:%M:%SZ',
@@ -841,15 +692,20 @@ def unified_strdate(date_str):
             upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
     return upload_date
 
-def determine_ext(url, default_ext=u'unknown_video'):
-    guess = url.partition(u'?')[0].rpartition(u'.')[2]
+
+def determine_ext(url, default_ext='unknown_video'):
+    if url is None:
+        return default_ext
+    guess = url.partition('?')[0].rpartition('.')[2]
     if re.match(r'^[A-Za-z0-9]+$', guess):
         return guess
     else:
         return default_ext
 
+
 def subtitles_filename(filename, sub_lang, sub_format):
-    return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
+
 
 def date_from_str(date_str):
     """
@@ -865,7 +721,7 @@ def date_from_str(date_str):
         if sign == '-':
             time = -time
         unit = match.group('unit')
-        #A bad aproximation?
+        # A bad aproximation?
if unit == 'month': unit = 'day' time *= 30 @@ -876,7 +732,8 @@ def date_from_str(date_str): delta = datetime.timedelta(**{unit: time}) return today + delta return datetime.datetime.strptime(date_str, "%Y%m%d").date() - + + def hyphenate_date(date_str): """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format""" @@ -886,8 +743,10 @@ def hyphenate_date(date_str): else: return date_str + class DateRange(object): """Represents a time interval between two dates""" + def __init__(self, start=None, end=None): """start and end must be strings in the format accepted by date""" if start is not None: @@ -900,17 +759,20 @@ class DateRange(object): self.end = datetime.datetime.max.date() if self.start > self.end: raise ValueError('Date range: "%s" , the start date must be before the end date' % self) + @classmethod def day(cls, day): """Returns a range that only contains the given day""" - return cls(day,day) + return cls(day, day) + def __contains__(self, date): """Check if the date is in the range""" if not isinstance(date, datetime.date): date = date_from_str(date) return self.start <= date <= self.end + def __str__(self): - return '%s - %s' % ( self.start.isoformat(), self.end.isoformat()) + return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) def platform_name(): @@ -1029,16 +891,7 @@ def bytes_to_intlist(bs): def intlist_to_bytes(xs): if not xs: return b'' - if isinstance(chr(0), bytes): # Python 2 - return ''.join([chr(x) for x in xs]) - else: - return bytes(xs) - - -def get_cachedir(params={}): - cache_root = os.environ.get('XDG_CACHE_HOME', - os.path.expanduser('~/.cache')) - return params.get('cachedir', os.path.join(cache_root, 'youtube-dl')) + return struct_pack('%dB' % len(xs), *xs) # Cross-platform file locking @@ -1100,10 +953,10 @@ else: import fcntl def _lock_file(f, exclusive): - fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) + fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) def _unlock_file(f): - fcntl.lockf(f, fcntl.LOCK_UN) + fcntl.flock(f, fcntl.LOCK_UN) class locked_file(object): @@ -1137,17 +990,20 @@ class locked_file(object): return self.f.read(*args) +def get_filesystem_encoding(): + encoding = sys.getfilesystemencoding() + return encoding if encoding is not None else 'utf-8' + + def shell_quote(args): quoted_args = [] - encoding = sys.getfilesystemencoding() - if encoding is None: - encoding = 'utf-8' + encoding = get_filesystem_encoding() for a in args: if isinstance(a, bytes): # We may get a filename encoded with 'encodeFilename' a = a.decode(encoding) quoted_args.append(pipes.quote(a)) - return u' '.join(quoted_args) + return ' '.join(quoted_args) def takewhile_inclusive(pred, seq): @@ -1163,40 +1019,86 @@ def smuggle_url(url, data): """ Pass additional data in a URL for internal use. 
""" sdata = compat_urllib_parse.urlencode( - {u'__youtubedl_smuggle': json.dumps(data)}) - return url + u'#' + sdata + {'__youtubedl_smuggle': json.dumps(data)}) + return url + '#' + sdata def unsmuggle_url(smug_url, default=None): if not '#__youtubedl_smuggle' in smug_url: return smug_url, default - url, _, sdata = smug_url.rpartition(u'#') - jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0] + url, _, sdata = smug_url.rpartition('#') + jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data def format_bytes(bytes): if bytes is None: - return u'N/A' + return 'N/A' if type(bytes) is str: bytes = float(bytes) if bytes == 0.0: exponent = 0 else: exponent = int(math.log(bytes, 1024.0)) - suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent] + suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] converted = float(bytes) / float(1024 ** exponent) - return u'%.2f%s' % (converted, suffix) + return '%.2f%s' % (converted, suffix) -def str_to_int(int_str): - int_str = re.sub(r'[,\.]', u'', int_str) - return int(int_str) +def parse_filesize(s): + if s is None: + return None + + # The lower-case forms are of course incorrect and inofficial, + # but we support those too + _UNIT_TABLE = { + 'B': 1, + 'b': 1, + 'KiB': 1024, + 'KB': 1000, + 'kB': 1024, + 'Kb': 1000, + 'MiB': 1024 ** 2, + 'MB': 1000 ** 2, + 'mB': 1024 ** 2, + 'Mb': 1000 ** 2, + 'GiB': 1024 ** 3, + 'GB': 1000 ** 3, + 'gB': 1024 ** 3, + 'Gb': 1000 ** 3, + 'TiB': 1024 ** 4, + 'TB': 1000 ** 4, + 'tB': 1024 ** 4, + 'Tb': 1000 ** 4, + 'PiB': 1024 ** 5, + 'PB': 1000 ** 5, + 'pB': 1024 ** 5, + 'Pb': 1000 ** 5, + 'EiB': 1024 ** 6, + 'EB': 1000 ** 6, + 'eB': 1024 ** 6, + 'Eb': 1000 ** 6, + 'ZiB': 1024 ** 7, + 'ZB': 1000 ** 7, + 'zB': 1024 ** 7, + 'Zb': 1000 ** 7, + 'YiB': 1024 ** 8, + 'YB': 1000 ** 8, + 'yB': 1024 ** 8, + 'Yb': 1000 ** 8, + } + + units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE) + m = re.match(r'(?P[0-9]+(?:\.[0-9]*)?)\s*(?P%s)' % units_re, s) + if not m: + return None + + return int(float(m.group('num')) * _UNIT_TABLE[m.group('unit')]) def get_term_width(): - columns = os.environ.get('COLUMNS', None) + columns = compat_getenv('COLUMNS', None) if columns: return int(columns) @@ -1215,8 +1117,8 @@ def month_by_name(name): """ Return the number of a month by (locale-independently) English name """ ENGLISH_NAMES = [ - u'January', u'February', u'March', u'April', u'May', u'June', - u'July', u'August', u'September', u'October', u'November', u'December'] + 'January', 'February', 'March', 'April', 'May', 'June', + 'July', 'August', 'September', 'October', 'November', 'December'] try: return ENGLISH_NAMES.index(name) + 1 except ValueError: @@ -1227,7 +1129,7 @@ def fix_xml_ampersands(xml_str): """Replace all the '&' by '&' in XML""" return re.sub( r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', - u'&', + '&', xml_str) @@ -1252,9 +1154,15 @@ def remove_start(s, start): return s +def remove_end(s, end): + if s.endswith(end): + return s[:-len(end)] + return s + + def url_basename(url): path = compat_urlparse.urlparse(url).path - return path.strip(u'/').split(u'/')[-1] + return path.strip('/').split('/')[-1] class HEADRequest(compat_urllib_request.Request): @@ -1262,23 +1170,44 @@ class HEADRequest(compat_urllib_request.Request): return "HEAD" -def int_or_none(v, scale=1, default=None, get_attr=None): +def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): if get_attr: if v is not None: v = 
getattr(v, get_attr, None)
-    return default if v is None else (int(v) // scale)
+    if v == '':
+        v = None
+    return default if v is None else (int(v) * invscale // scale)
+
+
+def str_or_none(v, default=None):
+    return default if v is None else compat_str(v)
+
+def str_to_int(int_str):
+    """ A more relaxed version of int_or_none """
+    if int_str is None:
+        return None
+    int_str = re.sub(r'[,\.\+]', '', int_str)
+    return int(int_str)
-def float_or_none(v, scale=1, default=None):
-    return default if v is None else (float(v) / scale)
+
+def float_or_none(v, scale=1, invscale=1, default=None):
+    return default if v is None else (float(v) * invscale / scale)
 
 
 def parse_duration(s):
     if s is None:
         return None
 
+    s = s.strip()
+
     m = re.match(
-        r'(?:(?:(?P<hours>[0-9]+)[:h])?(?P<mins>[0-9]+)[:m])?(?P<secs>[0-9]+)s?(?::[0-9]+)?$', s)
+        r'''(?ix)T?
+        (?:
+            (?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?
+            (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
+        )?
+        (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?$''', s)
     if not m:
         return None
     res = int(m.group('secs'))
@@ -1286,12 +1215,14 @@ def parse_duration(s):
         res += int(m.group('mins')) * 60
         if m.group('hours'):
             res += int(m.group('hours')) * 60 * 60
+    if m.group('ms'):
+        res += float(m.group('ms'))
     return res
 
 
 def prepend_extension(filename, ext):
-     name, real_ext = os.path.splitext(filename)
-    return u'{0}.{1}{2}'.format(name, ext, real_ext)
+    name, real_ext = os.path.splitext(filename)
+    return '{0}.{1}{2}'.format(name, ext, real_ext)
 
 
 def check_executable(exe, args=[]):
@@ -1304,15 +1235,36 @@ def check_executable(exe, args=[]):
     return exe
 
 
-class PagedList(object):
-    def __init__(self, pagefunc, pagesize):
-        self._pagefunc = pagefunc
-        self._pagesize = pagesize
+def get_exe_version(exe, args=['--version'],
+                    version_re=r'version\s+([0-9._-a-zA-Z]+)',
+                    unrecognized='present'):
+    """ Returns the version of the specified executable,
+    or False if the executable is not present """
+    try:
+        out, err = subprocess.Popen(
+            [exe] + args,
+            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
+    except OSError:
+        return False
+    firstline = out.partition(b'\n')[0].decode('ascii', 'ignore')
+    m = re.search(version_re, firstline)
+    if m:
+        return m.group(1)
+    else:
+        return unrecognized
+
 
+class PagedList(object):
     def __len__(self):
         # This is only useful for tests
         return len(self.getslice())
 
+
+class OnDemandPagedList(PagedList):
+    def __init__(self, pagefunc, pagesize):
+        self._pagefunc = pagefunc
+        self._pagesize = pagesize
+
     def getslice(self, start=0, end=None):
         res = []
         for pagenum in itertools.count(start // self._pagesize):
@@ -1351,6 +1303,35 @@ class PagedList(object):
         return res
 
 
+class InAdvancePagedList(PagedList):
+    def __init__(self, pagefunc, pagecount, pagesize):
+        self._pagefunc = pagefunc
+        self._pagecount = pagecount
+        self._pagesize = pagesize
+
+    def getslice(self, start=0, end=None):
+        res = []
+        start_page = start // self._pagesize
+        end_page = (
+            self._pagecount if end is None else (end // self._pagesize + 1))
+        skip_elems = start - start_page * self._pagesize
+        only_more = None if end is None else end - start
+        for pagenum in range(start_page, end_page):
+            page = list(self._pagefunc(pagenum))
+            if skip_elems:
+                page = page[skip_elems:]
+                skip_elems = None
+            if only_more is not None:
+                if len(page) < only_more:
+                    only_more -= len(page)
+                else:
+                    page = page[:only_more]
+                    res.extend(page)
+                    break
+            res.extend(page)
+        return res
+
+
 def uppercase_escape(s):
     unicode_escape = codecs.getdecoder('unicode_escape')
     return re.sub(
@@ -1358,8 +1339,26 @@ def
uppercase_escape(s): lambda m: unicode_escape(m.group(0))[0], s) + +def escape_rfc3986(s): + """Escape non-ASCII characters as suggested by RFC 3986""" + if sys.version_info < (3, 0) and isinstance(s, unicode): + s = s.encode('utf-8') + return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") + + +def escape_url(url): + """Escape URL as suggested by RFC 3986""" + url_parsed = compat_urllib_parse_urlparse(url) + return url_parsed._replace( + path=escape_rfc3986(url_parsed.path), + params=escape_rfc3986(url_parsed.params), + query=escape_rfc3986(url_parsed.query), + fragment=escape_rfc3986(url_parsed.fragment) + ).geturl() + try: - struct.pack(u'!I', 0) + struct.pack('!I', 0) except TypeError: # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument def struct_pack(spec, *args): @@ -1380,7 +1379,7 @@ def read_batch_urls(batch_fd): def fixup(url): if not isinstance(url, compat_str): url = url.decode('utf-8', 'replace') - BOM_UTF8 = u'\xef\xbb\xbf' + BOM_UTF8 = '\xef\xbb\xbf' if url.startswith(BOM_UTF8): url = url[len(BOM_UTF8):] url = url.strip() @@ -1396,6 +1395,12 @@ def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') +try: + etree_iter = xml.etree.ElementTree.Element.iter +except AttributeError: # Python <=2.6 + etree_iter = lambda n: n.findall('.//*') + + def parse_xml(s): class TreeBuilder(xml.etree.ElementTree.TreeBuilder): def doctype(self, name, pubid, system): @@ -1403,16 +1408,14 @@ def parse_xml(s): parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder()) kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {} - return xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs) - - -if sys.version_info < (3, 0) and sys.platform == 'win32': - def compat_getpass(prompt, *args, **kwargs): - if isinstance(prompt, compat_str): - prompt = prompt.encode(preferredencoding()) - return getpass.getpass(prompt, *args, **kwargs) -else: - compat_getpass = getpass.getpass + tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs) + # Fix up XML parser in Python 2.x + if sys.version_info < (3, 0): + for n in etree_iter(tree): + if n.text is not None: + if not isinstance(n.text, compat_str): + n.text = n.text.decode('utf-8') + return tree US_RATINGS = { @@ -1424,8 +1427,41 @@ US_RATINGS = { } +def parse_age_limit(s): + if s is None: + return None + m = re.match(r'^(?P\d{1,2})\+?$', s) + return int(m.group('age')) if m else US_RATINGS.get(s, None) + + def strip_jsonp(code): - return re.sub(r'(?s)^[a-zA-Z_]+\s*\(\s*(.*)\);\s*?\s*$', r'\1', code) + return re.sub( + r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) + + +def js_to_json(code): + def fix_kv(m): + v = m.group(0) + if v in ('true', 'false', 'null'): + return v + if v.startswith('"'): + return v + if v.startswith("'"): + v = v[1:-1] + v = re.sub(r"\\\\|\\'|\"", lambda m: { + '\\\\': '\\\\', + "\\'": "'", + '"': '\\"', + }[m.group(0)], v) + return '"%s"' % v + + res = re.sub(r'''(?x) + "(?:[^"\\]*(?:\\\\|\\")?)*"| + '(?:[^'\\]*(?:\\\\|\\')?)*'| + [a-zA-Z_][a-zA-Z_0-9]* + ''', fix_kv, code) + res = re.sub(r',(\s*\])', lambda m: m.group(1), res) + return res def qualities(quality_ids): @@ -1440,14 +1476,37 @@ def qualities(quality_ids): DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s' -try: - subprocess_check_output = subprocess.check_output -except AttributeError: - def subprocess_check_output(*args, **kwargs): - assert 'input' not in kwargs - p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs) - output, _ = p.communicate() - 
ret = p.poll()
-        if ret:
-            raise subprocess.CalledProcessError(ret, p.args, output=output)
-        return output
+
+def limit_length(s, length):
+    """ Add ellipses to overly long strings """
+    if s is None:
+        return None
+    ELLIPSES = '...'
+    if len(s) > length:
+        return s[:length - len(ELLIPSES)] + ELLIPSES
+    return s
+
+
+def version_tuple(v):
+    return [int(e) for e in v.split('.')]
+
+
+def is_outdated_version(version, limit, assume_new=True):
+    if not version:
+        return not assume_new
+    try:
+        return version_tuple(version) < version_tuple(limit)
+    except ValueError:
+        return not assume_new
+
+
+def ytdl_is_updateable():
+    """ Returns if youtube-dl can be updated with -U """
+    from zipimport import zipimporter
+
+    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
+
+
+def args_to_str(args):
+    # Get a short string representation for a subprocess command
+    return ' '.join(shlex_quote(a) for a in args)
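
For reference, a minimal usage sketch (not part of the diff) of a few helpers this revision adds to youtube_dl/utils.py: parse_duration(), parse_filesize() and js_to_json(). The expected values are inferred from the regexes and unit table shown in the diff above, so treat them as illustrative rather than as an authoritative test suite.

# Usage sketch, assuming this revision of youtube_dl/utils.py is importable.
from youtube_dl.utils import js_to_json, parse_duration, parse_filesize

# parse_duration() understands "H:MM:SS"-style and "Xh Ym Zs"-style strings
# and returns a number of seconds.
assert parse_duration('9:05') == 545        # 9 min 5 s
assert parse_duration('1:30:45') == 5445    # 1 h 30 min 45 s
assert parse_duration('120s') == 120

# parse_filesize() distinguishes decimal (kB/MB/GB) from binary (KiB/MiB/GiB)
# multipliers and returns a byte count, or None if the string is unparseable.
assert parse_filesize('5 KiB') == 5120
assert parse_filesize('10.5MB') == 10500000
assert parse_filesize(None) is None

# js_to_json() turns JavaScript-ish object literals into strict JSON.
assert js_to_json("{'abc': true}") == '{"abc": true}'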