#!/usr/bin/env python
# -*- coding: utf-8 -*-
+import calendar
+import codecs
+import contextlib
import ctypes
import datetime
import email.utils
import errno
+import getpass
import gzip
+import itertools
import io
import json
import locale
import re
import ssl
import socket
+import struct
import subprocess
import sys
import traceback
+import xml.etree.ElementTree
import zlib
try:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
- from urllib.parse import parse_qs as compat_parse_qs
-except ImportError: # Python 2
- # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
- # Python 2's version is apparently totally broken
- def _unquote(string, encoding='utf-8', errors='replace'):
+ from urllib.parse import unquote as compat_urllib_parse_unquote
+except ImportError:
+ def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
if string == '':
return string
res = string.split('%')
string += pct_sequence.decode(encoding, errors)
return string
+
+try:
+ from urllib.parse import parse_qs as compat_parse_qs
+except ImportError: # Python 2
+ # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
+ # Python 2's version is apparently totally broken
+
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, unicode
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
- name = _unquote(name, encoding=encoding, errors=errors)
+ name = compat_urllib_parse_unquote(
+ name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
- value = _unquote(value, encoding=encoding, errors=errors)
+ value = compat_urllib_parse_unquote(
+ value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
except NameError:
compat_chr = chr
+try:
+ from xml.etree.ElementTree import ParseError as compat_xml_parse_error
+except ImportError: # Python 2.6
+ from xml.parsers.expat import ExpatError as compat_xml_parse_error
+
def compat_ord(c):
    if type(c) is int:
        return c
    else:
        return ord(c)
if sys.version_info >= (2,7):
def find_xpath_attr(node, xpath, key, val):
""" Find the xpath xpath[@key=val] """
- assert re.match(r'^[a-zA-Z]+$', key)
- assert re.match(r'^[a-zA-Z0-9@\s]*$', val)
+ assert re.match(r'^[a-zA-Z-]+$', key)
+ assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
expr = xpath + u"[@%s='%s']" % (key, val)
return node.find(expr)
else:
res.append(el)
return res
+
def unescapeHTML(s):
- """
- @param s a string
- """
- assert type(s) == type(u'')
+ if s is None:
+ return None
+ assert type(s) == compat_str
- result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s)
+ result = re.sub(r'(?u)&(.+?);', htmlentity_transform, s)
return result
return s.encode(encoding, 'ignore')
+def encodeArgument(s):
+ if not isinstance(s, compat_str):
+ # Legacy code that uses byte strings
+ # Uncomment the following line after fixing all post processors
+ #assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
+ s = s.decode('ascii')
+ return encodeFilename(s, True)
+
+
def decodeOption(optval):
if optval is None:
return optval
class ExtractorError(Exception):
"""Error during info extraction."""
- def __init__(self, msg, tb=None, expected=False, cause=None):
+ def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
"""
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True
+ if video_id is not None:
+ msg = video_id + ': ' + msg
if not expected:
msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.'
super(ExtractorError, self).__init__(msg)
self.traceback = tb
self.exc_info = sys.exc_info() # preserve original exception
self.cause = cause
+ self.video_id = video_id
def format_traceback(self):
if self.traceback is None:
https_request = http_request
https_response = http_response
+
+def parse_iso8601(date_str, delimiter='T'):
+ """ Return a UNIX timestamp from the given date """
+
+ if date_str is None:
+ return None
+
+ m = re.search(
+ r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$',
+ date_str)
+ if not m:
+ timezone = datetime.timedelta()
+ else:
+ date_str = date_str[:-len(m.group(0))]
+ if not m.group('sign'):
+ timezone = datetime.timedelta()
+ else:
+ sign = 1 if m.group('sign') == '+' else -1
+ timezone = datetime.timedelta(
+ hours=sign * int(m.group('hours')),
+ minutes=sign * int(m.group('minutes')))
+ date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
+ dt = datetime.datetime.strptime(date_str, date_format) - timezone
+ return calendar.timegm(dt.timetuple())
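+
+# Example (illustrative): the result is seconds since the Unix epoch, so both
+# of these parse to 0.
+#
+#   parse_iso8601('1970-01-01T00:00:00Z')      == 0
+#   parse_iso8601('1970-01-01T01:00:00+01:00') == 0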
+
+
def unified_strdate(date_str):
"""Return a string with the date in the format YYYYMMDD"""
+
+ if date_str is None:
+ return None
+
upload_date = None
    # Replace commas
- date_str = date_str.replace(',',' ')
+ date_str = date_str.replace(',', ' ')
# %z (UTC offset) is only supported in python>=3.2
- date_str = re.sub(r' (\+|-)[\d]*$', '', date_str)
+ date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
format_expressions = [
'%d %B %Y',
+ '%d %b %Y',
'%B %d %Y',
'%b %d %Y',
+ '%b %dst %Y %I:%M%p',
+ '%b %dnd %Y %I:%M%p',
+ '%b %dth %Y %I:%M%p',
'%Y-%m-%d',
+ '%d.%m.%Y',
'%d/%m/%Y',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S',
'%d.%m.%Y %H:%M',
+ '%d.%m.%Y %H.%M',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f0Z',
'%Y-%m-%dT%H:%M:%S',
+ '%Y-%m-%dT%H:%M:%S.%f',
+ '%Y-%m-%dT%H:%M',
]
for expression in format_expressions:
try:
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
- except:
+ except ValueError:
pass
if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str)
return upload_date
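+
+# Example (illustrative): commas and trailing UTC offsets are stripped before
+# the format expressions above are tried.
+#
+#   unified_strdate('Dec 14, 2012')        == '20121214'
+#   unified_strdate('2012/10/11 01:56:38') == '20121011'
+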
def determine_ext(url, default_ext=u'unknown_video'):
+ if url is None:
+ return default_ext
guess = url.partition(u'?')[0].rpartition(u'.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
return res
-def write_string(s, out=None):
+def _windows_write_string(s, out):
+ """ Returns True if the string was written using special methods,
+ False if it has yet to be written out."""
+ # Adapted from http://stackoverflow.com/a/3259271/35070
+
+ import ctypes
+ import ctypes.wintypes
+
+ WIN_OUTPUT_IDS = {
+ 1: -11,
+ 2: -12,
+ }
+
+ try:
+ fileno = out.fileno()
+ except AttributeError:
+ # If the output stream doesn't have a fileno, it's virtual
+ return False
+ if fileno not in WIN_OUTPUT_IDS:
+ return False
+
+ GetStdHandle = ctypes.WINFUNCTYPE(
+ ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
+ ("GetStdHandle", ctypes.windll.kernel32))
+ h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
+
+ WriteConsoleW = ctypes.WINFUNCTYPE(
+ ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
+ ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
+ ctypes.wintypes.LPVOID)(("WriteConsoleW", ctypes.windll.kernel32))
+ written = ctypes.wintypes.DWORD(0)
+
+ GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(("GetFileType", ctypes.windll.kernel32))
+ FILE_TYPE_CHAR = 0x0002
+ FILE_TYPE_REMOTE = 0x8000
+ GetConsoleMode = ctypes.WINFUNCTYPE(
+ ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
+ ctypes.POINTER(ctypes.wintypes.DWORD))(
+ ("GetConsoleMode", ctypes.windll.kernel32))
+ INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
+
+ def not_a_console(handle):
+ if handle == INVALID_HANDLE_VALUE or handle is None:
+ return True
+ return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
+ or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
+
+ if not_a_console(h):
+ return False
+
+ def next_nonbmp_pos(s):
+ try:
+ return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
+ except StopIteration:
+ return len(s)
+
+ while s:
+ count = min(next_nonbmp_pos(s), 1024)
+
+ ret = WriteConsoleW(
+ h, s, count if count else 2, ctypes.byref(written), None)
+ if ret == 0:
+ raise OSError('Failed to write string')
+ if not count: # We just wrote a non-BMP character
+ assert written.value == 2
+ s = s[1:]
+ else:
+ assert written.value > 0
+ s = s[written.value:]
+ return True
+
+
+def write_string(s, out=None, encoding=None):
if out is None:
out = sys.stderr
assert type(s) == compat_str
+ if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
+ if _windows_write_string(s, out):
+ return
+
if ('b' in getattr(out, 'mode', '') or
sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
- s = s.encode(preferredencoding(), 'ignore')
- try:
+ byt = s.encode(encoding or preferredencoding(), 'ignore')
+ out.write(byt)
+ elif hasattr(out, 'buffer'):
+ enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
+ byt = s.encode(enc, 'ignore')
+ out.buffer.write(byt)
+ else:
out.write(s)
- except UnicodeEncodeError:
- # In Windows shells, this can fail even when the codec is just charmap!?
- # See https://wiki.python.org/moin/PrintFails#Issue
- if sys.platform == 'win32' and hasattr(out, 'encoding'):
- s = s.encode(out.encoding, 'ignore').decode(out.encoding)
- out.write(s)
- else:
- raise
-
out.flush()
return u'%.2f%s' % (converted, suffix)
-def str_to_int(int_str):
- int_str = re.sub(r'[,\.]', u'', int_str)
- return int(int_str)
-
-
def get_term_width():
columns = os.environ.get('COLUMNS', None)
if columns:
return None
-def fix_xml_all_ampersand(xml_str):
+def fix_xml_ampersands(xml_str):
"""Replace all the '&' by '&' in XML"""
- return xml_str.replace(u'&', u'&')
+ return re.sub(
+ r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
+        u'&amp;',
+ xml_str)
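+
+# Example (illustrative): only bare ampersands are escaped; existing entities
+# and character references are left alone.
+#
+#   fix_xml_ampersands('Laurel &amp; Hardy & co')  == 'Laurel &amp; Hardy &amp; co'
+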
def setproctitle(title):
libc = ctypes.cdll.LoadLibrary("libc.so.6")
except OSError:
return
- title = title
- buf = ctypes.create_string_buffer(len(title) + 1)
- buf.value = title.encode('utf-8')
+ title_bytes = title.encode('utf-8')
+ buf = ctypes.create_string_buffer(len(title_bytes))
+ buf.value = title_bytes
try:
- libc.prctl(15, ctypes.byref(buf), 0, 0, 0)
+ libc.prctl(15, buf, 0, 0, 0)
except AttributeError:
return # Strange libc, just skip this
return "HEAD"
-def int_or_none(v):
- return v if v is None else int(v)
+def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
+ if get_attr:
+ if v is not None:
+ v = getattr(v, get_attr, None)
+ return default if v is None else (int(v) * invscale // scale)
+
+
+def str_to_int(int_str):
+ if int_str is None:
+ return None
+ int_str = re.sub(r'[,\.]', u'', int_str)
+ return int(int_str)
+
+
+def float_or_none(v, scale=1, invscale=1, default=None):
+ return default if v is None else (float(v) * invscale / scale)
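+
+# Examples (illustrative):
+#
+#   int_or_none('42')                 == 42
+#   int_or_none(None, default=0)      == 0
+#   int_or_none(3141, scale=1000)     == 3
+#   str_to_int('1,000,000')           == 1000000
+#   float_or_none('1500', scale=1000) == 1.5
+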
def parse_duration(s):
return None
m = re.match(
- r'(?:(?:(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)$', s)
+ r'(?:(?:(?P<hours>[0-9]+)[:h])?(?P<mins>[0-9]+)[:m])?(?P<secs>[0-9]+)s?(?::[0-9]+)?$', s)
if not m:
return None
res = int(m.group('secs'))
except OSError:
return False
return exe
+
+
+class PagedList(object):
+ def __init__(self, pagefunc, pagesize):
+ self._pagefunc = pagefunc
+ self._pagesize = pagesize
+
+ def __len__(self):
+ # This is only useful for tests
+ return len(self.getslice())
+
+ def getslice(self, start=0, end=None):
+ res = []
+ for pagenum in itertools.count(start // self._pagesize):
+ firstid = pagenum * self._pagesize
+ nextfirstid = pagenum * self._pagesize + self._pagesize
+ if start >= nextfirstid:
+ continue
+
+ page_results = list(self._pagefunc(pagenum))
+
+ startv = (
+ start % self._pagesize
+ if firstid <= start < nextfirstid
+ else 0)
+
+ endv = (
+ ((end - 1) % self._pagesize) + 1
+ if (end is not None and firstid <= end <= nextfirstid)
+ else None)
+
+ if startv != 0 or endv is not None:
+ page_results = page_results[startv:endv]
+ res.extend(page_results)
+
+            # A little optimization: if the current page contains fewer than
+            # pagesize videos, it must be the last one, so there is no need to
+            # query any further pages.
+ if len(page_results) + startv < self._pagesize:
+ break
+
+ # If we got the whole page, but the next page is not interesting,
+ # break out early as well
+ if end == nextfirstid:
+ break
+ return res
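+
+# Illustrative use with a hypothetical page function that serves ten entries
+# per page; for this slice only page 2 is actually fetched.
+#
+#   def fetch_page(pagenum):
+#       return list(range(pagenum * 10, (pagenum + 1) * 10))
+#
+#   PagedList(fetch_page, 10).getslice(25, 28)  == [25, 26, 27]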
+
+
+def uppercase_escape(s):
+ unicode_escape = codecs.getdecoder('unicode_escape')
+ return re.sub(
+ r'\\U[0-9a-fA-F]{8}',
+ lambda m: unicode_escape(m.group(0))[0],
+ s)
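+
+# Example (illustrative): raw \U escape sequences (as they appear in some JSON
+# responses) are decoded; everything else passes through unchanged.
+#
+#   uppercase_escape(u'aä')          == u'aä'
+#   uppercase_escape(u'\\U0001d550') == u'𝕐'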
+
+try:
+ struct.pack(u'!I', 0)
+except TypeError:
+ # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
+ def struct_pack(spec, *args):
+ if isinstance(spec, compat_str):
+ spec = spec.encode('ascii')
+ return struct.pack(spec, *args)
+
+ def struct_unpack(spec, *args):
+ if isinstance(spec, compat_str):
+ spec = spec.encode('ascii')
+ return struct.unpack(spec, *args)
+else:
+ struct_pack = struct.pack
+ struct_unpack = struct.unpack
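+
+# Example (illustrative): both wrappers behave like struct.pack/struct.unpack
+# but also accept a unicode format string on Python 2.6.
+#
+#   struct_pack(u'!I', 16)                    == b'\x00\x00\x00\x10'
+#   struct_unpack(u'!I', b'\x00\x00\x00\x10') == (16,)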
+
+
+def read_batch_urls(batch_fd):
+ def fixup(url):
+ if not isinstance(url, compat_str):
+ url = url.decode('utf-8', 'replace')
+        # A UTF-8 BOM survives decoding as u'\ufeff' (or as the raw byte
+        # sequence if the line was never decoded properly); strip either form.
+        for bom in (u'\xef\xbb\xbf', u'\ufeff'):
+            if url.startswith(bom):
+                url = url[len(bom):]
+ url = url.strip()
+ if url.startswith(('#', ';', ']')):
+ return False
+ return url
+
+ with contextlib.closing(batch_fd) as fd:
+ return [url for url in map(fixup, fd) if url]
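+
+# Example (illustrative): comment lines and blank lines are dropped.
+#
+#   read_batch_urls(io.StringIO(u'# a comment\nhttp://example.com/v\n'))
+#       == [u'http://example.com/v']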
+
+
+def urlencode_postdata(*args, **kargs):
+ return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')
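+
+# Example (illustrative): like compat_urllib_parse.urlencode, but the result
+# is bytes, ready to be used as the data of a POST request.
+#
+#   urlencode_postdata([('user', 'x'), ('pass', 'y')]) == b'user=x&pass=y'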
+
+
+def parse_xml(s):
+ class TreeBuilder(xml.etree.ElementTree.TreeBuilder):
+ def doctype(self, name, pubid, system):
+ pass # Ignore doctypes
+
+ parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder())
+ kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {}
+ return xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
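+
+# Example (illustrative): doctypes are ignored and the string is parsed as
+# UTF-8 encoded XML.
+#
+#   parse_xml(u'<root><a>1</a></root>').find('a').text == '1'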
+
+
+if sys.version_info < (3, 0) and sys.platform == 'win32':
+ def compat_getpass(prompt, *args, **kwargs):
+ if isinstance(prompt, compat_str):
+ prompt = prompt.encode(preferredencoding())
+ return getpass.getpass(prompt, *args, **kwargs)
+else:
+ compat_getpass = getpass.getpass
+
+
+US_RATINGS = {
+ 'G': 0,
+ 'PG': 10,
+ 'PG-13': 13,
+ 'R': 16,
+ 'NC': 18,
+}
+
+
+def strip_jsonp(code):
+ return re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?\s*$', r'\1', code)
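+
+# Example (illustrative):
+#
+#   strip_jsonp('callback({"status": "ok"});') == '{"status": "ok"}'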
+
+
+def qualities(quality_ids):
+ """ Get a numeric quality value out of a list of possible values """
+ def q(qid):
+ try:
+ return quality_ids.index(qid)
+ except ValueError:
+ return -1
+ return q
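+
+# Example (illustrative): the position in the list determines the preference;
+# unknown quality IDs sort below everything else.
+#
+#   q = qualities(['240p', '360p', '720p'])
+#   q('720p') == 2, q('240p') == 0, q('unknown') == -1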
+
+
+DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
+
+try:
+ subprocess_check_output = subprocess.check_output
+except AttributeError:
+ def subprocess_check_output(*args, **kwargs):
+ assert 'input' not in kwargs
+ p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
+ output, _ = p.communicate()
+ ret = p.poll()
+ if ret:
+            # Popen objects have no .args attribute before Python 3.3 and
+            # CalledProcessError gained its output parameter only in 2.7; this
+            # fallback only runs on older interpreters, so reconstruct the
+            # command and raise without them.
+            cmd = kwargs.get('args', args[0] if args else None)
+            raise subprocess.CalledProcessError(ret, cmd)
+ return output