]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/utils.py
Prepare to release.
[youtubedl] / youtube_dl / utils.py
1 #!/usr/bin/env python
2 # coding: utf-8
3
4 from __future__ import unicode_literals
5
6 import base64
7 import binascii
8 import calendar
9 import codecs
10 import contextlib
11 import ctypes
12 import datetime
13 import email.utils
14 import errno
15 import functools
16 import gzip
17 import io
18 import itertools
19 import json
20 import locale
21 import math
22 import operator
23 import os
24 import pipes
25 import platform
26 import random
27 import re
28 import socket
29 import ssl
30 import subprocess
31 import sys
32 import tempfile
33 import traceback
34 import xml.etree.ElementTree
35 import zlib
36
37 from .compat import (
38 compat_HTMLParser,
39 compat_basestring,
40 compat_chr,
41 compat_etree_fromstring,
42 compat_expanduser,
43 compat_html_entities,
44 compat_html_entities_html5,
45 compat_http_client,
46 compat_kwargs,
47 compat_os_name,
48 compat_parse_qs,
49 compat_shlex_quote,
50 compat_socket_create_connection,
51 compat_str,
52 compat_struct_pack,
53 compat_struct_unpack,
54 compat_urllib_error,
55 compat_urllib_parse,
56 compat_urllib_parse_urlencode,
57 compat_urllib_parse_urlparse,
58 compat_urllib_parse_unquote_plus,
59 compat_urllib_request,
60 compat_urlparse,
61 compat_xpath,
62 )
63
64 from .socks import (
65 ProxyType,
66 sockssocket,
67 )
68
69
def register_socks_protocols():
    """Teach urlparse that socks* URL schemes carry a netloc component."""
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)
77
78
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

# Default headers sent with every HTTP request (see YoutubeDLHandler.http_request)
std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


# Alternative User-Agent strings for extractors that need to impersonate
# a specific browser
USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


# Unique sentinel distinguishing "no default supplied" from a default of None
NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

# Month names per language code, used when parsing localized dates
MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

# Media file extensions recognized when guessing formats from URLs/filenames
KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
# maps each accented character to its ASCII transliteration
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))
# strptime() patterns tried (in order) when parsing free-form dates
DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
)

# Additional patterns for locales writing day before month (e.g. European)
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

# Additional patterns for locales writing month before day (e.g. US)
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

# Matches the argument list of the common P.A.C.K.E.R. JS obfuscator
PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
183
184
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        encoding = locale.getpreferredencoding()
        # Verify the reported encoding is actually usable for encoding text
        'TEST'.encode(encoding)
    except Exception:
        # Broken or unknown locale settings - fall back to UTF-8
        encoding = 'UTF-8'
    return encoding
198
199
def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible.

    The data is written to a temporary file in the same directory and then
    renamed over fn, so readers never observe a half-written file.  On
    failure the temporary file is removed and the exception re-raised.
    """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        # (BUGFIX: these lambdas previously ignored their argument and
        # closed over fn instead - harmless at the current call sites but
        # wrong for any other argument)
        path_basename = lambda f: os.path.basename(f).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(f).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        # Keep the temp file next to the target so os.rename stays on one
        # filesystem (atomic on POSIX)
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        # Best-effort cleanup of the temp file before propagating the error
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise
252
253
if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        # Attribute name is interpolated into the XPath expression, so
        # restrict it to safe characters first
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    # Python 2.6 ElementTree does not support attribute predicates in
    # XPath expressions, so filter the candidates manually
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] (manual fallback for Python 2.6) """
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None
268
269 # On python2.6 the xml.etree.ElementTree.Element methods don't support
270 # the namespace parameter
271
272
def xpath_with_ns(path, ns_map):
    """Expand namespace prefixes in an XPath using the ns_map mapping.

    Each 'prefix:tag' step is rewritten to ElementTree's Clark notation
    '{namespace-uri}tag'; steps without a prefix are kept unchanged.
    """
    expanded = []
    for step in path.split('/'):
        parts = step.split(':')
        if len(parts) == 1:
            expanded.append(parts[0])
        else:
            prefix, tag = parts
            expanded.append('{%s}%s' % (ns_map[prefix], tag))
    return '/'.join(expanded)
283
284
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Find the first element matching xpath (or any of a list of xpaths).

    Returns the element, `default` if given and nothing matched, None when
    not fatal, or raises ExtractorError when fatal and nothing matched.
    """
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        # A sequence of xpaths: return the first that matches
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n
306
307
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Like xpath_element(), but return the matched element's .text.

    `default`/`fatal` semantics mirror xpath_element(); an element that
    exists but has no text counts as missing.
    """
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    # n == default covers the case where xpath_element already returned
    # the caller's default
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text
321
322
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    """Return attribute `key` of the first element matching xpath.

    `default`/`fatal` semantics mirror xpath_element().
    """
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]
334
335
def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    # Thin wrapper over get_element_by_attribute for the common id= case
    return get_element_by_attribute('id', id, html)
339
340
def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    # None when no tag carries the class
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None
345
346
def get_element_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the first tag with attribute=value, or None."""
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None
350
351
def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    # class attributes may hold several space-separated names, so match
    # class_name as a whole word anywhere inside the attribute value
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)
357
358
def get_elements_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    # `value` may be a pre-built regex fragment when escape_value is False
    # (see get_elements_by_class)
    value = re.escape(value) if escape_value else value

    retlist = []
    for m in re.finditer(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html):
        res = m.group('content')

        # NOTE(review): this strips one leading and one trailing character
        # whenever the content starts with a quote - presumably to undo
        # over-capture of quoted values; verify against callers
        if res.startswith('"') or res.startswith("'"):
            res = res[1:-1]

        retlist.append(unescapeHTML(res))

    return retlist
382
383
class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element.

    Feed it exactly one element; each start tag encountered overwrites
    `attrs`, so only the last tag's attributes are retained.
    """
    def __init__(self):
        # attribute name -> value (None for valueless attributes)
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)
392
393
def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    # HTMLParser lower-cases attribute names and decodes entities for us
    parser = HTMLAttributeParser()
    parser.feed(html_element)
    parser.close()
    return parser.attrs
414
415
def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    # Convenience for sanitizing descriptions etc.
    if html is None:
        return html

    # Newline vs <br />
    cleaned = html.replace('\n', ' ')
    cleaned = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', cleaned)
    cleaned = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', cleaned)
    # Strip html tags
    cleaned = re.sub('<.*?>', '', cleaned)
    # Replace html entities
    cleaned = unescapeHTML(cleaned)
    return cleaned.strip()
431
432
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            # '-' means stdout; switch it to binary mode on Windows so
            # media bytes are not mangled by newline translation
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        # Permission errors cannot be fixed by renaming - give up immediately
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)
463
464
def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp.

    Returns None when the string cannot be parsed.
    """
    parsed = email.utils.parsedate_tz(timestr)
    if parsed is None:
        return None
    return email.utils.mktime_tz(parsed)
472
473
def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        # Transliterate accented characters in restricted mode
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        # Drop '?', control characters and DEL outright
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        if char == '"':
            return '' if restricted else '\''
        if char == ':':
            return '_-' if restricted else ' -'
        if char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    # Handle timestamps: 12:34:56 becomes 12_34_56 before ':' substitution
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(replace_insane(c) for c in s)
    if not is_id:
        # Collapse runs of underscores and trim leading/trailing ones
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
513
514
def sanitize_path(s):
    """Sanitizes and normalizes path on Windows.

    On other platforms the path is returned unchanged.  Each path
    component has Windows-forbidden characters (and trailing dots/spaces)
    replaced with '#'; the drive or UNC prefix is preserved.
    """
    if sys.platform != 'win32':
        return s
    drive_or_unc, _ = os.path.splitdrive(s)
    if sys.version_info < (2, 7) and not drive_or_unc:
        # splitdrive did not handle UNC paths before Python 2.7
        drive_or_unc, _ = os.path.splitunc(s)
    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    return os.path.join(*sanitized_path)
531
532
533 # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of
534 # unwanted failures due to missing protocol
def sanitize_url(url):
    """Give protocol-relative URLs ('//host/...') an explicit http: scheme."""
    if url.startswith('//'):
        return 'http:%s' % url
    return url
537
538
def sanitized_Request(url, *args, **kwargs):
    """Like compat_urllib_request.Request, but with the URL sanitized first."""
    return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)
541
542
def expand_path(s):
    """Expand shell variables and ~"""
    # compat_expanduser works around Python's broken HOME handling on Windows
    return os.path.expandvars(compat_expanduser(s))
546
547
def orderedSet(iterable):
    """Return a list of the iterable's elements with duplicates removed,
    preserving first-seen order.

    Membership is tested with ``in`` (i.e. equality), so elements do not
    have to be hashable.
    """
    unique = []
    for item in iterable:
        if item in unique:
            continue
        unique.append(item)
    return unique
555
556
def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    # Strip the trailing ';' for the legacy name table lookup
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    # Numeric character references: &#NNN; or &#xHHH;
    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/rg3/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity
586
587
def unescapeHTML(s):
    """Replace HTML entities in s with the characters they denote.

    Returns None for None input; unknown entities are left as-is.
    """
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
595
596
def get_subprocess_encoding():
    """Return the encoding to use when passing strings to subprocesses."""
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        # getfilesystemencoding() may return None on some Python 2 setups
        encoding = 'utf-8'
    return encoding
607
608
def encodeFilename(s, for_subprocess=False):
    """Encode a filename for the OS, returning str on Python 3 and on
    platforms with Unicode filename APIs, bytes otherwise.

    @param s              The name of the file (must be a text string)
    @param for_subprocess Encode even on Windows, for passing to a subprocess
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')
631
632
def decodeFilename(b, for_subprocess=False):
    """Inverse of encodeFilename(): decode an OS filename back to text."""

    # Python 3 filenames are already text
    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')
642
643
def encodeArgument(s):
    """Encode a subprocess argument (like encodeFilename with for_subprocess)."""
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)
651
652
def decodeArgument(b):
    """Decode a subprocess argument (inverse of encodeArgument)."""
    return decodeFilename(b, True)
655
656
def decodeOption(optval):
    """Decode a command-line option value to a text string (None passes through)."""
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        # On Python 2, argv entries arrive as bytes in the locale encoding
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval
665
666
def formatSeconds(secs):
    """Format a duration in seconds as 'H:MM:SS', 'M:SS' or plain seconds.

    Returns 'H:MM:SS' for an hour or more, 'M:SS' for a minute or more,
    and the bare number of seconds otherwise.
    """
    # Use >= so exact unit boundaries roll over: the previous `>`
    # comparisons rendered 3600 as '60:00' and 60 as '60'.
    if secs >= 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs >= 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs
674
675
def make_HTTPS_handler(params, **kwargs):
    """Build a YoutubeDLHTTPSHandler with certificate checking configured
    per params['nocheckcertificate'], across all supported Python versions.
    """
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        # No usable SSLContext support - rely on the handler's defaults
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
699
700
def bug_reports_message():
    """Return the standard bug-report footer appended to unexpected errors."""
    if ytdl_is_updateable():
        update_cmd = 'type  youtube-dl -U  to update'
    else:
        update_cmd = 'see  https://yt-dl.org/update  on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg
710
711
class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors.

    All exceptions raised by this package derive from this class so
    callers can catch them with a single except clause.
    """
    pass
715
716
class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        # Network-level failures are always "expected" - no bug report needed
        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        # Returns the formatted traceback as a string, or None if no
        # traceback was recorded
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))
744
745
class UnsupportedError(ExtractorError):
    """Raised when no extractor supports the given URL."""
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url
751
752
class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass
756
757
class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """
    def __init__(self, msg, countries=None):
        super(GeoRestrictedError, self).__init__(msg, expected=True)
        self.msg = msg
        # countries: list of country codes where the video IS available, if known
        self.countries = countries
768
769
class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info
782
783
class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass
791
792
class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        super(PostProcessingError, self).__init__(msg)
        # Keep the raw message accessible to callers
        self.msg = msg
803
804
class MaxDownloadsReached(YoutubeDLError):
    """ --max-downloads limit has been reached. """
    pass
808
809
class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass
817
818
class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected
834
835
class XAttrMetadataError(YoutubeDLError):
    """Raised when writing an extended file attribute (xattr) fails.

    `reason` classifies the failure ('NO_SPACE', 'VALUE_TOO_LONG' or
    'NOT_SUPPORTED') so callers can decide whether to warn or skip xattrs.
    """
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        # (BUGFIX: the message check previously read 'Disk quota excedded',
        # which never matched the real strerror text 'Disk quota exceeded')
        if (self.code in (errno.ENOSPC, errno.EDQUOT) or
                'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'
850
851
class XAttrUnavailableError(YoutubeDLError):
    # Raised when no mechanism for writing xattrs is available at all
    pass
854
855
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    """Instantiate an HTTP(S) connection, honoring the configured
    source_address and working around old-Python quirks.
    """
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/rg3/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs[b'strict'] = True
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')
    if source_address is not None:
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            # No source_address attribute - replace connect() with a version
            # that binds the socket manually
            def _hc_connect(self, *args, **kwargs):
                sock = compat_socket_create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc
881
882
def handle_youtubedl_headers(headers):
    """Process internal 'Youtubedl-*' control headers before a real request.

    When 'Youtubedl-no-compression' is present, return a copy with every
    Accept-Encoding header (case-insensitive) removed along with the
    marker itself; otherwise return the mapping unchanged.
    """
    if 'Youtubedl-no-compression' not in headers:
        return headers
    filtered = dict(
        (key, value) for key, value in headers.items()
        if key.lower() != 'accept-encoding')
    del filtered['Youtubedl-no-compression']
    return filtered
891
892
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        # Route through a SOCKS proxy when the internal Ytdl-socks-proxy
        # header is present (stripped before the request is sent)
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        # Handle both raw deflate streams and zlib-wrapped ones
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        # addinfourl grew a `code` constructor argument only in newer
        # Pythons; set it manually on older ones
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk add the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    # HTTPS requests/responses go through the same pre/post-processing
    https_request = http_request
    https_response = http_response
1022
1023
def make_socks_conn_class(base_class, socks_proxy):
    """Derive a connection class from base_class that tunnels through the
    SOCKS proxy described by the socks_proxy URL (socks4/socks4a/socks5).
    """
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A
    # NOTE(review): an unrecognized scheme falls through with socks_type
    # unbound, producing a NameError below - presumably callers only pass
    # validated schemes; confirm

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            # For HTTPS, wrap the established SOCKS tunnel in TLS
            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection
1065
1066
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    """HTTPS handler honouring youtube-dl params and optional SOCKS proxying."""

    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        conn_class = self._https_conn_class
        open_kwargs = {}

        if hasattr(self, '_context'):  # python > 2.6
            open_kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            open_kwargs['check_hostname'] = self._check_hostname

        # Ytdl-socks-proxy is an internal header: swap in a SOCKS-capable
        # connection class and strip the header before issuing the request
        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        conn_factory = functools.partial(
            _create_http_connection, self, conn_class, True)
        return self.do_open(conn_factory, req, **open_kwargs)
1090
1091
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    """Cookie processor applying identical handling to HTTP and HTTPS."""

    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on the next HTTP request in a row if there are
        # non-ASCII characters in the Set-Cookie HTTP header of the last
        # response (see https://github.com/rg3/youtube-dl/issues/6769).
        # A percent-encoding workaround for Set-Cookie used to live here and
        # is currently disabled:
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response
1114
1115
def extract_timezone(date_str):
    """Split a trailing timezone off date_str.

    Returns (utc_offset, remainder): utc_offset is a datetime.timedelta and
    remainder is date_str with any recognized timezone suffix removed.
    """
    m = re.search(
        r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
        date_str)
    if m is None:
        # No recognizable timezone suffix: assume UTC, keep string intact
        return datetime.timedelta(), date_str
    date_str = date_str[:-len(m.group('tz'))]
    sign = m.group('sign')
    if not sign:
        # A literal 'Z' suffix means UTC
        offset = datetime.timedelta()
    else:
        direction = 1 if sign == '+' else -1
        offset = datetime.timedelta(
            hours=direction * int(m.group('hours')),
            minutes=direction * int(m.group('minutes')))
    return offset, date_str
1132
1133
def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    # strptime's %S does not understand fractional seconds; drop them
    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    try:
        fmt = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        utc_dt = datetime.datetime.strptime(date_str, fmt) - timezone
    except ValueError:
        return None
    return calendar.timegm(utc_dt.timetuple())
1151
1152
def date_formats(day_first=True):
    """Return the strptime format list matching the day/month ordering."""
    if day_first:
        return DATE_FORMATS_DAY_FIRST
    return DATE_FORMATS_MONTH_FIRST
1155
1156
def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Commas never carry meaning in the supported formats
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    # Try every known format; later matches deliberately override earlier ones
    for expression in date_formats(day_first):
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        # Last resort: RFC 2822 style dates
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    return compat_str(upload_date) if upload_date is not None else None
1183
1184
def unified_timestamp(date_str, day_first=True):
    """Convert a free-form date string to a UNIX timestamp (or None)."""
    if date_str is None:
        return None

    date_str = date_str.replace(',', ' ')

    # A 'PM' marker shifts a 12-hour clock reading forward by 12 hours
    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    for expression in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
        except ValueError:
            continue
        return calendar.timegm(dt.timetuple())
    # Fall back to RFC 2822 parsing
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600
    return None
1206
1207
def determine_ext(url, default_ext='unknown_video'):
    """Guess a file extension from a URL; fall back to default_ext."""
    if url is None:
        return default_ext
    candidate = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', candidate):
        return candidate
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    trimmed = candidate.rstrip('/')
    if trimmed in KNOWN_EXTENSIONS:
        return trimmed
    return default_ext
1219
1220
def subtitles_filename(filename, sub_lang, sub_format):
    """Derive the subtitle file name: <base>.<lang>.<format>."""
    base = filename.rsplit('.', 1)[0]
    return '.'.join((base, sub_lang, sub_format))
1223
1224
def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is None:
        # Plain absolute date
        return datetime.datetime.strptime(date_str, '%Y%m%d').date()
    amount = int(match.group('time'))
    if match.group('sign') == '-':
        amount = -amount
    unit = match.group('unit')
    # timedelta has no month/year units; approximate them in days
    if unit == 'month':
        unit, amount = 'day', amount * 30
    elif unit == 'year':
        unit, amount = 'day', amount * 365
    return today + datetime.timedelta(**{unit + 's': amount})
1252
1253
def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is None:
        # Anything not matching the expected shape passes through unchanged
        return date_str
    return '%s-%s-%s' % match.groups()
1262
1263
class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        # Missing bounds default to the widest representable range
        self.start = date_from_str(start) if start is not None else datetime.datetime.min.date()
        self.end = date_from_str(end) if end is not None else datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
1293
1294
def platform_name():
    """ Returns the platform name as a compat_str """
    name = platform.platform()
    # platform.platform() may return bytes on some Python 2 setups
    if isinstance(name, bytes):
        name = name.decode(preferredencoding())

    assert isinstance(name, compat_str)
    return name
1303
1304
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    # Map C-runtime fd (1=stdout, 2=stderr) to the GetStdHandle id
    # (STD_OUTPUT_HANDLE = -11, STD_ERROR_HANDLE = -12)
    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b'GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b'GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        # WriteConsoleW only works for a real console; redirected handles
        # (files, pipes) must fall back to the normal write path
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        # Index of the first character outside the Basic Multilingual Plane
        # (needs special handling), or len(s) if there is none
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        # Write at most 1024 BMP characters at a time; a leading non-BMP
        # character (count == 0) is written as its two UTF-16 code units
        count = min(next_nonbmp_pos(s), 1024)

        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True
1378
1379
def write_string(s, out=None, encoding=None):
    """Write text s to out (default sys.stderr), coping with byte streams,
    text streams, and Windows consoles."""
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    # On Windows consoles, WriteConsoleW handles non-ASCII output correctly
    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        out.write(s.encode(encoding or preferredencoding(), 'ignore'))
    elif hasattr(out, 'buffer'):
        # Text stream with an underlying binary buffer: encode ourselves
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        out.buffer.write(s.encode(enc, 'ignore'))
    else:
        out.write(s)
    out.flush()
1400
1401
def bytes_to_intlist(bs):
    """Turn a byte string into a list of integer byte values."""
    if not bs:
        return []
    # Python 3 byte strings already yield ints when indexed
    return list(bs) if isinstance(bs[0], int) else [ord(c) for c in bs]
1409
1410
def intlist_to_bytes(xs):
    """Inverse of bytes_to_intlist: pack integer byte values into bytes."""
    return compat_struct_pack('%dB' % len(xs), *xs) if xs else b''
1415
1416
# Cross-platform file locking: defines _lock_file(f, exclusive) and
# _unlock_file(f) for the current platform.
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        # Mirrors the Win32 OVERLAPPED struct required by LockFileEx/UnlockFileEx
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    # Byte range covering effectively the whole file
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        # Keep the OVERLAPPED alive on the file object until unlock
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        # 0x2 == LOCKFILE_EXCLUSIVE_LOCK; 0x0 requests a shared lock
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    # Some platforms, such as Jython, is missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)
1490
1491
class locked_file(object):
    """File wrapper holding an advisory lock for the lifetime of a `with` block.

    Read mode takes a shared lock; write/append modes take an exclusive one.
    """

    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            # Never leak the file handle if locking fails
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)
1521
1522
def get_filesystem_encoding():
    """Best-effort filesystem encoding ('utf-8' when Python reports none)."""
    encoding = sys.getfilesystemencoding()
    if encoding is None:
        return 'utf-8'
    return encoding
1526
1527
def shell_quote(args):
    """Return a shell-safe command line string built from an argument list."""
    encoding = get_filesystem_encoding()

    def _as_text(a):
        # We may get a filename encoded with 'encodeFilename'
        return a.decode(encoding) if isinstance(a, bytes) else a

    return ' '.join(pipes.quote(_as_text(a)) for a in args)
1537
1538
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    # Merge with any data already smuggled into the URL
    url, existing_data = unsmuggle_url(url, {})
    data.update(existing_data)
    smuggled = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + smuggled
1547
1548
def unsmuggle_url(smug_url, default=None):
    """Inverse of smuggle_url: return (clean_url, smuggled_data_or_default)."""
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, fragment = smug_url.rpartition('#')
    payload = compat_parse_qs(fragment)['__youtubedl_smuggle'][0]
    return url, json.loads(payload)
1556
1557
def format_bytes(bytes):
    """Format a byte count as a human-readable string (e.g. '1.00MiB')."""
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    _SUFFIXES = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    if bytes == 0.0:
        exponent = 0
    else:
        # Clamp so absurdly large values cannot index past the suffix list
        # (previously raised IndexError for >= 1024 YiB)
        exponent = min(int(math.log(bytes, 1024.0)), len(_SUFFIXES) - 1)
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, _SUFFIXES[exponent])
1570
1571
def lookup_unit_table(unit_table, s):
    """Parse '<number> <unit>' using unit_table; return an int or None."""
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if m is None:
        return None
    # Accept ',' as a decimal separator too
    value = float(m.group('num').replace(',', '.'))
    return int(value * unit_table[m.group('unit')])
1581
1582
def parse_filesize(s):
    """Parse a human-readable file size ('5 MiB', '1,5GB', ...) into bytes.

    Returns an int, or None when s is None or unparsable. Both decimal
    (KB = 1000) and binary (KiB = 1024) units are supported; a lower-case
    prefix followed by 'B' (kB, mB, ...) is treated as binary.
    """
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1,
        'b': 1,
        'bytes': 1,
        'KiB': 1024,
        'KB': 1000,
        'kB': 1024,
        'Kb': 1000,
        'kb': 1000,
        'kilobytes': 1000,
        'kibibytes': 1024,
        'MiB': 1024 ** 2,
        'MB': 1000 ** 2,
        'mB': 1024 ** 2,
        'Mb': 1000 ** 2,
        'mb': 1000 ** 2,
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'GiB': 1024 ** 3,
        'GB': 1000 ** 3,
        'gB': 1024 ** 3,
        'Gb': 1000 ** 3,
        'gb': 1000 ** 3,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'TiB': 1024 ** 4,
        'TB': 1000 ** 4,
        'tB': 1024 ** 4,
        'Tb': 1000 ** 4,
        'tb': 1000 ** 4,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'PiB': 1024 ** 5,
        'PB': 1000 ** 5,
        'pB': 1024 ** 5,
        'Pb': 1000 ** 5,
        'pb': 1000 ** 5,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'EiB': 1024 ** 6,
        'EB': 1000 ** 6,
        'eB': 1024 ** 6,
        'Eb': 1000 ** 6,
        'eb': 1000 ** 6,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'ZiB': 1024 ** 7,
        'ZB': 1000 ** 7,
        'zB': 1024 ** 7,
        'Zb': 1000 ** 7,
        'zb': 1000 ** 7,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'YiB': 1024 ** 8,
        'YB': 1000 ** 8,
        'yB': 1024 ** 8,
        'Yb': 1000 ** 8,
        'yb': 1000 ** 8,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)
1652
1653
def parse_count(s):
    """Parse view/like counts such as '1.2M' or '12,345' into an int."""
    if s is None:
        return None

    s = s.strip()

    # Plain numbers (possibly with thousand separators)
    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    _UNIT_TABLE = {}
    for suffix in ('k', 'K'):
        _UNIT_TABLE[suffix] = 1000
    for suffix in ('m', 'M', 'kk', 'KK'):
        _UNIT_TABLE[suffix] = 1000 ** 2

    return lookup_unit_table(_UNIT_TABLE, s)
1673
1674
def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
    if name not in month_names:
        return None
    return month_names.index(name) + 1
1684
1685
def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
    abbreviations """

    abbreviations = [s[:3] for s in ENGLISH_MONTH_NAMES]
    if abbrev not in abbreviations:
        return None
    return abbreviations.index(abbrev) + 1
1694
1695
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    # Existing entities (&amp;, &lt;, ... and numeric references) are kept
    bare_amp = r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)'
    return re.sub(bare_amp, '&amp;', xml_str)
1702
1703
def setproctitle(title):
    """Best-effort process title change via libc prctl (glibc systems only)."""
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only expects
        # a bytestring, but since unicode_literals turns
        # every string into a unicode string, it fails.
        return
    encoded = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(encoded))
    buf.value = encoded
    try:
        # 15 == PR_SET_NAME (see prctl(2))
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this
1728
1729
def remove_start(s, start):
    """Strip a leading `start` from s if present (None passes through)."""
    if s is None or not s.startswith(start):
        return s
    return s[len(start):]
1732
1733
def remove_end(s, end):
    """Strip a trailing `end` from s if present (None passes through)."""
    if s is None or not s.endswith(end):
        return s
    return s[:-len(end)]
1736
1737
def remove_quotes(s):
    """Strip one matching pair of surrounding single or double quotes."""
    if s is None or len(s) < 2:
        return s
    if s[0] == s[-1] and s[0] in ('"', "'"):
        return s[1:-1]
    return s
1745
1746
def url_basename(url):
    """Return the last path component of url (query/fragment ignored)."""
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').rpartition('/')[2]
1750
1751
def base_url(url):
    """Everything up to and including the last '/' before query/fragment."""
    m = re.match(r'https?://[^?#&]+/', url)
    return m.group()
1754
1755
def urljoin(base, path):
    """Join base and path into an absolute http(s) URL, or None on bad input."""
    def _to_text(s):
        return s.decode('utf-8') if isinstance(s, bytes) else s

    path = _to_text(path)
    if not isinstance(path, compat_str) or not path:
        return None
    if re.match(r'^(?:https?:)?//', path):
        # path is already absolute (possibly protocol-relative)
        return path
    base = _to_text(base)
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return compat_urlparse.urljoin(base, path)
1769
1770
class HEADRequest(compat_urllib_request.Request):
    # Request subclass that always issues an HTTP HEAD
    def get_method(self):
        return 'HEAD'
1774
1775
class PUTRequest(compat_urllib_request.Request):
    # Request subclass that always issues an HTTP PUT
    def get_method(self):
        return 'PUT'
1779
1780
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    """Coerce v to int scaled by invscale/scale; return default on failure.

    If get_attr is given, v is first replaced by getattr(v, get_attr, None).
    Empty strings and None map to default.
    """
    if get_attr:
        if v is not None:
            v = getattr(v, get_attr, None)
    if v == '':
        v = None
    if v is None:
        return default
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError):
        # TypeError covers non-numeric objects (lists, dicts, ...) which
        # previously escaped as an exception instead of yielding default
        return default
1793
1794
def str_or_none(v, default=None):
    """Coerce v to compat_str, passing None through as default."""
    if v is None:
        return default
    return compat_str(v)
1797
1798
def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if int_str is None:
        return None
    # Drop thousand separators and stray '+' before conversion
    return int(re.sub(r'[,\.\+]', '', int_str))
1805
1806
def float_or_none(v, scale=1, invscale=1, default=None):
    """Coerce v to float scaled by invscale/scale; return default on failure."""
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except (ValueError, TypeError):
        # TypeError covers non-numeric objects (lists, dicts, ...) which
        # previously propagated instead of yielding default
        return default
1814
1815
def strip_or_none(v):
    """v.strip(), but None-safe."""
    if v is None:
        return None
    return v.strip()
1818
1819
def parse_duration(s):
    """Parse a duration ('1:23:45', '2h 30m', '90 min', ...) into seconds."""
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    days, hours, mins, secs, ms = (None,) * 5
    # [[DD:]HH:]MM:SS[.ms] clock style
    m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
    if m:
        days, hours, mins, secs, ms = m.groups()
    else:
        # ISO-8601-ish / verbose '1d 2h 3min 4.5s' style
        m = re.match(
            r'''(?ix)(?:P?T)?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
                )?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            # Fractional '1.5 hours' / '90 min' style
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if not m:
                return None
            hours, mins = m.groups()

    duration = 0
    for value, multiplier in (
            (secs, 1), (mins, 60), (hours, 3600), (days, 86400), (ms, 1)):
        if value:
            duration += float(value) * multiplier
    return duration
1866
1867
def prepend_extension(filename, ext, expected_real_ext=None):
    """Insert ext before the real extension: a.mp4 -> a.ext.mp4.

    If expected_real_ext is given and does not match, append instead.
    """
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        # Unexpected extension: leave filename intact and just append
        return '{0}.{1}'.format(filename, ext)
    return '{0}.{1}{2}'.format(name, ext, real_ext)
1874
1875
def replace_extension(filename, ext, expected_real_ext=None):
    """Replace filename's extension with ext (append when the current one
    is not the expected one)."""
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        name = filename
    return '{0}.{1}'.format(name, ext)
1881
1882
def check_executable(exe, args=None):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    # None instead of a mutable [] default; semantics are unchanged
    if args is None:
        args = []
    try:
        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe
1891
1892
def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if youtube-dl is run in the background.
        # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
        proc = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out = proc.communicate()[0]
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)
1910
1911
def detect_exe_version(output, version_re=None, unrecognized='present'):
    """Extract a version token from --version output using version_re."""
    assert isinstance(output, compat_str)
    if version_re is None:
        # Default: a 'version <token>' pattern
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    return m.group(1) if m else unrecognized
1921
1922
class PagedList(object):
    # Base class for paginated result lists; subclasses implement
    # getslice(start, end)
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())
1927
1928
class OnDemandPagedList(PagedList):
    # PagedList that fetches pages lazily; pagefunc(pagenum) must return an
    # iterable with that page's items.
    def __init__(self, pagefunc, pagesize, use_cache=False):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._use_cache = use_cache
        if use_cache:
            self._cache = {}

    def getslice(self, start=0, end=None):
        """Return the items in [start, end) as a list, fetching pages on demand."""
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                # Page lies entirely before the requested range
                continue

            page_results = None
            if self._use_cache:
                page_results = self._cache.get(pagenum)
            if page_results is None:
                page_results = list(self._pagefunc(pagenum))
            if self._use_cache:
                self._cache[pagenum] = page_results

            # Offset of `start` within this page (0 for subsequent pages)
            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)

            # Offset just past `end` within this page (None = take everything)
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res
1979
1980
class InAdvancePagedList(PagedList):
    """PagedList for sources where the total page count is known up front."""

    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        results = []
        first_page = start // self._pagesize
        last_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        skip = start - first_page * self._pagesize
        remaining = None if end is None else end - start
        for pagenum in range(first_page, last_page):
            page = list(self._pagefunc(pagenum))
            if skip:
                # Only the first fetched page needs its head trimmed
                page = page[skip:]
                skip = None
            if remaining is not None:
                if len(page) < remaining:
                    remaining -= len(page)
                else:
                    results.extend(page[:remaining])
                    break
            results.extend(page)
        return results
2008
2009
def uppercase_escape(s):
    """Expand \\UXXXXXXXX escapes (8 hex digits) into real characters."""
    decode = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: decode(m.group(0))[0],
        s)
2016
2017
def lowercase_escape(s):
    """Expand \\uXXXX escapes (4 hex digits) into real characters."""
    decode = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: decode(m.group(0))[0],
        s)
2024
2025
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    # On Python 2, quote() needs a byte string; Python 3 accepts str directly
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    # The safe-characters set keeps RFC 3986 reserved characters unescaped
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
2031
2032
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    parts = compat_urllib_parse_urlparse(url)
    return parts._replace(
        # The host must be IDNA-encoded rather than percent-escaped
        netloc=parts.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(parts.path),
        params=escape_rfc3986(parts.params),
        query=escape_rfc3986(parts.query),
        fragment=escape_rfc3986(parts.fragment)
    ).geturl()
2043
2044
def read_batch_urls(batch_fd):
    """Read a batch-file object and return its list of URLs, skipping
    comment lines; the descriptor is closed afterwards."""
    def _clean(line):
        if not isinstance(line, compat_str):
            line = line.decode('utf-8', 'replace')
        # Drop a leading BOM-like prefix
        BOM_UTF8 = '\xef\xbb\xbf'
        if line.startswith(BOM_UTF8):
            line = line[len(BOM_UTF8):]
        line = line.strip()
        # '#', ';' and ']' introduce comment lines
        if line.startswith(('#', ';', ']')):
            return False
        return line

    with contextlib.closing(batch_fd) as fd:
        return [line for line in map(_clean, fd) if line]
2059
2060
def urlencode_postdata(*args, **kargs):
    """urlencode suited for POST bodies (urllib expects ASCII bytes)."""
    encoded = compat_urllib_parse_urlencode(*args, **kargs)
    return encoded.encode('ascii')
2063
2064
def update_url_query(url, query):
    """Return url with query parameters merged/overridden from `query`."""
    if not query:
        return url
    parsed = compat_urlparse.urlparse(url)
    params = compat_parse_qs(parsed.query)
    params.update(query)
    # doseq=True expands list values into repeated parameters
    return compat_urlparse.urlunparse(parsed._replace(
        query=compat_urllib_parse_urlencode(params, True)))
2073
2074
def update_Request(req, url=None, data=None, headers=None, query=None):
    """Clone a urllib Request, optionally overriding URL, data, headers, query.

    The HTTP method (including the HEAD/PUT subclasses) and any timeout
    attribute are preserved on the new request.
    """
    # None instead of mutable {} defaults; empty/None behave identically
    req_headers = req.headers.copy()
    if headers:
        req_headers.update(headers)
    req_data = data or req.data
    req_url = update_url_query(url or req.get_full_url(), query)
    req_get_method = req.get_method()
    if req_get_method == 'HEAD':
        req_type = HEADRequest
    elif req_get_method == 'PUT':
        req_type = PUTRequest
    else:
        req_type = compat_urllib_request.Request
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    # The timeout attribute is set by the opener; propagate it when present
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req
2093
2094
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    """Look up one key, or the first usable key of a list/tuple, in d.

    Values that are None (or falsy, when skip_false_values) are skipped.
    """
    if not isinstance(key_or_keys, (list, tuple)):
        return d.get(key_or_keys, default)
    for key in key_or_keys:
        if key not in d or d[key] is None:
            continue
        if skip_false_values and not d[key]:
            continue
        return d[key]
    return default
2103
2104
def try_get(src, getter, expected_type=None):
    """Apply *getter* to *src*, returning None on common lookup errors or
    when the result is not an instance of *expected_type* (if given)."""
    try:
        result = getter(src)
    except (AttributeError, KeyError, TypeError, IndexError):
        return None
    if expected_type is not None and not isinstance(result, expected_type):
        return None
    return result
2113
2114
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    """Return *string* as a text (compat_str) object, decoding byte
    strings with *encoding* and *errors*.

    Note: the default *encoding* is evaluated once, at module import
    time, not on each call.
    """
    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
2117
2118
# MPAA movie rating -> minimum viewer age, used for age_limit extraction
US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}
2126
2127
# US TV Parental Guidelines rating -> minimum viewer age
TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}
2136
2137
def parse_age_limit(s):
    """Parse an age limit from *s*: an int in 0..21, an '18+'-style
    string, a US movie rating or a TV parental guideline.

    Returns the numeric age limit, or None when it cannot be parsed.
    """
    # Deliberately an exact type check (bool must not be accepted)
    if type(s) == int:
        return s if 0 <= s <= 21 else None
    if not isinstance(s, compat_basestring):
        return None
    mobj = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if mobj:
        return int(mobj.group('age'))
    return US_RATINGS[s] if s in US_RATINGS else TV_PARENTAL_GUIDELINES.get(s)
2149
2150
def strip_jsonp(code):
    """Strip a JSONP callback wrapper from *code*, returning the bare
    JSON payload (trailing ';' and '//' comments are dropped too)."""
    jsonp_re = re.compile(
        r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$')
    return jsonp_re.sub(r'\1', code)
2154
2155
def js_to_json(code):
    """Convert a JavaScript object literal into valid JSON text.

    Handles single-quoted strings, unquoted identifiers used as keys,
    hex/octal integer keys, trailing commas, and /* */ and // comments.
    """
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
    # Optional whitespace/comment run allowed between a token and ':'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    # (pattern, numeric base) pairs for hex and octal integer literals
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        # Normalize one matched token (string, identifier, number,
        # comment or trailing comma) into its JSON equivalent
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v.startswith('/*') or v.startswith('//') or v == ',':
            # Comments and trailing commas are simply removed
            return ""

        if v[0] in ("'", '"'):
            # Re-escape string contents for JSON double-quoted syntax
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])

        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
            if im:
                i = int(im.group(1), base)
                # A hex/octal key becomes a quoted decimal key
                return '"%d":' % i if v.endswith(':') else '%d' % i

        # Bare identifier: quote it
        return '"%s"' % v

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        [a-zA-Z_][.a-zA-Z_0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
2195
2196
def qualities(quality_ids):
    """Return a function mapping a quality id to its rank within
    *quality_ids*; unknown ids map to -1."""
    def q(qid):
        if qid in quality_ids:
            return quality_ids.index(qid)
        return -1
    return q
2205
2206
# Default output filename template: "<title>-<id>.<extension>"
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
2208
2209
def limit_length(s, length):
    """Truncate *s* to at most *length* characters, terminating overly
    long strings with '...'; None passes through unchanged."""
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) <= length:
        return s
    return s[:length - len(ELLIPSES)] + ELLIPSES
2218
2219
def version_tuple(v):
    """Split a version string on '.' and '-' into a tuple of ints."""
    return tuple(map(int, re.split(r'[-.]', v)))
2222
2223
def is_outdated_version(version, limit, assume_new=True):
    """Return True when *version* is older than *limit*.

    When *version* is missing or either string is malformed, the result
    falls back to ``not assume_new``.
    """
    if not version:
        return not assume_new
    try:
        outdated = version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new
    return outdated
2231
2232
def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter

    # Updatable when running from a zip bundle or a frozen executable
    loader = globals().get('__loader__')
    return isinstance(loader, zipimporter) or hasattr(sys, 'frozen')
2238
2239
def args_to_str(args):
    # Get a short string representation for a subprocess command
    quoted = [compat_shlex_quote(a) for a in args]
    return ' '.join(quoted)
2243
2244
def error_to_compat_str(err):
    """Return the message of an exception as a text string."""
    msg = str(err)
    # On Python 2, str(err) may be a byte string in the locale encoding
    # rather than ASCII, so decode it explicitly
    if sys.version_info[0] < 3:
        msg = msg.decode(preferredencoding())
    return msg
2252
2253
def mimetype2ext(mt):
    """Map a MIME type to a file extension.

    Full-type matches take precedence; otherwise the subtype (with any
    ';'-parameters stripped, lowercased) is mapped, and unknown subtypes
    are returned as-is.  None maps to None.
    """
    if mt is None:
        return None

    FULL_MAP = {
        'audio/mp4': 'm4a',
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
    }
    if mt in FULL_MAP:
        return FULL_MAP[mt]

    # Keep only the subtype, dropping parameters such as '; codecs=...'
    subtype = mt.rpartition('/')[2]
    subtype = subtype.split(';')[0].strip().lower()

    SUBTYPE_MAP = {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'srt': 'srt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'vtt': 'vtt',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m': 'f4m',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
    }
    return SUBTYPE_MAP.get(subtype, subtype)
2290
2291
def parse_codecs(codecs_str):
    """Parse an RFC 6381 codecs string (e.g. 'avc1.64001f, mp4a.40.2')
    into a dict with 'vcodec' and 'acodec' entries.

    Returns {} for empty input or when nothing can be deduced.
    http://tools.ietf.org/html/rfc6381
    """
    if not codecs_str:
        return {}
    # Split on commas, trimming whitespace and ignoring empty items
    split_codecs = [
        c.strip() for c in codecs_str.strip().strip(',').split(',') if c.strip()]
    vcodec, acodec = None, None
    for full_codec in split_codecs:
        # Only the first dotted component identifies the codec family
        codec = full_codec.split('.')[0]
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'):
            if not vcodec:
                vcodec = full_codec
        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'):
            if not acodec:
                acodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        if len(split_codecs) == 2:
            # Two unknown codecs: nothing can be deduced about either
            return {
                'vcodec': vcodec,
                'acodec': acodec,
            }
        elif len(split_codecs) == 1:
            # A single unclassifiable codec: assume an audio-only stream.
            # (Bug fix: the original returned acodec=vcodec here, which is
            # always None inside this branch, silently dropping the codec.)
            return {
                'vcodec': 'none',
                'acodec': split_codecs[0],
            }
    else:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    return {}
2326
2327
def urlhandle_detect_ext(url_handle):
    """Guess a file extension for a response handle, preferring the
    filename in Content-Disposition, then falling back to Content-Type."""
    headers = url_handle.headers

    cd = headers.get('Content-Disposition')
    if cd:
        mobj = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if mobj:
            ext = determine_ext(mobj.group('filename'), default_ext=None)
            if ext:
                return ext

    return mimetype2ext(headers.get('Content-Type'))
2340
2341
def encode_data_uri(data, mime_type):
    """Encode *data* (bytes) as a base64 'data:' URI with *mime_type*."""
    payload = base64.b64encode(data).decode('ascii')
    return 'data:%s;base64,%s' % (mime_type, payload)
2344
2345
def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None or content_limit is None:
        # No viewer limit set, or content is available for everyone
        return False
    return age_limit < content_limit
2354
2355
def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """
    # Recognized byte-order marks, longest prefixes first
    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    decoded = None
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            decoded = first_bytes[len(bom):].decode(enc, 'replace')
            break
    if decoded is None:
        decoded = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', decoded)
2374
2375
def determine_protocol(info_dict):
    """Work out the download protocol for *info_dict*, preferring its
    explicit 'protocol' field and otherwise inspecting its URL."""
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = info_dict['url']
    # Streaming schemes recognizable from the URL prefix
    for prefix in ('rtmp', 'mms', 'rtsp'):
        if url.startswith(prefix):
            return prefix

    ext = determine_ext(url)
    if ext in ('m3u8', 'f4m'):
        return ext

    return compat_urllib_parse_urlparse(url).scheme
2396
2397
def render_table(header_row, data):
    """ Render a list of rows, each as a list of values """
    table = [header_row] + data
    # Widest cell per column determines that column's width
    widths = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
    fmt = ' '.join('%-' + compat_str(w + 1) + 's' for w in widths[:-1]) + '%s'
    return '\n'.join(fmt % tuple(row) for row in table)
2404
2405
def _match_one(filter_part, dct):
    """Evaluate a single filter expression (e.g. 'duration > 60',
    'uploader = x', '!is_live') against the dict *dct*.

    Raises ValueError for expressions that cannot be parsed.
    """
    COMPARISON_OPERATORS = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '=': operator.eq,
        '!=': operator.ne,
    }
    # Binary form: KEY OP [?] VALUE, where VALUE is a number (optionally
    # with a filesize suffix), a quoted string, or a bare word
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
            (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = COMPARISON_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        if (m.group('quotedstrval') is not None or
            m.group('strval') is not None or
                # If the original field is a string and matching comparisonvalue is
                # a number we should respect the origin of the original field
                # and process comparison value as a string (see
                # https://github.com/rg3/youtube-dl/issues/11082).
                actual_value is not None and m.group('intval') is not None and
                isinstance(actual_value, compat_str)):
            if m.group('op') not in ('=', '!='):
                raise ValueError(
                    'Operator %s does not support string values!' % m.group('op'))
            comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
            quote = m.group('quote')
            if quote is not None:
                # Unescape the quote character inside quoted strings
                comparison_value = comparison_value.replace(r'\%s' % quote, quote)
        else:
            try:
                comparison_value = int(m.group('intval'))
            except ValueError:
                # Not a plain integer: try to interpret as a filesize,
                # with and without an explicit 'B' suffix
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        if actual_value is None:
            # Missing field passes only when the '?' marker was given
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)

    # Unary form: KEY (present) or !KEY (absent)
    UNARY_OPERATORS = {
        '': lambda v: v is not None,
        '!': lambda v: v is None,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)
2474
2475
def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
    for filter_part in filter_str.split('&'):
        if not _match_one(filter_part, dct):
            return False
    return True
2481
2482
def match_filter_func(filter_str):
    """Build a --match-filter callback from *filter_str*.

    The returned function yields None when a video passes the filter,
    or a human-readable message explaining why it is skipped.
    """
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        video_title = info_dict.get('title', info_dict.get('id', 'video'))
        return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func
2491
2492
def parse_dfxp_time_expr(time_expr):
    """Parse a TTML/DFXP time expression ('12.3', '12.3s' or
    'HH:MM:SS(.frac|:frames)') into seconds; None when unparsable."""
    if not time_expr:
        return None

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        hours, minutes, secs = mobj.groups()
        # A ':' before the fraction is treated like a decimal point
        return 3600 * int(hours) + 60 * int(minutes) + float(secs.replace(':', '.'))
    return None
2504
2505
def srt_subtitles_timecode(seconds):
    """Format a duration in seconds as an SRT timecode (HH:MM:SS,mmm)."""
    hours = seconds / 3600
    minutes = (seconds % 3600) / 60
    secs = seconds % 60
    millis = (seconds % 1) * 1000
    return '%02d:%02d:%02d,%03d' % (hours, minutes, secs, millis)
2508
2509
def dfxp2srt(dfxp_data):
    """Convert a DFXP/TTML subtitle document (text) into SRT format.

    Supports the TTML namespace as well as two legacy TTAF1 namespaces
    and un-namespaced documents.  Raises ValueError when no <p> cues
    are found.
    """
    # Helper expanding 'prefix:tag' into the fully namespaced tag name
    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
        'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1',
    })

    class TTMLPElementParser(object):
        # Accumulated plain-text content of one <p> element
        out = ''

        def start(self, tag, attrib):
            # <br/> (in any supported namespace) becomes a newline
            if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
                self.out += '\n'

        def end(self, tag):
            pass

        def data(self, data):
            self.out += data

        def close(self):
            return self.out.strip()

    def parse_node(node):
        # Extract the text of a cue by re-parsing its serialized XML
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            # A cue without a start time cannot be rendered
            continue
        if not end_time:
            # Derive the end from an explicit duration when available
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)
2563
2564
def cli_option(params, command_option, param):
    """Render an optional parameter from *params* as CLI arguments:
    [] when the value is None, otherwise [command_option, value].

    Truthy values are converted to text; falsy non-None values are
    passed through unchanged.
    """
    value = params.get(param)
    if value:
        value = compat_str(value)
    return [] if value is None else [command_option, value]
2570
2571
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    """Render a boolean parameter from *params* as CLI arguments, either
    as two list items or joined with *separator* into one."""
    param = params.get(param)
    assert isinstance(param, bool)
    rendered = true_value if param else false_value
    if separator:
        return [command_option + separator + rendered]
    return [command_option, rendered]
2578
2579
def cli_valueless_option(params, command_option, param, expected_value=True):
    """Return [command_option] when params[param] equals
    *expected_value*, otherwise an empty list."""
    if params.get(param) == expected_value:
        return [command_option]
    return []
2583
2584
def cli_configuration_args(params, param, default=[]):
    """Return the list of extra CLI arguments stored under *param* in
    *params*, or *default* when the key is unset."""
    ex_args = params.get(param)
    if ex_args is not None:
        assert isinstance(ex_args, list)
        return ex_args
    return default
2591
2592
class ISO639Utils(object):
    """Conversions between ISO 639-1 (two-letter) and ISO 639-2/T
    (three-letter) language codes."""
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        'aa': 'aar',
        'ab': 'abk',
        'ae': 'ave',
        'af': 'afr',
        'ak': 'aka',
        'am': 'amh',
        'an': 'arg',
        'ar': 'ara',
        'as': 'asm',
        'av': 'ava',
        'ay': 'aym',
        'az': 'aze',
        'ba': 'bak',
        'be': 'bel',
        'bg': 'bul',
        'bh': 'bih',
        'bi': 'bis',
        'bm': 'bam',
        'bn': 'ben',
        'bo': 'bod',
        'br': 'bre',
        'bs': 'bos',
        'ca': 'cat',
        'ce': 'che',
        'ch': 'cha',
        'co': 'cos',
        'cr': 'cre',
        'cs': 'ces',
        'cu': 'chu',
        'cv': 'chv',
        'cy': 'cym',
        'da': 'dan',
        'de': 'deu',
        'dv': 'div',
        'dz': 'dzo',
        'ee': 'ewe',
        'el': 'ell',
        'en': 'eng',
        'eo': 'epo',
        'es': 'spa',
        'et': 'est',
        'eu': 'eus',
        'fa': 'fas',
        'ff': 'ful',
        'fi': 'fin',
        'fj': 'fij',
        'fo': 'fao',
        'fr': 'fra',
        'fy': 'fry',
        'ga': 'gle',
        'gd': 'gla',
        'gl': 'glg',
        'gn': 'grn',
        'gu': 'guj',
        'gv': 'glv',
        'ha': 'hau',
        'he': 'heb',
        'hi': 'hin',
        'ho': 'hmo',
        'hr': 'hrv',
        'ht': 'hat',
        'hu': 'hun',
        'hy': 'hye',
        'hz': 'her',
        'ia': 'ina',
        'id': 'ind',
        'ie': 'ile',
        'ig': 'ibo',
        'ii': 'iii',
        'ik': 'ipk',
        'io': 'ido',
        'is': 'isl',
        'it': 'ita',
        'iu': 'iku',
        'ja': 'jpn',
        'jv': 'jav',
        'ka': 'kat',
        'kg': 'kon',
        'ki': 'kik',
        'kj': 'kua',
        'kk': 'kaz',
        'kl': 'kal',
        'km': 'khm',
        'kn': 'kan',
        'ko': 'kor',
        'kr': 'kau',
        'ks': 'kas',
        'ku': 'kur',
        'kv': 'kom',
        'kw': 'cor',
        'ky': 'kir',
        'la': 'lat',
        'lb': 'ltz',
        'lg': 'lug',
        'li': 'lim',
        'ln': 'lin',
        'lo': 'lao',
        'lt': 'lit',
        'lu': 'lub',
        'lv': 'lav',
        'mg': 'mlg',
        'mh': 'mah',
        'mi': 'mri',
        'mk': 'mkd',
        'ml': 'mal',
        'mn': 'mon',
        'mr': 'mar',
        'ms': 'msa',
        'mt': 'mlt',
        'my': 'mya',
        'na': 'nau',
        'nb': 'nob',
        'nd': 'nde',
        'ne': 'nep',
        'ng': 'ndo',
        'nl': 'nld',
        'nn': 'nno',
        'no': 'nor',
        'nr': 'nbl',
        'nv': 'nav',
        'ny': 'nya',
        'oc': 'oci',
        'oj': 'oji',
        'om': 'orm',
        'or': 'ori',
        'os': 'oss',
        'pa': 'pan',
        'pi': 'pli',
        'pl': 'pol',
        'ps': 'pus',
        'pt': 'por',
        'qu': 'que',
        'rm': 'roh',
        'rn': 'run',
        'ro': 'ron',
        'ru': 'rus',
        'rw': 'kin',
        'sa': 'san',
        'sc': 'srd',
        'sd': 'snd',
        'se': 'sme',
        'sg': 'sag',
        'si': 'sin',
        'sk': 'slk',
        'sl': 'slv',
        'sm': 'smo',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sqi',
        'sr': 'srp',
        'ss': 'ssw',
        'st': 'sot',
        'su': 'sun',
        'sv': 'swe',
        'sw': 'swa',
        'ta': 'tam',
        'te': 'tel',
        'tg': 'tgk',
        'th': 'tha',
        'ti': 'tir',
        'tk': 'tuk',
        'tl': 'tgl',
        'tn': 'tsn',
        'to': 'ton',
        'tr': 'tur',
        'ts': 'tso',
        'tt': 'tat',
        'tw': 'twi',
        'ty': 'tah',
        'ug': 'uig',
        'uk': 'ukr',
        'ur': 'urd',
        'uz': 'uzb',
        've': 'ven',
        'vi': 'vie',
        'vo': 'vol',
        'wa': 'wln',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yid',
        'yo': 'yor',
        'za': 'zha',
        'zh': 'zho',
        'zu': 'zul',
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        # Only the first two characters are significant; unknown codes
        # yield None
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        # Reverse lookup; returns None implicitly for unknown codes
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name
2793
2794
class ISO3166Utils(object):
    """Mapping of ISO 3166-1 alpha-2 country codes to full country names."""
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan',
        'AX': 'Åland Islands',
        'AL': 'Albania',
        'DZ': 'Algeria',
        'AS': 'American Samoa',
        'AD': 'Andorra',
        'AO': 'Angola',
        'AI': 'Anguilla',
        'AQ': 'Antarctica',
        'AG': 'Antigua and Barbuda',
        'AR': 'Argentina',
        'AM': 'Armenia',
        'AW': 'Aruba',
        'AU': 'Australia',
        'AT': 'Austria',
        'AZ': 'Azerbaijan',
        'BS': 'Bahamas',
        'BH': 'Bahrain',
        'BD': 'Bangladesh',
        'BB': 'Barbados',
        'BY': 'Belarus',
        'BE': 'Belgium',
        'BZ': 'Belize',
        'BJ': 'Benin',
        'BM': 'Bermuda',
        'BT': 'Bhutan',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BW': 'Botswana',
        'BV': 'Bouvet Island',
        'BR': 'Brazil',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BG': 'Bulgaria',
        'BF': 'Burkina Faso',
        'BI': 'Burundi',
        'KH': 'Cambodia',
        'CM': 'Cameroon',
        'CA': 'Canada',
        'CV': 'Cape Verde',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'TD': 'Chad',
        'CL': 'Chile',
        'CN': 'China',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CO': 'Colombia',
        'KM': 'Comoros',
        'CG': 'Congo',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CR': 'Costa Rica',
        'CI': 'Côte d\'Ivoire',
        'HR': 'Croatia',
        'CU': 'Cuba',
        'CW': 'Curaçao',
        'CY': 'Cyprus',
        'CZ': 'Czech Republic',
        'DK': 'Denmark',
        'DJ': 'Djibouti',
        'DM': 'Dominica',
        'DO': 'Dominican Republic',
        'EC': 'Ecuador',
        'EG': 'Egypt',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'ER': 'Eritrea',
        'EE': 'Estonia',
        'ET': 'Ethiopia',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'FJ': 'Fiji',
        'FI': 'Finland',
        'FR': 'France',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GA': 'Gabon',
        'GM': 'Gambia',
        'GE': 'Georgia',
        'DE': 'Germany',
        'GH': 'Ghana',
        'GI': 'Gibraltar',
        'GR': 'Greece',
        'GL': 'Greenland',
        'GD': 'Grenada',
        'GP': 'Guadeloupe',
        'GU': 'Guam',
        'GT': 'Guatemala',
        'GG': 'Guernsey',
        'GN': 'Guinea',
        'GW': 'Guinea-Bissau',
        'GY': 'Guyana',
        'HT': 'Haiti',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'HN': 'Honduras',
        'HK': 'Hong Kong',
        'HU': 'Hungary',
        'IS': 'Iceland',
        'IN': 'India',
        'ID': 'Indonesia',
        'IR': 'Iran, Islamic Republic of',
        'IQ': 'Iraq',
        'IE': 'Ireland',
        'IM': 'Isle of Man',
        'IL': 'Israel',
        'IT': 'Italy',
        'JM': 'Jamaica',
        'JP': 'Japan',
        'JE': 'Jersey',
        'JO': 'Jordan',
        'KZ': 'Kazakhstan',
        'KE': 'Kenya',
        'KI': 'Kiribati',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'KW': 'Kuwait',
        'KG': 'Kyrgyzstan',
        'LA': 'Lao People\'s Democratic Republic',
        'LV': 'Latvia',
        'LB': 'Lebanon',
        'LS': 'Lesotho',
        'LR': 'Liberia',
        'LY': 'Libya',
        'LI': 'Liechtenstein',
        'LT': 'Lithuania',
        'LU': 'Luxembourg',
        'MO': 'Macao',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MG': 'Madagascar',
        'MW': 'Malawi',
        'MY': 'Malaysia',
        'MV': 'Maldives',
        'ML': 'Mali',
        'MT': 'Malta',
        'MH': 'Marshall Islands',
        'MQ': 'Martinique',
        'MR': 'Mauritania',
        'MU': 'Mauritius',
        'YT': 'Mayotte',
        'MX': 'Mexico',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'MC': 'Monaco',
        'MN': 'Mongolia',
        'ME': 'Montenegro',
        'MS': 'Montserrat',
        'MA': 'Morocco',
        'MZ': 'Mozambique',
        'MM': 'Myanmar',
        'NA': 'Namibia',
        'NR': 'Nauru',
        'NP': 'Nepal',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NI': 'Nicaragua',
        'NE': 'Niger',
        'NG': 'Nigeria',
        'NU': 'Niue',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'NO': 'Norway',
        'OM': 'Oman',
        'PK': 'Pakistan',
        'PW': 'Palau',
        'PS': 'Palestine, State of',
        'PA': 'Panama',
        'PG': 'Papua New Guinea',
        'PY': 'Paraguay',
        'PE': 'Peru',
        'PH': 'Philippines',
        'PN': 'Pitcairn',
        'PL': 'Poland',
        'PT': 'Portugal',
        'PR': 'Puerto Rico',
        'QA': 'Qatar',
        'RE': 'Réunion',
        'RO': 'Romania',
        'RU': 'Russian Federation',
        'RW': 'Rwanda',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'WS': 'Samoa',
        'SM': 'San Marino',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SN': 'Senegal',
        'RS': 'Serbia',
        'SC': 'Seychelles',
        'SL': 'Sierra Leone',
        'SG': 'Singapore',
        'SX': 'Sint Maarten (Dutch part)',
        'SK': 'Slovakia',
        'SI': 'Slovenia',
        'SB': 'Solomon Islands',
        'SO': 'Somalia',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'ES': 'Spain',
        'LK': 'Sri Lanka',
        'SD': 'Sudan',
        'SR': 'Suriname',
        'SJ': 'Svalbard and Jan Mayen',
        'SZ': 'Swaziland',
        'SE': 'Sweden',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TJ': 'Tajikistan',
        'TZ': 'Tanzania, United Republic of',
        'TH': 'Thailand',
        'TL': 'Timor-Leste',
        'TG': 'Togo',
        'TK': 'Tokelau',
        'TO': 'Tonga',
        'TT': 'Trinidad and Tobago',
        'TN': 'Tunisia',
        'TR': 'Turkey',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'TV': 'Tuvalu',
        'UG': 'Uganda',
        'UA': 'Ukraine',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'UY': 'Uruguay',
        'UZ': 'Uzbekistan',
        'VU': 'Vanuatu',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VN': 'Viet Nam',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
        'YE': 'Yemen',
        'ZM': 'Zambia',
        'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        # Case-insensitive lookup; unknown codes yield None
        return cls._country_map.get(code.upper())
3053
3054
class GeoUtils(object):
    """Helpers for picking an IPv4 address that geolocates to a given
    country (used for geo-restriction bypass)."""
    # Major IPv4 address blocks per country
    _country_ip_map = {
        'AD': '85.94.160.0/19',
        'AE': '94.200.0.0/13',
        'AF': '149.54.0.0/17',
        'AG': '209.59.64.0/18',
        'AI': '204.14.248.0/21',
        'AL': '46.99.0.0/16',
        'AM': '46.70.0.0/15',
        'AO': '105.168.0.0/13',
        'AP': '159.117.192.0/21',
        'AR': '181.0.0.0/12',
        'AS': '202.70.112.0/20',
        'AT': '84.112.0.0/13',
        'AU': '1.128.0.0/11',
        'AW': '181.41.0.0/18',
        'AZ': '5.191.0.0/16',
        'BA': '31.176.128.0/17',
        'BB': '65.48.128.0/17',
        'BD': '114.130.0.0/16',
        'BE': '57.0.0.0/8',
        'BF': '129.45.128.0/17',
        'BG': '95.42.0.0/15',
        'BH': '37.131.0.0/17',
        'BI': '154.117.192.0/18',
        'BJ': '137.255.0.0/16',
        'BL': '192.131.134.0/24',
        'BM': '196.12.64.0/18',
        'BN': '156.31.0.0/16',
        'BO': '161.56.0.0/16',
        'BQ': '161.0.80.0/20',
        'BR': '152.240.0.0/12',
        'BS': '24.51.64.0/18',
        'BT': '119.2.96.0/19',
        'BW': '168.167.0.0/16',
        'BY': '178.120.0.0/13',
        'BZ': '179.42.192.0/18',
        'CA': '99.224.0.0/11',
        'CD': '41.243.0.0/16',
        'CF': '196.32.200.0/21',
        'CG': '197.214.128.0/17',
        'CH': '85.0.0.0/13',
        'CI': '154.232.0.0/14',
        'CK': '202.65.32.0/19',
        'CL': '152.172.0.0/14',
        'CM': '165.210.0.0/15',
        'CN': '36.128.0.0/10',
        'CO': '181.240.0.0/12',
        'CR': '201.192.0.0/12',
        'CU': '152.206.0.0/15',
        'CV': '165.90.96.0/19',
        'CW': '190.88.128.0/17',
        'CY': '46.198.0.0/15',
        'CZ': '88.100.0.0/14',
        'DE': '53.0.0.0/8',
        'DJ': '197.241.0.0/17',
        'DK': '87.48.0.0/12',
        'DM': '192.243.48.0/20',
        'DO': '152.166.0.0/15',
        'DZ': '41.96.0.0/12',
        'EC': '186.68.0.0/15',
        'EE': '90.190.0.0/15',
        'EG': '156.160.0.0/11',
        'ER': '196.200.96.0/20',
        'ES': '88.0.0.0/11',
        'ET': '196.188.0.0/14',
        'EU': '2.16.0.0/13',
        'FI': '91.152.0.0/13',
        'FJ': '144.120.0.0/16',
        'FM': '119.252.112.0/20',
        'FO': '88.85.32.0/19',
        'FR': '90.0.0.0/9',
        'GA': '41.158.0.0/15',
        'GB': '25.0.0.0/8',
        'GD': '74.122.88.0/21',
        'GE': '31.146.0.0/16',
        'GF': '161.22.64.0/18',
        'GG': '62.68.160.0/19',
        'GH': '45.208.0.0/14',
        'GI': '85.115.128.0/19',
        'GL': '88.83.0.0/19',
        'GM': '160.182.0.0/15',
        'GN': '197.149.192.0/18',
        'GP': '104.250.0.0/19',
        'GQ': '105.235.224.0/20',
        'GR': '94.64.0.0/13',
        'GT': '168.234.0.0/16',
        'GU': '168.123.0.0/16',
        'GW': '197.214.80.0/20',
        'GY': '181.41.64.0/18',
        'HK': '113.252.0.0/14',
        'HN': '181.210.0.0/16',
        'HR': '93.136.0.0/13',
        'HT': '148.102.128.0/17',
        'HU': '84.0.0.0/14',
        'ID': '39.192.0.0/10',
        'IE': '87.32.0.0/12',
        'IL': '79.176.0.0/13',
        'IM': '5.62.80.0/20',
        'IN': '117.192.0.0/10',
        'IO': '203.83.48.0/21',
        'IQ': '37.236.0.0/14',
        'IR': '2.176.0.0/12',
        'IS': '82.221.0.0/16',
        'IT': '79.0.0.0/10',
        'JE': '87.244.64.0/18',
        'JM': '72.27.0.0/17',
        'JO': '176.29.0.0/16',
        'JP': '126.0.0.0/8',
        'KE': '105.48.0.0/12',
        'KG': '158.181.128.0/17',
        'KH': '36.37.128.0/17',
        'KI': '103.25.140.0/22',
        'KM': '197.255.224.0/20',
        'KN': '198.32.32.0/19',
        'KP': '175.45.176.0/22',
        'KR': '175.192.0.0/10',
        'KW': '37.36.0.0/14',
        'KY': '64.96.0.0/15',
        'KZ': '2.72.0.0/13',
        'LA': '115.84.64.0/18',
        'LB': '178.135.0.0/16',
        'LC': '192.147.231.0/24',
        'LI': '82.117.0.0/19',
        'LK': '112.134.0.0/15',
        'LR': '41.86.0.0/19',
        'LS': '129.232.0.0/17',
        'LT': '78.56.0.0/13',
        'LU': '188.42.0.0/16',
        'LV': '46.109.0.0/16',
        'LY': '41.252.0.0/14',
        'MA': '105.128.0.0/11',
        'MC': '88.209.64.0/18',
        'MD': '37.246.0.0/16',
        'ME': '178.175.0.0/17',
        'MF': '74.112.232.0/21',
        'MG': '154.126.0.0/17',
        'MH': '117.103.88.0/21',
        'MK': '77.28.0.0/15',
        'ML': '154.118.128.0/18',
        'MM': '37.111.0.0/17',
        'MN': '49.0.128.0/17',
        'MO': '60.246.0.0/16',
        'MP': '202.88.64.0/20',
        'MQ': '109.203.224.0/19',
        'MR': '41.188.64.0/18',
        'MS': '208.90.112.0/22',
        'MT': '46.11.0.0/16',
        'MU': '105.16.0.0/12',
        'MV': '27.114.128.0/18',
        'MW': '105.234.0.0/16',
        'MX': '187.192.0.0/11',
        'MY': '175.136.0.0/13',
        'MZ': '197.218.0.0/15',
        'NA': '41.182.0.0/16',
        'NC': '101.101.0.0/18',
        'NE': '197.214.0.0/18',
        'NF': '203.17.240.0/22',
        'NG': '105.112.0.0/12',
        'NI': '186.76.0.0/15',
        'NL': '145.96.0.0/11',
        'NO': '84.208.0.0/13',
        'NP': '36.252.0.0/15',
        'NR': '203.98.224.0/19',
        'NU': '49.156.48.0/22',
        'NZ': '49.224.0.0/14',
        'OM': '5.36.0.0/15',
        'PA': '186.72.0.0/15',
        'PE': '186.160.0.0/14',
        'PF': '123.50.64.0/18',
        'PG': '124.240.192.0/19',
        'PH': '49.144.0.0/13',
        'PK': '39.32.0.0/11',
        'PL': '83.0.0.0/11',
        'PM': '70.36.0.0/20',
        'PR': '66.50.0.0/16',
        'PS': '188.161.0.0/16',
        'PT': '85.240.0.0/13',
        'PW': '202.124.224.0/20',
        'PY': '181.120.0.0/14',
        'QA': '37.210.0.0/15',
        'RE': '139.26.0.0/16',
        'RO': '79.112.0.0/13',
        'RS': '178.220.0.0/14',
        'RU': '5.136.0.0/13',
        'RW': '105.178.0.0/15',
        'SA': '188.48.0.0/13',
        'SB': '202.1.160.0/19',
        'SC': '154.192.0.0/11',
        'SD': '154.96.0.0/13',
        'SE': '78.64.0.0/12',
        'SG': '152.56.0.0/14',
        'SI': '188.196.0.0/14',
        'SK': '78.98.0.0/15',
        'SL': '197.215.0.0/17',
        'SM': '89.186.32.0/19',
        'SN': '41.82.0.0/15',
        'SO': '197.220.64.0/19',
        'SR': '186.179.128.0/17',
        'SS': '105.235.208.0/21',
        'ST': '197.159.160.0/19',
        'SV': '168.243.0.0/16',
        'SX': '190.102.0.0/20',
        'SY': '5.0.0.0/16',
        'SZ': '41.84.224.0/19',
        'TC': '65.255.48.0/20',
        'TD': '154.68.128.0/19',
        'TG': '196.168.0.0/14',
        'TH': '171.96.0.0/13',
        'TJ': '85.9.128.0/18',
        'TK': '27.96.24.0/21',
        'TL': '180.189.160.0/20',
        'TM': '95.85.96.0/19',
        'TN': '197.0.0.0/11',
        'TO': '175.176.144.0/21',
        'TR': '78.160.0.0/11',
        'TT': '186.44.0.0/15',
        'TV': '202.2.96.0/19',
        'TW': '120.96.0.0/11',
        'TZ': '156.156.0.0/14',
        'UA': '93.72.0.0/13',
        'UG': '154.224.0.0/13',
        'US': '3.0.0.0/8',
        'UY': '167.56.0.0/13',
        'UZ': '82.215.64.0/18',
        'VA': '212.77.0.0/19',
        'VC': '24.92.144.0/20',
        'VE': '186.88.0.0/13',
        'VG': '172.103.64.0/18',
        'VI': '146.226.0.0/16',
        'VN': '14.160.0.0/11',
        'VU': '202.80.32.0/20',
        'WF': '117.20.32.0/21',
        'WS': '202.4.32.0/19',
        'YE': '134.35.0.0/16',
        'YT': '41.242.116.0/22',
        'ZA': '41.0.0.0/11',
        'ZM': '165.56.0.0/13',
        'ZW': '41.85.192.0/19',
    }

    @classmethod
    def random_ipv4(cls, code):
        """Return a random IPv4 address (as text, dotted quad) from the
        given country's block, or None for an unknown country code."""
        block = cls._country_ip_map.get(code.upper())
        if not block:
            return None
        addr, preflen = block.split('/')
        # Numeric value of the block's base address (big-endian)
        addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
        # Highest address in the block: set all host bits
        addr_max = addr_min | (0xffffffff >> int(preflen))
        return compat_str(socket.inet_ntoa(
            compat_struct_pack('!L', random.randint(addr_min, addr_max))))
3307
3308
3309 class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
3310 def __init__(self, proxies=None):
3311 # Set default handlers
3312 for type in ('http', 'https'):
3313 setattr(self, '%s_open' % type,
3314 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
3315 meth(r, proxy, type))
3316 return compat_urllib_request.ProxyHandler.__init__(self, proxies)
3317
3318 def proxy_open(self, req, proxy, type):
3319 req_proxy = req.headers.get('Ytdl-request-proxy')
3320 if req_proxy is not None:
3321 proxy = req_proxy
3322 del req.headers['Ytdl-request-proxy']
3323
3324 if proxy == '__noproxy__':
3325 return None # No Proxy
3326 if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
3327 req.add_header('Ytdl-socks-proxy', proxy)
3328 # youtube-dl's http/https handlers do wrapping the socket with socks
3329 return None
3330 return compat_urllib_request.ProxyHandler.proxy_open(
3331 self, req, proxy, type)
3332
3333
3334 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
3335 # released into Public Domain
3336 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
3337
3338 def long_to_bytes(n, blocksize=0):
3339 """long_to_bytes(n:long, blocksize:int) : string
3340 Convert a long integer to a byte string.
3341
3342 If optional blocksize is given and greater than zero, pad the front of the
3343 byte string with binary zeros so that the length is a multiple of
3344 blocksize.
3345 """
3346 # after much testing, this algorithm was deemed to be the fastest
3347 s = b''
3348 n = int(n)
3349 while n > 0:
3350 s = compat_struct_pack('>I', n & 0xffffffff) + s
3351 n = n >> 32
3352 # strip off leading zeros
3353 for i in range(len(s)):
3354 if s[i] != b'\000'[0]:
3355 break
3356 else:
3357 # only happens when n == 0
3358 s = b'\000'
3359 i = 0
3360 s = s[i:]
3361 # add back some pad bytes. this could be done more efficiently w.r.t. the
3362 # de-padding being done above, but sigh...
3363 if blocksize > 0 and len(s) % blocksize:
3364 s = (blocksize - len(s) % blocksize) * b'\000' + s
3365 return s
3366
3367
3368 def bytes_to_long(s):
3369 """bytes_to_long(string) : long
3370 Convert a byte string to a long integer.
3371
3372 This is (essentially) the inverse of long_to_bytes().
3373 """
3374 acc = 0
3375 length = len(s)
3376 if length % 4:
3377 extra = (4 - length % 4)
3378 s = b'\000' * extra + s
3379 length = length + extra
3380 for i in range(0, length, 4):
3381 acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
3382 return acc
3383
3384
3385 def ohdave_rsa_encrypt(data, exponent, modulus):
3386 '''
3387 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
3388
3389 Input:
3390 data: data to encrypt, bytes-like object
3391 exponent, modulus: parameter e and N of RSA algorithm, both integer
3392 Output: hex string of encrypted data
3393
3394 Limitation: supports one block encryption only
3395 '''
3396
3397 payload = int(binascii.hexlify(data[::-1]), 16)
3398 encrypted = pow(payload, exponent, modulus)
3399 return '%x' % encrypted
3400
3401
3402 def pkcs1pad(data, length):
3403 """
3404 Padding input data with PKCS#1 scheme
3405
3406 @param {int[]} data input data
3407 @param {int} length target length
3408 @returns {int[]} padded data
3409 """
3410 if len(data) > length - 11:
3411 raise ValueError('Input data too long for PKCS#1 padding')
3412
3413 pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
3414 return [0, 2] + pseudo_random + [0] + data
3415
3416
3417 def encode_base_n(num, n, table=None):
3418 FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
3419 if not table:
3420 table = FULL_TABLE[:n]
3421
3422 if n > len(table):
3423 raise ValueError('base %d exceeds table length %d' % (n, len(table)))
3424
3425 if num == 0:
3426 return table[0]
3427
3428 ret = ''
3429 while num:
3430 ret = table[num % n] + ret
3431 num = num // n
3432 return ret
3433
3434
3435 def decode_packed_codes(code):
3436 mobj = re.search(PACKED_CODES_RE, code)
3437 obfucasted_code, base, count, symbols = mobj.groups()
3438 base = int(base)
3439 count = int(count)
3440 symbols = symbols.split('|')
3441 symbol_table = {}
3442
3443 while count:
3444 count -= 1
3445 base_n_count = encode_base_n(count, base)
3446 symbol_table[base_n_count] = symbols[count] or base_n_count
3447
3448 return re.sub(
3449 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
3450 obfucasted_code)
3451
3452
3453 def parse_m3u8_attributes(attrib):
3454 info = {}
3455 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
3456 if val.startswith('"'):
3457 val = val[1:-1]
3458 info[key] = val
3459 return info
3460
3461
3462 def urshift(val, n):
3463 return val >> n if val >= 0 else (val + 0x100000000) >> n
3464
3465
3466 # Based on png2str() written by @gdkchan and improved by @yokrysty
3467 # Originally posted at https://github.com/rg3/youtube-dl/issues/9706
3468 def decode_png(png_data):
3469 # Reference: https://www.w3.org/TR/PNG/
3470 header = png_data[8:]
3471
3472 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
3473 raise IOError('Not a valid PNG file.')
3474
3475 int_map = {1: '>B', 2: '>H', 4: '>I'}
3476 unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
3477
3478 chunks = []
3479
3480 while header:
3481 length = unpack_integer(header[:4])
3482 header = header[4:]
3483
3484 chunk_type = header[:4]
3485 header = header[4:]
3486
3487 chunk_data = header[:length]
3488 header = header[length:]
3489
3490 header = header[4:] # Skip CRC
3491
3492 chunks.append({
3493 'type': chunk_type,
3494 'length': length,
3495 'data': chunk_data
3496 })
3497
3498 ihdr = chunks[0]['data']
3499
3500 width = unpack_integer(ihdr[:4])
3501 height = unpack_integer(ihdr[4:8])
3502
3503 idat = b''
3504
3505 for chunk in chunks:
3506 if chunk['type'] == b'IDAT':
3507 idat += chunk['data']
3508
3509 if not idat:
3510 raise IOError('Unable to read PNG data.')
3511
3512 decompressed_data = bytearray(zlib.decompress(idat))
3513
3514 stride = width * 3
3515 pixels = []
3516
3517 def _get_pixel(idx):
3518 x = idx % stride
3519 y = idx // stride
3520 return pixels[y][x]
3521
3522 for y in range(height):
3523 basePos = y * (1 + stride)
3524 filter_type = decompressed_data[basePos]
3525
3526 current_row = []
3527
3528 pixels.append(current_row)
3529
3530 for x in range(stride):
3531 color = decompressed_data[1 + basePos + x]
3532 basex = y * stride + x
3533 left = 0
3534 up = 0
3535
3536 if x > 2:
3537 left = _get_pixel(basex - 3)
3538 if y > 0:
3539 up = _get_pixel(basex - stride)
3540
3541 if filter_type == 1: # Sub
3542 color = (color + left) & 0xff
3543 elif filter_type == 2: # Up
3544 color = (color + up) & 0xff
3545 elif filter_type == 3: # Average
3546 color = (color + ((left + up) >> 1)) & 0xff
3547 elif filter_type == 4: # Paeth
3548 a = left
3549 b = up
3550 c = 0
3551
3552 if x > 2 and y > 0:
3553 c = _get_pixel(basex - stride - 3)
3554
3555 p = a + b - c
3556
3557 pa = abs(p - a)
3558 pb = abs(p - b)
3559 pc = abs(p - c)
3560
3561 if pa <= pb and pa <= pc:
3562 color = (color + a) & 0xff
3563 elif pb <= pc:
3564 color = (color + b) & 0xff
3565 else:
3566 color = (color + c) & 0xff
3567
3568 current_row.append(color)
3569
3570 return width, height, pixels
3571
3572
3573 def write_xattr(path, key, value):
3574 # This mess below finds the best xattr tool for the job
3575 try:
3576 # try the pyxattr module...
3577 import xattr
3578
3579 if hasattr(xattr, 'set'): # pyxattr
3580 # Unicode arguments are not supported in python-pyxattr until
3581 # version 0.5.0
3582 # See https://github.com/rg3/youtube-dl/issues/5498
3583 pyxattr_required_version = '0.5.0'
3584 if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
3585 # TODO: fallback to CLI tools
3586 raise XAttrUnavailableError(
3587 'python-pyxattr is detected but is too old. '
3588 'youtube-dl requires %s or above while your version is %s. '
3589 'Falling back to other xattr implementations' % (
3590 pyxattr_required_version, xattr.__version__))
3591
3592 setxattr = xattr.set
3593 else: # xattr
3594 setxattr = xattr.setxattr
3595
3596 try:
3597 setxattr(path, key, value)
3598 except EnvironmentError as e:
3599 raise XAttrMetadataError(e.errno, e.strerror)
3600
3601 except ImportError:
3602 if compat_os_name == 'nt':
3603 # Write xattrs to NTFS Alternate Data Streams:
3604 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
3605 assert ':' not in key
3606 assert os.path.exists(path)
3607
3608 ads_fn = path + ':' + key
3609 try:
3610 with open(ads_fn, 'wb') as f:
3611 f.write(value)
3612 except EnvironmentError as e:
3613 raise XAttrMetadataError(e.errno, e.strerror)
3614 else:
3615 user_has_setfattr = check_executable('setfattr', ['--version'])
3616 user_has_xattr = check_executable('xattr', ['-h'])
3617
3618 if user_has_setfattr or user_has_xattr:
3619
3620 value = value.decode('utf-8')
3621 if user_has_setfattr:
3622 executable = 'setfattr'
3623 opts = ['-n', key, '-v', value]
3624 elif user_has_xattr:
3625 executable = 'xattr'
3626 opts = ['-w', key, value]
3627
3628 cmd = ([encodeFilename(executable, True)] +
3629 [encodeArgument(o) for o in opts] +
3630 [encodeFilename(path, True)])
3631
3632 try:
3633 p = subprocess.Popen(
3634 cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
3635 except EnvironmentError as e:
3636 raise XAttrMetadataError(e.errno, e.strerror)
3637 stdout, stderr = p.communicate()
3638 stderr = stderr.decode('utf-8', 'replace')
3639 if p.returncode != 0:
3640 raise XAttrMetadataError(p.returncode, stderr)
3641
3642 else:
3643 # On Unix, and can't find pyxattr, setfattr, or xattr.
3644 if sys.platform.startswith('linux'):
3645 raise XAttrUnavailableError(
3646 "Couldn't find a tool to set the xattrs. "
3647 "Install either the python 'pyxattr' or 'xattr' "
3648 "modules, or the GNU 'attr' package "
3649 "(which contains the 'setfattr' tool).")
3650 else:
3651 raise XAttrUnavailableError(
3652 "Couldn't find a tool to set the xattrs. "
3653 "Install either the python 'xattr' module, "
3654 "or the 'xattr' binary.")