]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/utils.py
debian/control: Update list of supported sites (and list one per line).
[youtubedl] / youtube_dl / utils.py
1 #!/usr/bin/env python
2 # coding: utf-8
3
4 from __future__ import unicode_literals
5
6 import base64
7 import binascii
8 import calendar
9 import codecs
10 import contextlib
11 import ctypes
12 import datetime
13 import email.utils
14 import errno
15 import functools
16 import gzip
17 import io
18 import itertools
19 import json
20 import locale
21 import math
22 import operator
23 import os
24 import pipes
25 import platform
26 import random
27 import re
28 import socket
29 import ssl
30 import subprocess
31 import sys
32 import tempfile
33 import traceback
34 import xml.etree.ElementTree
35 import zlib
36
37 from .compat import (
38 compat_HTMLParser,
39 compat_basestring,
40 compat_chr,
41 compat_etree_fromstring,
42 compat_html_entities,
43 compat_html_entities_html5,
44 compat_http_client,
45 compat_kwargs,
46 compat_os_name,
47 compat_parse_qs,
48 compat_shlex_quote,
49 compat_socket_create_connection,
50 compat_str,
51 compat_struct_pack,
52 compat_struct_unpack,
53 compat_urllib_error,
54 compat_urllib_parse,
55 compat_urllib_parse_urlencode,
56 compat_urllib_parse_urlparse,
57 compat_urllib_parse_unquote_plus,
58 compat_urllib_request,
59 compat_urlparse,
60 compat_xpath,
61 )
62
63 from .socks import (
64 ProxyType,
65 sockssocket,
66 )
67
68
def register_socks_protocols():
    """Teach urlparse that SOCKS URL schemes carry a netloc.

    In Python < 2.6.5, urlsplit() suffers from bug
    https://bugs.python.org/issue7904: URLs whose scheme is not listed in
    urlparse.uses_netloc are not handled correctly.
    """
    known_schemes = compat_urlparse.uses_netloc
    for candidate in ('socks', 'socks4', 'socks4a', 'socks5'):
        if candidate not in known_schemes:
            known_schemes.append(candidate)
76
77
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

# Default headers merged into every outgoing request
# (see YoutubeDLHandler.http_request, which only adds the missing ones).
std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


# Alternative User-Agent strings available by name.
USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


# Sentinel meaning "no default supplied" so that an explicit default of None
# can be distinguished (used by xpath_element/xpath_text/xpath_attr).
NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

# Month names keyed by language code, for parsing localized dates.
MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

# Media file extensions recognized by the extractors/downloaders.
KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
# NOTE: zip() truncates silently at the shorter sequence, so the key string
# and the chained replacement sequence must stay the same length in lockstep.
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))

# strptime() format strings tried in order when parsing free-form date text.
DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
)

# Extra formats where an ambiguous d/m pair is interpreted day-first.
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

# Extra formats where an ambiguous d/m pair is interpreted month-first (US).
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

# Captures the argument list of "packed" JavaScript, i.e. the trailing
# }('payload',radix,count,'words'.split('|') of an eval-packed blob.
PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
182
183
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        encoding = locale.getpreferredencoding()
        # Probe the codec; a broken or exotic locale raises here.
        'TEST'.encode(encoding)
    except Exception:
        encoding = 'UTF-8'

    return encoding
197
198
def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible.

    Writes to a NamedTemporaryFile next to fn, then renames it over fn so
    readers never observe a half-written file.  On failure the temporary
    file is removed and the exception re-raised.
    """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        # (fixed: these lambdas previously ignored their argument and always
        # operated on the closed-over fn)
        path_basename = lambda f: os.path.basename(f).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(f).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        # Keep the temp file alongside fn so the final rename stays on one
        # filesystem (atomic on POSIX).
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        # Best-effort cleanup of the temporary file before propagating.
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise
251
252
if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        predicate = '[@%s]' % key if val is None else "[@%s='%s']" % (key, val)
        return node.find(xpath + predicate)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] (manual scan; pre-2.7 ElementTree
        lacks attribute predicates). """
        for candidate in node.findall(compat_xpath(xpath)):
            if key in candidate.attrib and (val is None or candidate.attrib[key] == val):
                return candidate
        return None
267
268 # On python2.6 the xml.etree.ElementTree.Element methods don't support
269 # the namespace parameter
270
271
def xpath_with_ns(path, ns_map):
    """Expand 'prefix:tag' steps of path into ElementTree '{uri}tag' form,
    resolving each prefix through ns_map."""
    expanded = []
    for component in (step.split(':') for step in path.split('/')):
        if len(component) == 1:
            expanded.append(component[0])
        else:
            prefix, tag = component
            expanded.append('{%s}%s' % (ns_map[prefix], tag))
    return '/'.join(expanded)
282
283
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Find the first element matching xpath (a string, or a list of
    alternatives tried in order).  On a miss: return default if one was
    supplied, raise ExtractorError if fatal, else return None."""
    def _find(xp):
        return node.find(compat_xpath(xp))

    if isinstance(xpath, (str, compat_str)):
        n = _find(xpath)
    else:
        for xp in xpath:
            n = _find(xp)
            if n is not None:
                break

    if n is not None:
        return n
    if default is not NO_DEFAULT:
        return default
    if fatal:
        name = xpath if name is None else name
        raise ExtractorError('Could not find XML element %s' % name)
    return None
305
306
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Like xpath_element, but return the matched element's text."""
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is not None:
        return n.text
    # Element exists but carries no text.
    if default is not NO_DEFAULT:
        return default
    if fatal:
        name = xpath if name is None else name
        raise ExtractorError('Could not find XML element\'s text %s' % name)
    return None
320
321
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    """Return attribute key of the element matched by xpath, with the same
    default/fatal miss handling as xpath_element."""
    n = find_xpath_attr(node, xpath, key)
    if n is not None:
        return n.attrib[key]
    if default is not NO_DEFAULT:
        return default
    if fatal:
        name = '%s[@%s]' % (xpath, key) if name is None else name
        raise ExtractorError('Could not find XML attribute %s' % name)
    return None
333
334
def get_element_by_id(id, html):
    """Return the inner content of the tag whose id attribute equals id,
    or None when no such tag exists in the HTML document."""
    return get_element_by_attribute('id', id, html)
338
339
def get_element_by_class(class_name, html):
    """Return the content of the first tag carrying the given class in the
    passed HTML document, or None when nothing matches."""
    matches = get_elements_by_class(class_name, html)
    if not matches:
        return None
    return matches[0]
344
345
def get_element_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the first tag whose attribute matches value,
    or None; see get_elements_by_attribute for matching details."""
    matches = get_elements_by_attribute(attribute, value, html, escape_value)
    return matches[0] if matches else None
349
350
def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    # class is a whitespace-separated token list, hence the \b guards around
    # the (escaped) class name; the pattern is passed through unescaped.
    pattern = r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name)
    return get_elements_by_attribute('class', pattern, html, escape_value=False)
356
357
def get_elements_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    # value may already be a regex fragment (see get_elements_by_class),
    # in which case escape_value=False leaves it untouched.
    value = re.escape(value) if escape_value else value

    retlist = []
    for m in re.finditer(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html):
        res = m.group('content')

        # Strip one layer of surrounding quotes from the captured content.
        if res.startswith('"') or res.startswith("'"):
            res = res[1:-1]

        retlist.append(unescapeHTML(res))

    return retlist
381
382
class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""
    def __init__(self):
        # Empty until feed() encounters a start tag.
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        # Each start tag overwrites attrs; extract_attributes feeds a single
        # element, so the last (only) tag seen is the one of interest.
        self.attrs = dict(attrs)
391
392
def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    attr_parser = HTMLAttributeParser()
    attr_parser.feed(html_element)
    attr_parser.close()
    return attr_parser.attrs
413
414
def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    # Convenience for sanitizing descriptions etc.
    if html is None:
        return html

    # Literal newlines become spaces; <br> and </p><p> become newlines.
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Drop any remaining tags, then decode HTML entities.
    html = re.sub('<.*?>', '', html)
    html = unescapeHTML(html)
    return html.strip()
430
431
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            # '-' means stdout; prefer the raw buffer (Python 3) for bytes.
            if sys.platform == 'win32':
                import msvcrt
                # Keep stdout binary so newline translation doesn't corrupt data.
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        # A permission error won't be fixed by renaming; re-raise as-is.
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)
462
463
def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    parsed = email.utils.parsedate_tz(timestr)
    if parsed is None:
        # Unparseable input mirrors email.utils behaviour: no timestamp.
        return None
    return email.utils.mktime_tz(parsed)
471
472
def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        code = ord(char)
        if char == '?' or code < 32 or code == 127:
            return ''
        if char == '"':
            return '' if restricted else '\''
        if char == ':':
            return '_-' if restricted else ' -'
        if char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and code > 127:
            return '_'
        return char

    # Turn HH:MM:SS-style timestamps into HH_MM_SS before the per-char pass,
    # so the colons don't become ' -'.
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(replace_insane(c) for c in s)
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
512
513
def sanitize_path(s):
    """Sanitizes and normalizes path on Windows"""
    # Windows path rules don't apply elsewhere: pass through untouched.
    if sys.platform != 'win32':
        return s
    drive_or_unc, _ = os.path.splitdrive(s)
    if sys.version_info < (2, 7) and not drive_or_unc:
        drive_or_unc, _ = os.path.splitunc(s)
    components = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        components.pop(0)
    # Replace characters forbidden in Windows path components, and trailing
    # spaces/dots, with '#'; keep '.'/'..' untouched.
    forbidden = re.compile(r'(?:[/<>:"\|\\?\*]|[\s.]$)')
    sanitized_path = [
        part if part in ('.', '..') else forbidden.sub('#', part)
        for part in components]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    return os.path.join(*sanitized_path)
530
531
def sanitize_url(url):
    """Prepend the http: scheme to protocol-relative URLs (//host/...),
    mitigating failures caused by missing protocols."""
    if url.startswith('//'):
        return 'http:%s' % url
    return url
536
537
def sanitized_Request(url, *args, **kwargs):
    # Build a urllib Request with the URL scheme-fixed by sanitize_url();
    # extra positional/keyword arguments are forwarded unchanged.
    return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)
540
541
def orderedSet(iterable):
    """ Remove all duplicates from the input iterable, keeping first-seen
    order. """
    # Membership testing against the result list (rather than a set) keeps
    # support for unhashable elements.
    deduped = []
    for item in iterable:
        if item not in deduped:
            deduped.append(item)
    return deduped
549
550
def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    # Numeric character reference: decimal (#123) or hexadecimal (#x7B).
    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/rg3/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity
580
581
def unescapeHTML(s):
    """Decode all HTML entities in s; None passes through."""
    if s is None:
        return None
    assert type(s) == compat_str

    def _decode(mobj):
        return _htmlentity_transform(mobj.group(1))

    return re.sub(r'&([^;]+;)', _decode, s)
589
590
def get_subprocess_encoding():
    """Return the encoding used for strings exchanged with subprocesses."""
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        return preferredencoding()
    # Elsewhere the filesystem encoding is used, defaulting to UTF-8.
    encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding
601
602
def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Platforms/runtimes with a native Unicode file API get the text back
    # unchanged:
    # - Python 3 has a Unicode API
    # - Windows 2000 and up use Unicode APIs when passed '' directly
    #   (detecting NT 4 is tricky because 'major >= 4' would match the 9x
    #   series as well; besides, NT 4 is obsolete)
    # - Jython assumes filenames are Unicode strings though reported as
    #   Python 2.x compatible
    if (sys.version_info >= (3, 0) or
            sys.platform.startswith('java') or
            (not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5)):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')
625
626
def decodeFilename(b, for_subprocess=False):
    """Inverse of encodeFilename: decode bytes on Python 2, no-op otherwise."""
    if sys.version_info >= (3, 0) or not isinstance(b, bytes):
        return b
    return b.decode(get_subprocess_encoding(), 'ignore')
636
637
def encodeArgument(s):
    # Encode a command-line argument via the subprocess path
    # (encodeFilename with for_subprocess=True).
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)
645
646
def decodeArgument(b):
    # Mirror of encodeArgument: decode with the subprocess encoding.
    return decodeFilename(b, True)
649
650
def decodeOption(optval):
    """Decode a command-line option value to text; None passes through."""
    if optval is None:
        return None
    decoded = optval.decode(preferredencoding()) if isinstance(optval, bytes) else optval

    assert isinstance(decoded, compat_str)
    return decoded
659
660
def formatSeconds(secs):
    """Format a duration in seconds as clock-style text.

    Uses H:MM:SS from one hour upwards, M:SS from one minute upwards and
    plain seconds below that.  The boundaries are inclusive (previously
    '>' was used, so exactly 3600 rendered as '60:00' and exactly 60 as
    '60').
    """
    if secs >= 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs >= 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs
668
669
def make_HTTPS_handler(params, **kwargs):
    # Build a YoutubeDLHTTPSHandler, configuring certificate verification
    # from the 'nocheckcertificate' option with whatever SSL API the running
    # Python offers.
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        # Old Pythons: no SSLContext support in HTTPSHandler at all.
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
693
694
def bug_reports_message():
    # Standard "please report this" footer appended to unexpected-error
    # messages (see ExtractorError); the update hint depends on whether this
    # build can self-update.
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg
704
705
class YoutubeDLError(Exception):
    """Root of the youtube-dl exception hierarchy."""
709
710
class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        # Network trouble and unavailable videos count as "expected", so the
        # bug-report footer below is skipped for them.
        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        # Render the attached traceback as a string, or None when absent.
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))
738
739
class UnsupportedError(ExtractorError):
    """Raised when no extractor can handle the given URL."""
    def __init__(self, url):
        self.url = url
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
745
746
class RegexNotFoundError(ExtractorError):
    """Raised when an expected regular expression fails to match."""
750
751
class GeoRestrictedError(ExtractorError):
    """Raised when a video is unavailable from the client's geographic
    location due to restrictions imposed by the website.

    countries, when given, is extra country information supplied by the
    caller (stored as-is).
    """
    def __init__(self, msg, countries=None):
        super(GeoRestrictedError, self).__init__(msg, expected=True)
        self.msg = msg
        self.countries = countries
762
763
class DownloadError(YoutubeDLError):
    """Raised by FileDownloader objects that are not configured to continue
    on errors; carries the appropriate error message."""

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info
776
777
class SameFileError(YoutubeDLError):
    """Raised by FileDownloader objects when multiple downloads would end up
    in the same file on disk."""
785
786
class PostProcessingError(YoutubeDLError):
    """Raised by a PostProcessor's run() method to signal a failure in the
    postprocessing task."""

    def __init__(self, msg):
        super(PostProcessingError, self).__init__(msg)
        # Keep the raw message for callers that want it without Exception's
        # formatting.
        self.msg = msg
797
798
class MaxDownloadsReached(YoutubeDLError):
    """Signals that the --max-downloads limit has been reached."""
802
803
class UnavailableVideoError(YoutubeDLError):
    """Raised when a video is requested in a format that is not available
    for that video."""
811
812
class ContentTooShortError(YoutubeDLError):
    """Raised by FileDownloader objects when fewer bytes arrive than the
    server announced, indicating the connection was probably interrupted.

    downloaded and expected are both byte counts.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        self.downloaded = downloaded
        self.expected = expected
828
829
class XAttrMetadataError(YoutubeDLError):
    """Raised when writing extended file attributes fails.

    Classifies the error into self.reason — 'NO_SPACE', 'VALUE_TOO_LONG'
    or 'NOT_SUPPORTED' — from the errno code and/or error message text, so
    callers can give targeted advice.
    """
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        # (fixed: this previously looked for 'Disk quota excedded', a typo
        # that can never match the real OS message 'Disk quota exceeded')
        if (self.code in (errno.ENOSPC, errno.EDQUOT) or
                'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'
844
845
class XAttrUnavailableError(YoutubeDLError):
    """Raised when extended file attributes cannot be used at all."""
848
849
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Factory used via functools.partial by YoutubeDLHandler/YoutubeDLHTTPSHandler:
    # builds an HTTP(S) connection, honouring the 'source_address' option.
    #
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/rg3/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs[b'strict'] = True
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')
    if source_address is not None:
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            # No source_address support: replace connect() with a version
            # that binds the socket to the requested address itself.
            def _hc_connect(self, *args, **kwargs):
                sock = compat_socket_create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc
875
876
def handle_youtubedl_headers(headers):
    """Strip internal Youtubedl-* pseudo-headers, applying their meaning.

    'Youtubedl-no-compression' removes any Accept-Encoding header so the
    server sends an uncompressed response.  When no pseudo-header is
    present, the original dict is returned unchanged (same object).
    """
    if 'Youtubedl-no-compression' not in headers:
        return headers

    filtered = dict(
        (key, value) for key, value in headers.items()
        if key.lower() != 'accept-encoding')
    del filtered['Youtubedl-no-compression']
    return filtered
885
886
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        # Per-request SOCKS proxy, smuggled in via a pseudo-header that is
        # removed before the request goes on the wire.
        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        # Try raw deflate first, then zlib-wrapped deflate (some servers
        # send either under the same Content-encoding).
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        # Build an addinfourl response object across urllib versions that
        # may or may not accept the code argument.
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                # Retry decompression with up to 1024 trailing bytes trimmed.
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    # urllib dispatches https traffic through the same logic.
    https_request = http_request
    https_response = http_response
1016
1017
def make_socks_conn_class(base_class, socks_proxy):
    """Derive a connection class from base_class (HTTPConnection or
    HTTPSConnection) that tunnels through the SOCKS proxy described by the
    socks_proxy URL (socks/socks4/socks4a/socks5 scheme)."""
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A
    # NOTE(review): any other scheme leaves socks_type unbound and produces a
    # NameError when proxy_args is built below; callers appear to pass only
    # the schemes above — confirm before relying on this as validation.

    def unquote_if_non_empty(s):
        # Credentials arrive percent-encoded in the proxy URL.
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            # For HTTPS, wrap the established tunnel socket in TLS.
            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection
1059
1060
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    """HTTPS handler that creates connections through a configurable
    connection class and, when the Ytdl-socks-proxy pseudo-header is
    present, through a SOCKS proxy."""

    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        conn_class = self._https_conn_class
        kwargs = {}

        # Forward SSL context / hostname checking where this Python
        # version's HTTPSHandler exposes them.
        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            # Swap in a SOCKS-tunnelling connection class and drop the
            # internal pseudo-header so it is never sent on the wire.
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(
            functools.partial(_create_http_connection, self, conn_class, True),
            req, **kwargs)
1084
1085
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    """Cookie processor identical to the stock HTTPCookieProcessor, with
    the HTTP hooks shared by HTTPS traffic as well."""

    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/rg3/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # NOTE: the workaround below is intentionally disabled; kept for reference.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response
1108
1109
def extract_timezone(date_str):
    """Split a trailing UTC-offset suffix off *date_str*.

    Returns a (timedelta, remainder) pair: the offset as a
    datetime.timedelta (zero when no offset or a literal 'Z' is present)
    and the date string with any recognized suffix removed.
    """
    m = re.search(
        r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
        date_str)
    if m is None:
        return datetime.timedelta(), date_str

    # Drop the matched suffix from the date part.
    date_str = date_str[:-len(m.group('tz'))]

    sign = m.group('sign')
    if not sign:
        # A bare 'Z' suffix means UTC.
        return datetime.timedelta(), date_str

    direction = 1 if sign == '+' else -1
    offset = datetime.timedelta(
        hours=direction * int(m.group('hours')),
        minutes=direction * int(m.group('minutes')))
    return offset, date_str
1126
1127
def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    # strptime cannot digest fractional seconds here; drop them.
    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        # Unparseable input yields None, matching historical behavior.
        return None
1145
1146
def date_formats(day_first=True):
    """Return the strptime format list matching the expected day/month order."""
    if day_first:
        return DATE_FORMATS_DAY_FIRST
    return DATE_FORMATS_MONTH_FIRST
1149
1150
def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None

    # Normalize: commas become spaces, AM/PM markers and trailing
    # timezone names are stripped, numeric offsets are removed.
    date_str = date_str.replace(',', ' ')
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    upload_date = None
    # Intentionally no break: a later-listed format that also matches wins.
    for fmt in date_formats(day_first):
        try:
            upload_date = datetime.datetime.strptime(date_str, fmt).strftime('%Y%m%d')
        except ValueError:
            pass

    if upload_date is None:
        # Fall back to RFC 2822 style parsing.
        parsed = email.utils.parsedate_tz(date_str)
        if parsed:
            try:
                upload_date = datetime.datetime(*parsed[:6]).strftime('%Y%m%d')
            except ValueError:
                pass

    if upload_date is not None:
        return compat_str(upload_date)
1177
1178
def unified_timestamp(date_str, day_first=True):
    """Parse a free-form date string into a UNIX timestamp, or None."""
    if date_str is None:
        return None

    date_str = date_str.replace(',', ' ')

    # Remember a PM marker before it is stripped: it shifts the clock 12h.
    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    for fmt in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, fmt) - timezone + datetime.timedelta(hours=pm_delta)
        except ValueError:
            continue
        return calendar.timegm(dt.timetuple())

    # RFC 2822 fallback; parsedate_tz already accounts for the offset.
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600
1200
1201
def determine_ext(url, default_ext='unknown_video'):
    """Guess a file extension from *url*, falling back to *default_ext*."""
    if url is None:
        return default_ext
    # Whatever follows the last '.' before any query string.
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    stripped = guess.rstrip('/')
    if stripped in KNOWN_EXTENSIONS:
        return stripped
    return default_ext
1213
1214
def subtitles_filename(filename, sub_lang, sub_format):
    """Build the subtitle file name: base name + language + subtitle format."""
    base = filename.rsplit('.', 1)[0]
    return '.'.join((base, sub_lang, sub_format))
1217
1218
def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is None:
        # Plain absolute date.
        return datetime.datetime.strptime(date_str, '%Y%m%d').date()

    amount = int(match.group('time'))
    if match.group('sign') == '-':
        amount = -amount
    unit = match.group('unit')
    # timedelta has no month/year units; approximate them in days.
    if unit == 'month':
        unit, amount = 'day', amount * 30
    elif unit == 'year':
        unit, amount = 'day', amount * 365
    return today + datetime.timedelta(**{unit + 's': amount})
1246
1247
def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is None:
        # Anything that is not a compact date passes through unchanged.
        return date_str
    return '%s-%s-%s' % match.groups()
1256
1257
class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        # Open ends default to the extreme representable dates.
        self.start = date_from_str(start) if start is not None else datetime.datetime.min.date()
        self.end = date_from_str(end) if end is not None else datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
1287
1288
def platform_name():
    """ Returns the platform name as a compat_str """
    name = platform.platform()
    if isinstance(name, bytes):
        # Python 2 may return bytes here; decode with the locale encoding.
        name = name.decode(preferredencoding())
    assert isinstance(name, compat_str)
    return name
1297
1298
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    # Map Python file descriptors to Win32 standard-handle IDs
    # (STD_OUTPUT_HANDLE == -11, STD_ERROR_HANDLE == -12).
    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b'GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    # WriteConsoleW writes UTF-16 text directly to the console, bypassing
    # the (lossy) ANSI codepage encoding of regular writes.
    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b'GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        # True when the handle is not a real console (e.g. redirected to a
        # file or pipe), in which case WriteConsoleW must not be used.
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        # Index of the first character outside the Basic Multilingual Plane
        # (encoded as a surrogate pair in UTF-16), or len(s) if none.
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        # Write at most 1024 BMP characters at a time; a leading non-BMP
        # character is written on its own as a 2-unit surrogate pair.
        count = min(next_nonbmp_pos(s), 1024)

        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True
1372
1373
def write_string(s, out=None, encoding=None):
    """Write text *s* to *out* (stderr by default), handling Windows
    consoles, byte-oriented streams and Python 2/3 differences."""
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        # Prefer WriteConsoleW so non-ANSI characters survive on Windows.
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        out.write(s.encode(encoding or preferredencoding(), 'ignore'))
    elif hasattr(out, 'buffer'):
        # Text stream with an underlying binary buffer: encode explicitly.
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        out.buffer.write(s.encode(enc, 'ignore'))
    else:
        out.write(s)
    out.flush()
1394
1395
def bytes_to_intlist(bs):
    """Convert a byte string into a list of integer byte values."""
    if not bs:
        return []
    # Python 3 indexing of bytes yields ints; Python 2 yields 1-char strings.
    return list(bs) if isinstance(bs[0], int) else [ord(c) for c in bs]
1403
1404
def intlist_to_bytes(xs):
    """Pack a list of integer byte values back into a byte string."""
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)
1409
1410
# Cross-platform file locking: define _lock_file/_unlock_file with the
# Win32 API on Windows, fcntl elsewhere, and failing stubs when neither
# is available (e.g. Jython).
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        # ctypes mirror of the Win32 OVERLAPPED structure required by
        # LockFileEx/UnlockFileEx.
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    # Lock the maximum possible byte range so the whole file is covered.
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        # Keep the OVERLAPPED alive on the file object; UnlockFileEx needs
        # the same pointer later.
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        # 0x2 == LOCKFILE_EXCLUSIVE_LOCK; 0x0 requests a shared lock.
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    # Some platforms, such as Jython, is missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)
1484
1485
class locked_file(object):
    """Context manager wrapping a file with an advisory lock.

    Read mode takes a shared lock; 'a'/'w' take an exclusive one."""

    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        try:
            _lock_file(self.f, self.mode != 'r')
        except IOError:
            # Never leak the descriptor when locking fails.
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)
1515
1516
def get_filesystem_encoding():
    """Name of the filesystem encoding, defaulting to UTF-8 when unknown."""
    encoding = sys.getfilesystemencoding()
    if encoding is None:
        return 'utf-8'
    return encoding
1520
1521
def shell_quote(args):
    """Join *args* into a single shell-safe command line string."""
    encoding = get_filesystem_encoding()

    def to_text(a):
        # We may get a filename encoded with 'encodeFilename'
        return a.decode(encoding) if isinstance(a, bytes) else a

    return ' '.join(pipes.quote(to_text(a)) for a in args)
1531
1532
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    # Merge with any data already smuggled into the URL.
    url, existing_data = unsmuggle_url(url, {})
    data.update(existing_data)
    sdata = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return '%s#%s' % (url, sdata)
1541
1542
def unsmuggle_url(smug_url, default=None):
    """Inverse of smuggle_url(): return (url, data), or (url, default)
    when nothing was smuggled."""
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    payload = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    return url, json.loads(payload)
1550
1551
def format_bytes(bytes):
    """Human-readable binary size (e.g. '1.00KiB') for a byte count."""
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    # log(0) is undefined; zero bytes map straight to the 'B' bucket.
    exponent = 0 if bytes == 0.0 else int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)
1564
1565
def lookup_unit_table(unit_table, s):
    """Parse '<number><unit>' using *unit_table* multipliers; None if no match."""
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if m is None:
        return None
    # A comma is accepted as the decimal separator.
    num = float(m.group('num').replace(',', '.'))
    return int(num * unit_table[m.group('unit')])
1575
1576
def parse_filesize(s):
    """Parse a human-readable file size ('1.2MiB', '500 kB', ...) into a
    byte count (int), or None when *s* is None or unrecognized."""
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1,
        'b': 1,
        'bytes': 1,
        'KiB': 1024,
        'KB': 1000,
        'kB': 1024,
        'Kb': 1000,
        'kb': 1000,
        'kilobytes': 1000,
        'kibibytes': 1024,
        'MiB': 1024 ** 2,
        'MB': 1000 ** 2,
        'mB': 1024 ** 2,
        'Mb': 1000 ** 2,
        'mb': 1000 ** 2,
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'GiB': 1024 ** 3,
        'GB': 1000 ** 3,
        'gB': 1024 ** 3,
        'Gb': 1000 ** 3,
        'gb': 1000 ** 3,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'TiB': 1024 ** 4,
        'TB': 1000 ** 4,
        'tB': 1024 ** 4,
        'Tb': 1000 ** 4,
        'tb': 1000 ** 4,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'PiB': 1024 ** 5,
        'PB': 1000 ** 5,
        'pB': 1024 ** 5,
        'Pb': 1000 ** 5,
        'pb': 1000 ** 5,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'EiB': 1024 ** 6,
        'EB': 1000 ** 6,
        'eB': 1024 ** 6,
        'Eb': 1000 ** 6,
        'eb': 1000 ** 6,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'ZiB': 1024 ** 7,
        'ZB': 1000 ** 7,
        'zB': 1024 ** 7,
        'Zb': 1000 ** 7,
        'zb': 1000 ** 7,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'YiB': 1024 ** 8,
        'YB': 1000 ** 8,
        'yB': 1024 ** 8,
        'Yb': 1000 ** 8,
        'yb': 1000 ** 8,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)
1646
1647
def parse_count(s):
    """Parse view-count style strings like '1.2M' into an int (or None)."""
    if s is None:
        return None

    s = s.strip()

    # Purely numeric (possibly with separators): no unit suffix involved.
    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    _UNIT_TABLE = {
        'k': 1000,
        'K': 1000,
        'm': 1000 ** 2,
        'M': 1000 ** 2,
        'kk': 1000 ** 2,
        'KK': 1000 ** 2,
    }

    return lookup_unit_table(_UNIT_TABLE, s)
1667
1668
def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    # Unknown languages fall back to the English month names.
    month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
    if name not in month_names:
        return None
    return month_names.index(name) + 1
1678
1679
def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
    abbreviations """

    abbrevs = [s[:3] for s in ENGLISH_MONTH_NAMES]
    try:
        return abbrevs.index(abbrev) + 1
    except ValueError:
        return None
1688
1689
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    # Leave already-escaped entities and numeric character refs untouched.
    pattern = r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)'
    return re.sub(pattern, '&amp;', xml_str)
1696
1697
def setproctitle(title):
    """Set the process name shown by tools such as ps (Linux libc only)."""
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        # Not a glibc system: silently do nothing.
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only expects
        # a bytestring, but since unicode_literals turns
        # every string into a unicode string, it fails.
        return
    title_bytes = title.encode('utf-8')
    # NOTE(review): buffer is sized without a trailing NUL; the kernel
    # truncates names anyway — confirm if long titles matter.
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        # 15 == PR_SET_NAME (see prctl(2)).
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this
1722
1723
def remove_start(s, start):
    """Strip the *start* prefix from *s* when present (None passes through)."""
    if s is None or not s.startswith(start):
        return s
    return s[len(start):]
1726
1727
def remove_end(s, end):
    """Strip the *end* suffix from *s* when present (None passes through).

    Guards against an empty *end*: the original ``s[:-len(end)]`` evaluates
    to ``s[:0]`` when ``len(end) == 0`` and wrongly returned ``''``.
    """
    if s is None or not end or not s.endswith(end):
        return s
    return s[:-len(end)]
1730
1731
def remove_quotes(s):
    """Drop one matching pair of surrounding single or double quotes."""
    if s is None or len(s) < 2:
        return s
    if s[0] == s[-1] and s[0] in ('"', "'"):
        return s[1:-1]
    return s
1739
1740
def url_basename(url):
    """Last path component of *url* (ignoring query/fragment and slashes)."""
    segments = compat_urlparse.urlparse(url).path.strip('/').split('/')
    return segments[-1]
1744
1745
def base_url(url):
    """Everything up to and including the last '/' before query/fragment.

    NOTE: raises AttributeError when *url* contains no path slash."""
    m = re.match(r'https?://[^?#&]+/', url)
    return m.group()
1748
1749
def urljoin(base, path):
    """Join *base* and *path* like urllib would; return None when either
    part is unusable (non-text, empty path, non-HTTP base)."""
    def as_text(s):
        return s.decode('utf-8') if isinstance(s, bytes) else s

    path = as_text(path)
    if not isinstance(path, compat_str) or not path:
        return None
    if re.match(r'^(?:https?:)?//', path):
        # Already absolute (or protocol-relative): nothing to join.
        return path
    base = as_text(base)
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return compat_urlparse.urljoin(base, path)
1763
1764
class HEADRequest(compat_urllib_request.Request):
    """Request whose HTTP method is forced to HEAD."""
    def get_method(self):
        return 'HEAD'
1768
1769
class PUTRequest(compat_urllib_request.Request):
    """Request whose HTTP method is forced to PUT."""
    def get_method(self):
        return 'PUT'
1773
1774
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    """Coerce *v* to an int scaled by invscale/scale, or return *default*.

    get_attr optionally dereferences an attribute of *v* first; '' and
    None map to *default*; non-numeric input yields *default* instead of
    raising.
    """
    if get_attr:
        if v is not None:
            v = getattr(v, get_attr, None)
    if v == '':
        v = None
    if v is None:
        return default
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError):
        # TypeError covers non-castable objects (e.g. lists), which the
        # original bare ValueError handler let propagate.
        return default
1787
1788
def str_or_none(v, default=None):
    """compat_str(v), or *default* when *v* is None."""
    if v is None:
        return default
    return compat_str(v)
1791
1792
def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if int_str is None:
        return None
    # Tolerate thousands separators ('.'/',') and an explicit plus sign.
    return int(re.sub(r'[,\.\+]', '', int_str))
1799
1800
def float_or_none(v, scale=1, invscale=1, default=None):
    """Coerce *v* to a float scaled by invscale/scale, or return *default*.

    Non-numeric input yields *default* instead of raising, matching the
    hardened behavior of int_or_none().
    """
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except (ValueError, TypeError):
        # TypeError covers non-castable objects (e.g. lists), which the
        # original bare ValueError handler let propagate.
        return default
1808
1809
def strip_or_none(v):
    """v.strip(), passing None through unchanged."""
    if v is None:
        return None
    return v.strip()
1812
1813
def parse_duration(s):
    """Parse a duration string ('1:23:45', '2h 30m', 'PT1H2M3S', '1.5 hours')
    into a number of seconds, or None when unrecognized."""
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    days, hours, mins, secs, ms = [None] * 5
    # First try clock-style [[[DD:]HH:]MM:]SS[.ms] notation.
    m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
    if m:
        days, hours, mins, secs, ms = m.groups()
    else:
        # Next, unit-suffixed notation, optionally with an ISO 8601 'PT' prefix.
        m = re.match(
            r'''(?ix)(?:P?T)?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
                )?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            # Finally, fractional '1.5 hours' / '90 mins' forms.
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None

    # Sum up whichever components were captured.
    duration = 0
    if secs:
        duration += float(secs)
    if mins:
        duration += float(mins) * 60
    if hours:
        duration += float(hours) * 60 * 60
    if days:
        duration += float(days) * 24 * 60 * 60
    if ms:
        duration += float(ms)
    return duration
1860
1861
def prepend_extension(filename, ext, expected_real_ext=None):
    """Insert *ext* before the real extension of *filename*.

    When expected_real_ext is given but does not match the actual
    extension, *ext* is appended after the whole name instead."""
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        return '{0}.{1}'.format(filename, ext)
    return '{0}.{1}{2}'.format(name, ext, real_ext)
1868
1869
def replace_extension(filename, ext, expected_real_ext=None):
    """Swap filename's extension for *ext*; when expected_real_ext is given
    and differs from the current one, append instead of replacing."""
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        name = filename
    return '{0}.{1}'.format(name, ext)
1875
1876
def check_executable(exe, args=None):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    # args defaults to None instead of a mutable [] (shared-default pitfall).
    try:
        subprocess.Popen(
            [exe] + (args or []),
            stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        # Binary not found (or not executable).
        return False
    return exe
1885
1886
def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if youtube-dl is run in the background.
        # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
        process = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = process.communicate()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)
1904
1905
def detect_exe_version(output, version_re=None, unrecognized='present'):
    """Extract a version string from *output* using *version_re* (or a
    generic default pattern); return *unrecognized* when nothing matches."""
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    return m.group(1) if m else unrecognized
1915
1916
class PagedList(object):
    """Abstract base for lazily-paged sequences; subclasses provide
    getslice(start, end)."""
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())
1921
1922
class OnDemandPagedList(PagedList):
    """PagedList that fetches pages lazily via pagefunc(pagenum), with an
    optional page cache."""

    def __init__(self, pagefunc, pagesize, use_cache=False):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._use_cache = use_cache
        if use_cache:
            self._cache = {}

    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = firstid + self._pagesize
            if start >= nextfirstid:
                continue

            page_results = self._get_page(pagenum)

            # Trim the page to the requested [start, end) window.
            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)
            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res

    def _get_page(self, pagenum):
        # Serve from the cache when enabled, fetching (and caching) on a miss.
        if self._use_cache:
            cached = self._cache.get(pagenum)
            if cached is not None:
                return cached
        page = list(self._pagefunc(pagenum))
        if self._use_cache:
            self._cache[pagenum] = page
        return page
1973
1974
class InAdvancePagedList(PagedList):
    """PagedList whose total page count is known up front."""

    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
            if skip_elems:
                # Drop leading items on the first fetched page only.
                page = page[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                else:
                    # Final page of the slice: truncate and stop.
                    res.extend(page[:only_more])
                    break
            res.extend(page)
        return res
2002
2003
def uppercase_escape(s):
    """Decode literal '\\UXXXXXXXX' escape sequences embedded in *s*."""
    decoder = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: decoder(m.group(0))[0],
        s)
2010
2011
def lowercase_escape(s):
    """Decode literal '\\uXXXX' escape sequences embedded in *s*."""
    decoder = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: decoder(m.group(0))[0],
        s)
2018
2019
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    # Python 2's quote() expects bytes; encode unicode input first.
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    # The safe-set keeps every RFC 3986 reserved/sub-delim character intact.
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
2025
2026
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    parts = compat_urllib_parse_urlparse(url)
    return parts._replace(
        # IDNA-encode the host; percent-escape every other component.
        netloc=parts.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(parts.path),
        params=escape_rfc3986(parts.params),
        query=escape_rfc3986(parts.query),
        fragment=escape_rfc3986(parts.fragment)
    ).geturl()
2037
2038
def read_batch_urls(batch_fd):
    """Read a batch-file object and return its list of cleaned-up URLs."""
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        # Strip a UTF-8 BOM that survived decoding.
        BOM_UTF8 = '\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        url = url.strip()
        # Lines starting with these characters are comments.
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]
2053
2054
def urlencode_postdata(*args, **kargs):
    """URL-encode POST data and return it as ASCII bytes."""
    encoded = compat_urllib_parse_urlencode(*args, **kargs)
    return encoded.encode('ascii')
2057
2058
def update_url_query(url, query):
    """Merge *query* parameters into *url*'s existing query string."""
    if not query:
        return url
    parsed_url = compat_urlparse.urlparse(url)
    qs = compat_parse_qs(parsed_url.query)
    qs.update(query)
    updated = parsed_url._replace(
        query=compat_urllib_parse_urlencode(qs, True))
    return compat_urlparse.urlunparse(updated)
2067
2068
def update_Request(req, url=None, data=None, headers={}, query={}):
    """Clone *req* with updated url/data/headers/query, preserving its
    HTTP method and (if set) timeout."""
    req_headers = req.headers.copy()
    req_headers.update(headers)
    req_data = data or req.data
    req_url = update_url_query(url or req.get_full_url(), query)
    # Preserve non-GET/POST methods via the dedicated Request subclasses.
    req_type = {
        'HEAD': HEADRequest,
        'PUT': PUTRequest,
    }.get(req.get_method(), compat_urllib_request.Request)
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    # A timeout attribute is not part of the Request API; copy it only
    # when present.
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req
2087
2088
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    """Look up one key, or the first usable key of several, in *d*.

    With skip_false_values (the default) falsy values count as missing."""
    if not isinstance(key_or_keys, (list, tuple)):
        return d.get(key_or_keys, default)
    for key in key_or_keys:
        if key not in d or d[key] is None:
            continue
        if skip_false_values and not d[key]:
            continue
        return d[key]
    return default
2097
2098
def try_get(src, getter, expected_type=None):
    """Apply *getter* to *src*; return None on common lookup errors or when
    the result is not an instance of *expected_type*."""
    try:
        result = getter(src)
    except (AttributeError, KeyError, TypeError, IndexError):
        return None
    if expected_type is not None and not isinstance(result, expected_type):
        return None
    return result
2107
2108
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    """Return *string* as a text (compat_str) object, decoding byte strings
    with *encoding* when necessary."""
    if isinstance(string, compat_str):
        return string
    return compat_str(string, encoding, errors)
2111
2112
# MPAA movie ratings mapped to the minimum viewer age they imply.
US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}
2120
2121
# US TV Parental Guidelines ratings mapped to the minimum viewer age.
TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}
2130
2131
def parse_age_limit(s):
    """Parse an age limit from an int, an 'NN+' style string, or a US
    movie/TV rating; return the age as an int, or None if unrecognized."""
    if type(s) == int:
        # Only plausible ages are accepted; anything else is unknown.
        return s if 0 <= s <= 21 else None
    if not isinstance(s, compat_basestring):
        return None
    mobj = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if mobj:
        return int(mobj.group('age'))
    # Fall back to the rating tables; keys are never mapped to None there,
    # so chaining the lookups is equivalent to checking membership first.
    return US_RATINGS.get(s, TV_PARENTAL_GUIDELINES.get(s))
2143
2144
def strip_jsonp(code):
    """Strip a JSONP wrapper (``callback(...);`` plus optional trailing
    line comments), returning the bare JSON payload."""
    JSONP_WRAPPER_RE = r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$'
    return re.sub(JSONP_WRAPPER_RE, r'\1', code)
2148
2149
def js_to_json(code):
    """Convert a JavaScript object/value literal into valid JSON text.

    Handles single-quoted strings, bare identifiers used as keys,
    hex/octal integer literals, comments and trailing commas.
    """
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    # (regex, base) pairs for integer forms JSON does not allow:
    # hexadecimal 0x... and legacy octal 0...; an optional trailing ':'
    # means the literal was used as an object key.
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v.startswith('/*') or v.startswith('//') or v == ',':
            # Comments and trailing commas are dropped entirely.
            return ""

        if v[0] in ("'", '"'):
            # Re-quote string contents: escape double quotes, unescape
            # escaped single quotes, remove line continuations and widen
            # \xNN escapes to \u00NN so they are legal JSON.
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])

        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
            if im:
                i = int(im.group(1), base)
                # Keys must be strings in JSON; bare values stay numeric.
                return '"%d":' % i if v.endswith(':') else '%d' % i

        # Bare identifier: quote it so it becomes a valid JSON string/key.
        return '"%s"' % v

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        [a-zA-Z_][.a-zA-Z_0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
2189
2190
def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def rank(qid):
        # Position in the preference list is the quality; unknown ids
        # sort below everything.
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return rank
2199
2200
# Default output filename template: "<title>-<id>.<ext>".
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
2202
2203
def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) <= length:
        return s
    # Truncate so that the result, including the ellipses, fits *length*.
    return s[:length - len(ELLIPSES)] + ELLIPSES
2212
2213
def version_tuple(v):
    """Split a dotted/dashed version string into a tuple of ints."""
    return tuple(map(int, re.split(r'[-.]', v)))
2216
2217
def is_outdated_version(version, limit, assume_new=True):
    """Return True if *version* is older than *limit*; empty or unparsable
    versions are judged according to *assume_new*."""
    fallback = not assume_new
    if not version:
        return fallback
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return fallback
2225
2226
def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter

    # Updateable only when running from a zip bundle or a frozen binary.
    running_from_zip = isinstance(globals().get('__loader__'), zipimporter)
    return running_from_zip or hasattr(sys, 'frozen')
2232
2233
def args_to_str(args):
    """Build a short, shell-quoted string representation of a command."""
    return ' '.join(map(compat_shlex_quote, args))
2237
2238
def error_to_compat_str(err):
    """Return the message of *err* as a text string.

    On Python 2, str(exception) yields bytes in the locale encoding, so it
    must be decoded with the preferred encoding rather than ASCII.
    """
    message = str(err)
    if sys.version_info[0] < 3:
        message = message.decode(preferredencoding())
    return message
2246
2247
def mimetype2ext(mt):
    """Map a MIME type to a conventional file extension.

    Returns None for None input; subtypes with no known mapping are
    returned unchanged (lower-cased, parameters stripped).
    """
    if mt is None:
        return None

    FULL_TYPE_MAP = {
        'audio/mp4': 'm4a',
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
    }
    ext = FULL_TYPE_MAP.get(mt)
    if ext is not None:
        return ext

    # Otherwise decide on the subtype alone, ignoring ';'-parameters.
    subtype = mt.rpartition('/')[2]
    subtype = subtype.split(';')[0].strip().lower()

    SUBTYPE_MAP = {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'srt': 'srt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'vtt': 'vtt',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m': 'f4m',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
    }
    return SUBTYPE_MAP.get(subtype, subtype)
2284
2285
def parse_codecs(codecs_str):
    """Split an RFC 6381 codecs string into {'vcodec': ..., 'acodec': ...}.

    Returns {} for empty input or when the codecs cannot be classified.
    """
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    codec_list = [c.strip() for c in codecs_str.strip().strip(',').split(',') if c.strip()]
    vcodec = acodec = None
    for full_codec in codec_list:
        family = full_codec.split('.')[0]
        if family in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'):
            # Keep only the first video codec seen.
            if not vcodec:
                vcodec = full_codec
        elif family in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'):
            if not acodec:
                acodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        if len(codec_list) == 2:
            return {
                'vcodec': vcodec,
                'acodec': acodec,
            }
        elif len(codec_list) == 1:
            # NOTE(review): vcodec is necessarily None here, so acodec ends
            # up None as well — looks suspicious, but preserved as-is.
            return {
                'vcodec': 'none',
                'acodec': vcodec,
            }
    else:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    return {}
2320
2321
def urlhandle_detect_ext(url_handle):
    """Guess the file extension for a response, preferring the filename in
    Content-Disposition over the Content-Type MIME mapping."""
    headers = url_handle.headers

    content_disposition = headers.get('Content-Disposition')
    if content_disposition:
        mobj = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', content_disposition)
        if mobj:
            ext = determine_ext(mobj.group('filename'), default_ext=None)
            if ext:
                return ext

    return mimetype2ext(headers.get('Content-Type'))
2334
2335
def encode_data_uri(data, mime_type):
    """Build an RFC 2397 base64 ``data:`` URI for *data* with *mime_type*."""
    payload = base64.b64encode(data).decode('ascii')
    return 'data:%s;base64,%s' % (mime_type, payload)
2338
2339
def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    # No user limit set, or content available for everyone: never block.
    if age_limit is None or content_limit is None:
        return False
    return age_limit < content_limit
2348
2349
def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """
    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    decoded = None
    for bom, encoding in BOMS:
        if first_bytes.startswith(bom):
            decoded = first_bytes[len(bom):].decode(encoding, 'replace')
            break
    if decoded is None:
        # No BOM: assume UTF-8.
        decoded = first_bytes.decode('utf-8', 'replace')

    # HTML starts with optional whitespace followed by '<'.
    return re.match(r'^\s*<', decoded)
2368
2369
def determine_protocol(info_dict):
    """Derive the download protocol for *info_dict*: an explicit 'protocol'
    entry wins, then well-known URL prefixes and extensions, then the raw
    URL scheme."""
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = info_dict['url']
    for prefix in ('rtmp', 'mms', 'rtsp'):
        if url.startswith(prefix):
            return prefix

    ext = determine_ext(url)
    if ext in ('m3u8', 'f4m'):
        return ext

    return compat_urllib_parse_urlparse(url).scheme
2390
2391
def render_table(header_row, data):
    """ Render a list of rows, each as a list of values """
    table = [header_row] + data
    widths = [max(len(compat_str(cell)) for cell in column) for column in zip(*table)]
    # Left-justify every column except the last, which stays ragged.
    row_format = ' '.join('%-' + compat_str(width + 1) + 's' for width in widths[:-1]) + '%s'
    return '\n'.join(row_format % tuple(row) for row in table)
2398
2399
def _match_one(filter_part, dct):
    """Evaluate a single filter expression against dict *dct*.

    Supports binary comparisons ('duration > 60', 'uploader = "x"'), an
    optional '?' suffix making None values match, and unary presence tests
    ('key' / '!key'). Raises ValueError on unparsable filters or string
    comparisons with operators other than '=' / '!='.
    """
    COMPARISON_OPERATORS = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '=': operator.eq,
        '!=': operator.ne,
    }
    # key <op>[?] value, where value is a number (with optional SI/binary
    # size suffix), a quoted string, or a bare string.
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
            (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = COMPARISON_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        if (m.group('quotedstrval') is not None or
            m.group('strval') is not None or
            # If the original field is a string and matching comparisonvalue is
            # a number we should respect the origin of the original field
            # and process comparison value as a string (see
            # https://github.com/rg3/youtube-dl/issues/11082).
            actual_value is not None and m.group('intval') is not None and
                isinstance(actual_value, compat_str)):
            if m.group('op') not in ('=', '!='):
                raise ValueError(
                    'Operator %s does not support string values!' % m.group('op'))
            comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
            quote = m.group('quote')
            if quote is not None:
                # Unescape quotes of the same kind inside the quoted value.
                comparison_value = comparison_value.replace(r'\%s' % quote, quote)
        else:
            try:
                comparison_value = int(m.group('intval'))
            except ValueError:
                # Not a plain integer: try as a filesize, first verbatim,
                # then assuming a bare 'B' (bytes) suffix.
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        if actual_value is None:
            # Missing fields only match when the '?' suffix was given.
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)

    UNARY_OPERATORS = {
        '': lambda v: v is not None,
        '!': lambda v: v is None,
    }
    # Bare '[!]key' tests for presence/absence of the field.
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)
2468
2469
def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
    # '&' combines sub-filters conjunctively.
    parts = filter_str.split('&')
    return all(_match_one(part, dct) for part in parts)
2475
2476
def match_filter_func(filter_str):
    """Build a --match-filter callback: it returns None when the video
    passes *filter_str*, else a human-readable skip message."""
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        video_title = info_dict.get('title', info_dict.get('id', 'video'))
        return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func
2485
2486
def parse_dfxp_time_expr(time_expr):
    """Parse a TTML/DFXP time expression ('12.3s' or 'HH:MM:SS[.f]') into
    seconds; return None for empty or unrecognized expressions."""
    if not time_expr:
        return None

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        hours, minutes = int(mobj.group(1)), int(mobj.group(2))
        # Some documents use ':' instead of '.' before fractional seconds.
        seconds = float(mobj.group(3).replace(':', '.'))
        return 3600 * hours + 60 * minutes + seconds
    return None
2498
2499
def srt_subtitles_timecode(seconds):
    """Format a seconds value as an SRT timecode ``HH:MM:SS,mmm``."""
    hours = seconds / 3600
    minutes = (seconds % 3600) / 60
    secs = seconds % 60
    millis = (seconds % 1) * 1000
    return '%02d:%02d:%02d,%03d' % (hours, minutes, secs, millis)
2502
2503
def dfxp2srt(dfxp_data):
    """Convert a DFXP/TTML subtitle document (text) into SRT format.

    Raises ValueError when the document contains no <p> cues.
    """
    # Match <p>/<br> elements in any of the TTML namespace flavours.
    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
        'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1',
    })

    class TTMLPElementParser(object):
        # Accumulates the plain-text content of one <p> element,
        # turning <br> into newlines.
        out = ''

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
                self.out += '\n'

        def end(self, tag):
            pass

        def data(self, data):
            self.out += data

        def close(self):
            return self.out.strip()

    def parse_node(node):
        # Serialize the node and re-feed it through the event parser above
        # to extract its text with <br> handling.
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        # Cues without a begin time are skipped; a missing end time is
        # derived from the duration when available.
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)
2557
2558
def cli_option(params, command_option, param):
    """Render an option taking a value: ['--opt', value] when *param* is a
    truthy entry of *params*, [] otherwise."""
    value = params.get(param)
    if value:
        value = compat_str(value)
    return [command_option, value] if value is not None else []
2564
2565
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    """Render boolean *param* as a CLI option: two argv items, or one item
    joined by *separator* when given."""
    flag = params.get(param)
    assert isinstance(flag, bool)
    rendered = true_value if flag else false_value
    if separator:
        return [command_option + separator + rendered]
    return [command_option, rendered]
2572
2573
def cli_valueless_option(params, command_option, param, expected_value=True):
    """Emit [command_option] when *param* equals *expected_value*, else []."""
    return [command_option] if params.get(param) == expected_value else []
2577
2578
def cli_configuration_args(params, param, default=[]):
    """Return the list of extra CLI arguments stored under *param* in
    *params*, or *default* when unset.

    NOTE(review): the mutable default and the fact that the stored/shared
    list is returned directly are preserved as-is; callers should not
    mutate the result in place.
    """
    configured = params.get(param)
    if configured is None:
        return default
    assert isinstance(configured, list)
    return configured
2585
2586
class ISO639Utils(object):
    """Conversions between two-letter (ISO 639-1) and three-letter
    (ISO 639-2/T) language codes."""
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    # ISO 639-1 -> ISO 639-2/T
    _lang_map = {
        'aa': 'aar',
        'ab': 'abk',
        'ae': 'ave',
        'af': 'afr',
        'ak': 'aka',
        'am': 'amh',
        'an': 'arg',
        'ar': 'ara',
        'as': 'asm',
        'av': 'ava',
        'ay': 'aym',
        'az': 'aze',
        'ba': 'bak',
        'be': 'bel',
        'bg': 'bul',
        'bh': 'bih',
        'bi': 'bis',
        'bm': 'bam',
        'bn': 'ben',
        'bo': 'bod',
        'br': 'bre',
        'bs': 'bos',
        'ca': 'cat',
        'ce': 'che',
        'ch': 'cha',
        'co': 'cos',
        'cr': 'cre',
        'cs': 'ces',
        'cu': 'chu',
        'cv': 'chv',
        'cy': 'cym',
        'da': 'dan',
        'de': 'deu',
        'dv': 'div',
        'dz': 'dzo',
        'ee': 'ewe',
        'el': 'ell',
        'en': 'eng',
        'eo': 'epo',
        'es': 'spa',
        'et': 'est',
        'eu': 'eus',
        'fa': 'fas',
        'ff': 'ful',
        'fi': 'fin',
        'fj': 'fij',
        'fo': 'fao',
        'fr': 'fra',
        'fy': 'fry',
        'ga': 'gle',
        'gd': 'gla',
        'gl': 'glg',
        'gn': 'grn',
        'gu': 'guj',
        'gv': 'glv',
        'ha': 'hau',
        'he': 'heb',
        'hi': 'hin',
        'ho': 'hmo',
        'hr': 'hrv',
        'ht': 'hat',
        'hu': 'hun',
        'hy': 'hye',
        'hz': 'her',
        'ia': 'ina',
        'id': 'ind',
        'ie': 'ile',
        'ig': 'ibo',
        'ii': 'iii',
        'ik': 'ipk',
        'io': 'ido',
        'is': 'isl',
        'it': 'ita',
        'iu': 'iku',
        'ja': 'jpn',
        'jv': 'jav',
        'ka': 'kat',
        'kg': 'kon',
        'ki': 'kik',
        'kj': 'kua',
        'kk': 'kaz',
        'kl': 'kal',
        'km': 'khm',
        'kn': 'kan',
        'ko': 'kor',
        'kr': 'kau',
        'ks': 'kas',
        'ku': 'kur',
        'kv': 'kom',
        'kw': 'cor',
        'ky': 'kir',
        'la': 'lat',
        'lb': 'ltz',
        'lg': 'lug',
        'li': 'lim',
        'ln': 'lin',
        'lo': 'lao',
        'lt': 'lit',
        'lu': 'lub',
        'lv': 'lav',
        'mg': 'mlg',
        'mh': 'mah',
        'mi': 'mri',
        'mk': 'mkd',
        'ml': 'mal',
        'mn': 'mon',
        'mr': 'mar',
        'ms': 'msa',
        'mt': 'mlt',
        'my': 'mya',
        'na': 'nau',
        'nb': 'nob',
        'nd': 'nde',
        'ne': 'nep',
        'ng': 'ndo',
        'nl': 'nld',
        'nn': 'nno',
        'no': 'nor',
        'nr': 'nbl',
        'nv': 'nav',
        'ny': 'nya',
        'oc': 'oci',
        'oj': 'oji',
        'om': 'orm',
        'or': 'ori',
        'os': 'oss',
        'pa': 'pan',
        'pi': 'pli',
        'pl': 'pol',
        'ps': 'pus',
        'pt': 'por',
        'qu': 'que',
        'rm': 'roh',
        'rn': 'run',
        'ro': 'ron',
        'ru': 'rus',
        'rw': 'kin',
        'sa': 'san',
        'sc': 'srd',
        'sd': 'snd',
        'se': 'sme',
        'sg': 'sag',
        'si': 'sin',
        'sk': 'slk',
        'sl': 'slv',
        'sm': 'smo',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sqi',
        'sr': 'srp',
        'ss': 'ssw',
        'st': 'sot',
        'su': 'sun',
        'sv': 'swe',
        'sw': 'swa',
        'ta': 'tam',
        'te': 'tel',
        'tg': 'tgk',
        'th': 'tha',
        'ti': 'tir',
        'tk': 'tuk',
        'tl': 'tgl',
        'tn': 'tsn',
        'to': 'ton',
        'tr': 'tur',
        'ts': 'tso',
        'tt': 'tat',
        'tw': 'twi',
        'ty': 'tah',
        'ug': 'uig',
        'uk': 'ukr',
        'ur': 'urd',
        'uz': 'uzb',
        've': 'ven',
        'vi': 'vie',
        'vo': 'vol',
        'wa': 'wln',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yid',
        'yo': 'yor',
        'za': 'zha',
        'zh': 'zho',
        'zu': 'zul',
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        # Only the first two characters are significant (e.g. 'en-US').
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        # Reverse lookup; returns None implicitly when no match is found.
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name
2787
2788
class ISO3166Utils(object):
    """Lookup of full country names from ISO 3166-1 alpha-2 codes."""
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan',
        'AX': 'Åland Islands',
        'AL': 'Albania',
        'DZ': 'Algeria',
        'AS': 'American Samoa',
        'AD': 'Andorra',
        'AO': 'Angola',
        'AI': 'Anguilla',
        'AQ': 'Antarctica',
        'AG': 'Antigua and Barbuda',
        'AR': 'Argentina',
        'AM': 'Armenia',
        'AW': 'Aruba',
        'AU': 'Australia',
        'AT': 'Austria',
        'AZ': 'Azerbaijan',
        'BS': 'Bahamas',
        'BH': 'Bahrain',
        'BD': 'Bangladesh',
        'BB': 'Barbados',
        'BY': 'Belarus',
        'BE': 'Belgium',
        'BZ': 'Belize',
        'BJ': 'Benin',
        'BM': 'Bermuda',
        'BT': 'Bhutan',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BW': 'Botswana',
        'BV': 'Bouvet Island',
        'BR': 'Brazil',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BG': 'Bulgaria',
        'BF': 'Burkina Faso',
        'BI': 'Burundi',
        'KH': 'Cambodia',
        'CM': 'Cameroon',
        'CA': 'Canada',
        'CV': 'Cape Verde',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'TD': 'Chad',
        'CL': 'Chile',
        'CN': 'China',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CO': 'Colombia',
        'KM': 'Comoros',
        'CG': 'Congo',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CR': 'Costa Rica',
        'CI': 'Côte d\'Ivoire',
        'HR': 'Croatia',
        'CU': 'Cuba',
        'CW': 'Curaçao',
        'CY': 'Cyprus',
        'CZ': 'Czech Republic',
        'DK': 'Denmark',
        'DJ': 'Djibouti',
        'DM': 'Dominica',
        'DO': 'Dominican Republic',
        'EC': 'Ecuador',
        'EG': 'Egypt',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'ER': 'Eritrea',
        'EE': 'Estonia',
        'ET': 'Ethiopia',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'FJ': 'Fiji',
        'FI': 'Finland',
        'FR': 'France',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GA': 'Gabon',
        'GM': 'Gambia',
        'GE': 'Georgia',
        'DE': 'Germany',
        'GH': 'Ghana',
        'GI': 'Gibraltar',
        'GR': 'Greece',
        'GL': 'Greenland',
        'GD': 'Grenada',
        'GP': 'Guadeloupe',
        'GU': 'Guam',
        'GT': 'Guatemala',
        'GG': 'Guernsey',
        'GN': 'Guinea',
        'GW': 'Guinea-Bissau',
        'GY': 'Guyana',
        'HT': 'Haiti',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'HN': 'Honduras',
        'HK': 'Hong Kong',
        'HU': 'Hungary',
        'IS': 'Iceland',
        'IN': 'India',
        'ID': 'Indonesia',
        'IR': 'Iran, Islamic Republic of',
        'IQ': 'Iraq',
        'IE': 'Ireland',
        'IM': 'Isle of Man',
        'IL': 'Israel',
        'IT': 'Italy',
        'JM': 'Jamaica',
        'JP': 'Japan',
        'JE': 'Jersey',
        'JO': 'Jordan',
        'KZ': 'Kazakhstan',
        'KE': 'Kenya',
        'KI': 'Kiribati',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'KW': 'Kuwait',
        'KG': 'Kyrgyzstan',
        'LA': 'Lao People\'s Democratic Republic',
        'LV': 'Latvia',
        'LB': 'Lebanon',
        'LS': 'Lesotho',
        'LR': 'Liberia',
        'LY': 'Libya',
        'LI': 'Liechtenstein',
        'LT': 'Lithuania',
        'LU': 'Luxembourg',
        'MO': 'Macao',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MG': 'Madagascar',
        'MW': 'Malawi',
        'MY': 'Malaysia',
        'MV': 'Maldives',
        'ML': 'Mali',
        'MT': 'Malta',
        'MH': 'Marshall Islands',
        'MQ': 'Martinique',
        'MR': 'Mauritania',
        'MU': 'Mauritius',
        'YT': 'Mayotte',
        'MX': 'Mexico',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'MC': 'Monaco',
        'MN': 'Mongolia',
        'ME': 'Montenegro',
        'MS': 'Montserrat',
        'MA': 'Morocco',
        'MZ': 'Mozambique',
        'MM': 'Myanmar',
        'NA': 'Namibia',
        'NR': 'Nauru',
        'NP': 'Nepal',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NI': 'Nicaragua',
        'NE': 'Niger',
        'NG': 'Nigeria',
        'NU': 'Niue',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'NO': 'Norway',
        'OM': 'Oman',
        'PK': 'Pakistan',
        'PW': 'Palau',
        'PS': 'Palestine, State of',
        'PA': 'Panama',
        'PG': 'Papua New Guinea',
        'PY': 'Paraguay',
        'PE': 'Peru',
        'PH': 'Philippines',
        'PN': 'Pitcairn',
        'PL': 'Poland',
        'PT': 'Portugal',
        'PR': 'Puerto Rico',
        'QA': 'Qatar',
        'RE': 'Réunion',
        'RO': 'Romania',
        'RU': 'Russian Federation',
        'RW': 'Rwanda',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'WS': 'Samoa',
        'SM': 'San Marino',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SN': 'Senegal',
        'RS': 'Serbia',
        'SC': 'Seychelles',
        'SL': 'Sierra Leone',
        'SG': 'Singapore',
        'SX': 'Sint Maarten (Dutch part)',
        'SK': 'Slovakia',
        'SI': 'Slovenia',
        'SB': 'Solomon Islands',
        'SO': 'Somalia',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'ES': 'Spain',
        'LK': 'Sri Lanka',
        'SD': 'Sudan',
        'SR': 'Suriname',
        'SJ': 'Svalbard and Jan Mayen',
        'SZ': 'Swaziland',
        'SE': 'Sweden',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TJ': 'Tajikistan',
        'TZ': 'Tanzania, United Republic of',
        'TH': 'Thailand',
        'TL': 'Timor-Leste',
        'TG': 'Togo',
        'TK': 'Tokelau',
        'TO': 'Tonga',
        'TT': 'Trinidad and Tobago',
        'TN': 'Tunisia',
        'TR': 'Turkey',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'TV': 'Tuvalu',
        'UG': 'Uganda',
        'UA': 'Ukraine',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'UY': 'Uruguay',
        'UZ': 'Uzbekistan',
        'VU': 'Vanuatu',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VN': 'Viet Nam',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
        'YE': 'Yemen',
        'ZM': 'Zambia',
        'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        # Case-insensitive lookup; returns None for unknown codes.
        return cls._country_map.get(code.upper())
3047
3048
class GeoUtils(object):
    """Generation of random IPv4 addresses inside a country's major
    address block (used for geo-restriction bypass)."""
    # Major IPv4 address blocks per country, in CIDR notation.
    _country_ip_map = {
        'AD': '85.94.160.0/19',
        'AE': '94.200.0.0/13',
        'AF': '149.54.0.0/17',
        'AG': '209.59.64.0/18',
        'AI': '204.14.248.0/21',
        'AL': '46.99.0.0/16',
        'AM': '46.70.0.0/15',
        'AO': '105.168.0.0/13',
        'AP': '159.117.192.0/21',
        'AR': '181.0.0.0/12',
        'AS': '202.70.112.0/20',
        'AT': '84.112.0.0/13',
        'AU': '1.128.0.0/11',
        'AW': '181.41.0.0/18',
        'AZ': '5.191.0.0/16',
        'BA': '31.176.128.0/17',
        'BB': '65.48.128.0/17',
        'BD': '114.130.0.0/16',
        'BE': '57.0.0.0/8',
        'BF': '129.45.128.0/17',
        'BG': '95.42.0.0/15',
        'BH': '37.131.0.0/17',
        'BI': '154.117.192.0/18',
        'BJ': '137.255.0.0/16',
        'BL': '192.131.134.0/24',
        'BM': '196.12.64.0/18',
        'BN': '156.31.0.0/16',
        'BO': '161.56.0.0/16',
        'BQ': '161.0.80.0/20',
        'BR': '152.240.0.0/12',
        'BS': '24.51.64.0/18',
        'BT': '119.2.96.0/19',
        'BW': '168.167.0.0/16',
        'BY': '178.120.0.0/13',
        'BZ': '179.42.192.0/18',
        'CA': '99.224.0.0/11',
        'CD': '41.243.0.0/16',
        'CF': '196.32.200.0/21',
        'CG': '197.214.128.0/17',
        'CH': '85.0.0.0/13',
        'CI': '154.232.0.0/14',
        'CK': '202.65.32.0/19',
        'CL': '152.172.0.0/14',
        'CM': '165.210.0.0/15',
        'CN': '36.128.0.0/10',
        'CO': '181.240.0.0/12',
        'CR': '201.192.0.0/12',
        'CU': '152.206.0.0/15',
        'CV': '165.90.96.0/19',
        'CW': '190.88.128.0/17',
        'CY': '46.198.0.0/15',
        'CZ': '88.100.0.0/14',
        'DE': '53.0.0.0/8',
        'DJ': '197.241.0.0/17',
        'DK': '87.48.0.0/12',
        'DM': '192.243.48.0/20',
        'DO': '152.166.0.0/15',
        'DZ': '41.96.0.0/12',
        'EC': '186.68.0.0/15',
        'EE': '90.190.0.0/15',
        'EG': '156.160.0.0/11',
        'ER': '196.200.96.0/20',
        'ES': '88.0.0.0/11',
        'ET': '196.188.0.0/14',
        'EU': '2.16.0.0/13',
        'FI': '91.152.0.0/13',
        'FJ': '144.120.0.0/16',
        'FM': '119.252.112.0/20',
        'FO': '88.85.32.0/19',
        'FR': '90.0.0.0/9',
        'GA': '41.158.0.0/15',
        'GB': '25.0.0.0/8',
        'GD': '74.122.88.0/21',
        'GE': '31.146.0.0/16',
        'GF': '161.22.64.0/18',
        'GG': '62.68.160.0/19',
        'GH': '45.208.0.0/14',
        'GI': '85.115.128.0/19',
        'GL': '88.83.0.0/19',
        'GM': '160.182.0.0/15',
        'GN': '197.149.192.0/18',
        'GP': '104.250.0.0/19',
        'GQ': '105.235.224.0/20',
        'GR': '94.64.0.0/13',
        'GT': '168.234.0.0/16',
        'GU': '168.123.0.0/16',
        'GW': '197.214.80.0/20',
        'GY': '181.41.64.0/18',
        'HK': '113.252.0.0/14',
        'HN': '181.210.0.0/16',
        'HR': '93.136.0.0/13',
        'HT': '148.102.128.0/17',
        'HU': '84.0.0.0/14',
        'ID': '39.192.0.0/10',
        'IE': '87.32.0.0/12',
        'IL': '79.176.0.0/13',
        'IM': '5.62.80.0/20',
        'IN': '117.192.0.0/10',
        'IO': '203.83.48.0/21',
        'IQ': '37.236.0.0/14',
        'IR': '2.176.0.0/12',
        'IS': '82.221.0.0/16',
        'IT': '79.0.0.0/10',
        'JE': '87.244.64.0/18',
        'JM': '72.27.0.0/17',
        'JO': '176.29.0.0/16',
        'JP': '126.0.0.0/8',
        'KE': '105.48.0.0/12',
        'KG': '158.181.128.0/17',
        'KH': '36.37.128.0/17',
        'KI': '103.25.140.0/22',
        'KM': '197.255.224.0/20',
        'KN': '198.32.32.0/19',
        'KP': '175.45.176.0/22',
        'KR': '175.192.0.0/10',
        'KW': '37.36.0.0/14',
        'KY': '64.96.0.0/15',
        'KZ': '2.72.0.0/13',
        'LA': '115.84.64.0/18',
        'LB': '178.135.0.0/16',
        'LC': '192.147.231.0/24',
        'LI': '82.117.0.0/19',
        'LK': '112.134.0.0/15',
        'LR': '41.86.0.0/19',
        'LS': '129.232.0.0/17',
        'LT': '78.56.0.0/13',
        'LU': '188.42.0.0/16',
        'LV': '46.109.0.0/16',
        'LY': '41.252.0.0/14',
        'MA': '105.128.0.0/11',
        'MC': '88.209.64.0/18',
        'MD': '37.246.0.0/16',
        'ME': '178.175.0.0/17',
        'MF': '74.112.232.0/21',
        'MG': '154.126.0.0/17',
        'MH': '117.103.88.0/21',
        'MK': '77.28.0.0/15',
        'ML': '154.118.128.0/18',
        'MM': '37.111.0.0/17',
        'MN': '49.0.128.0/17',
        'MO': '60.246.0.0/16',
        'MP': '202.88.64.0/20',
        'MQ': '109.203.224.0/19',
        'MR': '41.188.64.0/18',
        'MS': '208.90.112.0/22',
        'MT': '46.11.0.0/16',
        'MU': '105.16.0.0/12',
        'MV': '27.114.128.0/18',
        'MW': '105.234.0.0/16',
        'MX': '187.192.0.0/11',
        'MY': '175.136.0.0/13',
        'MZ': '197.218.0.0/15',
        'NA': '41.182.0.0/16',
        'NC': '101.101.0.0/18',
        'NE': '197.214.0.0/18',
        'NF': '203.17.240.0/22',
        'NG': '105.112.0.0/12',
        'NI': '186.76.0.0/15',
        'NL': '145.96.0.0/11',
        'NO': '84.208.0.0/13',
        'NP': '36.252.0.0/15',
        'NR': '203.98.224.0/19',
        'NU': '49.156.48.0/22',
        'NZ': '49.224.0.0/14',
        'OM': '5.36.0.0/15',
        'PA': '186.72.0.0/15',
        'PE': '186.160.0.0/14',
        'PF': '123.50.64.0/18',
        'PG': '124.240.192.0/19',
        'PH': '49.144.0.0/13',
        'PK': '39.32.0.0/11',
        'PL': '83.0.0.0/11',
        'PM': '70.36.0.0/20',
        'PR': '66.50.0.0/16',
        'PS': '188.161.0.0/16',
        'PT': '85.240.0.0/13',
        'PW': '202.124.224.0/20',
        'PY': '181.120.0.0/14',
        'QA': '37.210.0.0/15',
        'RE': '139.26.0.0/16',
        'RO': '79.112.0.0/13',
        'RS': '178.220.0.0/14',
        'RU': '5.136.0.0/13',
        'RW': '105.178.0.0/15',
        'SA': '188.48.0.0/13',
        'SB': '202.1.160.0/19',
        'SC': '154.192.0.0/11',
        'SD': '154.96.0.0/13',
        'SE': '78.64.0.0/12',
        'SG': '152.56.0.0/14',
        'SI': '188.196.0.0/14',
        'SK': '78.98.0.0/15',
        'SL': '197.215.0.0/17',
        'SM': '89.186.32.0/19',
        'SN': '41.82.0.0/15',
        'SO': '197.220.64.0/19',
        'SR': '186.179.128.0/17',
        'SS': '105.235.208.0/21',
        'ST': '197.159.160.0/19',
        'SV': '168.243.0.0/16',
        'SX': '190.102.0.0/20',
        'SY': '5.0.0.0/16',
        'SZ': '41.84.224.0/19',
        'TC': '65.255.48.0/20',
        'TD': '154.68.128.0/19',
        'TG': '196.168.0.0/14',
        'TH': '171.96.0.0/13',
        'TJ': '85.9.128.0/18',
        'TK': '27.96.24.0/21',
        'TL': '180.189.160.0/20',
        'TM': '95.85.96.0/19',
        'TN': '197.0.0.0/11',
        'TO': '175.176.144.0/21',
        'TR': '78.160.0.0/11',
        'TT': '186.44.0.0/15',
        'TV': '202.2.96.0/19',
        'TW': '120.96.0.0/11',
        'TZ': '156.156.0.0/14',
        'UA': '93.72.0.0/13',
        'UG': '154.224.0.0/13',
        'US': '3.0.0.0/8',
        'UY': '167.56.0.0/13',
        'UZ': '82.215.64.0/18',
        'VA': '212.77.0.0/19',
        'VC': '24.92.144.0/20',
        'VE': '186.88.0.0/13',
        'VG': '172.103.64.0/18',
        'VI': '146.226.0.0/16',
        'VN': '14.160.0.0/11',
        'VU': '202.80.32.0/20',
        'WF': '117.20.32.0/21',
        'WS': '202.4.32.0/19',
        'YE': '134.35.0.0/16',
        'YT': '41.242.116.0/22',
        'ZA': '41.0.0.0/11',
        'ZM': '165.56.0.0/13',
        'ZW': '41.85.192.0/19',
    }

    @classmethod
    def random_ipv4(cls, code):
        """Return a random IPv4 address (as text) inside the block mapped
        to country *code*, or None for unknown codes."""
        block = cls._country_ip_map.get(code.upper())
        if not block:
            return None
        addr, preflen = block.split('/')
        # The block's first address as an integer, and the last address
        # obtained by setting all host bits.
        addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
        addr_max = addr_min | (0xffffffff >> int(preflen))
        return compat_str(socket.inet_ntoa(
            compat_struct_pack('!L', random.randint(addr_min, addr_max))))
3301
3302
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    """ProxyHandler that honours a per-request 'Ytdl-request-proxy' header."""

    def __init__(self, proxies=None):
        # Install default http/https openers up front; the actual proxy
        # decision is deferred to proxy_open (the lambda defaults capture
        # the scheme and the bound method at definition time).
        for scheme in ('http', 'https'):
            setattr(self, '%s_open' % scheme,
                    lambda r, proxy='__noproxy__', type=scheme, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        # A per-request override beats the handler-level default proxy.
        per_req_proxy = req.headers.get('Ytdl-request-proxy')
        if per_req_proxy is not None:
            proxy = per_req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        scheme = compat_urlparse.urlparse(proxy).scheme.lower()
        if scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
            # youtube-dl's http/https handlers take care of wrapping the
            # socket with SOCKS themselves; just tag the request for them.
            req.add_header('Ytdl-socks-proxy', proxy)
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)
3326
3327
3328 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
3329 # released into Public Domain
3330 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
3331
def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string (big-endian).

    If optional blocksize is given and greater than zero, the front of the
    byte string is padded with binary zeros so that its length is a multiple
    of blocksize.
    """
    # Emit the integer 32 bits at a time, most significant word first.
    out = b''
    n = int(n)
    while n > 0:
        out = compat_struct_pack('>I', n & 0xffffffff) + out
        n >>= 32
    # Trim leading NUL bytes introduced by whole-word packing.
    idx = 0
    for idx in range(len(out)):
        if out[idx] != b'\000'[0]:
            break
    else:
        # Only reached when n == 0 (nothing was packed).
        out = b'\000'
        idx = 0
    out = out[idx:]
    # Front-pad with NULs up to the next multiple of blocksize, if requested.
    if blocksize > 0 and len(out) % blocksize:
        out = (blocksize - len(out) % blocksize) * b'\000' + out
    return out
3360
3361
def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a big-endian byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    # Front-pad with NULs to a multiple of 4 so the string can be consumed
    # as whole 32-bit words.
    remainder = len(s) % 4
    if remainder:
        s = b'\000' * (4 - remainder) + s
    result = 0
    for off in range(0, len(s), 4):
        result = (result << 32) + compat_struct_unpack('>I', s[off:off + 4])[0]
    return result
3377
3378
def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''
    # The input is interpreted as a little-endian integer, hence the
    # byte-order reversal before hex conversion.
    message = int(binascii.hexlify(data[::-1]), 16)
    ciphertext = pow(message, exponent, modulus)
    return '%x' % ciphertext
3394
3395
def pkcs1pad(data, length):
    """
    Padding input data with PKCS#1 scheme

    @param {int[]} data        input data
    @param {int}   length      target length
    @returns {int[]}           padded data
    @raises ValueError when data does not fit in length (11 bytes of
                       overhead are required: 00 02 | >=8 PS bytes | 00)
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    # EME-PKCS1-v1_5 (RFC 8017 section 7.2.1): the padding string PS must
    # consist of *nonzero* pseudo-random octets — the first zero octet after
    # the 0x00 0x02 prefix marks the start of the message, so a zero inside
    # PS would truncate the payload on decryption. randint(1, 255) was
    # randint(0, 254), which could emit such a zero.
    pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data
3409
3410
def encode_base_n(num, n, table=None):
    """Encode the non-negative integer `num` in base `n` using `table` as
    the digit alphabet (defaults to 0-9, a-z, A-Z truncated to n digits).

    Raises ValueError when the table has fewer than n entries."""
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    # Collect digits least-significant first, then reverse.
    digits = []
    while num:
        num, rem = divmod(num, n)
        digits.append(table[rem])
    return ''.join(reversed(digits))
3427
3428
def decode_packed_codes(code):
    """Unpack JavaScript obfuscated with Dean Edwards' p.a.c.k.e.r.: every
    base-n token in the packed source is substituted with its symbol."""
    mobj = re.search(PACKED_CODES_RE, code)
    packed_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    # Map every base-n token back to its symbol; an empty symbol means the
    # token stands for itself.
    for idx in range(count - 1, -1, -1):
        token = encode_base_n(idx, base)
        symbol_table[token] = symbols[idx] or token

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        packed_code)
3445
3446
def parse_m3u8_attributes(attrib):
    """Parse an M3U8 attribute list ('KEY=value,KEY="quoted,value"') into a
    dict, stripping the surrounding quotes from quoted-string values."""
    info = {}
    for key, val in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        info[key] = val[1:-1] if val.startswith('"') else val
    return info
3454
3455
def urshift(val, n):
    """JavaScript-style unsigned right shift (>>>) of a 32-bit value: a
    negative input is first reinterpreted as its unsigned 32-bit form."""
    if val < 0:
        val += 0x100000000
    return val >> n
3458
3459
3460 # Based on png2str() written by @gdkchan and improved by @yokrysty
3461 # Originally posted at https://github.com/rg3/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    """Minimal PNG decoder: returns (width, height, pixels), where pixels is
    a list of rows and each row is a flat list of 8-bit channel values.

    Handles only what youtube-dl needs: the decoder assumes 3 bytes per
    pixel (see `stride` below) with zlib-compressed IDAT data and the five
    standard scanline filters; the IHDR bit depth / colour type fields are
    not validated — TODO confirm callers only feed it such images.

    Raises IOError on a bad signature or when no IDAT chunk is present.
    """
    header = png_data[8:]

    # Validate the fixed 8-byte PNG signature and that the first chunk
    # following it is IHDR.
    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    # Big-endian unsigned integer reader, keyed on the byte length (1/2/4).
    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []

    # Walk the chunk stream: 4-byte length, 4-byte type, payload, 4-byte CRC
    # (the CRC is skipped, not verified).
    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    # IHDR is guaranteed to be the first chunk (checked above); its first
    # two 32-bit fields are the image dimensions.
    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    # Image data may be split across several IDAT chunks; concatenate them
    # all before decompressing.
    idat = b''

    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    # Bytes per scanline, assuming 3 bytes per pixel.
    stride = width * 3
    pixels = []

    def _get_pixel(idx):
        # Look up an already-reconstructed channel value by flat byte index.
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    # Undo the per-scanline filters (https://www.w3.org/TR/PNG/#9Filters).
    # Each decompressed row is one filter-type byte followed by `stride`
    # filtered bytes; filter type 0 (None) needs no reconstruction.
    for y in range(height):
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []

        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            # "left" is the same channel of the previous pixel (3 bytes
            # back), which only exists from the second pixel onwards;
            # "up" is the same channel one scanline above.
            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            if filter_type == 1: # Sub
                color = (color + left) & 0xff
            elif filter_type == 2: # Up
                color = (color + up) & 0xff
            elif filter_type == 3: # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4: # Paeth
                # Paeth predictor: choose whichever of left (a), up (b) and
                # upper-left (c) is closest to the estimate p = a + b - c.
                a = left
                b = up
                c = 0

                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c

                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels
3565
3566
def write_xattr(path, key, value):
    """Set extended attribute `key` (str) to `value` (bytes — it is written
    in binary mode / decoded as UTF-8 for the CLI fallbacks) on `path`.

    Backends are tried in order: the pyxattr or xattr Python modules, NTFS
    Alternate Data Streams on Windows, then the setfattr/xattr command line
    tools. Raises XAttrMetadataError when the chosen backend fails and
    XAttrUnavailableError when no usable backend exists.
    """
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        # pyxattr and xattr expose different APIs under the same module name;
        # distinguish them by the presence of xattr.set().
        if hasattr(xattr, 'set'): # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/rg3/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'youtube-dl requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else: # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            # ':' separates the stream name from the path, so it must not
            # appear in the key itself.
            assert ':' not in key
            assert os.path.exists(path)

            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            # Probe for the CLI tools; setfattr (GNU attr) is preferred.
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:

                # The CLI tools take the value as a text argument.
                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)] +
                       [encodeArgument(o) for o in opts] +
                       [encodeFilename(path, True)])

                try:
                    p = subprocess.Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = p.communicate()
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)

            else:
                # On Unix, and can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool).")
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary.")