From: Rogério Brito Date: Tue, 2 Dec 2014 23:06:28 +0000 (-0200) Subject: Imported Upstream version 2014.12.01 X-Git-Url: https://git.rapsys.eu/youtubedl/commitdiff_plain/0cf0312991a54458a07e903da2e47e9f3c8855ae Imported Upstream version 2014.12.01 --- diff --git a/README.md b/README.md index c284db4..d6e7ff9 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ Alternatively, refer to the developer instructions below for how to check out an # DESCRIPTION **youtube-dl** is a small command-line program to download videos from YouTube.com and a few more sites. It requires the Python interpreter, version -2.6, 2.7, or 3.3+, and it is not platform specific. It should work on +2.6, 2.7, or 3.2+, and it is not platform specific. It should work on your Unix box, on Windows or on Mac OS X. It is released to the public domain, which means you can modify it, redistribute it or use it however you like. @@ -93,7 +93,8 @@ which means you can modify it, redistribute it or use it however you like. COUNT views --max-views COUNT Do not download any videos with more than COUNT views - --no-playlist download only the currently playing video + --no-playlist If the URL refers to a video and a + playlist, download only the video. --age-limit YEARS download only videos suitable for the given age --download-archive FILE Download only videos not listed in the @@ -492,14 +493,15 @@ If you want to add support for a new site, you can follow this quick list (assum def _real_extract(self, url): video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) # TODO more code goes here, for example ... - webpage = self._download_webpage(url, video_id) title = self._html_search_regex(r'
<h1>(.*?)</h1>
', webpage, 'title') return { 'id': video_id, 'title': title, + 'description': self._og_search_description(webpage), # TODO more properties (see youtube_dl/extractor/common.py) } ``` diff --git a/README.txt b/README.txt index 3ba6f53..601fe9a 100644 --- a/README.txt +++ b/README.txt @@ -39,7 +39,7 @@ DESCRIPTION youtube-dl is a small command-line program to download videos from YouTube.com and a few more sites. It requires the Python interpreter, -version 2.6, 2.7, or 3.3+, and it is not platform specific. It should +version 2.6, 2.7, or 3.2+, and it is not platform specific. It should work on your Unix box, on Windows or on Mac OS X. It is released to the public domain, which means you can modify it, redistribute it or use it however you like. @@ -107,7 +107,8 @@ Video Selection: COUNT views --max-views COUNT Do not download any videos with more than COUNT views - --no-playlist download only the currently playing video + --no-playlist If the URL refers to a video and a + playlist, download only the video. --age-limit YEARS download only videos suitable for the given age --download-archive FILE Download only videos not listed in the @@ -605,14 +606,15 @@ list (assuming your service is called yourextractor): def _real_extract(self, url): video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) # TODO more code goes here, for example ... - webpage = self._download_webpage(url, video_id) title = self._html_search_regex(r'
<h1>(.*?)</h1>
', webpage, 'title') return { 'id': video_id, 'title': title, + 'description': self._og_search_description(webpage), # TODO more properties (see youtube_dl/extractor/common.py) } ``` diff --git a/devscripts/bash-completion.py b/devscripts/bash-completion.py index 4928772..cd26cc0 100755 --- a/devscripts/bash-completion.py +++ b/devscripts/bash-completion.py @@ -1,4 +1,6 @@ #!/usr/bin/env python +from __future__ import unicode_literals + import os from os.path import dirname as dirn import sys @@ -9,16 +11,17 @@ import youtube_dl BASH_COMPLETION_FILE = "youtube-dl.bash-completion" BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in" + def build_completion(opt_parser): opts_flag = [] for group in opt_parser.option_groups: for option in group.option_list: - #for every long flag + # for every long flag opts_flag.append(option.get_opt_string()) with open(BASH_COMPLETION_TEMPLATE) as f: template = f.read() with open(BASH_COMPLETION_FILE, "w") as f: - #just using the special char + # just using the special char filled_template = template.replace("{{flags}}", " ".join(opts_flag)) f.write(filled_template) diff --git a/devscripts/buildserver.py b/devscripts/buildserver.py index e0c3cc8..7c2f49f 100644 --- a/devscripts/buildserver.py +++ b/devscripts/buildserver.py @@ -142,7 +142,7 @@ def win_service_set_status(handle, status_code): def win_service_main(service_name, real_main, argc, argv_raw): try: - #args = [argv_raw[i].value for i in range(argc)] + # args = [argv_raw[i].value for i in range(argc)] stop_event = threading.Event() handler = HandlerEx(functools.partial(stop_event, win_service_handler)) h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None) @@ -233,6 +233,7 @@ def rmtree(path): #============================================================================== + class BuildError(Exception): def __init__(self, output, code=500): self.output = output @@ -369,7 +370,7 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea class BuildHTTPRequestHandler(BaseHTTPRequestHandler): - actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching. + actionDict = {'build': Builder, 'download': Builder} # They're the same, no more caching. 
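The README excerpt above only shows the `_real_extract` body from the new-site quick list. As a sketch for orientation, the full extractor file it belongs to looks roughly like this; `yourextractor.com` and the `_VALID_URL` pattern are the README's own placeholders, and the surrounding file layout is assumed from neighbouring extractors rather than shown in this patch:

```python
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    # Placeholder pattern from the README; the named group feeds _match_id().
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Fetch the page first so the search helpers below can use it.
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<h1>(.*?)</h1>', webpage, 'title')

        return {
            'id': video_id,
            'title': title,
            'description': self._og_search_description(webpage),
            # TODO more properties (see youtube_dl/extractor/common.py)
        }
```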
def do_GET(self): path = urlparse.urlparse(self.path) diff --git a/devscripts/check-porn.py b/devscripts/check-porn.py index 86aa37b..2162827 100644 --- a/devscripts/check-porn.py +++ b/devscripts/check-porn.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import unicode_literals """ This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check diff --git a/devscripts/fish-completion.py b/devscripts/fish-completion.py index f4aaf02..c2f2387 100755 --- a/devscripts/fish-completion.py +++ b/devscripts/fish-completion.py @@ -23,13 +23,13 @@ EXTRA_ARGS = { 'batch-file': ['--require-parameter'], } + def build_completion(opt_parser): commands = [] for group in opt_parser.option_groups: for option in group.option_list: long_option = option.get_opt_string().strip('-') - help_msg = shell_quote([option.help]) complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option] if option._short_opts: complete_cmd += ['--short-option', option._short_opts[0].strip('-')] diff --git a/devscripts/gh-pages/add-version.py b/devscripts/gh-pages/add-version.py index 35865b2..867ea00 100755 --- a/devscripts/gh-pages/add-version.py +++ b/devscripts/gh-pages/add-version.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +from __future__ import unicode_literals import json import sys diff --git a/devscripts/gh-pages/generate-download.py b/devscripts/gh-pages/generate-download.py index 55912e1..392e3ba 100755 --- a/devscripts/gh-pages/generate-download.py +++ b/devscripts/gh-pages/generate-download.py @@ -1,8 +1,7 @@ #!/usr/bin/env python3 +from __future__ import unicode_literals + import hashlib -import shutil -import subprocess -import tempfile import urllib.request import json diff --git a/devscripts/gh-pages/sign-versions.py b/devscripts/gh-pages/sign-versions.py index 8a824df..fa389c3 100755 --- a/devscripts/gh-pages/sign-versions.py +++ b/devscripts/gh-pages/sign-versions.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +from __future__ import unicode_literals, with_statement import rsa import json @@ -11,22 +12,23 @@ except NameError: versions_info = json.load(open('update/versions.json')) if 'signature' in versions_info: - del versions_info['signature'] + del versions_info['signature'] print('Enter the PKCS1 private key, followed by a blank line:') privkey = b'' while True: - try: - line = input() - except EOFError: - break - if line == '': - break - privkey += line.encode('ascii') + b'\n' + try: + line = input() + except EOFError: + break + if line == '': + break + privkey += line.encode('ascii') + b'\n' privkey = rsa.PrivateKey.load_pkcs1(privkey) signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode() print('signature: ' + signature) versions_info['signature'] = signature -json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True) \ No newline at end of file +with open('update/versions.json', 'w') as versionsf: + json.dump(versions_info, versionsf, indent=4, sort_keys=True) diff --git a/devscripts/gh-pages/update-copyright.py b/devscripts/gh-pages/update-copyright.py index 12c2a91..3663c8a 100755 --- a/devscripts/gh-pages/update-copyright.py +++ b/devscripts/gh-pages/update-copyright.py @@ -1,11 +1,11 @@ #!/usr/bin/env python # coding: utf-8 -from __future__ import with_statement +from __future__ import with_statement, unicode_literals import datetime import glob -import io # For Python 2 compatibilty +import io # For Python 2 compatibilty import os import re @@ -13,7 +13,7 @@ year = 
str(datetime.datetime.now().year) for fn in glob.glob('*.html*'): with io.open(fn, encoding='utf-8') as f: content = f.read() - newc = re.sub(u'(?PCopyright © 2006-)(?P[0-9]{4})', u'Copyright © 2006-' + year, content) + newc = re.sub(r'(?PCopyright © 2006-)(?P[0-9]{4})', 'Copyright © 2006-' + year, content) if content != newc: tmpFn = fn + '.part' with io.open(tmpFn, 'wt', encoding='utf-8') as outf: diff --git a/devscripts/gh-pages/update-feed.py b/devscripts/gh-pages/update-feed.py index 0ba15ae..e93eb60 100755 --- a/devscripts/gh-pages/update-feed.py +++ b/devscripts/gh-pages/update-feed.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +from __future__ import unicode_literals import datetime import io @@ -73,4 +74,3 @@ atom_template = atom_template.replace('@ENTRIES@', entries_str) with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file: atom_file.write(atom_template) - diff --git a/devscripts/gh-pages/update-sites.py b/devscripts/gh-pages/update-sites.py index 153e15c..f0f0481 100755 --- a/devscripts/gh-pages/update-sites.py +++ b/devscripts/gh-pages/update-sites.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +from __future__ import unicode_literals import sys import os @@ -9,6 +10,7 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath( import youtube_dl + def main(): with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf: template = tmplf.read() @@ -21,7 +23,7 @@ def main(): continue elif ie_desc is not None: ie_html += ': {}'.format(ie.IE_DESC) - if ie.working() == False: + if not ie.working(): ie_html += ' (Currently broken)' ie_htmls.append('
<li>{}</li>
  • '.format(ie_html)) diff --git a/devscripts/make_readme.py b/devscripts/make_readme.py index 70fa942..8fbce07 100755 --- a/devscripts/make_readme.py +++ b/devscripts/make_readme.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import io import sys import re diff --git a/devscripts/prepare_manpage.py b/devscripts/prepare_manpage.py index d9c8570..f66bebf 100644 --- a/devscripts/prepare_manpage.py +++ b/devscripts/prepare_manpage.py @@ -1,3 +1,4 @@ +from __future__ import unicode_literals import io import os.path diff --git a/devscripts/transition_helper.py b/devscripts/transition_helper.py deleted file mode 100644 index d5ca2d4..0000000 --- a/devscripts/transition_helper.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python - -import sys, os - -try: - import urllib.request as compat_urllib_request -except ImportError: # Python 2 - import urllib2 as compat_urllib_request - -sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n') -sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n') -sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n') - -try: - raw_input() -except NameError: # Python 3 - input() - -filename = sys.argv[0] - -API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads" -BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl" - -if not os.access(filename, os.W_OK): - sys.exit('ERROR: no write permissions on %s' % filename) - -try: - urlh = compat_urllib_request.urlopen(BIN_URL) - newcontent = urlh.read() - urlh.close() -except (IOError, OSError) as err: - sys.exit('ERROR: unable to download latest version') - -try: - with open(filename, 'wb') as outf: - outf.write(newcontent) -except (IOError, OSError) as err: - sys.exit('ERROR: unable to overwrite current version') - -sys.stderr.write(u'Done! 
Now you can run youtube-dl.\n') diff --git a/devscripts/transition_helper_exe/setup.py b/devscripts/transition_helper_exe/setup.py deleted file mode 100644 index aaf5c29..0000000 --- a/devscripts/transition_helper_exe/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -from distutils.core import setup -import py2exe - -py2exe_options = { - "bundle_files": 1, - "compressed": 1, - "optimize": 2, - "dist_dir": '.', - "dll_excludes": ['w9xpopen.exe'] -} - -setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None) \ No newline at end of file diff --git a/devscripts/transition_helper_exe/youtube-dl.py b/devscripts/transition_helper_exe/youtube-dl.py deleted file mode 100644 index 6297dfd..0000000 --- a/devscripts/transition_helper_exe/youtube-dl.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python - -import sys, os -import urllib2 -import json, hashlib - -def rsa_verify(message, signature, key): - from struct import pack - from hashlib import sha256 - from sys import version_info - def b(x): - if version_info[0] == 2: return x - else: return x.encode('latin1') - assert(type(message) == type(b(''))) - block_size = 0 - n = key[0] - while n: - block_size += 1 - n >>= 8 - signature = pow(int(signature, 16), key[1], key[0]) - raw_bytes = [] - while signature: - raw_bytes.insert(0, pack("B", signature & 0xFF)) - signature >>= 8 - signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes) - if signature[0:2] != b('\x00\x01'): return False - signature = signature[2:] - if not b('\x00') in signature: return False - signature = signature[signature.index(b('\x00'))+1:] - if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False - signature = signature[19:] - if signature != sha256(message).digest(): return False - return True - -sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n') -sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n') -sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n') - -raw_input() - -filename = sys.argv[0] - -UPDATE_URL = "http://rg3.github.io/youtube-dl/update/" -VERSION_URL = UPDATE_URL + 'LATEST_VERSION' -JSON_URL = UPDATE_URL + 'versions.json' -UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537) - -if not os.access(filename, os.W_OK): - sys.exit('ERROR: no write permissions on %s' % filename) - -exe = os.path.abspath(filename) -directory = os.path.dirname(exe) -if not os.access(directory, os.W_OK): - sys.exit('ERROR: no write permissions on %s' % directory) - -try: - versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8') - versions_info = json.loads(versions_info) -except: - sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.') -if not 'signature' in versions_info: - sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.') -signature = versions_info['signature'] -del versions_info['signature'] -if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY): - sys.exit(u'ERROR: the versions file signature is invalid. 
Aborting.') - -version = versions_info['versions'][versions_info['latest']] - -try: - urlh = urllib2.urlopen(version['exe'][0]) - newcontent = urlh.read() - urlh.close() -except (IOError, OSError) as err: - sys.exit('ERROR: unable to download latest version') - -newcontent_hash = hashlib.sha256(newcontent).hexdigest() -if newcontent_hash != version['exe'][1]: - sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.') - -try: - with open(exe + '.new', 'wb') as outf: - outf.write(newcontent) -except (IOError, OSError) as err: - sys.exit(u'ERROR: unable to write the new version') - -try: - bat = os.path.join(directory, 'youtube-dl-updater.bat') - b = open(bat, 'w') - b.write(""" -echo Updating youtube-dl... -ping 127.0.0.1 -n 5 -w 1000 > NUL -move /Y "%s.new" "%s" -del "%s" - \n""" %(exe, exe, bat)) - b.close() - - os.startfile(bat) -except (IOError, OSError) as err: - sys.exit('ERROR: unable to overwrite current version') - -sys.stderr.write(u'Done! Now you can run youtube-dl.\n') diff --git a/devscripts/zsh-completion.py b/devscripts/zsh-completion.py index e8d7192..f200f2c 100755 --- a/devscripts/zsh-completion.py +++ b/devscripts/zsh-completion.py @@ -1,4 +1,6 @@ #!/usr/bin/env python +from __future__ import unicode_literals + import os from os.path import dirname as dirn import sys diff --git a/setup.py b/setup.py index cf6b92b..4686260 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,6 @@ from __future__ import print_function import os.path -import pkg_resources import warnings import sys @@ -103,7 +102,9 @@ setup( "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.3" + "Programming Language :: Python :: 3.2", + "Programming Language :: Python :: 3.3", + "Programming Language :: Python :: 3.4", ], **params diff --git a/test/helper.py b/test/helper.py index 8be37a1..9a7f074 100644 --- a/test/helper.py +++ b/test/helper.py @@ -59,7 +59,7 @@ class FakeYDL(YoutubeDL): params = get_params(override=override) super(FakeYDL, self).__init__(params, auto_init=False) self.result = [] - + def to_screen(self, s, skip_eol=None): print(s) @@ -72,8 +72,10 @@ class FakeYDL(YoutubeDL): def expect_warning(self, regex): # Silence an expected warning matching a regex old_report_warning = self.report_warning + def report_warning(self, message): - if re.match(regex, message): return + if re.match(regex, message): + return old_report_warning(message) self.report_warning = types.MethodType(report_warning, self) @@ -114,14 +116,14 @@ def expect_info_dict(self, expected_dict, got_dict): elif isinstance(expected, type): got = got_dict.get(info_field) self.assertTrue(isinstance(got, expected), - 'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got))) + 'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got))) else: if isinstance(expected, compat_str) and expected.startswith('md5:'): got = 'md5:' + md5(got_dict.get(info_field)) else: got = got_dict.get(info_field) self.assertEqual(expected, got, - 'invalid value for field %s, expected %r, got %r' % (info_field, expected, got)) + 'invalid value for field %s, expected %r, got %r' % (info_field, expected, got)) # Check for the presence of mandatory fields if got_dict.get('_type') != 'playlist': @@ -133,13 +135,13 @@ def expect_info_dict(self, expected_dict, got_dict): # Are checkable fields missing from the test case definition? 
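The deleted transition helpers above hand-rolled `rsa_verify` to check the signed `versions.json`, while `devscripts/gh-pages/sign-versions.py` (also touched in this patch) produces that signature with the python-rsa library. As a hedged sketch only, not code from this patch, the verification counterpart with the same library would look roughly like this; the key material below is a placeholder standing in for the `UPDATES_RSA_KEY` pair shown in the deleted updater:

```python
from __future__ import unicode_literals

import json
from binascii import unhexlify

import rsa

# Mirrors the path and message construction used by sign-versions.py.
with open('update/versions.json') as versionsf:
    versions_info = json.load(versionsf)

signature = unhexlify(versions_info.pop('signature'))
message = json.dumps(versions_info, sort_keys=True).encode('utf-8')

pub_key = rsa.PublicKey(0x10001234, 65537)  # placeholder modulus/exponent

try:
    rsa.pkcs1.verify(message, signature, pub_key)  # raises on mismatch
    print('versions.json signature OK')
except rsa.pkcs1.VerificationError:
    print('versions.json signature invalid')
```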
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value)) - for key, value in got_dict.items() - if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location')) + for key, value in got_dict.items() + if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location')) missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys()) if missing_keys: def _repr(v): if isinstance(v, compat_str): - return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'") + return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n') else: return repr(v) info_dict_str = ''.join( diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py index ab61e19..f8e4f93 100644 --- a/test/test_YoutubeDL.py +++ b/test/test_YoutubeDL.py @@ -266,6 +266,7 @@ class TestFormatSelection(unittest.TestCase): 'ext': 'mp4', 'width': None, } + def fname(templ): ydl = YoutubeDL({'outtmpl': templ}) return ydl.prepare_filename(info) diff --git a/test/test_all_urls.py b/test/test_all_urls.py index 965e5d8..bd4fe17 100644 --- a/test/test_all_urls.py +++ b/test/test_all_urls.py @@ -32,19 +32,19 @@ class TestAllURLsMatching(unittest.TestCase): def test_youtube_playlist_matching(self): assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist']) assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') - assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585 + assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') # 585 assertPlaylist('PL63F0C78739B09958') assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q') assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC') - assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668 + assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') # 668 self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M')) # Top tracks assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101') def test_youtube_matching(self): self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M')) - self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668 + self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) # 668 self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube']) self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube']) self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube']) diff --git a/test/test_download.py b/test/test_download.py index 12cfb5c..a009aa4 100644 --- a/test/test_download.py +++ b/test/test_download.py @@ -40,18 +40,22 @@ from youtube_dl.extractor import get_info_extractor RETRIES = 3 + class YoutubeDL(youtube_dl.YoutubeDL): def __init__(self, *args, **kwargs): self.to_stderr = self.to_screen self.processed_info_dicts = [] super(YoutubeDL, self).__init__(*args, **kwargs) + def report_warning(self, message): # Don't accept warnings during tests raise ExtractorError(message) + def process_info(self, info_dict): self.processed_info_dicts.append(info_dict) return super(YoutubeDL, self).process_info(info_dict) + def _file_md5(fn): with open(fn, 'rb') as f: return hashlib.md5(f.read()).hexdigest() @@ -61,10 +65,13 @@ defs = 
gettestcases() class TestDownload(unittest.TestCase): maxDiff = None + def setUp(self): self.defs = defs -### Dynamically generate tests +# Dynamically generate tests + + def generator(test_case): def test_template(self): @@ -90,7 +97,7 @@ def generator(test_case): return for other_ie in other_ies: if not other_ie.working(): - print_skipping(u'test depends on %sIE, marked as not WORKING' % other_ie.ie_key()) + print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key()) return params = get_params(test_case.get('params', {})) @@ -101,6 +108,7 @@ def generator(test_case): ydl = YoutubeDL(params, auto_init=False) ydl.add_default_info_extractors() finished_hook_called = set() + def _hook(status): if status['status'] == 'finished': finished_hook_called.add(status['filename']) @@ -111,6 +119,7 @@ def generator(test_case): return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {})) res_dict = None + def try_rm_tcs_files(tcs=None): if tcs is None: tcs = test_cases @@ -134,7 +143,7 @@ def generator(test_case): raise if try_num == RETRIES: - report_warning(u'Failed due to network errors, skipping...') + report_warning('Failed due to network errors, skipping...') return print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num)) @@ -206,7 +215,7 @@ def generator(test_case): return test_template -### And add them to TestDownload +# And add them to TestDownload for n, test_case in enumerate(defs): test_method = generator(test_case) tname = 'test_' + str(test_case['name']) diff --git a/test/test_subtitles.py b/test/test_subtitles.py index 94e3290..7c4cd82 100644 --- a/test/test_subtitles.py +++ b/test/test_subtitles.py @@ -23,6 +23,7 @@ from youtube_dl.extractor import ( class BaseTestSubtitles(unittest.TestCase): url = None IE = None + def setUp(self): self.DL = FakeYDL() self.ie = self.IE(self.DL) @@ -237,7 +238,7 @@ class TestVimeoSubtitles(BaseTestSubtitles): def test_subtitles(self): self.DL.params['writesubtitles'] = True subtitles = self.getSubtitles() - self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888') + self.assertEqual(md5(subtitles['en']), '26399116d23ae3cf2c087cea94bc43b4') def test_subtitles_lang(self): self.DL.params['writesubtitles'] = True diff --git a/test/test_unicode_literals.py b/test/test_unicode_literals.py index a4ba7ba..2cc431b 100644 --- a/test/test_unicode_literals.py +++ b/test/test_unicode_literals.py @@ -9,14 +9,13 @@ rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) IGNORED_FILES = [ 'setup.py', # http://bugs.python.org/issue13943 + 'conf.py', + 'buildserver.py', ] class TestUnicodeLiterals(unittest.TestCase): def test_all_files(self): - print('Skipping this test (not yet fully implemented)') - return - for dirpath, _, filenames in os.walk(rootDir): for basename in filenames: if not basename.endswith('.py'): @@ -30,10 +29,10 @@ class TestUnicodeLiterals(unittest.TestCase): if "'" not in code and '"' not in code: continue - imps = 'from __future__ import unicode_literals' - self.assertTrue( - imps in code, - ' %s missing in %s' % (imps, fn)) + self.assertRegexpMatches( + code, + r'(?:#.*\n*)?from __future__ import (?:[a-z_]+,\s*)*unicode_literals', + 'unicode_literals import missing in %s' % fn) m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code) if m is not None: diff --git a/test/test_utils.py b/test/test_utils.py index 0fa8731..baa3a21 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -45,8 +45,9 @@ from youtube_dl.utils import ( escape_rfc3986, escape_url, js_to_json, - 
get_filesystem_encoding, intlist_to_bytes, + args_to_str, + parse_filesize, ) @@ -119,16 +120,16 @@ class TestUtil(unittest.TestCase): self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7]) self.assertEqual(orderedSet([]), []) self.assertEqual(orderedSet([1]), [1]) - #keep the list ordered + # keep the list ordered self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1]) def test_unescape_html(self): self.assertEqual(unescapeHTML('%20;'), '%20;') self.assertEqual( unescapeHTML('é'), 'é') - + def test_daterange(self): - _20century = DateRange("19000101","20000101") + _20century = DateRange("19000101", "20000101") self.assertFalse("17890714" in _20century) _ac = DateRange("00010101") self.assertTrue("19690721" in _ac) @@ -170,7 +171,7 @@ class TestUtil(unittest.TestCase): self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3') def test_smuggle_url(self): - data = {u"ö": u"ö", u"abc": [3]} + data = {"ö": "ö", "abc": [3]} url = 'https://foo.bar/baz?x=y#a' smug_url = smuggle_url(url, data) unsmug_url, unsmug_data = unsmuggle_url(smug_url) @@ -361,5 +362,20 @@ class TestUtil(unittest.TestCase): intlist_to_bytes([0, 1, 127, 128, 255]), b'\x00\x01\x7f\x80\xff') + def test_args_to_str(self): + self.assertEqual( + args_to_str(['foo', 'ba/r', '-baz', '2 be', '']), + 'foo ba/r -baz \'2 be\' \'\'' + ) + + def test_parse_filesize(self): + self.assertEqual(parse_filesize(None), None) + self.assertEqual(parse_filesize(''), None) + self.assertEqual(parse_filesize('91 B'), 91) + self.assertEqual(parse_filesize('foobar'), None) + self.assertEqual(parse_filesize('2 MiB'), 2097152) + self.assertEqual(parse_filesize('5 GB'), 5000000000) + self.assertEqual(parse_filesize('1.2Tb'), 1200000000000) + if __name__ == '__main__': unittest.main() diff --git a/test/test_write_annotations.py b/test/test_write_annotations.py index eac53b2..780636c 100644 --- a/test/test_write_annotations.py +++ b/test/test_write_annotations.py @@ -1,5 +1,6 @@ #!/usr/bin/env python # coding: utf-8 +from __future__ import unicode_literals # Allow direct execution import os @@ -31,19 +32,18 @@ params = get_params({ }) - TEST_ID = 'gr51aVj-mLg' ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml' EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label'] + class TestAnnotations(unittest.TestCase): def setUp(self): # Clear old files self.tearDown() - def test_info_json(self): - expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text. + expected = list(EXPECTED_ANNOTATIONS) # Two annotations could have the same text. ie = youtube_dl.extractor.YoutubeIE() ydl = YoutubeDL(params) ydl.add_info_extractor(ie) @@ -51,7 +51,7 @@ class TestAnnotations(unittest.TestCase): self.assertTrue(os.path.exists(ANNOTATIONS_FILE)) annoxml = None with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof: - annoxml = xml.etree.ElementTree.parse(annof) + annoxml = xml.etree.ElementTree.parse(annof) self.assertTrue(annoxml is not None, 'Failed to parse annotations XML') root = annoxml.getroot() self.assertEqual(root.tag, 'document') @@ -59,18 +59,17 @@ class TestAnnotations(unittest.TestCase): self.assertEqual(annotationsTag.tag, 'annotations') annotations = annotationsTag.findall('annotation') - #Not all the annotations have TEXT children and the annotations are returned unsorted. + # Not all the annotations have TEXT children and the annotations are returned unsorted. 
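The new `parse_filesize` test cases above pin down the unit semantics: 'MiB'-style suffixes are powers of 1024, 'MB'-style are powers of 1000, matching is case-insensitive, and unparseable input maps to None. This is a simplified model consistent with those cases, not `youtube_dl.utils.parse_filesize` itself:

```python
import re

_MULTIPLIERS = {
    'B': 1,
    'KB': 1000, 'MB': 1000 ** 2, 'GB': 1000 ** 3, 'TB': 1000 ** 4,
    'KIB': 1024, 'MIB': 1024 ** 2, 'GIB': 1024 ** 3, 'TIB': 1024 ** 4,
}


def parse_filesize_sketch(s):
    if not s:
        return None
    m = re.match(r'\s*(?P<num>\d+(?:\.\d+)?)\s*(?P<unit>[A-Za-z]+)\s*$', s)
    if m is None:
        return None
    mult = _MULTIPLIERS.get(m.group('unit').upper())
    if mult is None:
        return None
    return int(round(float(m.group('num')) * mult))


assert parse_filesize_sketch('2 MiB') == 2097152
assert parse_filesize_sketch('5 GB') == 5000000000
assert parse_filesize_sketch('1.2Tb') == 1200000000000
```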
for a in annotations: - self.assertEqual(a.tag, 'annotation') - if a.get('type') == 'text': - textTag = a.find('TEXT') - text = textTag.text - self.assertTrue(text in expected) #assertIn only added in python 2.7 - #remove the first occurance, there could be more than one annotation with the same text - expected.remove(text) - #We should have seen (and removed) all the expected annotation texts. + self.assertEqual(a.tag, 'annotation') + if a.get('type') == 'text': + textTag = a.find('TEXT') + text = textTag.text + self.assertTrue(text in expected) # assertIn only added in python 2.7 + # remove the first occurance, there could be more than one annotation with the same text + expected.remove(text) + # We should have seen (and removed) all the expected annotation texts. self.assertEqual(len(expected), 0, 'Not all expected annotations were found.') - def tearDown(self): try_rm(ANNOTATIONS_FILE) diff --git a/test/test_write_info_json.py b/test/test_write_info_json.py index 90426a5..0396ef2 100644 --- a/test/test_write_info_json.py +++ b/test/test_write_info_json.py @@ -1,5 +1,6 @@ #!/usr/bin/env python # coding: utf-8 +from __future__ import unicode_literals # Allow direct execution import os @@ -32,7 +33,7 @@ params = get_params({ TEST_ID = 'BaW_jenozKc' INFO_JSON_FILE = TEST_ID + '.info.json' DESCRIPTION_FILE = TEST_ID + '.mp4.description' -EXPECTED_DESCRIPTION = u'''test chars: "'/\ä↭𝕐 +EXPECTED_DESCRIPTION = '''test chars: "'/\ä↭𝕐 test URL: https://github.com/rg3/youtube-dl/issues/1892 This is a test video for youtube-dl. @@ -53,11 +54,11 @@ class TestInfoJSON(unittest.TestCase): self.assertTrue(os.path.exists(INFO_JSON_FILE)) with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf: jd = json.load(jsonf) - self.assertEqual(jd['upload_date'], u'20121002') + self.assertEqual(jd['upload_date'], '20121002') self.assertEqual(jd['description'], EXPECTED_DESCRIPTION) self.assertEqual(jd['id'], TEST_ID) self.assertEqual(jd['extractor'], 'youtube') - self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''') + self.assertEqual(jd['title'], '''youtube-dl test video "'/\ä↭𝕐''') self.assertEqual(jd['uploader'], 'Philipp Hagemeister') self.assertTrue(os.path.exists(DESCRIPTION_FILE)) diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py index 410f9ed..c889b6f 100644 --- a/test/test_youtube_lists.py +++ b/test/test_youtube_lists.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import unicode_literals # Allow direct execution import os @@ -12,10 +13,6 @@ from test.helper import FakeYDL from youtube_dl.extractor import ( YoutubePlaylistIE, YoutubeIE, - YoutubeChannelIE, - YoutubeShowIE, - YoutubeTopListIE, - YoutubeSearchURLIE, ) @@ -31,7 +28,7 @@ class TestYoutubeLists(unittest.TestCase): result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re') self.assertEqual(result['_type'], 'url') self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg') - + def test_youtube_course(self): dl = FakeYDL() ie = YoutubePlaylistIE(dl) diff --git a/youtube-dl b/youtube-dl index 9cb5e7a..49ec013 100755 Binary files a/youtube-dl and b/youtube-dl differ diff --git a/youtube-dl.1 b/youtube-dl.1 index 2bcdc01..08e2ae3 100644 --- a/youtube-dl.1 +++ b/youtube-dl.1 @@ -9,7 +9,7 @@ youtube\-dl \- download videos from youtube.com or other video platforms .PP \f[B]youtube\-dl\f[] is a small command\-line program to download videos from YouTube.com and a few more sites. 
-It requires the Python interpreter, version 2.6, 2.7, or 3.3+, and it is +It requires the Python interpreter, version 2.6, 2.7, or 3.2+, and it is not platform specific. It should work on your Unix box, on Windows or on Mac OS X. It is released to the public domain, which means you can modify it, @@ -80,7 +80,8 @@ redistribute it or use it however you like. \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ COUNT\ views \-\-max\-views\ COUNT\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Do\ not\ download\ any\ videos\ with\ more\ than \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ COUNT\ views -\-\-no\-playlist\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ download\ only\ the\ currently\ playing\ video +\-\-no\-playlist\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ If\ the\ URL\ refers\ to\ a\ video\ and\ a +\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ playlist,\ download\ only\ the\ video. \-\-age\-limit\ YEARS\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ download\ only\ videos\ suitable\ for\ the\ given \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ age \-\-download\-archive\ FILE\ \ \ \ \ \ \ \ \ \ Download\ only\ videos\ not\ listed\ in\ the @@ -647,14 +648,15 @@ class\ YourExtractorIE(InfoExtractor): \ \ \ \ def\ _real_extract(self,\ url): \ \ \ \ \ \ \ \ video_id\ =\ self._match_id(url) +\ \ \ \ \ \ \ \ webpage\ =\ self._download_webpage(url,\ video_id) \ \ \ \ \ \ \ \ #\ TODO\ more\ code\ goes\ here,\ for\ example\ ... -\ \ \ \ \ \ \ \ webpage\ =\ self._download_webpage(url,\ video_id) \ \ \ \ \ \ \ \ title\ =\ self._html_search_regex(r\[aq]
<h1>(.*?)</h1>
    \[aq],\ webpage,\ \[aq]title\[aq]) \ \ \ \ \ \ \ \ return\ { \ \ \ \ \ \ \ \ \ \ \ \ \[aq]id\[aq]:\ video_id, \ \ \ \ \ \ \ \ \ \ \ \ \[aq]title\[aq]:\ title, +\ \ \ \ \ \ \ \ \ \ \ \ \[aq]description\[aq]:\ self._og_search_description(webpage), \ \ \ \ \ \ \ \ \ \ \ \ #\ TODO\ more\ properties\ (see\ youtube_dl/extractor/common.py) \ \ \ \ \ \ \ \ } \f[] diff --git a/youtube-dl.fish b/youtube-dl.fish index 461fd1b..923fd60 100644 --- a/youtube-dl.fish +++ b/youtube-dl.fish @@ -24,7 +24,7 @@ complete --command youtube-dl --long-option datebefore --description 'download o complete --command youtube-dl --long-option dateafter --description 'download only videos uploaded on or after this date (i.e. inclusive)' complete --command youtube-dl --long-option min-views --description 'Do not download any videos with less than COUNT views' complete --command youtube-dl --long-option max-views --description 'Do not download any videos with more than COUNT views' -complete --command youtube-dl --long-option no-playlist --description 'download only the currently playing video' +complete --command youtube-dl --long-option no-playlist --description 'If the URL refers to a video and a playlist, download only the video.' complete --command youtube-dl --long-option age-limit --description 'download only videos suitable for the given age' complete --command youtube-dl --long-option download-archive --description 'Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.' --require-parameter complete --command youtube-dl --long-option include-ads --description 'Download advertisements as well (experimental)' diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index fde026f..21c7c29 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -60,6 +60,7 @@ from .utils import ( write_string, YoutubeDLHandler, prepend_extension, + args_to_str, ) from .cache import Cache from .extractor import get_info_extractor, gen_extractors @@ -253,6 +254,22 @@ class YoutubeDL(object): self.print_debug_header() self.add_default_info_extractors() + def warn_if_short_id(self, argv): + # short YouTube ID starting with dash? + idxs = [ + i for i, a in enumerate(argv) + if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] + if idxs: + correct_argv = ( + ['youtube-dl'] + + [a for i, a in enumerate(argv) if i not in idxs] + + ['--'] + [argv[i] for i in idxs] + ) + self.report_warning( + 'Long argument string detected. 
' + 'Use -- to separate parameters and URLs, like this:\n%s\n' % + args_to_str(correct_argv)) + def add_info_extractor(self, ie): """Add an InfoExtractor object to the end of the list.""" self._ies.append(ie) @@ -297,7 +314,7 @@ class YoutubeDL(object): self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') - for _ in range(line_count)) + for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): @@ -534,7 +551,7 @@ class YoutubeDL(object): try: ie_result = ie.extract(url) - if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) + if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) break if isinstance(ie_result, list): # Backwards compatibility: old IE result format @@ -547,7 +564,7 @@ class YoutubeDL(object): return self.process_ie_result(ie_result, download, extra_info) else: return ie_result - except ExtractorError as de: # An error we somewhat expected + except ExtractorError as de: # An error we somewhat expected self.report_error(compat_str(de), de.format_traceback()) break except MaxDownloadsReached: @@ -682,14 +699,17 @@ class YoutubeDL(object): self.report_warning( 'Extractor %s returned a compat_list result. ' 'It needs to be updated.' % ie_result.get('extractor')) + def _fixup(r): - self.add_extra_info(r, + self.add_extra_info( + r, { 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], - }) + } + ) return r ie_result['entries'] = [ self.process_ie_result(_fixup(r), download, extra_info) @@ -839,14 +859,14 @@ class YoutubeDL(object): # Two formats have been requested like '137+139' format_1, format_2 = rf.split('+') formats_info = (self.select_format(format_1, formats), - self.select_format(format_2, formats)) + self.select_format(format_2, formats)) if all(formats_info): # The first format must contain the video and the # second the audio if formats_info[0].get('vcodec') == 'none': self.report_error('The first format must ' - 'contain the video, try using ' - '"-f %s+%s"' % (format_2, format_1)) + 'contain the video, try using ' + '"-f %s+%s"' % (format_2, format_1)) return selected_format = { 'requested_formats': formats_info, @@ -992,7 +1012,7 @@ class YoutubeDL(object): else: self.to_screen('[info] Writing video subtitles to: ' + sub_filename) with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: - subfile.write(sub) + subfile.write(sub) except (OSError, IOError): self.report_error('Cannot write subtitles file ' + sub_filename) return @@ -1024,10 +1044,10 @@ class YoutubeDL(object): with open(thumb_filename, 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) self.to_screen('[%s] %s: Writing thumbnail to: %s' % - (info_dict['extractor'], info_dict['id'], thumb_filename)) + (info_dict['extractor'], info_dict['id'], thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_warning('Unable to download thumbnail "%s": %s' % - (info_dict['thumbnail'], compat_str(err))) + (info_dict['thumbnail'], compat_str(err))) if not self.params.get('skip_download', False): if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)): @@ -1048,8 +1068,8 @@ class YoutubeDL(object): if not 
merger._executable: postprocessors = [] self.report_warning('You have requested multiple ' - 'formats but ffmpeg or avconv are not installed.' - ' The formats won\'t be merged') + 'formats but ffmpeg or avconv are not installed.' + ' The formats won\'t be merged') else: postprocessors = [merger] for f in info_dict['requested_formats']: @@ -1093,7 +1113,7 @@ class YoutubeDL(object): for url in url_list: try: - #It also downloads the videos + # It also downloads the videos res = self.extract_info(url) except UnavailableVideoError: self.report_error('unable to download video') diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py index c1323b4..77b3384 100644 --- a/youtube_dl/__init__.py +++ b/youtube_dl/__init__.py @@ -76,10 +76,10 @@ def _real_main(argv=None): if opts.headers is not None: for h in opts.headers: if h.find(':', 1) < 0: - parser.error('wrong header formatting, it should be key:value, not "%s"'%h) + parser.error('wrong header formatting, it should be key:value, not "%s"' % h) key, value = h.split(':', 2) if opts.verbose: - write_string('[debug] Adding header from command line option %s:%s\n'%(key, value)) + write_string('[debug] Adding header from command line option %s:%s\n' % (key, value)) std_headers[key] = value # Dump user agent @@ -128,7 +128,6 @@ def _real_main(argv=None): compat_print(desc) sys.exit(0) - # Conflicting, missing and erroneous options if opts.usenetrc and (opts.username is not None or opts.password is not None): parser.error('using .netrc conflicts with giving username/password') @@ -190,21 +189,21 @@ def _real_main(argv=None): # --all-sub automatically sets --write-sub if --write-auto-sub is not given # this was the old behaviour if only --all-sub was given. - if opts.allsubtitles and (opts.writeautomaticsub == False): + if opts.allsubtitles and not opts.writeautomaticsub: opts.writesubtitles = True if sys.version_info < (3,): # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems) if opts.outtmpl is not None: opts.outtmpl = opts.outtmpl.decode(preferredencoding()) - outtmpl =((opts.outtmpl is not None and opts.outtmpl) - or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') - or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') - or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') - or (opts.usetitle and '%(title)s-%(id)s.%(ext)s') - or (opts.useid and '%(id)s.%(ext)s') - or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') - or DEFAULT_OUTTMPL) + outtmpl = ((opts.outtmpl is not None and opts.outtmpl) + or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') + or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') + or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') + or (opts.usetitle and '%(title)s-%(id)s.%(ext)s') + or (opts.useid and '%(id)s.%(ext)s') + or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') + or DEFAULT_OUTTMPL) if not os.path.splitext(outtmpl)[1] and opts.extractaudio: parser.error('Cannot download a video and extract audio into the same' ' file! Use "{0}.%(ext)s" instead of "{0}" as the output' @@ -317,7 +316,6 @@ def _real_main(argv=None): ydl.add_post_processor(FFmpegAudioFixPP()) ydl.add_post_processor(AtomicParsleyPP()) - # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way. # So if the user is able to remove the file before your postprocessor runs it might cause a few problems. 
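The `warn_if_short_id` hunk above detects YouTube IDs that begin with a dash, which the option parser would otherwise treat as a flag, and suggests separating them with `--`. A small standalone sketch of that check; the sample ID in the argv is made up:

```python
import re


def split_short_ids(argv):
    """Split argv using the same dash-ID pattern as the patch."""
    pattern = re.compile(r'^-[0-9A-Za-z_-]{10}$')
    dash_ids = [a for a in argv if pattern.match(a)]
    args = [a for a in argv if not pattern.match(a)]
    return args, dash_ids


args, ids = split_short_ids(['--format', 'mp4', '-abcde12345'])
print(['youtube-dl'] + args + ['--'] + ids)
# ['youtube-dl', '--format', 'mp4', '--', '-abcde12345']
```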
if opts.exec_cmd: @@ -334,11 +332,12 @@ def _real_main(argv=None): # Maybe do nothing if (len(all_urls) < 1) and (opts.load_info_filename is None): - if not (opts.update_self or opts.rm_cachedir): - parser.error('you must provide at least one URL') - else: + if opts.update_self or opts.rm_cachedir: sys.exit() + ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv) + parser.error('you must provide at least one URL') + try: if opts.load_info_filename is not None: retcode = ydl.download_with_info_file(opts.load_info_filename) diff --git a/youtube_dl/__main__.py b/youtube_dl/__main__.py index 3fe29c9..65a0f89 100755 --- a/youtube_dl/__main__.py +++ b/youtube_dl/__main__.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import unicode_literals # Execute with # $ python youtube_dl/__main__.py (2.6+) diff --git a/youtube_dl/aes.py b/youtube_dl/aes.py index e9c5e21..5efd0f8 100644 --- a/youtube_dl/aes.py +++ b/youtube_dl/aes.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + __all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text'] import base64 @@ -7,10 +9,11 @@ from .utils import bytes_to_intlist, intlist_to_bytes BLOCK_SIZE_BYTES = 16 + def aes_ctr_decrypt(data, key, counter): """ Decrypt with aes in counter mode - + @param {int[]} data cipher @param {int[]} key 16/24/32-Byte cipher key @param {instance} counter Instance whose next_value function (@returns {int[]} 16-Byte block) @@ -19,23 +22,24 @@ def aes_ctr_decrypt(data, key, counter): """ expanded_key = key_expansion(key) block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES)) - - decrypted_data=[] + + decrypted_data = [] for i in range(block_count): counter_block = counter.next_value() - block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES] - block += [0]*(BLOCK_SIZE_BYTES - len(block)) - + block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES] + block += [0] * (BLOCK_SIZE_BYTES - len(block)) + cipher_counter_block = aes_encrypt(counter_block, expanded_key) decrypted_data += xor(block, cipher_counter_block) decrypted_data = decrypted_data[:len(data)] - + return decrypted_data + def aes_cbc_decrypt(data, key, iv): """ Decrypt with aes in CBC mode - + @param {int[]} data cipher @param {int[]} key 16/24/32-Byte cipher key @param {int[]} iv 16-Byte IV @@ -43,94 +47,98 @@ def aes_cbc_decrypt(data, key, iv): """ expanded_key = key_expansion(key) block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES)) - - decrypted_data=[] + + decrypted_data = [] previous_cipher_block = iv for i in range(block_count): - block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES] - block += [0]*(BLOCK_SIZE_BYTES - len(block)) - + block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES] + block += [0] * (BLOCK_SIZE_BYTES - len(block)) + decrypted_block = aes_decrypt(block, expanded_key) decrypted_data += xor(decrypted_block, previous_cipher_block) previous_cipher_block = block decrypted_data = decrypted_data[:len(data)] - + return decrypted_data + def key_expansion(data): """ Generate key schedule - + @param {int[]} data 16/24/32-Byte cipher key - @returns {int[]} 176/208/240-Byte expanded key + @returns {int[]} 176/208/240-Byte expanded key """ - data = data[:] # copy + data = data[:] # copy rcon_iteration = 1 key_size_bytes = len(data) expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES - + while len(data) < expanded_key_size_bytes: temp = data[-4:] temp = key_schedule_core(temp, rcon_iteration) rcon_iteration += 1 - data += xor(temp, 
data[-key_size_bytes : 4-key_size_bytes]) - + data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes]) + for _ in range(3): temp = data[-4:] - data += xor(temp, data[-key_size_bytes : 4-key_size_bytes]) - + data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes]) + if key_size_bytes == 32: temp = data[-4:] temp = sub_bytes(temp) - data += xor(temp, data[-key_size_bytes : 4-key_size_bytes]) - - for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0): + data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes]) + + for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0): temp = data[-4:] - data += xor(temp, data[-key_size_bytes : 4-key_size_bytes]) + data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes]) data = data[:expanded_key_size_bytes] - + return data + def aes_encrypt(data, expanded_key): """ Encrypt one block with aes - + @param {int[]} data 16-Byte state - @param {int[]} expanded_key 176/208/240-Byte expanded key + @param {int[]} expanded_key 176/208/240-Byte expanded key @returns {int[]} 16-Byte cipher """ rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1 data = xor(data, expanded_key[:BLOCK_SIZE_BYTES]) - for i in range(1, rounds+1): + for i in range(1, rounds + 1): data = sub_bytes(data) data = shift_rows(data) if i != rounds: data = mix_columns(data) - data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]) + data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]) return data + def aes_decrypt(data, expanded_key): """ Decrypt one block with aes - + @param {int[]} data 16-Byte cipher @param {int[]} expanded_key 176/208/240-Byte expanded key @returns {int[]} 16-Byte state """ rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1 - + for i in range(rounds, 0, -1): - data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]) + data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]) if i != rounds: data = mix_columns_inv(data) data = shift_rows_inv(data) data = sub_bytes_inv(data) data = xor(data, expanded_key[:BLOCK_SIZE_BYTES]) - + return data + def aes_decrypt_text(data, password, key_size_bytes): """ Decrypt text @@ -138,33 +146,34 @@ def aes_decrypt_text(data, password, key_size_bytes): - The cipher key is retrieved by encrypting the first 16 Byte of 'password' with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's) - Mode of operation is 'counter' - + @param {str} data Base64 encoded string @param {str,unicode} password Password (will be encoded with utf-8) @param {int} key_size_bytes Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit @returns {str} Decrypted data """ NONCE_LENGTH_BYTES = 8 - + data = bytes_to_intlist(base64.b64decode(data)) password = bytes_to_intlist(password.encode('utf-8')) - - key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password)) + + key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password)) key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES) - + nonce = data[:NONCE_LENGTH_BYTES] cipher = data[NONCE_LENGTH_BYTES:] - + class Counter: - __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES) + __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES) + def next_value(self): temp = self.__value self.__value = inc(self.__value) return temp - + decrypted_data = aes_ctr_decrypt(cipher, key, Counter()) plaintext = intlist_to_bytes(decrypted_data) - + return plaintext RCON = (0x8d, 
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36) @@ -200,14 +209,14 @@ SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d) -MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1), - (0x1,0x2,0x3,0x1), - (0x1,0x1,0x2,0x3), - (0x3,0x1,0x1,0x2)) -MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9), - (0x9,0xE,0xB,0xD), - (0xD,0x9,0xE,0xB), - (0xB,0xD,0x9,0xE)) +MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1), + (0x1, 0x2, 0x3, 0x1), + (0x1, 0x1, 0x2, 0x3), + (0x3, 0x1, 0x1, 0x2)) +MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9), + (0x9, 0xE, 0xB, 0xD), + (0xD, 0x9, 0xE, 0xB), + (0xB, 0xD, 0x9, 0xE)) RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35, 0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA, 0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31, @@ -241,30 +250,37 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7 0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5, 0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07) + def sub_bytes(data): return [SBOX[x] for x in data] + def sub_bytes_inv(data): return [SBOX_INV[x] for x in data] + def rotate(data): return data[1:] + [data[0]] + def key_schedule_core(data, rcon_iteration): data = rotate(data) data = sub_bytes(data) data[0] = data[0] ^ RCON[rcon_iteration] - + return data + def xor(data1, data2): - return [x^y for x, y in zip(data1, data2)] + return [x ^ y for x, y in zip(data1, data2)] + def rijndael_mul(a, b): - if(a==0 or b==0): + if(a == 0 or b == 0): return 0 return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF] + def mix_column(data, matrix): data_mixed = [] for row in range(4): @@ -275,33 +291,38 @@ def mix_column(data, matrix): data_mixed.append(mixed) return data_mixed + def mix_columns(data, matrix=MIX_COLUMN_MATRIX): data_mixed = [] for i in range(4): - column = data[i*4 : (i+1)*4] + column = data[i * 4: (i + 1) * 4] data_mixed += mix_column(column, matrix) return data_mixed + def mix_columns_inv(data): return mix_columns(data, MIX_COLUMN_MATRIX_INV) + def shift_rows(data): data_shifted = [] for column in range(4): for row in range(4): - data_shifted.append( data[((column + row) & 0b11) * 4 + row] ) + data_shifted.append(data[((column + row) & 0b11) * 4 + row]) return data_shifted + def shift_rows_inv(data): data_shifted = [] for column in range(4): for row in range(4): - data_shifted.append( data[((column - row) & 0b11) * 4 + row] ) + data_shifted.append(data[((column - row) & 0b11) * 4 + row]) return data_shifted + def inc(data): - data = data[:] # copy - for i in range(len(data)-1,-1,-1): + data = data[:] # copy + for i in range(len(data) - 1, -1, -1): if data[i] == 255: data[i] = 0 else: diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py index 9d33a8e..2759668 100644 --- a/youtube_dl/compat.py +++ b/youtube_dl/compat.py @@ -3,53 +3,54 @@ from __future__ import unicode_literals import getpass import optparse import os +import re import subprocess import sys try: import urllib.request as compat_urllib_request -except ImportError: # 
Python 2 +except ImportError: # Python 2 import urllib2 as compat_urllib_request try: import urllib.error as compat_urllib_error -except ImportError: # Python 2 +except ImportError: # Python 2 import urllib2 as compat_urllib_error try: import urllib.parse as compat_urllib_parse -except ImportError: # Python 2 +except ImportError: # Python 2 import urllib as compat_urllib_parse try: from urllib.parse import urlparse as compat_urllib_parse_urlparse -except ImportError: # Python 2 +except ImportError: # Python 2 from urlparse import urlparse as compat_urllib_parse_urlparse try: import urllib.parse as compat_urlparse -except ImportError: # Python 2 +except ImportError: # Python 2 import urlparse as compat_urlparse try: import http.cookiejar as compat_cookiejar -except ImportError: # Python 2 +except ImportError: # Python 2 import cookielib as compat_cookiejar try: import html.entities as compat_html_entities -except ImportError: # Python 2 +except ImportError: # Python 2 import htmlentitydefs as compat_html_entities try: import html.parser as compat_html_parser -except ImportError: # Python 2 +except ImportError: # Python 2 import HTMLParser as compat_html_parser try: import http.client as compat_http_client -except ImportError: # Python 2 +except ImportError: # Python 2 import httplib as compat_http_client try: @@ -110,12 +111,12 @@ except ImportError: try: from urllib.parse import parse_qs as compat_parse_qs -except ImportError: # Python 2 +except ImportError: # Python 2 # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. # Python 2's version is apparently totally broken def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, - encoding='utf-8', errors='replace'): + encoding='utf-8', errors='replace'): qs, _coerce_result = qs, unicode pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] r = [] @@ -144,10 +145,10 @@ except ImportError: # Python 2 return r def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, - encoding='utf-8', errors='replace'): + encoding='utf-8', errors='replace'): parsed_result = {} pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, - encoding=encoding, errors=errors) + encoding=encoding, errors=errors) for name, value in pairs: if name in parsed_result: parsed_result[name].append(value) @@ -156,12 +157,12 @@ except ImportError: # Python 2 return parsed_result try: - compat_str = unicode # Python 2 + compat_str = unicode # Python 2 except NameError: compat_str = str try: - compat_chr = unichr # Python 2 + compat_chr = unichr # Python 2 except NameError: compat_chr = chr @@ -174,12 +175,17 @@ try: from shlex import quote as shlex_quote except ImportError: # Python < 3.3 def shlex_quote(s): - return "'" + s.replace("'", "'\"'\"'") + "'" + if re.match(r'^[-_\w./]+$', s): + return s + else: + return "'" + s.replace("'", "'\"'\"'") + "'" def compat_ord(c): - if type(c) is int: return c - else: return ord(c) + if type(c) is int: + return c + else: + return ord(c) if sys.version_info >= (3, 0): @@ -250,7 +256,7 @@ else: drive = '' userhome = os.path.join(drive, compat_getenv('HOMEPATH')) - if i != 1: #~user + if i != 1: # ~user userhome = os.path.join(os.path.dirname(userhome), path[1:i]) return userhome + path[i:] @@ -264,7 +270,7 @@ if sys.version_info < (3, 0): print(s.encode(preferredencoding(), 'xmlcharrefreplace')) else: def compat_print(s): - assert type(s) == type(u'') + assert isinstance(s, compat_str) print(s) diff --git a/youtube_dl/downloader/__init__.py b/youtube_dl/downloader/__init__.py 
index 3f94159..31e28df 100644 --- a/youtube_dl/downloader/__init__.py +++ b/youtube_dl/downloader/__init__.py @@ -30,3 +30,8 @@ def get_suitable_downloader(info_dict): return F4mFD else: return HttpFD + +__all__ = [ + 'get_suitable_downloader', + 'FileDownloader', +] diff --git a/youtube_dl/downloader/common.py b/youtube_dl/downloader/common.py index 7c33004..c0af50c 100644 --- a/youtube_dl/downloader/common.py +++ b/youtube_dl/downloader/common.py @@ -81,7 +81,7 @@ class FileDownloader(object): if total is None: return None dif = now - start - if current == 0 or dif < 0.001: # One millisecond + if current == 0 or dif < 0.001: # One millisecond return None rate = float(current) / dif return int((float(total) - float(current)) / rate) @@ -95,7 +95,7 @@ class FileDownloader(object): @staticmethod def calc_speed(start, now, bytes): dif = now - start - if bytes == 0 or dif < 0.001: # One millisecond + if bytes == 0 or dif < 0.001: # One millisecond return None return float(bytes) / dif @@ -108,7 +108,7 @@ class FileDownloader(object): @staticmethod def best_block_size(elapsed_time, bytes): new_min = max(bytes / 2.0, 1.0) - new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB + new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB if elapsed_time < 0.001: return int(new_max) rate = bytes / elapsed_time diff --git a/youtube_dl/downloader/f4m.py b/youtube_dl/downloader/f4m.py index b607f64..7cd22c5 100644 --- a/youtube_dl/downloader/f4m.py +++ b/youtube_dl/downloader/f4m.py @@ -55,7 +55,7 @@ class FlvReader(io.BytesIO): if size == 1: real_size = self.read_unsigned_long_long() header_end = 16 - return real_size, box_type, self.read(real_size-header_end) + return real_size, box_type, self.read(real_size - header_end) def read_asrt(self): # version @@ -180,7 +180,7 @@ def build_fragments_list(boot_info): n_frags = segment_run_entry[1] fragment_run_entry_table = boot_info['fragments'][0]['fragments'] first_frag_number = fragment_run_entry_table[0]['first'] - for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)): + for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)): res.append((1, frag_number)) return res @@ -225,13 +225,15 @@ class F4mFD(FileDownloader): self.to_screen('[download] Downloading f4m manifest') manifest = self.ydl.urlopen(man_url).read() self.report_destination(filename) - http_dl = HttpQuietDownloader(self.ydl, + http_dl = HttpQuietDownloader( + self.ydl, { 'continuedl': True, 'quiet': True, 'noprogress': True, 'test': self.params.get('test', False), - }) + } + ) doc = etree.fromstring(manifest) formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))] @@ -277,7 +279,7 @@ class F4mFD(FileDownloader): def frag_progress_hook(status): frag_total_bytes = status.get('total_bytes', 0) estimated_size = (state['downloaded_bytes'] + - (total_frags - state['frag_counter']) * frag_total_bytes) + (total_frags - state['frag_counter']) * frag_total_bytes) if status['status'] == 'finished': state['downloaded_bytes'] += frag_total_bytes state['frag_counter'] += 1 @@ -287,13 +289,13 @@ class F4mFD(FileDownloader): frag_downloaded_bytes = status['downloaded_bytes'] byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes frag_progress = self.calc_percent(frag_downloaded_bytes, - frag_total_bytes) + frag_total_bytes) progress = self.calc_percent(state['frag_counter'], total_frags) progress += frag_progress / float(total_frags) eta = self.calc_eta(start, time.time(), 
estimated_size, byte_counter) self.report_progress(progress, format_bytes(estimated_size), - status.get('speed'), eta) + status.get('speed'), eta) http_dl.add_progress_hook(frag_progress_hook) frags_filenames = [] diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py index 68eafa4..954beff 100644 --- a/youtube_dl/downloader/hls.py +++ b/youtube_dl/downloader/hls.py @@ -28,14 +28,14 @@ class HlsFD(FileDownloader): if check_executable(program, ['-version']): break else: - self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found. Please install one.') + self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.') return False cmd = [program] + args retval = subprocess.call(cmd) if retval == 0: fsize = os.path.getsize(encodeFilename(tmpfilename)) - self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize)) + self.to_screen('\r[%s] %s bytes' % (cmd[0], fsize)) self.try_rename(tmpfilename, filename) self._hook_progress({ 'downloaded_bytes': fsize, @@ -45,8 +45,8 @@ class HlsFD(FileDownloader): }) return True else: - self.to_stderr(u"\n") - self.report_error(u'%s exited with code %d' % (program, retval)) + self.to_stderr('\n') + self.report_error('%s exited with code %d' % (program, retval)) return False @@ -101,4 +101,3 @@ class NativeHlsFD(FileDownloader): }) self.try_rename(tmpfilename, filename) return True - diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py index f62555c..8491cee 100644 --- a/youtube_dl/downloader/http.py +++ b/youtube_dl/downloader/http.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import os import time @@ -106,7 +108,7 @@ class HttpFD(FileDownloader): self.report_retry(count, retries) if count > retries: - self.report_error(u'giving up after %s retries' % retries) + self.report_error('giving up after %s retries' % retries) return False data_len = data.info().get('Content-length', None) @@ -124,10 +126,10 @@ class HttpFD(FileDownloader): min_data_len = self.params.get("min_filesize", None) max_data_len = self.params.get("max_filesize", None) if min_data_len is not None and data_len < min_data_len: - self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len)) + self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len)) return False if max_data_len is not None and data_len > max_data_len: - self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len)) + self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' 
% (data_len, max_data_len)) return False data_len_str = format_bytes(data_len) @@ -151,13 +153,13 @@ class HttpFD(FileDownloader): filename = self.undo_temp_name(tmpfilename) self.report_destination(filename) except (OSError, IOError) as err: - self.report_error(u'unable to open for writing: %s' % str(err)) + self.report_error('unable to open for writing: %s' % str(err)) return False try: stream.write(data_block) except (IOError, OSError) as err: - self.to_stderr(u"\n") - self.report_error(u'unable to write data: %s' % str(err)) + self.to_stderr('\n') + self.report_error('unable to write data: %s' % str(err)) return False if not self.params.get('noresizebuffer', False): block_size = self.best_block_size(after - before, len(data_block)) @@ -188,10 +190,10 @@ class HttpFD(FileDownloader): self.slow_down(start, byte_counter - resume_len) if stream is None: - self.to_stderr(u"\n") - self.report_error(u'Did not get any data blocks') + self.to_stderr('\n') + self.report_error('Did not get any data blocks') return False - if tmpfilename != u'-': + if tmpfilename != '-': stream.close() self.report_finish(data_len_str, (time.time() - start)) if data_len is not None and byte_counter != data_len: diff --git a/youtube_dl/downloader/mplayer.py b/youtube_dl/downloader/mplayer.py index 4de7f15..c53195d 100644 --- a/youtube_dl/downloader/mplayer.py +++ b/youtube_dl/downloader/mplayer.py @@ -1,7 +1,10 @@ +from __future__ import unicode_literals + import os import subprocess from .common import FileDownloader +from ..compat import compat_subprocess_get_DEVNULL from ..utils import ( encodeFilename, ) @@ -13,19 +16,23 @@ class MplayerFD(FileDownloader): self.report_destination(filename) tmpfilename = self.temp_name(filename) - args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url] + args = [ + 'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', + '-dumpstream', '-dumpfile', tmpfilename, url] # Check for mplayer first try: - subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT) + subprocess.call( + ['mplayer', '-h'], + stdout=compat_subprocess_get_DEVNULL(), stderr=subprocess.STDOUT) except (OSError, IOError): - self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0]) + self.report_error('MMS or RTSP download detected but "%s" could not be run' % args[0]) return False # Download using mplayer. 
retval = subprocess.call(args) if retval == 0: fsize = os.path.getsize(encodeFilename(tmpfilename)) - self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize)) + self.to_screen('\r[%s] %s bytes' % (args[0], fsize)) self.try_rename(tmpfilename, filename) self._hook_progress({ 'downloaded_bytes': fsize, @@ -35,6 +42,6 @@ class MplayerFD(FileDownloader): }) return True else: - self.to_stderr(u"\n") - self.report_error(u'mplayer exited with code %d' % retval) + self.to_stderr('\n') + self.report_error('mplayer exited with code %d' % retval) return False diff --git a/youtube_dl/downloader/rtmp.py b/youtube_dl/downloader/rtmp.py index 17d9631..58ae200 100644 --- a/youtube_dl/downloader/rtmp.py +++ b/youtube_dl/downloader/rtmp.py @@ -46,13 +46,13 @@ class RtmpFD(FileDownloader): continue mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line) if mobj: - downloaded_data_len = int(float(mobj.group(1))*1024) + downloaded_data_len = int(float(mobj.group(1)) * 1024) percent = float(mobj.group(2)) if not resume_percent: resume_percent = percent resume_downloaded_data_len = downloaded_data_len - eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent) - speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len) + eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent) + speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len) data_len = None if percent > 0: data_len = int(downloaded_data_len * 100 / percent) @@ -72,7 +72,7 @@ class RtmpFD(FileDownloader): # no percent for live streams mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line) if mobj: - downloaded_data_len = int(float(mobj.group(1))*1024) + downloaded_data_len = int(float(mobj.group(1)) * 1024) time_now = time.time() speed = self.calc_speed(start, time_now, downloaded_data_len) self.report_progress_live_stream(downloaded_data_len, speed, time_now - start) @@ -88,7 +88,7 @@ class RtmpFD(FileDownloader): if not cursor_in_new_line: self.to_screen('') cursor_in_new_line = True - self.to_screen('[rtmpdump] '+line) + self.to_screen('[rtmpdump] ' + line) proc.wait() if not cursor_in_new_line: self.to_screen('') @@ -180,7 +180,7 @@ class RtmpFD(FileDownloader): while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live: prevsize = os.path.getsize(encodeFilename(tmpfilename)) self.to_screen('[rtmpdump] %s bytes' % prevsize) - time.sleep(5.0) # This seems to be needed + time.sleep(5.0) # This seems to be needed retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED]) cursize = os.path.getsize(encodeFilename(tmpfilename)) if prevsize == cursize and retval == RD_FAILED: diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py index 7497a97..0339d13 100644 --- a/youtube_dl/extractor/__init__.py +++ b/youtube_dl/extractor/__init__.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + from .abc import ABCIE from .academicearth import AcademicEarthCourseIE from .addanime import AddAnimeIE @@ -32,9 +34,11 @@ from .bilibili import BiliBiliIE from .blinkx import BlinkxIE from .bliptv import BlipTVIE, BlipTVUserIE from .bloomberg import BloombergIE +from .bpb import BpbIE from .br import BRIE from .breakcom import BreakIE from .brightcove import BrightcoveIE +from .buzzfeed import BuzzFeedIE from .byutv import BYUtvIE from .c56 import C56IE from .canal13cl import Canal13clIE @@ -238,7 +242,7 @@ from 
.muenchentv import MuenchenTVIE from .musicplayon import MusicPlayOnIE from .musicvault import MusicVaultIE from .muzu import MuzuTVIE -from .myspace import MySpaceIE +from .myspace import MySpaceIE, MySpaceAlbumIE from .myspass import MySpassIE from .myvideo import MyVideoIE from .naver import NaverIE @@ -372,6 +376,7 @@ from .syfy import SyfyIE from .sztvhu import SztvHuIE from .tagesschau import TagesschauIE from .tapely import TapelyIE +from .tass import TassIE from .teachertube import ( TeacherTubeIE, TeacherTubeUserIE, @@ -392,6 +397,7 @@ from .thesixtyone import TheSixtyOneIE from .thisav import ThisAVIE from .tinypic import TinyPicIE from .tlc import TlcIE, TlcDeIE +from .tmz import TMZIE from .tnaflix import TNAFlixIE from .thvideo import ( THVideoIE, @@ -405,6 +411,7 @@ from .trutube import TruTubeIE from .tube8 import Tube8IE from .tudou import TudouIE from .tumblr import TumblrIE +from .tunein import TuneInIE from .turbo import TurboIE from .tutv import TutvIE from .tvigle import TvigleIE @@ -454,7 +461,10 @@ from .vine import ( VineUserIE, ) from .viki import VikiIE -from .vk import VKIE +from .vk import ( + VKIE, + VKUserVideosIE, +) from .vodlocker import VodlockerIE from .vporn import VpornIE from .vrt import VRTIE @@ -478,6 +488,7 @@ from .wrzuta import WrzutaIE from .xbef import XBefIE from .xboxclips import XboxClipsIE from .xhamster import XHamsterIE +from .xminus import XMinusIE from .xnxx import XNXXIE from .xvideos import XVideosIE from .xtube import XTubeUserIE, XTubeIE @@ -508,6 +519,10 @@ from .youtube import ( YoutubeWatchLaterIE, ) from .zdf import ZDFIE +from .zingmp3 import ( + ZingMp3SongIE, + ZingMp3AlbumIE, +) _ALL_CLASSES = [ klass @@ -526,4 +541,4 @@ def gen_extractors(): def get_info_extractor(ie_name): """Returns the info extractor class with the given ie_name""" - return globals()[ie_name+'IE'] + return globals()[ie_name + 'IE'] diff --git a/youtube_dl/extractor/academicearth.py b/youtube_dl/extractor/academicearth.py index c983ef0..47313fb 100644 --- a/youtube_dl/extractor/academicearth.py +++ b/youtube_dl/extractor/academicearth.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals + import re from .common import InfoExtractor @@ -18,15 +19,14 @@ class AcademicEarthCourseIE(InfoExtractor): } def _real_extract(self, url): - m = re.match(self._VALID_URL, url) - playlist_id = m.group('id') + playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) title = self._html_search_regex( - r'
<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, u'title')
+            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, 'title')
         description = self._html_search_regex(
             r'<p class="excerpt"[^>]*?>(.*?)</p>',
-            webpage, u'description', fatal=False)
+            webpage, 'description', fatal=False)
         urls = re.findall(
             r'<li class="lecture-preview">
  • \s*?', webpage) diff --git a/youtube_dl/extractor/addanime.py b/youtube_dl/extractor/addanime.py index 11f149f..203936e 100644 --- a/youtube_dl/extractor/addanime.py +++ b/youtube_dl/extractor/addanime.py @@ -15,8 +15,7 @@ from ..utils import ( class AddAnimeIE(InfoExtractor): - - _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P[\w_]+)(?:.*)' + _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P[\w_]+)(?:.*)' _TEST = { 'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9', 'md5': '72954ea10bc979ab5e2eb288b21425a0', @@ -29,9 +28,9 @@ class AddAnimeIE(InfoExtractor): } def _real_extract(self, url): + video_id = self._match_id(url) + try: - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('video_id') webpage = self._download_webpage(url, video_id) except ExtractorError as ee: if not isinstance(ee.cause, compat_HTTPError) or \ @@ -49,7 +48,7 @@ class AddAnimeIE(InfoExtractor): r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);', redir_webpage) if av is None: - raise ExtractorError(u'Cannot find redirect math task') + raise ExtractorError('Cannot find redirect math task') av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3)) parsed_url = compat_urllib_parse_urlparse(url) diff --git a/youtube_dl/extractor/adultswim.py b/youtube_dl/extractor/adultswim.py index b4b40f2..0d05cbb 100644 --- a/youtube_dl/extractor/adultswim.py +++ b/youtube_dl/extractor/adultswim.py @@ -5,6 +5,7 @@ import re from .common import InfoExtractor + class AdultSwimIE(InfoExtractor): _VALID_URL = r'https?://video\.adultswim\.com/(?P.+?)(?:\.html)?(?:\?.*)?(?:#.*)?$' _TEST = { diff --git a/youtube_dl/extractor/aparat.py b/youtube_dl/extractor/aparat.py index 7486088..1500633 100644 --- a/youtube_dl/extractor/aparat.py +++ b/youtube_dl/extractor/aparat.py @@ -1,5 +1,4 @@ -#coding: utf-8 - +# coding: utf-8 from __future__ import unicode_literals import re @@ -26,8 +25,7 @@ class AparatIE(InfoExtractor): } def _real_extract(self, url): - m = re.match(self._VALID_URL, url) - video_id = m.group('id') + video_id = self._match_id(url) # Note: There is an easier-to-parse configuration at # http://www.aparat.com/video/video/config/videohash/%video_id @@ -40,15 +38,15 @@ class AparatIE(InfoExtractor): for i, video_url in enumerate(video_urls): req = HEADRequest(video_url) res = self._request_webpage( - req, video_id, note=u'Testing video URL %d' % i, errnote=False) + req, video_id, note='Testing video URL %d' % i, errnote=False) if res: break else: - raise ExtractorError(u'No working video URLs found') + raise ExtractorError('No working video URLs found') - title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, u'title') + title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, 'title') thumbnail = self._search_regex( - r'\s+image:\s*"([^"]+)"', webpage, u'thumbnail', fatal=False) + r'\s+image:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False) return { 'id': video_id, diff --git a/youtube_dl/extractor/appletrailers.py b/youtube_dl/extractor/appletrailers.py index 4359b88..0c01fa1 100644 --- a/youtube_dl/extractor/appletrailers.py +++ b/youtube_dl/extractor/appletrailers.py @@ -70,15 +70,17 @@ class AppleTrailersIE(InfoExtractor): uploader_id = mobj.group('company') playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc') + def fix_html(s): s = re.sub(r'(?s).*?', '', s) s = re.sub(r'', r'', s) # The ' in the onClick attributes are not escaped, it couldn't be parsed # like: 
http://trailers.apple.com/trailers/wb/gravity/ + def _clean_json(m): return 'iTunes.playURL(%s);' % m.group(1).replace('\'', ''') s = re.sub(self._JSON_RE, _clean_json, s) - s = '' + s + u'' + s = '%s' % s return s doc = self._download_xml(playlist_url, movie, transform_source=fix_html) @@ -86,7 +88,7 @@ class AppleTrailersIE(InfoExtractor): for li in doc.findall('./div/ul/li'): on_click = li.find('.//a').attrib['onClick'] trailer_info_json = self._search_regex(self._JSON_RE, - on_click, 'trailer info') + on_click, 'trailer info') trailer_info = json.loads(trailer_info_json) title = trailer_info['title'] video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower() diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py index 630b1fa..967bd86 100644 --- a/youtube_dl/extractor/ard.py +++ b/youtube_dl/extractor/ard.py @@ -192,4 +192,3 @@ class ARDIE(InfoExtractor): 'upload_date': upload_date, 'thumbnail': thumbnail, } - diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py index 3a57ce5..219631b 100644 --- a/youtube_dl/extractor/arte.py +++ b/youtube_dl/extractor/arte.py @@ -13,7 +13,7 @@ from ..utils import ( qualities, ) -# There are different sources of video in arte.tv, the extraction process +# There are different sources of video in arte.tv, the extraction process # is different for each one. The videos usually expire in 7 days, so we can't # add tests. diff --git a/youtube_dl/extractor/audiomack.py b/youtube_dl/extractor/audiomack.py index 6232d2c..eeeec76 100644 --- a/youtube_dl/extractor/audiomack.py +++ b/youtube_dl/extractor/audiomack.py @@ -12,17 +12,17 @@ class AudiomackIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P[\w/-]+)' IE_NAME = 'audiomack' _TESTS = [ - #hosted on audiomack + # hosted on audiomack { 'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary', 'info_dict': { - 'id' : 'roosh-williams/extraordinary', + 'id': 'roosh-williams/extraordinary', 'ext': 'mp3', 'title': 'Roosh Williams - Extraordinary' } }, - #hosted on soundcloud via audiomack + # hosted on soundcloud via audiomack { 'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare', 'file': '172419696.mp3', @@ -49,7 +49,7 @@ class AudiomackIE(InfoExtractor): raise ExtractorError("Unable to deduce api url of song") realurl = api_response["url"] - #Audiomack wraps a lot of soundcloud tracks in their branded wrapper + # Audiomack wraps a lot of soundcloud tracks in their branded wrapper # - if so, pass the work off to the soundcloud extractor if SoundcloudIE.suitable(realurl): return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'} diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py index de5d4fa..1ca0b7c 100644 --- a/youtube_dl/extractor/bambuser.py +++ b/youtube_dl/extractor/bambuser.py @@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor): _TEST = { 'url': 'http://bambuser.com/v/4050584', # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388 - #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641', + # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641', 'info_dict': { 'id': '4050584', 'ext': 'flv', @@ -38,7 +38,7 @@ class BambuserIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') info_url = ('http://player-c.api.bambuser.com/getVideo.json?' 
- '&api_key=%s&vid=%s' % (self._API_KEY, video_id)) + '&api_key=%s&vid=%s' % (self._API_KEY, video_id)) info_json = self._download_webpage(info_url, video_id) info = json.loads(info_json)['result'] @@ -73,10 +73,11 @@ class BambuserChannelIE(InfoExtractor): urls = [] last_id = '' for i in itertools.count(1): - req_url = ('http://bambuser.com/xhr-api/index.php?username={user}' + req_url = ( + 'http://bambuser.com/xhr-api/index.php?username={user}' '&sort=created&access_mode=0%2C1%2C2&limit={count}' '&method=broadcast&format=json&vid_older_than={last}' - ).format(user=user, count=self._STEP, last=last_id) + ).format(user=user, count=self._STEP, last=last_id) req = compat_urllib_request.Request(req_url) # Without setting this header, we wouldn't get any result req.add_header('Referer', 'http://bambuser.com/channel/%s' % user) diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py index 1b8da43..acddbc8 100644 --- a/youtube_dl/extractor/bandcamp.py +++ b/youtube_dl/extractor/bandcamp.py @@ -83,12 +83,12 @@ class BandcampIE(InfoExtractor): initial_url = mp3_info['url'] re_url = r'(?Phttp://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P.*?)&id=(?P.*?)&ts=(?P.*)$' m_url = re.match(re_url, initial_url) - #We build the url we will use to get the final track url + # We build the url we will use to get the final track url # This url is build in Bandcamp in the script download_bunde_*.js request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts')) final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url') # If we could correctly generate the .rand field the url would be - #in the "download_url" key + # in the "download_url" key final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1) return { diff --git a/youtube_dl/extractor/bbccouk.py b/youtube_dl/extractor/bbccouk.py index 75e608f..beb6cfc 100644 --- a/youtube_dl/extractor/bbccouk.py +++ b/youtube_dl/extractor/bbccouk.py @@ -1,9 +1,11 @@ from __future__ import unicode_literals import re +import xml.etree.ElementTree from .subtitles import SubtitlesInfoExtractor from ..utils import ExtractorError +from ..compat import compat_HTTPError class BBCCoUkIE(SubtitlesInfoExtractor): @@ -55,7 +57,22 @@ class BBCCoUkIE(SubtitlesInfoExtractor): 'skip_download': True, }, 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', - } + }, + { + 'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion', + 'info_dict': { + 'id': 'b03k3pb7', + 'ext': 'flv', + 'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction", + 'description': '2. 
Invasion', + 'duration': 3600, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', + }, ] def _extract_asx_playlist(self, connection, programme_id): @@ -102,6 +119,10 @@ class BBCCoUkIE(SubtitlesInfoExtractor): return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item') def _extract_medias(self, media_selection): + error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error') + if error is not None: + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True) return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media') def _extract_connections(self, media): @@ -158,54 +179,73 @@ class BBCCoUkIE(SubtitlesInfoExtractor): subtitles[lang] = srt return subtitles - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - group_id = mobj.group('id') - - webpage = self._download_webpage(url, group_id, 'Downloading video page') - if re.search(r'id="emp-error" class="notinuk">', webpage): - raise ExtractorError('Currently BBC iPlayer TV programmes are available to play in the UK only', - expected=True) - - playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id, - 'Downloading playlist XML') - - no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems') - if no_items is not None: - reason = no_items.get('reason') - if reason == 'preAvailability': - msg = 'Episode %s is not yet available' % group_id - elif reason == 'postAvailability': - msg = 'Episode %s is no longer available' % group_id + def _download_media_selector(self, programme_id): + try: + media_selection = self._download_xml( + 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id, + programme_id, 'Downloading media selection XML') + except ExtractorError as ee: + if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403: + media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().encode('utf-8')) else: - msg = 'Episode %s is not available: %s' % (group_id, reason) - raise ExtractorError(msg, expected=True) + raise formats = [] subtitles = None - for item in self._extract_items(playlist): - kind = item.get('kind') - if kind != 'programme' and kind != 'radioProgramme': - continue - title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text - description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text + for media in self._extract_medias(media_selection): + kind = media.get('kind') + if kind == 'audio': + formats.extend(self._extract_audio(media, programme_id)) + elif kind == 'video': + formats.extend(self._extract_video(media, programme_id)) + elif kind == 'captions': + subtitles = self._extract_captions(media, programme_id) - programme_id = item.get('identifier') - duration = int(item.get('duration')) + return formats, subtitles - media_selection = self._download_xml( - 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id, - programme_id, 'Downloading media selection XML') + def _real_extract(self, url): + group_id = self._match_id(url) + + webpage = self._download_webpage(url, group_id, 'Downloading video page') - for media in self._extract_medias(media_selection): - kind = media.get('kind') - if kind == 'audio': - formats.extend(self._extract_audio(media, programme_id)) - elif kind == 'video': - formats.extend(self._extract_video(media, 
programme_id)) - elif kind == 'captions': - subtitles = self._extract_captions(media, programme_id) + programme_id = self._search_regex( + r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False) + if programme_id: + player = self._download_json( + 'http://www.bbc.co.uk/iplayer/episode/%s.json' % group_id, + group_id)['jsConf']['player'] + title = player['title'] + description = player['subtitle'] + duration = player['duration'] + formats, subtitles = self._download_media_selector(programme_id) + else: + playlist = self._download_xml( + 'http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, + group_id, 'Downloading playlist XML') + + no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems') + if no_items is not None: + reason = no_items.get('reason') + if reason == 'preAvailability': + msg = 'Episode %s is not yet available' % group_id + elif reason == 'postAvailability': + msg = 'Episode %s is no longer available' % group_id + elif reason == 'noMedia': + msg = 'Episode %s is not currently available' % group_id + else: + msg = 'Episode %s is not available: %s' % (group_id, reason) + raise ExtractorError(msg, expected=True) + + for item in self._extract_items(playlist): + kind = item.get('kind') + if kind != 'programme' and kind != 'radioProgramme': + continue + title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text + description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text + programme_id = item.get('identifier') + duration = int(item.get('duration')) + formats, subtitles = self._download_media_selector(programme_id) if self._downloader.params.get('listsubtitles', False): self._list_available_subtitles(programme_id, subtitles) @@ -220,4 +260,4 @@ class BBCCoUkIE(SubtitlesInfoExtractor): 'duration': duration, 'formats': formats, 'subtitles': subtitles, - } \ No newline at end of file + } diff --git a/youtube_dl/extractor/beeg.py b/youtube_dl/extractor/beeg.py index 314e37f..4e79fea 100644 --- a/youtube_dl/extractor/beeg.py +++ b/youtube_dl/extractor/beeg.py @@ -40,7 +40,7 @@ class BeegIE(InfoExtractor): title = self._html_search_regex( r'([^<]+)\s*-\s*beeg\.?', webpage, 'title') - + description = self._html_search_regex( r'[0-9]+)/' + + _TEST = { + 'url': 'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr', + 'md5': '0792086e8e2bfbac9cdf27835d5f2093', + 'info_dict': { + 'id': '297', + 'ext': 'mp4', + 'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR', + 'description': 'Joachim Gauck, erster Beauftragter für die Stasi-Unterlagen, spricht auf dem Geschichtsforum über die friedliche Revolution 1989 und eine "gewisse Traurigkeit" im Umgang mit der DDR-Vergangenheit.' + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._html_search_regex( + r'
<h2 class="white">(.*?)</h2>
    ', webpage, 'title') + video_url = self._html_search_regex( + r'(http://film\.bpb\.de/player/dokument_[0-9]+\.mp4)', + webpage, 'video URL') + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'description': self._og_search_description(webpage), + } diff --git a/youtube_dl/extractor/buzzfeed.py b/youtube_dl/extractor/buzzfeed.py new file mode 100644 index 0000000..a40a1bb --- /dev/null +++ b/youtube_dl/extractor/buzzfeed.py @@ -0,0 +1,74 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor + + +class BuzzFeedIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?buzzfeed\.com/[^?#]*?/(?P[^?#]+)' + _TESTS = [{ + 'url': 'http://www.buzzfeed.com/abagg/this-angry-ram-destroys-a-punching-bag-like-a-boss?utm_term=4ldqpia', + 'info_dict': { + 'id': 'this-angry-ram-destroys-a-punching-bag-like-a-boss', + 'title': 'This Angry Ram Destroys A Punching Bag Like A Boss', + 'description': 'Rambro!', + }, + 'playlist': [{ + 'info_dict': { + 'id': 'aVCR29aE_OQ', + 'ext': 'mp4', + 'upload_date': '20141024', + 'uploader_id': 'Buddhanz1', + 'description': 'He likes to stay in shape with his heavy bag, he wont stop until its on the ground\n\nFollow Angry Ram on Facebook for regular updates -\nhttps://www.facebook.com/pages/Angry-Ram/1436897249899558?ref=hl', + 'uploader': 'Buddhanz', + 'title': 'Angry Ram destroys a punching bag', + } + }] + }, { + 'url': 'http://www.buzzfeed.com/sheridanwatson/look-at-this-cute-dog-omg?utm_term=4ldqpia', + 'params': { + 'skip_download': True, # Got enough YouTube download tests + }, + 'info_dict': { + 'description': 'Munchkin the Teddy Bear is back !', + 'title': 'You Need To Stop What You\'re Doing And Watching This Dog Walk On A Treadmill', + }, + 'playlist': [{ + 'info_dict': { + 'id': 'mVmBL8B-In0', + 'ext': 'mp4', + 'upload_date': '20141124', + 'uploader_id': 'CindysMunchkin', + 'description': '© 2014 Munchkin the Shih Tzu\nAll rights reserved\nFacebook: http://facebook.com/MunchkintheShihTzu', + 'uploader': 'Munchkin the Shih Tzu', + 'title': 'Munchkin the Teddy Bear gets her exercise', + }, + }] + }] + + def _real_extract(self, url): + playlist_id = self._match_id(url) + webpage = self._download_webpage(url, playlist_id) + + all_buckets = re.findall( + r'(?s)
    0: d = common.copy() - d.update({ 'title': title, 'formats': formats }) + d.update({'title': title, 'formats': formats}) result.append(d) return result @@ -270,5 +272,5 @@ class Channel9IE(InfoExtractor): else: raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True) - else: # Assuming list + else: # Assuming list return self._extract_list(content_path) diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py index 31fe906..b7fa73c 100644 --- a/youtube_dl/extractor/cinemassacre.py +++ b/youtube_dl/extractor/cinemassacre.py @@ -77,7 +77,7 @@ class CinemassacreIE(InfoExtractor): if videolist_url: videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML') formats = [] - baseurl = vidurl[:vidurl.rfind('/')+1] + baseurl = vidurl[:vidurl.rfind('/') + 1] for video in videolist.findall('.//video'): src = video.get('src') if not src: diff --git a/youtube_dl/extractor/clipfish.py b/youtube_dl/extractor/clipfish.py index 669919a..a5c3cb7 100644 --- a/youtube_dl/extractor/clipfish.py +++ b/youtube_dl/extractor/clipfish.py @@ -24,7 +24,7 @@ class ClipfishIE(InfoExtractor): 'title': 'FIFA 14 - E3 2013 Trailer', 'duration': 82, }, - u'skip': 'Blocked in the US' + 'skip': 'Blocked in the US' } def _real_extract(self, url): @@ -34,7 +34,7 @@ class ClipfishIE(InfoExtractor): info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' % (video_id, int(time.time()))) doc = self._download_xml( - info_url, video_id, note=u'Downloading info page') + info_url, video_id, note='Downloading info page') title = doc.find('title').text video_url = doc.find('filename').text if video_url is None: diff --git a/youtube_dl/extractor/clipsyndicate.py b/youtube_dl/extractor/clipsyndicate.py index 02a1667..d07d544 100644 --- a/youtube_dl/extractor/clipsyndicate.py +++ b/youtube_dl/extractor/clipsyndicate.py @@ -39,6 +39,7 @@ class ClipsyndicateIE(InfoExtractor): transform_source=fix_xml_ampersands) track_doc = pdoc.find('trackList/track') + def find_param(name): node = find_xpath_attr(track_doc, './/param', 'name', name) if node is not None: diff --git a/youtube_dl/extractor/cnn.py b/youtube_dl/extractor/cnn.py index 3826ce7..81142ee 100644 --- a/youtube_dl/extractor/cnn.py +++ b/youtube_dl/extractor/cnn.py @@ -25,8 +25,7 @@ class CNNIE(InfoExtractor): 'duration': 135, 'upload_date': '20130609', }, - }, - { + }, { "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29", "md5": "b5cc60c60a3477d185af8f19a2a26f4e", "info_dict": { diff --git a/youtube_dl/extractor/collegehumor.py b/youtube_dl/extractor/collegehumor.py index 6f866e7..002b240 100644 --- a/youtube_dl/extractor/collegehumor.py +++ b/youtube_dl/extractor/collegehumor.py @@ -10,47 +10,46 @@ from ..utils import int_or_none class CollegeHumorIE(InfoExtractor): _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P[0-9]+)/?(?P.*)$' - _TESTS = [{ - 'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe', - 'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd', - 'info_dict': { - 'id': '6902724', - 'ext': 'mp4', - 'title': 'Comic-Con Cosplay Catastrophe', - 'description': "Fans get creative this year at San Diego. Too creative. 
And yes, that's really Joss Whedon.", - 'age_limit': 13, - 'duration': 187, + _TESTS = [ + { + 'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe', + 'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd', + 'info_dict': { + 'id': '6902724', + 'ext': 'mp4', + 'title': 'Comic-Con Cosplay Catastrophe', + 'description': "Fans get creative this year at San Diego. Too creative. And yes, that's really Joss Whedon.", + 'age_limit': 13, + 'duration': 187, + }, + }, { + 'url': 'http://www.collegehumor.com/video/3505939/font-conference', + 'md5': '72fa701d8ef38664a4dbb9e2ab721816', + 'info_dict': { + 'id': '3505939', + 'ext': 'mp4', + 'title': 'Font Conference', + 'description': "This video wasn't long enough, so we made it double-spaced.", + 'age_limit': 10, + 'duration': 179, + }, + }, { + # embedded youtube video + 'url': 'http://www.collegehumor.com/embed/6950306', + 'info_dict': { + 'id': 'Z-bao9fg6Yc', + 'ext': 'mp4', + 'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!', + 'uploader': 'Mark Dice', + 'uploader_id': 'MarkDice', + 'description': 'md5:62c3dab9351fac7bb44b53b69511d87f', + 'upload_date': '20140127', + }, + 'params': { + 'skip_download': True, + }, + 'add_ie': ['Youtube'], }, - }, - { - 'url': 'http://www.collegehumor.com/video/3505939/font-conference', - 'md5': '72fa701d8ef38664a4dbb9e2ab721816', - 'info_dict': { - 'id': '3505939', - 'ext': 'mp4', - 'title': 'Font Conference', - 'description': "This video wasn't long enough, so we made it double-spaced.", - 'age_limit': 10, - 'duration': 179, - }, - }, - # embedded youtube video - { - 'url': 'http://www.collegehumor.com/embed/6950306', - 'info_dict': { - 'id': 'Z-bao9fg6Yc', - 'ext': 'mp4', - 'title': 'Young Americans Think President John F. 
Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!', - 'uploader': 'Mark Dice', - 'uploader_id': 'MarkDice', - 'description': 'md5:62c3dab9351fac7bb44b53b69511d87f', - 'upload_date': '20140127', - }, - 'params': { - 'skip_download': True, - }, - 'add_ie': ['Youtube'], - }, ] def _real_extract(self, url): diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 93a5a3d..e80a2da 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -13,6 +13,7 @@ import time import xml.etree.ElementTree from ..compat import ( + compat_cookiejar, compat_http_client, compat_urllib_error, compat_urllib_parse_urlparse, @@ -296,9 +297,11 @@ class InfoExtractor(object): content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal) return (content, urlh) - def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True): + def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None): content_type = urlh.headers.get('Content-Type', '') webpage_bytes = urlh.read() + if prefix is not None: + webpage_bytes = prefix + webpage_bytes m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type) if m: encoding = m.group(1) @@ -423,17 +426,18 @@ class InfoExtractor(object): """Report attempt to log in.""" self.to_screen('Logging in') - #Methods for following #608 + # Methods for following #608 @staticmethod def url_result(url, ie=None, video_id=None): """Returns a url that points to a page that should be processed""" - #TODO: ie should be the class used for getting the info + # TODO: ie should be the class used for getting the info video_info = {'_type': 'url', 'url': url, 'ie_key': ie} if video_id is not None: video_info['id'] = video_id return video_info + @staticmethod def playlist_result(entries, playlist_id=None, playlist_title=None): """Returns a playlist""" @@ -477,7 +481,7 @@ class InfoExtractor(object): raise RegexNotFoundError('Unable to extract %s' % _name) else: self._downloader.report_warning('unable to extract %s; ' - 'please report this issue on http://yt-dl.org/bug' % _name) + 'please report this issue on http://yt-dl.org/bug' % _name) return None def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None): @@ -517,7 +521,7 @@ class InfoExtractor(object): raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) except (IOError, netrc.NetrcParseError) as err: self._downloader.report_warning('parsing .netrc: %s' % compat_str(err)) - + return (username, password) def _get_tfa_info(self): @@ -611,7 +615,7 @@ class InfoExtractor(object): def _twitter_search_player(self, html): return self._html_search_meta('twitter:player', html, - 'twitter card player') + 'twitter card player') def _sort_formats(self, formats): if not formats: @@ -814,6 +818,11 @@ class InfoExtractor(object): self._downloader.report_warning(msg) return res + def _set_cookie(self, domain, name, value, expire_time=None): + cookie = compat_cookiejar.Cookie(0, name, value, None, None, domain, None, + None, '/', True, False, expire_time, '', None, None, None) + self._downloader.cookiejar.set_cookie(cookie) + class SearchInfoExtractor(InfoExtractor): """ diff --git a/youtube_dl/extractor/cracked.py b/youtube_dl/extractor/cracked.py index 74b880f..cf763ee 100644 --- a/youtube_dl/extractor/cracked.py +++ b/youtube_dl/extractor/cracked.py @@ -54,7 +54,7 @@ class CrackedIE(InfoExtractor): return { 'id': 
video_id, - 'url':video_url, + 'url': video_url, 'title': title, 'description': description, 'timestamp': timestamp, @@ -62,4 +62,4 @@ class CrackedIE(InfoExtractor): 'comment_count': comment_count, 'height': height, 'width': width, - } \ No newline at end of file + } diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py index fe1324f..d7e2b84 100644 --- a/youtube_dl/extractor/crunchyroll.py +++ b/youtube_dl/extractor/crunchyroll.py @@ -69,11 +69,9 @@ class CrunchyrollIE(SubtitlesInfoExtractor): login_request.add_header('Content-Type', 'application/x-www-form-urlencoded') self._download_webpage(login_request, None, False, 'Wrong login info') - def _real_initialize(self): self._login() - def _decrypt_subtitles(self, data, iv, id): data = bytes_to_intlist(data) iv = bytes_to_intlist(iv) @@ -99,8 +97,10 @@ class CrunchyrollIE(SubtitlesInfoExtractor): return shaHash + [0] * 12 key = obfuscate_key(id) + class Counter: __value = iv + def next_value(self): temp = self.__value self.__value = inc(self.__value) @@ -183,7 +183,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text return output - def _real_extract(self,url): + def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('video_id') @@ -226,10 +226,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text formats = [] for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage): stream_quality, stream_format = self._FORMAT_IDS[fmt] - video_format = fmt+'p' + video_format = fmt + 'p' streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/') # urlencode doesn't work! - streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format + streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded') streamdata_req.add_header('Content-Length', str(len(streamdata_req.data))) streamdata = self._download_xml( @@ -248,8 +248,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text subtitles = {} sub_format = self._downloader.params.get('subtitlesformat', 'srt') for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage): - sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\ - video_id, note='Downloading subtitles for '+sub_name) + sub_page = self._download_webpage( + 'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id, + video_id, note='Downloading subtitles for ' + sub_name) id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False) iv = self._search_regex(r'([^<]+)', sub_page, 'subtitle_iv', fatal=False) data = self._search_regex(r'([^<]+)', sub_page, 'subtitle_data', fatal=False) @@ -274,14 +275,14 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text return return { - 'id': video_id, - 'title': video_title, + 'id': video_id, + 'title': video_title, 'description': video_description, - 'thumbnail': video_thumbnail, - 'uploader': video_uploader, + 'thumbnail': video_thumbnail, + 'uploader': video_uploader, 'upload_date': video_upload_date, - 'subtitles': subtitles, - 'formats': formats, + 'subtitles': subtitles, + 'formats': 
formats, } diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py index fd4bc75..936c13c 100644 --- a/youtube_dl/extractor/dailymotion.py +++ b/youtube_dl/extractor/dailymotion.py @@ -1,4 +1,4 @@ -#coding: utf-8 +# coding: utf-8 from __future__ import unicode_literals import re @@ -18,6 +18,7 @@ from ..utils import ( unescapeHTML, ) + class DailymotionBaseInfoExtractor(InfoExtractor): @staticmethod def _build_request(url): @@ -27,6 +28,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor): request.add_header('Cookie', 'ff=off') return request + class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor): """Information Extractor for Dailymotion""" @@ -112,7 +114,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor): embed_page = self._download_webpage(embed_url, video_id, 'Downloading embed page') info = self._search_regex(r'var info = ({.*?}),$', embed_page, - 'video info', flags=re.MULTILINE) + 'video info', flags=re.MULTILINE) info = json.loads(info) if info.get('error') is not None: msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title'] @@ -206,7 +208,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor): if re.search(self._MORE_PAGES_INDICATOR, webpage) is None: break return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion') - for video_id in orderedSet(video_ids)] + for video_id in orderedSet(video_ids)] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) diff --git a/youtube_dl/extractor/defense.py b/youtube_dl/extractor/defense.py index c5529f8..5e50c63 100644 --- a/youtube_dl/extractor/defense.py +++ b/youtube_dl/extractor/defense.py @@ -9,7 +9,7 @@ from .common import InfoExtractor class DefenseGouvFrIE(InfoExtractor): IE_NAME = 'defense.gouv.fr' _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/' - r'ligthboxvideo/base-de-medias/webtv/(.*)') + r'ligthboxvideo/base-de-medias/webtv/(.*)') _TEST = { 'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1', @@ -26,13 +26,13 @@ class DefenseGouvFrIE(InfoExtractor): video_id = self._search_regex( r"flashvars.pvg_id=\"(\d+)\";", webpage, 'ID') - + json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/' - + video_id) + + video_id) info = self._download_webpage(json_url, title, - 'Downloading JSON config') + 'Downloading JSON config') video_url = json.loads(info)['renditions'][0]['url'] - + return {'id': video_id, 'ext': 'mp4', 'url': video_url, diff --git a/youtube_dl/extractor/discovery.py b/youtube_dl/extractor/discovery.py index 554df67..52c2d7d 100644 --- a/youtube_dl/extractor/discovery.py +++ b/youtube_dl/extractor/discovery.py @@ -16,9 +16,9 @@ class DiscoveryIE(InfoExtractor): 'ext': 'mp4', 'title': 'MythBusters: Mission Impossible Outtakes', 'description': ('Watch Jamie Hyneman and Adam Savage practice being' - ' each other -- to the point of confusing Jamie\'s dog -- and ' - 'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s' - ' back.'), + ' each other -- to the point of confusing Jamie\'s dog -- and ' + 'don\'t miss Adam moon-walking as Jamie ... 
behind Jamie\'s' + ' back.'), 'duration': 156, }, } @@ -29,7 +29,7 @@ class DiscoveryIE(InfoExtractor): webpage = self._download_webpage(url, video_id) video_list_json = self._search_regex(r'var videoListJSON = ({.*?});', - webpage, 'video list', flags=re.DOTALL) + webpage, 'video list', flags=re.DOTALL) video_list = json.loads(video_list_json) info = video_list['clips'][0] formats = [] diff --git a/youtube_dl/extractor/dotsub.py b/youtube_dl/extractor/dotsub.py index 5ae0ad5..638bb33 100644 --- a/youtube_dl/extractor/dotsub.py +++ b/youtube_dl/extractor/dotsub.py @@ -27,7 +27,7 @@ class DotsubIE(InfoExtractor): video_id = mobj.group('id') info_url = "https://dotsub.com/api/media/%s/metadata" % video_id info = self._download_json(info_url, video_id) - date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds + date = time.gmtime(info['dateCreated'] / 1000) # The timestamp is in miliseconds return { 'id': video_id, diff --git a/youtube_dl/extractor/dropbox.py b/youtube_dl/extractor/dropbox.py index aefca84..14b6c00 100644 --- a/youtube_dl/extractor/dropbox.py +++ b/youtube_dl/extractor/dropbox.py @@ -11,18 +11,18 @@ from ..utils import url_basename class DropboxIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/sh?/(?P[a-zA-Z0-9]{15})/.*' - _TESTS = [{ - 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0', - 'info_dict': { - 'id': 'nelirfsxnmcfbfh', - 'ext': 'mp4', - 'title': 'youtube-dl test video \'ä"BaW_jenozKc' - } - }, - { - 'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v', - 'only_matching': True, - }, + _TESTS = [ + { + 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0', + 'info_dict': { + 'id': 'nelirfsxnmcfbfh', + 'ext': 'mp4', + 'title': 'youtube-dl test video \'ä"BaW_jenozKc' + } + }, { + 'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v', + 'only_matching': True, + }, ] def _real_extract(self, url): diff --git a/youtube_dl/extractor/ehow.py b/youtube_dl/extractor/ehow.py index f8f49a0..b766e17 100644 --- a/youtube_dl/extractor/ehow.py +++ b/youtube_dl/extractor/ehow.py @@ -28,7 +28,7 @@ class EHowIE(InfoExtractor): video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)', - webpage, 'video URL') + webpage, 'video URL') final_url = compat_urllib_parse.unquote(video_url) uploader = self._html_search_meta('uploader', webpage) title = self._og_search_title(webpage).replace(' | eHow', '') diff --git a/youtube_dl/extractor/eighttracks.py b/youtube_dl/extractor/eighttracks.py index c1b4c72..f4c1e2a 100644 --- a/youtube_dl/extractor/eighttracks.py +++ b/youtube_dl/extractor/eighttracks.py @@ -125,7 +125,7 @@ class EightTracksIE(InfoExtractor): info = { 'id': compat_str(track_data['id']), 'url': track_data['track_file_stream_url'], - 'title': track_data['performer'] + u' - ' + track_data['name'], + 'title': track_data['performer'] + ' - ' + track_data['name'], 'raw_title': track_data['name'], 'uploader_id': data['user']['login'], 'ext': 'm4a', diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py index 1048035..2139f68 100644 --- a/youtube_dl/extractor/facebook.py +++ b/youtube_dl/extractor/facebook.py @@ -60,8 +60,8 @@ class FacebookIE(InfoExtractor): login_page_req = 
compat_urllib_request.Request(self._LOGIN_URL) login_page_req.add_header('Cookie', 'locale=en_US') login_page = self._download_webpage(login_page_req, None, - note='Downloading login page', - errnote='Unable to download login page') + note='Downloading login page', + errnote='Unable to download login page') lsd = self._search_regex( r'', login_results) is not None: self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.') return @@ -96,7 +96,7 @@ class FacebookIE(InfoExtractor): check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form)) check_req.add_header('Content-Type', 'application/x-www-form-urlencoded') check_response = self._download_webpage(check_req, None, - note='Confirming login') + note='Confirming login') if re.search(r'id="checkpointSubmitButton"', check_response) is not None: self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: diff --git a/youtube_dl/extractor/fc2.py b/youtube_dl/extractor/fc2.py index c663a0f..6f5d235 100644 --- a/youtube_dl/extractor/fc2.py +++ b/youtube_dl/extractor/fc2.py @@ -40,7 +40,7 @@ class FC2IE(InfoExtractor): info_url = ( "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&". - format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.','%2E'))) + format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E'))) info_webpage = self._download_webpage( info_url, video_id, note='Downloading info page') diff --git a/youtube_dl/extractor/firsttv.py b/youtube_dl/extractor/firsttv.py index c2e987f..08ceee4 100644 --- a/youtube_dl/extractor/firsttv.py +++ b/youtube_dl/extractor/firsttv.py @@ -44,9 +44,9 @@ class FirstTVIE(InfoExtractor): duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False) like_count = self._html_search_regex(r'title="Понравилось".*?/> \[(\d+)\]', - webpage, 'like count', fatal=False) + webpage, 'like count', fatal=False) dislike_count = self._html_search_regex(r'title="Не понравилось".*?/> \[(\d+)\]', - webpage, 'dislike count', fatal=False) + webpage, 'dislike count', fatal=False) return { 'id': video_id, @@ -57,4 +57,4 @@ class FirstTVIE(InfoExtractor): 'duration': int_or_none(duration), 'like_count': int_or_none(like_count), 'dislike_count': int_or_none(dislike_count), - } \ No newline at end of file + } diff --git a/youtube_dl/extractor/fivemin.py b/youtube_dl/extractor/fivemin.py index 3a50bab..f9c127c 100644 --- a/youtube_dl/extractor/fivemin.py +++ b/youtube_dl/extractor/fivemin.py @@ -50,7 +50,7 @@ class FiveMinIE(InfoExtractor): video_id = mobj.group('id') embed_url = 'https://embed.5min.com/playerseed/?playList=%s' % video_id embed_page = self._download_webpage(embed_url, video_id, - 'Downloading embed page') + 'Downloading embed page') sid = self._search_regex(r'sid=(\d+)', embed_page, 'sid') query = compat_urllib_parse.urlencode({ 'func': 'GetResults', diff --git a/youtube_dl/extractor/fktv.py b/youtube_dl/extractor/fktv.py index 21b8914..d09d1c1 100644 --- a/youtube_dl/extractor/fktv.py +++ b/youtube_dl/extractor/fktv.py @@ -32,9 +32,9 @@ class FKTVIE(InfoExtractor): server = random.randint(2, 4) video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%d.jpg' % episode start_webpage = 
self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode, - episode) + episode) playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage, - 'playlist', flags=re.DOTALL) + 'playlist', flags=re.DOTALL) files = json.loads(re.sub('{[^{}]*?}', '{}', playlist)) # TODO: return a single multipart video videos = [] diff --git a/youtube_dl/extractor/flickr.py b/youtube_dl/extractor/flickr.py index e09982e..0c858b6 100644 --- a/youtube_dl/extractor/flickr.py +++ b/youtube_dl/extractor/flickr.py @@ -17,8 +17,8 @@ class FlickrIE(InfoExtractor): 'info_dict': { 'id': '5645318632', 'ext': 'mp4', - "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.", - "uploader_id": "forestwander-nature-pictures", + "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.", + "uploader_id": "forestwander-nature-pictures", "title": "Dark Hollow Waterfalls" } } @@ -37,7 +37,7 @@ class FlickrIE(InfoExtractor): first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage') node_id = self._html_search_regex(r'(\d+-\d+)', - first_xml, 'node_id') + first_xml, 'node_id') second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1' second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage') diff --git a/youtube_dl/extractor/fourtube.py b/youtube_dl/extractor/fourtube.py index 7d56b9b..b22ce2a 100644 --- a/youtube_dl/extractor/fourtube.py +++ b/youtube_dl/extractor/fourtube.py @@ -55,7 +55,7 @@ class FourTubeIE(InfoExtractor): description = self._html_search_meta('description', webpage, 'description') if description: upload_date = self._search_regex(r'Published Date: (\d{2} [a-zA-Z]{3} \d{4})', description, 'upload date', - fatal=False) + fatal=False) if upload_date: upload_date = unified_strdate(upload_date) view_count = self._search_regex(r'Views: ([\d,\.]+)', description, 'view count', fatal=False) @@ -65,9 +65,9 @@ class FourTubeIE(InfoExtractor): token_url = "http://tkn.4tube.com/{0}/desktop/{1}".format(media_id, "+".join(sources)) headers = { - b'Content-Type': b'application/x-www-form-urlencoded', - b'Origin': b'http://www.4tube.com', - } + b'Content-Type': b'application/x-www-form-urlencoded', + b'Origin': b'http://www.4tube.com', + } token_req = compat_urllib_request.Request(token_url, b'{}', headers) tokens = self._download_json(token_req, video_id) @@ -76,7 +76,7 @@ class FourTubeIE(InfoExtractor): 'format_id': format + 'p', 'resolution': format + 'p', 'quality': int(format), - } for format in sources] + } for format in sources] self._sort_formats(formats) @@ -92,4 +92,4 @@ class FourTubeIE(InfoExtractor): 'duration': duration, 'age_limit': 18, 'webpage_url': webpage_url, - } \ No newline at end of file + } diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py index 35d7d15..e0420a4 100644 --- a/youtube_dl/extractor/francetv.py +++ b/youtube_dl/extractor/francetv.py @@ -26,6 +26,19 @@ class FranceTVBaseInfoExtractor(InfoExtractor): if info.get('status') == 'NOK': raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, info['message']), expected=True) + 
allowed_countries = info['videos'][0].get('geoblocage') + if allowed_countries: + georestricted = True + geo_info = self._download_json( + 'http://geo.francetv.fr/ws/edgescape.json', video_id, + 'Downloading geo restriction info') + country = geo_info['reponse']['geo_info']['country_code'] + if country not in allowed_countries: + raise ExtractorError( + 'The video is not available from your location', + expected=True) + else: + georestricted = False formats = [] for video in info['videos']: @@ -36,6 +49,10 @@ class FranceTVBaseInfoExtractor(InfoExtractor): continue format_id = video['format'] if video_url.endswith('.f4m'): + if georestricted: + # See https://github.com/rg3/youtube-dl/issues/3963 + # m3u8 urls work fine + continue video_url_parsed = compat_urllib_parse_urlparse(video_url) f4m_url = self._download_webpage( 'http://hdfauth.francetv.fr/esi/urltokengen2.html?url=%s' % video_url_parsed.path, @@ -234,7 +251,7 @@ class GenerationQuoiIE(InfoExtractor): info_json = self._download_webpage(info_url, name) info = json.loads(info_json) return self.url_result('http://www.dailymotion.com/video/%s' % info['id'], - ie='Dailymotion') + ie='Dailymotion') class CultureboxIE(FranceTVBaseInfoExtractor): diff --git a/youtube_dl/extractor/gamekings.py b/youtube_dl/extractor/gamekings.py index 11fee3d..cf8e90d 100644 --- a/youtube_dl/extractor/gamekings.py +++ b/youtube_dl/extractor/gamekings.py @@ -11,7 +11,7 @@ class GamekingsIE(InfoExtractor): 'url': 'http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/', # MD5 is flaky, seems to change regularly # 'md5': '2f32b1f7b80fdc5cb616efb4f387f8a3', - u'info_dict': { + 'info_dict': { 'id': '20130811', 'ext': 'mp4', 'title': 'Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review', diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index c7a824c..328301d 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -445,6 +445,30 @@ class GenericIE(InfoExtractor): 'title': 'Rosetta #CometLanding webcast HL 10', } }, + # LazyYT + { + 'url': 'http://discourse.ubuntu.com/t/unity-8-desktop-mode-windows-on-mir/1986', + 'info_dict': { + 'title': 'Unity 8 desktop-mode windows on Mir! - Ubuntu Discourse', + }, + 'playlist_mincount': 2, + }, + # Direct link with incorrect MIME type + { + 'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm', + 'md5': '4ccbebe5f36706d85221f204d7eb5913', + 'info_dict': { + 'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm', + 'id': '5_Lennart_Poettering_-_Systemd', + 'ext': 'webm', + 'title': '5_Lennart_Poettering_-_Systemd', + 'upload_date': '20141120', + }, + 'expected_warnings': [ + 'URL could be a direct video link, returning it as such.' + ] + } + ] def report_following_redirect(self, new_url): @@ -537,9 +561,9 @@ class GenericIE(InfoExtractor): if default_search in ('error', 'fixup_error'): raise ExtractorError( - ('%r is not a valid URL. ' - 'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube' - ) % (url, url), expected=True) + '%r is not a valid URL. 
' + 'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube' + % (url, url), expected=True) else: if ':' not in default_search: default_search += ':' @@ -598,10 +622,28 @@ class GenericIE(InfoExtractor): if not self._downloader.params.get('test', False) and not is_intentional: self._downloader.report_warning('Falling back on generic information extractor.') - if full_response: - webpage = self._webpage_read_content(full_response, url, video_id) - else: - webpage = self._download_webpage(url, video_id) + if not full_response: + full_response = self._request_webpage(url, video_id) + + # Maybe it's a direct link to a video? + # Be careful not to download the whole thing! + first_bytes = full_response.read(512) + if not re.match(r'^\s*<', first_bytes.decode('utf-8', 'replace')): + self._downloader.report_warning( + 'URL could be a direct video link, returning it as such.') + upload_date = unified_strdate( + head_response.headers.get('Last-Modified')) + return { + 'id': video_id, + 'title': os.path.splitext(url_basename(url))[0], + 'direct': True, + 'url': url, + 'upload_date': upload_date, + } + + webpage = self._webpage_read_content( + full_response, url, video_id, prefix=first_bytes) + self.report_extraction(video_id) # Is it an RSS feed? @@ -702,6 +744,12 @@ class GenericIE(InfoExtractor): return _playlist_from_matches( matches, lambda m: unescapeHTML(m[1])) + # Look for lazyYT YouTube embed + matches = re.findall( + r'class="lazyYT" data-youtube-id="([^"]+)"', webpage) + if matches: + return _playlist_from_matches(matches, lambda m: unescapeHTML(m)) + # Look for embedded Dailymotion player matches = re.findall( r']+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/embed/video/.+?)\1', webpage) @@ -733,7 +781,7 @@ class GenericIE(InfoExtractor): 'title': video_title, 'id': video_id, } - + match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage) if match: return { @@ -748,7 +796,7 @@ class GenericIE(InfoExtractor): # Look for embedded blip.tv player mobj = re.search(r']*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage) if mobj: - return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV') + return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV') mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage) if mobj: return self.url_result(mobj.group(1), 'BlipTV') @@ -784,7 +832,7 @@ class GenericIE(InfoExtractor): # Look for Ooyala videos mobj = (re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or - re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)) + re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)) if mobj is not None: return OoyalaIE._build_url_result(mobj.group('ec')) @@ -1025,4 +1073,3 @@ class GenericIE(InfoExtractor): '_type': 'playlist', 'entries': entries, } - diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py index 66ca379..6949a57 100644 --- a/youtube_dl/extractor/globo.py +++ b/youtube_dl/extractor/globo.py @@ -397,4 +397,4 @@ class GloboIE(InfoExtractor): 'uploader_id': uploader_id, 'like_count': like_count, 'formats': formats - } \ No newline at end of file + } diff --git a/youtube_dl/extractor/gorillavid.py b/youtube_dl/extractor/gorillavid.py index e21e575..1ac1da8 100644 --- a/youtube_dl/extractor/gorillavid.py +++ b/youtube_dl/extractor/gorillavid.py @@ -9,14 +9,15 @@ from ..utils 
import ( determine_ext, compat_urllib_parse, compat_urllib_request, + int_or_none, ) class GorillaVidIE(InfoExtractor): - IE_DESC = 'GorillaVid.in, daclips.in and movpod.in' + IE_DESC = 'GorillaVid.in, daclips.in, movpod.in and fastvideo.in' _VALID_URL = r'''(?x) https?://(?P<host>(?:www\.)? - (?:daclips\.in|gorillavid\.in|movpod\.in))/ + (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in))/ (?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)? ''' @@ -49,6 +50,16 @@ class GorillaVidIE(InfoExtractor): 'title': 'Micro Pig piglets ready on 16th July 2009-bG0PdrCdxUc', 'thumbnail': 're:http://.*\.jpg', } + }, { + # video with countdown timeout + 'url': 'http://fastvideo.in/1qmdn1lmsmbw', + 'md5': '8b87ec3f6564a3108a0e8e66594842ba', + 'info_dict': { + 'id': '1qmdn1lmsmbw', + 'ext': 'mp4', + 'title': 'Man of Steel - Trailer', + 'thumbnail': 're:http://.*\.jpg', + }, }, { 'url': 'http://movpod.in/0wguyyxi1yca', 'only_matching': True, @@ -69,8 +80,14 @@ class GorillaVidIE(InfoExtractor): (?:id="[^"]+"\s+)? value="([^"]*)" ''', webpage)) - + if fields['op'] == 'download1': + countdown = int_or_none(self._search_regex( + r'(?:[Ww]ait)?\s*(\d+)\s*(?:seconds?)?', + webpage, 'countdown', default=None)) + if countdown: + self._sleep(countdown, video_id) + post = compat_urllib_parse.urlencode(fields) req = compat_urllib_request.Request(url, post) @@ -78,9 +95,13 @@ class GorillaVidIE(InfoExtractor): webpage = self._download_webpage(req, video_id, 'Downloading video page') - title = self._search_regex(r'style="z-index: [0-9]+;">([^<]+)', webpage, 'title') - video_url = self._search_regex(r'file\s*:\s*\'(http[^\']+)\',', webpage, 'file url') - thumbnail = self._search_regex(r'image\s*:\s*\'(http[^\']+)\',', webpage, 'thumbnail', fatal=False) + title = self._search_regex( + r'style="z-index: [0-9]+;">([^<]+)', + webpage, 'title', default=None) or self._og_search_title(webpage) + video_url = self._search_regex( + r'file\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'file url') + thumbnail = self._search_regex( + r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', fatal=False) formats = [{ 'format_id': 'sd', diff --git a/youtube_dl/extractor/hornbunny.py b/youtube_dl/extractor/hornbunny.py index 7e77144..5b6efb2 100644 --- a/youtube_dl/extractor/hornbunny.py +++ b/youtube_dl/extractor/hornbunny.py @@ -37,7 +37,7 @@ class HornBunnyIE(InfoExtractor): webpage2 = self._download_webpage(redirect_url, video_id) video_url = self._html_search_regex( r'flvMask:(.*?);', webpage2, 'video_url') - + duration = parse_duration(self._search_regex( r'Runtime:\s*([0-9:]+)
    ', webpage, 'duration', fatal=False)) diff --git a/youtube_dl/extractor/hotnewhiphop.py b/youtube_dl/extractor/hotnewhiphop.py index 80b48b1..651784b 100644 --- a/youtube_dl/extractor/hotnewhiphop.py +++ b/youtube_dl/extractor/hotnewhiphop.py @@ -1,12 +1,13 @@ from __future__ import unicode_literals -import re import base64 from .common import InfoExtractor -from ..utils import ( +from ..compat import ( compat_urllib_parse, compat_urllib_request, +) +from ..utils import ( ExtractorError, HEADRequest, ) @@ -16,25 +17,24 @@ class HotNewHipHopIE(InfoExtractor): _VALID_URL = r'http://www\.hotnewhiphop\.com/.*\.(?P<id>.*)\.html' _TEST = { 'url': 'http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html', - 'file': '1435540.mp3', 'md5': '2c2cd2f76ef11a9b3b581e8b232f3d96', 'info_dict': { + 'id': '1435540', + 'ext': 'mp3', 'title': 'Freddie Gibbs - Lay It Down' } } def _real_extract(self, url): - m = re.match(self._VALID_URL, url) - video_id = m.group('id') - - webpage_src = self._download_webpage(url, video_id) + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) video_url_base64 = self._search_regex( - r'data-path="(.*?)"', webpage_src, u'video URL', fatal=False) + r'data-path="(.*?)"', webpage, 'video URL', default=None) if video_url_base64 is None: video_url = self._search_regex( - r'"contentUrl" content="(.*?)"', webpage_src, u'video URL') + r'"contentUrl" content="(.*?)"', webpage, 'content URL') return self.url_result(video_url, ie='Youtube') reqdata = compat_urllib_parse.urlencode([ @@ -59,11 +59,11 @@ class HotNewHipHopIE(InfoExtractor): if video_url.endswith('.html'): raise ExtractorError('Redirect failed') - video_title = self._og_search_title(webpage_src).strip() + video_title = self._og_search_title(webpage).strip() return { 'id': video_id, 'url': video_url, 'title': video_title, - 'thumbnail': self._og_search_thumbnail(webpage_src), + 'thumbnail': self._og_search_thumbnail(webpage), } diff --git a/youtube_dl/extractor/howcast.py b/youtube_dl/extractor/howcast.py index 6ae0478..3f7d666 100644 --- a/youtube_dl/extractor/howcast.py +++ b/youtube_dl/extractor/howcast.py @@ -13,7 +13,7 @@ class HowcastIE(InfoExtractor): 'info_dict': { 'id': '390161', 'ext': 'mp4', - 'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.', + 'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.', 'title': 'How to Tie a Square Knot Properly', } } @@ -27,10 +27,10 @@ class HowcastIE(InfoExtractor): self.report_extraction(video_id) video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)', - webpage, 'video URL') + webpage, 'video URL') video_description = self._html_search_regex(r'[^<]+
    ' % compilation_id, compilation_page) - if len(seasons) == 0: # No seasons in this compilation + if len(seasons) == 0: # No seasons in this compilation entries = self._extract_entries(compilation_page, compilation_id) else: entries = [] @@ -172,4 +172,4 @@ class IviCompilationIE(InfoExtractor): compilation_id, 'Downloading season %s web page' % season_id) entries.extend(self._extract_entries(season_page, compilation_id)) - return self.playlist_result(entries, playlist_id, playlist_title) \ No newline at end of file + return self.playlist_result(entries, playlist_id, playlist_title) diff --git a/youtube_dl/extractor/jadorecettepub.py b/youtube_dl/extractor/jadorecettepub.py index ace0876..063e86d 100644 --- a/youtube_dl/extractor/jadorecettepub.py +++ b/youtube_dl/extractor/jadorecettepub.py @@ -45,4 +45,3 @@ class JadoreCettePubIE(InfoExtractor): 'title': title, 'description': description, } - diff --git a/youtube_dl/extractor/jeuxvideo.py b/youtube_dl/extractor/jeuxvideo.py index 1881659..8094cc2 100644 --- a/youtube_dl/extractor/jeuxvideo.py +++ b/youtube_dl/extractor/jeuxvideo.py @@ -29,7 +29,7 @@ class JeuxVideoIE(InfoExtractor): xml_link = self._html_search_regex( r'', webpage, 'config URL') - + video_id = self._search_regex( r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml', xml_link, 'video ID') @@ -38,7 +38,7 @@ class JeuxVideoIE(InfoExtractor): xml_link, title, 'Downloading XML config') info_json = config.find('format.json').text info = json.loads(info_json)['versions'][0] - + video_url = 'http://video720.jeuxvideo.com/' + info['file'] return { diff --git a/youtube_dl/extractor/jukebox.py b/youtube_dl/extractor/jukebox.py index 5aa32bf..da8068e 100644 --- a/youtube_dl/extractor/jukebox.py +++ b/youtube_dl/extractor/jukebox.py @@ -36,7 +36,7 @@ class JukeboxIE(InfoExtractor): try: video_url = self._search_regex(r'"config":{"file":"(?P<video_url>http:[^"]+\?mdtk=[0-9]+)"', - iframe_html, 'video url') + iframe_html, 'video url') video_url = unescapeHTML(video_url).replace('\/', '/') except RegexNotFoundError: youtube_url = self._search_regex( @@ -47,9 +47,9 @@ class JukeboxIE(InfoExtractor): return self.url_result(youtube_url, ie='Youtube') title = self._html_search_regex(r'
    ([^<]+)
    ', - html, 'title') + html, 'title') artist = self._html_search_regex(r'([^<]+)', - html, 'artist') + html, 'artist') return { 'id': video_id, diff --git a/youtube_dl/extractor/kankan.py b/youtube_dl/extractor/kankan.py index 23103b1..dbfe4cc 100644 --- a/youtube_dl/extractor/kankan.py +++ b/youtube_dl/extractor/kankan.py @@ -10,7 +10,7 @@ _md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest() class KankanIE(InfoExtractor): _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml' - + _TEST = { 'url': 'http://yinyue.kankan.com/vod/48/48863.shtml', 'file': '48863.flv', diff --git a/youtube_dl/extractor/kickstarter.py b/youtube_dl/extractor/kickstarter.py index 827091e..7d4b570 100644 --- a/youtube_dl/extractor/kickstarter.py +++ b/youtube_dl/extractor/kickstarter.py @@ -13,8 +13,10 @@ class KickStarterIE(InfoExtractor): 'id': '1404461844', 'ext': 'mp4', 'title': 'Intersection: The Story of Josh Grant by Kyle Cowling', - 'description': 'A unique motocross documentary that examines the ' - 'life and mind of one of sports most elite athletes: Josh Grant.', + 'description': ( + 'A unique motocross documentary that examines the ' + 'life and mind of one of sports most elite athletes: Josh Grant.' + ), }, }, { 'note': 'Embedded video (not using the native kickstarter video service)', diff --git a/youtube_dl/extractor/kontrtube.py b/youtube_dl/extractor/kontrtube.py index 8a73ecf..41fd620 100644 --- a/youtube_dl/extractor/kontrtube.py +++ b/youtube_dl/extractor/kontrtube.py @@ -63,4 +63,4 @@ class KontrTubeIE(InfoExtractor): 'duration': duration, 'view_count': int_or_none(view_count), 'comment_count': int_or_none(comment_count), - } \ No newline at end of file + } diff --git a/youtube_dl/extractor/ku6.py b/youtube_dl/extractor/ku6.py index 89013e5..a602980 100644 --- a/youtube_dl/extractor/ku6.py +++ b/youtube_dl/extractor/ku6.py @@ -30,4 +30,3 @@ class Ku6IE(InfoExtractor): 'title': title, 'url': downloadUrl } - diff --git a/youtube_dl/extractor/laola1tv.py b/youtube_dl/extractor/laola1tv.py index 102e29f..2fd3b46 100644 --- a/youtube_dl/extractor/laola1tv.py +++ b/youtube_dl/extractor/laola1tv.py @@ -75,4 +75,3 @@ class Laola1TvIE(InfoExtractor): 'categories': categories, 'ext': 'mp4', } - diff --git a/youtube_dl/extractor/lifenews.py b/youtube_dl/extractor/lifenews.py index 8d9491f..1dfe7f7 100644 --- a/youtube_dl/extractor/lifenews.py +++ b/youtube_dl/extractor/lifenews.py @@ -52,7 +52,7 @@ class LifeNewsIE(InfoExtractor): r'
    \s*(\d+)', webpage, 'comment count', fatal=False) upload_date = self._html_search_regex( - r'