diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py
index e527ee425365a096b50f541b1c75c82dcb9013fb..2a775bf0023f7ddc09507d66ab660a8dd97d19b2 100644
--- a/youtube_dl/downloader/hls.py
+++ b/youtube_dl/downloader/hls.py
@@ -3,16 +3,18 @@ from __future__ import unicode_literals
 import os
 import re
 import subprocess
+import sys
 
-from ..postprocessor.ffmpeg import FFmpegPostProcessor
 from .common import FileDownloader
-from ..compat import (
-    compat_urlparse,
-    compat_urllib_request,
-)
+from .fragment import FragmentFD
+
+from ..compat import compat_urlparse
+from ..postprocessor.ffmpeg import FFmpegPostProcessor
 from ..utils import (
     encodeArgument,
     encodeFilename,
+    sanitize_open,
+    handle_youtubedl_headers,
 )
 
 
@@ -23,18 +25,44 @@ class HlsFD(FileDownloader):
         tmpfilename = self.temp_name(filename)
 
         ffpp = FFmpegPostProcessor(downloader=self)
-        program = ffpp._executable
-        if program is None:
+        if not ffpp.available:
             self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
             return False
         ffpp.check_version()
 
-        args = [
-            encodeArgument(opt)
-            for opt in (program, '-y', '-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc')]
-        args.append(encodeFilename(tmpfilename, True))
+        args = [ffpp.executable, '-y']
+
+        if info_dict['http_headers'] and re.match(r'^https?://', url):
+            # Trailing \r\n after each HTTP header is important to prevent a warning from ffmpeg/avconv:
+            # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
+            headers = handle_youtubedl_headers(info_dict['http_headers'])
+            args += [
+                '-headers',
+                ''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]
+
+        args += ['-i', url, '-c', 'copy']
+        if self.params.get('hls_use_mpegts', False):
+            args += ['-f', 'mpegts']
+        else:
+            args += ['-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
+
+        args = [encodeArgument(opt) for opt in args]
+        args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
+
+        self._debug_cmd(args)
 
-        retval = subprocess.call(args)
+        proc = subprocess.Popen(args, stdin=subprocess.PIPE)
+        try:
+            retval = proc.wait()
+        except KeyboardInterrupt:
+            # subprocess.run would send the SIGKILL signal to ffmpeg and the
+            # mp4 file couldn't be played, but if we ask ffmpeg to quit it
+            # produces a file that is playable (this is mostly useful for live
+            # streams). Note that Windows is not affected and produces playable
+            # files (see https://github.com/rg3/youtube-dl/issues/8300).
+            if sys.platform != 'win32':
+                proc.communicate(b'q')
+            raise
         if retval == 0:
             fsize = os.path.getsize(encodeFilename(tmpfilename))
             self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
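
For reference, here is a minimal standalone sketch of the command line the patched HlsFD now builds (not part of the patch; the ffmpeg path, URL, headers and output name are placeholders). It illustrates the two ideas above: the -headers value is a single string of "Key: Value\r\n" pairs, and on KeyboardInterrupt ffmpeg is asked to quit with 'q' so it can finalize the output.

# Illustrative sketch only -- mirrors the patched HlsFD.real_download above,
# with placeholder values. Assumes an ffmpeg binary is on PATH.
import subprocess
import sys

url = 'https://example.com/index.m3u8'    # placeholder manifest URL
headers = {'User-Agent': 'Mozilla/5.0'}   # placeholder HTTP headers

args = ['ffmpeg', '-y']
# One "Key: Value\r\n" entry per header; the trailing CRLF avoids the
# "No trailing CRLF found in HTTP header" warning from ffmpeg/avconv.
args += ['-headers', ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items())]
args += ['-i', url, '-c', 'copy', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc', 'out.mp4']

proc = subprocess.Popen(args, stdin=subprocess.PIPE)
try:
    retval = proc.wait()
except KeyboardInterrupt:
    # Asking ffmpeg to quit with 'q' lets it finalize the file (useful for
    # live streams); killing it outright can leave an unplayable mp4.
    if sys.platform != 'win32':
        proc.communicate(b'q')
    raise
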
@@ -48,58 +76,55 @@ class HlsFD(FileDownloader):
             return True
         else:
             self.to_stderr('\n')
-            self.report_error('%s exited with code %d' % (program, retval))
+            self.report_error('%s exited with code %d' % (ffpp.basename, retval))
             return False
 
 
-class NativeHlsFD(FileDownloader):
+class NativeHlsFD(FragmentFD):
     """ A more limited implementation that does not require ffmpeg """
 
+    FD_NAME = 'hlsnative'
+
     def real_download(self, filename, info_dict):
-        url = info_dict['url']
-        self.report_destination(filename)
-        tmpfilename = self.temp_name(filename)
+        man_url = info_dict['url']
+        self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
+        manifest = self.ydl.urlopen(man_url).read()
 
-        self.to_screen(
-            '[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
-        data = self.ydl.urlopen(url).read()
-        s = data.decode('utf-8', 'ignore')
-        segment_urls = []
+        s = manifest.decode('utf-8', 'ignore')
+        fragment_urls = []
         for line in s.splitlines():
             line = line.strip()
             if line and not line.startswith('#'):
                 segment_url = (
                     line
                     if re.match(r'^https?://', line)
-                    else compat_urlparse.urljoin(url, line))
-                segment_urls.append(segment_url)
-
-        is_test = self.params.get('test', False)
-        remaining_bytes = self._TEST_FILE_SIZE if is_test else None
-        byte_counter = 0
-        with open(tmpfilename, 'wb') as outf:
-            for i, segurl in enumerate(segment_urls):
-                self.to_screen(
-                    '[hlsnative] %s: Downloading segment %d / %d' %
-                    (info_dict['id'], i + 1, len(segment_urls)))
-                seg_req = compat_urllib_request.Request(segurl)
-                if remaining_bytes is not None:
-                    seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
-
-                segment = self.ydl.urlopen(seg_req).read()
-                if remaining_bytes is not None:
-                    segment = segment[:remaining_bytes]
-                    remaining_bytes -= len(segment)
-                outf.write(segment)
-                byte_counter += len(segment)
-                if remaining_bytes is not None and remaining_bytes <= 0:
+                    else compat_urlparse.urljoin(man_url, line))
+                fragment_urls.append(segment_url)
+                # We only download the first fragment during the test
+                if self.params.get('test', False):
                     break
 
-        self._hook_progress({
-            'downloaded_bytes': byte_counter,
-            'total_bytes': byte_counter,
+        ctx = {
             'filename': filename,
-            'status': 'finished',
-        })
-        self.try_rename(tmpfilename, filename)
+            'total_frags': len(fragment_urls),
+        }
+
+        self._prepare_and_start_frag_download(ctx)
+
+        frags_filenames = []
+        for i, frag_url in enumerate(fragment_urls):
+            frag_filename = '%s-Frag%d' % (ctx['tmpfilename'], i)
+            success = ctx['dl'].download(frag_filename, {'url': frag_url})
+            if not success:
+                return False
+            down, frag_sanitized = sanitize_open(frag_filename, 'rb')
+            ctx['dest_stream'].write(down.read())
+            down.close()
+            frags_filenames.append(frag_sanitized)
+
+        self._finish_frag_download(ctx)
+
+        for frag_file in frags_filenames:
+            os.remove(encodeFilename(frag_file))
+
         return True
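
For comparison, a minimal standalone sketch of the hlsnative strategy introduced above, written against the standard library only (the real code goes through FragmentFD, sanitize_open and youtube-dl's own HTTP stack; the manifest URL and output filename here are placeholders): read the m3u8, keep the non-comment lines, resolve relative fragment URLs against the manifest URL, and append each fragment to a single output file.

# Illustrative sketch only. Like the NativeHlsFD above, it handles plain
# media playlists (no encryption, no master playlists, no retries).
import re
try:
    from urllib.request import urlopen
    from urllib.parse import urljoin
except ImportError:  # Python 2
    from urllib2 import urlopen
    from urlparse import urljoin


def download_hls(man_url, filename):
    manifest = urlopen(man_url).read().decode('utf-8', 'ignore')
    # Non-comment lines are fragment URLs; relative ones are resolved
    # against the manifest URL, as in the patched code.
    fragment_urls = [
        line if re.match(r'^https?://', line) else urljoin(man_url, line)
        for line in (l.strip() for l in manifest.splitlines())
        if line and not line.startswith('#')
    ]
    with open(filename, 'wb') as dest:
        for frag_url in fragment_urls:
            dest.write(urlopen(frag_url).read())  # append fragments in playlist order


if __name__ == '__main__':
    # Placeholder URL; substitute a real media playlist to try it out.
    download_hls('https://example.com/video/index.m3u8', 'video.ts')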