diff --git a/youtube_dl/extractor/huffpost.py b/youtube_dl/extractor/huffpost.py
index 4ccf6b9b8a82c3ef28c1d9d04dcc6f26ce2a8f8d..97e36f0568f45c0da495cdb54851405a12e51fc7 100644
--- a/youtube_dl/extractor/huffpost.py
+++ b/youtube_dl/extractor/huffpost.py
@@ -4,6 +4,7 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
+    determine_ext,
     parse_duration,
     unified_strdate,
 )
@@ -29,7 +30,12 @@ class HuffPostIE(InfoExtractor):
             'description': 'This week on Legalese It, Mike talks to David Bosco about his new book on the ICC, "Rough Justice," he also discusses the Virginia AG\'s historic stance on gay marriage, the execution of Edgar Tamayo, the ICC\'s delay of Kenya\'s President and more.  ',
             'duration': 1549,
             'upload_date': '20140124',
-        }
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
+        'expected_warnings': ['HTTP Error 404: Not Found'],
     }
 
     def _real_extract(self, url):
@@ -39,13 +45,14 @@ class HuffPostIE(InfoExtractor):
         data = self._download_json(api_url, video_id)['data']
 
         video_title = data['title']
-        duration = parse_duration(data['running_time'])
-        upload_date = unified_strdate(data['schedule']['starts_at'])
+        duration = parse_duration(data.get('running_time'))
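+        # prefer the scheduled start time, falling back to segment_start_date_time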
+        upload_date = unified_strdate(
+            data.get('schedule', {}).get('starts_at') or data.get('segment_start_date_time'))
         description = data.get('description')
 
         thumbnails = []
-        for url in data['images'].values():
-            m = re.match('.*-([0-9]+x[0-9]+)\.', url)
+        for url in filter(None, data['images'].values()):
+            m = re.match(r'.*-([0-9]+x[0-9]+)\.', url)
             if not m:
                 continue
             thumbnails.append({
@@ -53,22 +60,29 @@ class HuffPostIE(InfoExtractor):
                 'resolution': m.group(1),
             })
 
-        formats = [{
-            'format': key,
-            'format_id': key.replace('/', '.'),
-            'ext': 'mp4',
-            'url': url,
-            'vcodec': 'none' if key.startswith('audio/') else None,
-        } for key, url in data['sources']['live'].items()]
-        if data.get('fivemin_id'):
-            fid = data['fivemin_id']
-            fcat = str(int(fid) // 100 + 1)
-            furl = 'http://avideos.5min.com/2/' + fcat[-3:] + '/' + fcat + '/' + fid + '.mp4'
-            formats.append({
-                'format': 'fivemin',
-                'url': furl,
-                'preference': 1,
-            })
+        formats = []
+        sources = data.get('sources', {})
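+        # collect stream URLs from both the 'live' and 'live_again' source maps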
+        live_sources = list(sources.get('live', {}).items()) + list(sources.get('live_again', {}).items())
+        for key, url in live_sources:
+            ext = determine_ext(url)
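+            # expand HLS and HDS manifests into individual formats; other URLs are added directly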
+            if ext == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    url, video_id, ext='mp4', m3u8_id='hls', fatal=False))
+            elif ext == 'f4m':
+                formats.extend(self._extract_f4m_formats(
+                    url + '?hdcore=2.9.5', video_id, f4m_id='hds', fatal=False))
+            else:
+                formats.append({
+                    'format': key,
+                    'format_id': key.replace('/', '.'),
+                    'ext': 'mp4',
+                    'url': url,
+                    'vcodec': 'none' if key.startswith('audio/') else None,
+                })
+
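+        # nothing extracted directly; hand the video off to the 5min extractor if an ID is available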
+        if not formats and data.get('fivemin_id'):
+            return self.url_result('5min:%s' % data['fivemin_id'])
+
         self._sort_formats(formats)
 
         return {