diff --git a/youtube_dl/extractor/faz.py b/youtube_dl/extractor/faz.py
index d0dfde694b4d93f7249f2dd3a326ecb0bdca98dd..312ee2aeed953400df55086a13bfa40ebbe31650 100644
--- a/youtube_dl/extractor/faz.py
+++ b/youtube_dl/extractor/faz.py
@@ -1,53 +1,93 @@
-# encoding: utf-8
+# coding: utf-8
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_etree_fromstring
 from ..utils import (
-    determine_ext,
+    xpath_element,
+    xpath_text,
+    int_or_none,
 )
 
 
 class FazIE(InfoExtractor):
-    IE_NAME = u'faz.net'
-    _VALID_URL = r'https?://www\.faz\.net/multimedia/videos/.*?-(?P<id>\d+).html'
-
-    _TEST = {
-        u'url': u'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
-        u'file': u'12610585.mp4',
-        u'info_dict': {
-            u'title': u'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
-            u'description': u'md5:1453fbf9a0d041d985a47306192ea253',
+    IE_NAME = 'faz.net'
+    _VALID_URL = r'https?://(?:www\.)?faz\.net/(?:[^/]+/)*.*?-(?P<id>\d+)\.html'
+
+    _TESTS = [{
+        'url': 'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
+        'info_dict': {
+            'id': '12610585',
+            'ext': 'mp4',
+            'title': 'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
+            'description': 'md5:1453fbf9a0d041d985a47306192ea253',
         },
-    }
+    }, {
+        'url': 'http://www.faz.net/aktuell/politik/berlin-gabriel-besteht-zerreissprobe-ueber-datenspeicherung-13659345.html',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.faz.net/berlin-gabriel-besteht-zerreissprobe-ueber-datenspeicherung-13659345.html',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.faz.net/-13659345.html',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.faz.net/aktuell/politik/-13659345.html',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.faz.net/foobarblafasel-13659345.html',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        self.to_screen(video_id)
+        video_id = self._match_id(url)
+
         webpage = self._download_webpage(url, video_id)
-        config_xml_url = self._search_regex(r'writeFLV\(\'(.+?)\',', webpage,
-            u'config xml url')
-        config = self._download_xml(config_xml_url, video_id,
-            u'Downloading config xml')
+        description = self._og_search_description(webpage)
+        media = self._html_search_regex(
+            r"data-videojs-media='([^']+)",
+            webpage, 'media')
+        if media == 'extern':
+            perform_url = self._search_regex(
+                r"<iframe[^>]+?src='((?:http:)?//player\.performgroup\.com/eplayer/eplayer\.html#/?[0-9a-f]{26}\.[0-9a-z]{26})",
+                webpage, 'perform url')
+            return self.url_result(perform_url)
+        config = compat_etree_fromstring(media)
 
-        encodings = config.find('ENCODINGS')
+        encodings = xpath_element(config, 'ENCODINGS', 'encodings', True)
         formats = []
-        for code in ['LOW', 'HIGH', 'HQ']:
-            encoding = encodings.find(code)
-            if encoding is None:
-                continue
-            encoding_url = encoding.find('FILENAME').text
-            formats.append({
-                'url': encoding_url,
-                'ext': determine_ext(encoding_url),
-                'format_id': code.lower(),
-            })
-
-        descr = self._html_search_regex(r'<p class="Content Copy">(.*?)</p>', webpage, u'description')
+        for pref, code in enumerate(['LOW', 'HIGH', 'HQ']):
+            encoding = xpath_element(encodings, code)
+            if encoding is not None:
+                encoding_url = xpath_text(encoding, 'FILENAME')
+                if encoding_url:
+                    tbr = xpath_text(encoding, 'AVERAGEBITRATE', 1000)
+                    if tbr:
+                        tbr = int_or_none(tbr.replace(',', '.'))
+                    f = {
+                        'url': encoding_url,
+                        'format_id': code.lower(),
+                        'quality': pref,
+                        'tbr': tbr,
+                        'vcodec': xpath_text(encoding, 'CODEC'),
+                    }
+                    mobj = re.search(r'(\d+)x(\d+)_(\d+)\.mp4', encoding_url)
+                    if mobj:
+                        f.update({
+                            'width': int(mobj.group(1)),
+                            'height': int(mobj.group(2)),
+                            'tbr': tbr or int(mobj.group(3)),
+                        })
+                    formats.append(f)
+        self._sort_formats(formats)
+
         return {
             'id': video_id,
             'title': self._og_search_title(webpage),
             'formats': formats,
-            'description': descr,
-            'thumbnail': config.find('STILL/STILL_BIG').text,
+            'description': description.strip() if description else None,
+            'thumbnail': xpath_text(config, 'STILL/STILL_BIG'),
+            'duration': int_or_none(xpath_text(config, 'DURATION')),
         }
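
For reference, the sketch below reproduces the format-building loop added in this diff as a minimal, self-contained script, using only the standard library in place of youtube-dl's xpath_element/xpath_text/int_or_none helpers. SAMPLE_MEDIA and extract_formats are illustrative names, and the XML payload is an invented stand-in for what the data-videojs-media attribute might carry, not real FAZ data.

import re
import xml.etree.ElementTree as ET

# Hypothetical stand-in for the XML carried in the data-videojs-media
# attribute; element names mirror the ones the extractor reads, the
# values are invented.
SAMPLE_MEDIA = """\
<video>
  <DURATION>92</DURATION>
  <STILL><STILL_BIG>http://example.invalid/still_big.jpg</STILL_BIG></STILL>
  <ENCODINGS>
    <LOW>
      <FILENAME>http://example.invalid/clip_480x270_300.mp4</FILENAME>
      <AVERAGEBITRATE>300,0</AVERAGEBITRATE>
      <CODEC>h264</CODEC>
    </LOW>
    <HQ>
      <FILENAME>http://example.invalid/clip_1280x720_2000.mp4</FILENAME>
      <AVERAGEBITRATE>2000,0</AVERAGEBITRATE>
      <CODEC>h264</CODEC>
    </HQ>
  </ENCODINGS>
</video>
"""


def extract_formats(media_xml):
    config = ET.fromstring(media_xml)
    encodings = config.find('ENCODINGS')
    formats = []
    # LOW < HIGH < HQ, so the enumerate index doubles as a quality preference.
    for pref, code in enumerate(['LOW', 'HIGH', 'HQ']):
        encoding = encodings.find(code)
        if encoding is None:
            continue
        encoding_url = encoding.findtext('FILENAME')
        if not encoding_url:
            continue
        # Bitrates use a decimal comma ("2000,0"), hence the replace().
        tbr_text = encoding.findtext('AVERAGEBITRATE')
        tbr = int(float(tbr_text.replace(',', '.'))) if tbr_text else None
        f = {
            'url': encoding_url,
            'format_id': code.lower(),
            'quality': pref,
            'tbr': tbr,
            'vcodec': encoding.findtext('CODEC'),
        }
        # Width, height and a fallback bitrate are also encoded in the file name.
        mobj = re.search(r'(\d+)x(\d+)_(\d+)\.mp4', encoding_url)
        if mobj:
            f.update({
                'width': int(mobj.group(1)),
                'height': int(mobj.group(2)),
                'tbr': tbr or int(mobj.group(3)),
            })
        formats.append(f)
    return formats


if __name__ == '__main__':
    for f in extract_formats(SAMPLE_MEDIA):
        print(f['format_id'], f.get('width'), f.get('height'), f.get('tbr'))

In the extractor itself the resulting list is additionally passed through self._sort_formats(formats) and returned together with the title, description, thumbnail and duration, as shown in the hunk above.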