]> Raphaël G. Git Repositories - youtubedl/blobdiff - youtube_dl/extractor/faz.py
Imported Upstream version 2015.11.10
[youtubedl] / youtube_dl / extractor / faz.py
index deaa4ed2d9bc14406b6a7d3d6e8b015c6fcf915d..cebdd0193a82eaccc673dffe9d001f766e9e31d1 100644 (file)
@@ -1,60 +1,67 @@
 # encoding: utf-8
-import re
-import xml.etree.ElementTree
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import (
-    determine_ext,
-    clean_html,
-    get_element_by_attribute,
-)
 
 
class FazIE(InfoExtractor):
    """Extractor for videos hosted on faz.net (Frankfurter Allgemeine Zeitung).

    Article pages embed a ``writeFLV('<config-url>', ...)`` call; the referenced
    config XML lists up to three encodings (LOW/HIGH/HQ) and a still image.
    """

    IE_NAME = 'faz.net'
    # The video id is the digit run immediately before ".html"; the URL path
    # may be arbitrarily nested or absent, hence the permissive prefix.
    _VALID_URL = r'https?://(?:www\.)?faz\.net/(?:[^/]+/)*.*?-(?P<id>\d+)\.html'

    _TESTS = [{
        'url': 'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
        'info_dict': {
            'id': '12610585',
            'ext': 'mp4',
            'title': 'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
            'description': 'md5:1453fbf9a0d041d985a47306192ea253',
        },
    }, {
        'url': 'http://www.faz.net/aktuell/politik/berlin-gabriel-besteht-zerreissprobe-ueber-datenspeicherung-13659345.html',
        'only_matching': True,
    }, {
        'url': 'http://www.faz.net/berlin-gabriel-besteht-zerreissprobe-ueber-datenspeicherung-13659345.html',
        'only_matching': True,
    }, {
        'url': 'http://www.faz.net/-13659345.html',
        'only_matching': True,
    }, {
        'url': 'http://www.faz.net/aktuell/politik/-13659345.html',
        'only_matching': True,
    }, {
        'url': 'http://www.faz.net/foobarblafasel-13659345.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Download the article page, locate the config XML and build the
        info dict (id, title, formats, description, thumbnail) for the video.
        """
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        config_xml_url = self._search_regex(
            r'writeFLV\(\'(.+?)\',', webpage, 'config xml url')
        config = self._download_xml(
            config_xml_url, video_id, 'Downloading config xml')

        encodings = config.find('ENCODINGS')
        formats = []
        # Enumerate in ascending quality so the index doubles as the relative
        # quality preference for _sort_formats (LOW=0 ... HQ=2).
        for pref, code in enumerate(['LOW', 'HIGH', 'HQ']):
            encoding = encodings.find(code)
            if encoding is None:
                continue
            # Guard against a malformed encoding entry: a missing or empty
            # FILENAME child would otherwise crash (AttributeError) or yield
            # a useless format with url=None — skip it instead.
            filename = encoding.find('FILENAME')
            if filename is None or not filename.text:
                continue
            formats.append({
                'url': filename.text,
                'format_id': code.lower(),
                'quality': pref,
            })
        self._sort_formats(formats)

        descr = self._html_search_regex(
            r'<p class="Content Copy">(.*?)</p>', webpage, 'description', fatal=False)
        # STILL/STILL_BIG is not guaranteed to exist in the config XML;
        # fall back to None rather than raising AttributeError on .text.
        still = config.find('STILL/STILL_BIG')
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'formats': formats,
            'description': descr,
            'thumbnail': still.text if still is not None else None,
        }