]> Raphaël G. Git Repositories - youtubedl/blobdiff - youtube_dl/extractor/arte.py
Prepare for release.
[youtubedl] / youtube_dl / extractor / arte.py
index 82e3ffe04312d27abb5865a48ccdc69075afbc6f..e7a91a1eb5e835c9b6e8bd9f16302a9bc7a8bf90 100644 (file)
@@ -1,54 +1,31 @@
 import re
 import re
-import socket
+import json
+import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..utils import (
 
 from .common import InfoExtractor
 from ..utils import (
-    compat_http_client,
-    compat_str,
-    compat_urllib_error,
-    compat_urllib_parse,
-    compat_urllib_request,
-
     ExtractorError,
     unified_strdate,
 )
 
 class ArteTvIE(InfoExtractor):
     ExtractorError,
     unified_strdate,
 )
 
 class ArteTvIE(InfoExtractor):
-    """arte.tv information extractor."""
-
-    _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
+    """
+    There are two sources of video in arte.tv: videos.arte.tv and
+    www.arte.tv/guide, the extraction process is different for each one.
+    The videos expire in 7 days, so we can't add tests.
+    """
+    _EMISSION_URL = r'(?:http://)?www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
+    _VIDEOS_URL = r'(?:http://)?videos.arte.tv/(?P<lang>fr|de)/.*-(?P<id>.*?).html'
     _LIVE_URL = r'index-[0-9]+\.html$'
 
     IE_NAME = u'arte.tv'
 
     _LIVE_URL = r'index-[0-9]+\.html$'
 
     IE_NAME = u'arte.tv'
 
-    def fetch_webpage(self, url):
-        request = compat_urllib_request.Request(url)
-        try:
-            self.report_download_webpage(url)
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
-        except ValueError as err:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        return webpage
-
-    def grep_webpage(self, url, regex, regexFlags, matchTuples):
-        page = self.fetch_webpage(url)
-        mobj = re.search(regex, page, regexFlags)
-        info = {}
-
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        for (i, key, err) in matchTuples:
-            if mobj.group(i) is None:
-                raise ExtractorError(err)
-            else:
-                info[key] = mobj.group(i)
-
-        return info
+    @classmethod
+    def suitable(cls, url):
+        return any(re.match(regex, url) for regex in (cls._EMISSION_URL, cls._VIDEOS_URL))
 
     # TODO implement Live Stream
 
     # TODO implement Live Stream
+    # from ..utils import compat_urllib_parse
     # def extractLiveStream(self, url):
     #     video_lang = url.split('/')[-4]
     #     info = self.grep_webpage(
     # def extractLiveStream(self, url):
     #     video_lang = url.split('/')[-4]
     #     info = self.grep_webpage(
@@ -75,62 +52,94 @@ class ArteTvIE(InfoExtractor):
     #     )
     #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))
 
     #     )
     #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))
 
-    def extractPlus7Stream(self, url):
-        video_lang = url.split('/')[-3]
-        info = self.grep_webpage(
-            url,
-            r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
-            0,
-            [
-                (1, 'url', u'Invalid URL: %s' % url)
-            ]
-        )
-        next_url = compat_urllib_parse.unquote(info.get('url'))
-        info = self.grep_webpage(
-            next_url,
-            r'<video lang="%s" ref="(http[^\'"&]*)' % video_lang,
-            0,
-            [
-                (1, 'url', u'Could not find <video> tag: %s' % url)
-            ]
-        )
-        next_url = compat_urllib_parse.unquote(info.get('url'))
-
-        info = self.grep_webpage(
-            next_url,
-            r'<video id="(.*?)".*?>.*?' +
-                '<name>(.*?)</name>.*?' +
-                '<dateVideo>(.*?)</dateVideo>.*?' +
-                '<url quality="hd">(.*?)</url>',
-            re.DOTALL,
-            [
-                (1, 'id',    u'could not extract video id: %s' % url),
-                (2, 'title', u'could not extract video title: %s' % url),
-                (3, 'date',  u'could not extract video date: %s' % url),
-                (4, 'url',   u'could not extract video url: %s' % url)
-            ]
-        )
-
-        return {
-            'id':           info.get('id'),
-            'url':          compat_urllib_parse.unquote(info.get('url')),
-            'uploader':     u'arte.tv',
-            'upload_date':  unified_strdate(info.get('date')),
-            'title':        info.get('title').decode('utf-8'),
-            'ext':          u'mp4',
-            'format':       u'NA',
-            'player_url':   None,
-        }
-
     def _real_extract(self, url):
     def _real_extract(self, url):
-        video_id = url.split('/')[-1]
-        self.report_extraction(video_id)
+        mobj = re.match(self._EMISSION_URL, url)
+        if mobj is not None:
+            lang = mobj.group('lang')
+            # This is not a real id, it can be for example AJT for the news
+            # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
+            video_id = mobj.group('id')
+            return self._extract_emission(url, video_id, lang)
+
+        mobj = re.match(self._VIDEOS_URL, url)
+        if mobj is not None:
+            id = mobj.group('id')
+            lang = mobj.group('lang')
+            return self._extract_video(url, id, lang)
 
         if re.search(self._LIVE_URL, video_id) is not None:
             raise ExtractorError(u'Arte live streams are not yet supported, sorry')
             # self.extractLiveStream(url)
             # return
 
         if re.search(self._LIVE_URL, video_id) is not None:
             raise ExtractorError(u'Arte live streams are not yet supported, sorry')
             # self.extractLiveStream(url)
             # return
-        else:
-            info = self.extractPlus7Stream(url)
 
 
-        return [info]
+    def _extract_emission(self, url, video_id, lang):
+        """Extract from www.arte.tv/guide"""
+        webpage = self._download_webpage(url, video_id)
+        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')
+
+        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
+        self.report_extraction(video_id)
+        info = json.loads(json_info)
+        player_info = info['videoJsonPlayer']
+
+        info_dict = {'id': player_info['VID'],
+                     'title': player_info['VTI'],
+                     'description': player_info['VDE'],
+                     'upload_date': unified_strdate(player_info['VDA'].split(' ')[0]),
+                     'thumbnail': player_info['programImage'],
+                     'ext': 'flv',
+                     }
+
+        formats = player_info['VSR'].values()
+        def _match_lang(f):
+            # Return true if that format is in the language of the url
+            if lang == 'fr':
+                l = 'F'
+            elif lang == 'de':
+                l = 'A'
+            regexes = [r'VO?%s' % l, r'V%s-ST.' % l]
+            return any(re.match(r, f['versionCode']) for r in regexes)
+        # Some formats may not be in the same language as the url
+        formats = filter(_match_lang, formats)
+        # We order the formats by quality
+        formats = sorted(formats, key=lambda f: int(f['height']))
+        # Pick the best quality
+        format_info = formats[-1]
+        if format_info['mediaType'] == u'rtmp':
+            info_dict['url'] = format_info['streamer']
+            info_dict['play_path'] = 'mp4:' + format_info['url']
+        else:
+            info_dict['url'] = format_info['url']
+
+        return info_dict
+
+    def _extract_video(self, url, video_id, lang):
+        """Extract from videos.arte.tv"""
+        ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
+        ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
+        ref_xml = self._download_webpage(ref_xml_url, video_id, note=u'Downloading metadata')
+        ref_xml_doc = xml.etree.ElementTree.fromstring(ref_xml)
+        config_node = ref_xml_doc.find('.//video[@lang="%s"]' % lang)
+        config_xml_url = config_node.attrib['ref']
+        config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')
+
+        video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))
+        def _key(m):
+            quality = m.group('quality')
+            if quality == 'hd':
+                return 2
+            else:
+                return 1
+        # We pick the best quality
+        video_urls = sorted(video_urls, key=_key)
+        video_url = list(video_urls)[-1].group('url')
+        
+        title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
+        thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
+                                            config_xml, 'thumbnail')
+        return {'id': video_id,
+                'title': title,
+                'thumbnail': thumbnail,
+                'url': video_url,
+                'ext': 'flv',
+                }