X-Git-Url: https://git.rapsys.eu/youtubedl/blobdiff_plain/feb5020b37d7d3ba4005a8bac6f4efece4ce4b8c..76d85602f8a22ca3817c3a86f4f0e8969c0b02a9:/youtube_dl/extractor/arte.py?ds=inline

diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py
index 183274e..2a00da3 100644
--- a/youtube_dl/extractor/arte.py
+++ b/youtube_dl/extractor/arte.py
@@ -1,133 +1,264 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
 import re
-import json
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_parse_qs,
+    compat_urllib_parse_urlparse,
+)
 from ..utils import (
-    # This is used by the not implemented extractLiveStream method
-    compat_urllib_parse,
-
-    ExtractorError,
+    find_xpath_attr,
     unified_strdate,
+    get_element_by_attribute,
+    int_or_none,
+    qualities,
 )
 
+# There are different sources of video in arte.tv, the extraction process
+# is different for each one. The videos usually expire in 7 days, so we can't
+# add tests.
+
+
 class ArteTvIE(InfoExtractor):
-    """
-    There are two sources of video in arte.tv: videos.arte.tv and
-    www.arte.tv/guide, the extraction process is different for each one.
-    The videos expire in 7 days, so we can't add tests.
-    """
-    _EMISSION_URL = r'(?:http://)?www\.arte.tv/guide/(?:fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
-    _VIDEOS_URL = r'(?:http://)?videos.arte.tv/(?:fr|de)/.*-(?P<id>.*?).html'
-    _LIVE_URL = r'index-[0-9]+\.html$'
+    _VALID_URL = r'http://videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
+    IE_NAME = 'arte.tv'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        lang = mobj.group('lang')
+        video_id = mobj.group('id')
+
+        ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
+        ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
+        ref_xml_doc = self._download_xml(
+            ref_xml_url, video_id, note='Downloading metadata')
+        config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
+        config_xml_url = config_node.attrib['ref']
+        config = self._download_xml(
+            config_xml_url, video_id, note='Downloading configuration')
+
+        formats = [{
+            'format_id': q.attrib['quality'],
+            # The playpath starts at 'mp4:', if we don't manually
+            # split the url, rtmpdump will incorrectly parse them
+            'url': q.text.split('mp4:', 1)[0],
+            'play_path': 'mp4:' + q.text.split('mp4:', 1)[1],
+            'ext': 'flv',
+            'quality': 2 if q.attrib['quality'] == 'hd' else 1,
+        } for q in config.findall('./urls/url')]
+        self._sort_formats(formats)
+
+        title = config.find('.//name').text
+        thumbnail = config.find('.//firstThumbnailUrl').text
+        return {
+            'id': video_id,
+            'title': title,
+            'thumbnail': thumbnail,
+            'formats': formats,
+        }
 
-    IE_NAME = u'arte.tv'
+
+class ArteTVPlus7IE(InfoExtractor):
+    IE_NAME = 'arte.tv:+7'
+    _VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
 
     @classmethod
-    def suitable(cls, url):
-        return any(re.match(regex, url) for regex in (cls._EMISSION_URL, cls._VIDEOS_URL))
-
-    # TODO implement Live Stream
-    # def extractLiveStream(self, url):
-    #     video_lang = url.split('/')[-4]
-    #     info = self.grep_webpage(
-    #         url,
-    #         r'src="(.*?/videothek_js.*?\.js)',
-    #         0,
-    #         [
-    #             (1, 'url', u'Invalid URL: %s' % url)
-    #         ]
-    #     )
-    #     http_host = url.split('/')[2]
-    #     next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
-    #     info = self.grep_webpage(
-    #         next_url,
-    #         r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
-    #             '(http://.*?\.swf).*?' +
-    #             '(rtmp://.*?)\'',
-    #         re.DOTALL,
-    #         [
-    #             (1, 'path', u'could not extract video path: %s' % url),
-    #             (2, 'player', u'could not extract video player: %s' % url),
-    #             (3, 'url', u'could not extract video url: %s' % url)
-    #         ]
-    #     )
-    #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))
+    def _extract_url_info(cls, url):
+        mobj = re.match(cls._VALID_URL, url)
+        lang = mobj.group('lang')
+        # This is not a real id, it can be for example AJT for the news
+        # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
+        video_id = mobj.group('id')
+        return video_id, lang
 
     def _real_extract(self, url):
-        mobj = re.match(self._EMISSION_URL, url)
-        if mobj is not None:
-            name = mobj.group('name')
-            # This is not a real id, it can be for example AJT for the news
-            # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
-            video_id = mobj.group('id')
-            return self._extract_emission(url, video_id)
-
-        mobj = re.match(self._VIDEOS_URL, url)
-        if mobj is not None:
-            id = mobj.group('id')
-            return self._extract_video(url, id)
-
-        if re.search(self._LIVE_URL, video_id) is not None:
-            raise ExtractorError(u'Arte live streams are not yet supported, sorry')
-            # self.extractLiveStream(url)
-            # return
-
-    def _extract_emission(self, url, video_id):
-        """Extract from www.arte.tv/guide"""
+        video_id, lang = self._extract_url_info(url)
         webpage = self._download_webpage(url, video_id)
-        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')
+        return self._extract_from_webpage(webpage, video_id, lang)
+
+    def _extract_from_webpage(self, webpage, video_id, lang):
+        json_url = self._html_search_regex(
+            [r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'],
+            webpage, 'json vp url', default=None)
+        if not json_url:
+            iframe_url = self._html_search_regex(
+                r'<iframe[^>]+src=(["\'])(?P<url>.+\bjson_url=.+?)\1',
+                webpage, 'iframe url', group='url')
+            json_url = compat_parse_qs(
+                compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
+        return self._extract_from_json_url(json_url, video_id, lang)
 
-        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
-        self.report_extraction(video_id)
-        info = json.loads(json_info)
+    def _extract_from_json_url(self, json_url, video_id, lang):
+        info = self._download_json(json_url, video_id)
         player_info = info['videoJsonPlayer']
 
-        info_dict = {'id': player_info['VID'],
-                     'title': player_info['VTI'],
-                     'description': player_info['VDE'],
-                     'upload_date': unified_strdate(player_info['VDA'].split(' ')[0]),
-                     'thumbnail': player_info['programImage'],
-                     'ext': 'flv',
-                     }
-
-        formats = player_info['VSR'].values()
-        # We order the formats by quality
-        formats = sorted(formats, key=lambda f: int(f['height']))
-        # Pick the best quality
-        format_info = formats[-1]
-        if format_info['mediaType'] == u'rtmp':
-            info_dict['url'] = format_info['streamer']
-            info_dict['play_path'] = 'mp4:' + format_info['url']
-        else:
-            info_dict['url'] = format_info['url']
+        upload_date_str = player_info.get('shootingDate')
+        if not upload_date_str:
+            upload_date_str = player_info.get('VDA', '').split(' ')[0]
 
-        return info_dict
+        title = player_info['VTI'].strip()
+        subtitle = player_info.get('VSU', '').strip()
+        if subtitle:
+            title += ' - %s' % subtitle
+
+        info_dict = {
+            'id': player_info['VID'],
+            'title': title,
+            'description': player_info.get('VDE'),
+            'upload_date': unified_strdate(upload_date_str),
+            'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
+        }
+        qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
+
+        formats = []
+        for format_id, format_dict in player_info['VSR'].items():
+            f = dict(format_dict)
+            versionCode = f.get('versionCode')
 
-    def _extract_video(self, url, video_id):
-        """Extract from videos.arte.tv"""
-        config_xml_url = url.replace('/videos/', '/do_delegate/videos/')
-        config_xml_url = config_xml_url.replace('.html', ',view,asPlayerXml.xml')
-        config_xml = self._download_webpage(config_xml_url, video_id)
-        config_xml_url = self._html_search_regex(r'
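
Note on the new extraction flow: ArteTVPlus7IE._extract_from_webpage first looks for an arte_vp_url/data-url attribute and only falls back to the embedded player iframe, where the JSON metadata URL travels as the json_url query parameter. The standalone sketch below (not part of the diff) walks through that fallback step; the page snippet and URLs in it are made-up examples, and the standard-library parse_qs/urlparse calls stand in for youtube-dl's compat_parse_qs/compat_urllib_parse_urlparse wrappers.

# Standalone sketch of the iframe fallback in _extract_from_webpage.
# The webpage snippet below is a made-up example, not real Arte markup.
import re

try:  # Python 3
    from urllib.parse import urlparse, parse_qs
except ImportError:  # Python 2 (what the compat_* wrappers paper over)
    from urlparse import urlparse, parse_qs

webpage = (
    '<iframe src="http://www.arte.tv/player/v2/index.php'
    '?json_url=http%3A%2F%2Fexample.invalid%2Fplayer%2FF%2Fvideo.json'
    '&lang=fr"></iframe>'
)

# Same regex as in the diff: capture the quoted iframe src carrying json_url.
iframe_url = re.search(
    r'<iframe[^>]+src=(["\'])(?P<url>.+\bjson_url=.+?)\1',
    webpage).group('url')

# compat_parse_qs(compat_urllib_parse_urlparse(...).query) boils down to the
# stdlib calls below; parse_qs also percent-decodes the parameter value.
json_url = parse_qs(urlparse(iframe_url).query)['json_url'][0]
print(json_url)  # http://example.invalid/player/F/video.json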