# encoding: utf-8
import re
import json
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    find_xpath_attr,
    unified_strdate,
    determine_ext,
    get_element_by_id,
    compat_str,
)

# There are different sources of video in arte.tv, the extraction process
# is different for each one. The videos usually expire in 7 days, so we can't
# add tests.

class ArteTvIE(InfoExtractor):
    _VIDEOS_URL = r'(?:http://)?videos.arte.tv/(?P<lang>fr|de)/.*-(?P<id>.*?).html'
    _LIVEWEB_URL = r'(?:http://)?liveweb.arte.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
    _LIVE_URL = r'index-[0-9]+\.html$'
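
    # suitable() is overridden instead of defining _VALID_URL because this
    # extractor handles two unrelated URL patterns (videos.arte.tv and
    # liveweb.arte.tv).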
    @classmethod
    def suitable(cls, url):
        return any(re.match(regex, url) for regex in (cls._VIDEOS_URL, cls._LIVEWEB_URL))

    # TODO implement Live Stream
    # from ..utils import compat_urllib_parse
    # def extractLiveStream(self, url):
    #     video_lang = url.split('/')[-4]
    #     info = self.grep_webpage(
    #         r'src="(.*?/videothek_js.*?\.js)',
    #         (1, 'url', u'Invalid URL: %s' % url)
    #     http_host = url.split('/')[2]
    #     next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
    #     info = self.grep_webpage(
    #         r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
    #             '(http://.*?\.swf).*?' +
    #         (1, 'path', u'could not extract video path: %s' % url),
    #         (2, 'player', u'could not extract video player: %s' % url),
    #         (3, 'url', u'could not extract video url: %s' % url)
    #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))

    def _real_extract(self, url):
        mobj = re.match(self._VIDEOS_URL, url)
        if mobj is not None:
            id = mobj.group('id')
            lang = mobj.group('lang')
            return self._extract_video(url, id, lang)

        mobj = re.match(self._LIVEWEB_URL, url)
        if mobj is not None:
            name = mobj.group('name')
            lang = mobj.group('lang')
            return self._extract_liveweb(url, name, lang)

        if re.search(self._LIVE_URL, url) is not None:
            raise ExtractorError(u'Arte live streams are not yet supported, sorry')
            # self.extractLiveStream(url)

    def _extract_video(self, url, video_id, lang):
        """Extract from videos.arte.tv"""
        ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
        ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
        ref_xml = self._download_webpage(ref_xml_url, video_id, note=u'Downloading metadata')
        ref_xml_doc = xml.etree.ElementTree.fromstring(ref_xml)
        config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
        config_xml_url = config_node.attrib['ref']
        config_xml = self._download_webpage(config_xml_url, video_id,
                                            note=u'Downloading configuration')

        video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))
        def _key(m):
            # Rank 'hd' above the other qualities so the best one sorts last
            quality = m.group('quality')
            if quality == 'hd':
                return 2
            else:
                return 1
        # We pick the best quality
        video_urls = sorted(video_urls, key=_key)
        video_url = list(video_urls)[-1].group('url')

        title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
        thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
                                            config_xml, 'thumbnail')
        return {'id': video_id,
                'title': title,
                'thumbnail': thumbnail,
                'url': video_url,
                }
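
    # Live web events: the page embeds an eventId that points at an XML event
    # description on download.liveweb.arte.tv; the HD url is used when the
    # event provides one, otherwise the SD url.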
    def _extract_liveweb(self, url, name, lang):
        """Extract from http://liveweb.arte.tv/"""
        webpage = self._download_webpage(url, name)
        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id')
        config_xml = self._download_webpage('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
                                            video_id, u'Downloading information')
        config_doc = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8'))
        event_doc = config_doc.find('event')
        video_doc = event_doc.find('video')
        url_node = video_doc.find('urlHd')
        if url_node is None:
            url_node = video_doc.find('urlSd')

        return {'id': video_id,
                'title': event_doc.find('name%s' % lang.capitalize()).text,
                'url': url_node.text.replace('MP4', 'mp4'),
                'thumbnail': self._og_search_thumbnail(webpage),
                }
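

# arte.tv guide pages (www.arte.tv/guide) embed an arte_vp_url attribute that
# points at a JSON description of the video; ArteTVPlus7IE reads that JSON
# ('videoJsonPlayer') and builds the formats list from its 'VSR' entries.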
class ArteTVPlus7IE(InfoExtractor):
    IE_NAME = u'arte.tv:+7'
    _VALID_URL = r'https?://www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'

    @classmethod
    def _extract_url_info(cls, url):
        mobj = re.match(cls._VALID_URL, url)
        lang = mobj.group('lang')
        # This is not a real id, it can be for example AJT for the news
        # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
        video_id = mobj.group('id')
        return video_id, lang

    def _real_extract(self, url):
        video_id, lang = self._extract_url_info(url)
        webpage = self._download_webpage(url, video_id)
        return self._extract_from_webpage(webpage, video_id, lang)

    def _extract_from_webpage(self, webpage, video_id, lang):
        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')

        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
        self.report_extraction(video_id)
        info = json.loads(json_info)
        player_info = info['videoJsonPlayer']

        info_dict = {
            'id': player_info['VID'],
            'title': player_info['VTI'],
            'description': player_info.get('VDE'),
            'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
            'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
        }

        all_formats = player_info['VSR'].values()
        # Some formats use the m3u8 protocol
        all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))

        def _match_lang(f):
            if f.get('versionCode') is None:
                return True
            # Return true if that format is in the language of the url
            # ('F' and 'A' are the language letters used in the versionCode
            # values for French and German)
            if lang == 'fr':
                l = 'F'
            elif lang == 'de':
                l = 'A'
            regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
            return any(re.match(r, f['versionCode']) for r in regexes)
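
        # Version codes seen in Arte's JSON look like 'VF', 'VA', 'VOF' or
        # 'VO-STF': 'F'/'A' appear to denote French/German audio, 'VO' the
        # original version and a '-ST' suffix a subtitled variant, which is
        # what _match_lang checks above.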
        # Some formats may not be in the same language as the url
        formats = filter(_match_lang, all_formats)
        formats = list(formats)  # in python3 filter returns an iterator

        # Some videos are only available in the 'Originalversion'
        # they aren't tagged as being in French or German
        if all(f['versionCode'] == 'VO' for f in all_formats):
            formats = all_formats
        if not formats:
            raise ExtractorError(u'The formats list is empty')

        if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
            def sort_key(f):
                return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
        else:
            def sort_key(f):
                return (
                    # Sort first by quality
                    int(f.get('height', -1)),
                    int(f.get('bitrate', -1)),
                    # The original version with subtitles has lower relevance
                    re.match(r'VO-ST(F|A)', f.get('versionCode', '')) is None,
                    # The version with subtitles for the deaf/hard of hearing
                    # (sourds/malentendants) also has lower relevance
                    re.match(r'VO?(F|A)-STM\1', f.get('versionCode', '')) is None,
                )
        formats = sorted(formats, key=sort_key)

        def _format(format_info):
            quality = ''
            height = format_info.get('height')
            if height is not None:
                quality = compat_str(height)
            bitrate = format_info.get('bitrate')
            if bitrate is not None:
                quality += '-%d' % bitrate
            if format_info.get('versionCode') is not None:
                format_id = u'%s-%s' % (quality, format_info['versionCode'])
            else:
                format_id = quality
            info = {
                'format_id': format_id,
                'format_note': format_info.get('versionLibelle'),
                'width': format_info.get('width'),
                'height': height,
            }
            if format_info['mediaType'] == u'rtmp':
                info['url'] = format_info['streamer']
                info['play_path'] = 'mp4:' + format_info['url']
                info['ext'] = 'flv'
            else:
                info['url'] = format_info['url']
                info['ext'] = determine_ext(info['url'])
            return info
        info_dict['formats'] = [_format(f) for f in formats]

        return info_dict


# It also uses the arte_vp_url from the webpage to extract the information
class ArteTVCreativeIE(ArteTVPlus7IE):
    IE_NAME = u'arte.tv:creative'
    _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)'

    _TEST = {
        u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
        u'file': u'050489-002.mp4',
        u'info_dict': {
            u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design',
        },
    }


class ArteTVFutureIE(ArteTVPlus7IE):
    IE_NAME = u'arte.tv:future'
    _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'

    _TEST = {
        u'url': u'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
        u'file': u'050940-003.mp4',
        u'info_dict': {
            u'title': u'Les champignons au secours de la planète',
        },
    }

    def _real_extract(self, url):
        anchor_id, lang = self._extract_url_info(url)
        webpage = self._download_webpage(url, anchor_id)
        row = get_element_by_id(anchor_id, webpage)
        return self._extract_from_webpage(row, anchor_id, lang)
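

# Rough usage sketch (not part of the original file): these extractors are
# normally driven through youtube-dl's public API rather than instantiated
# directly. Assuming a youtube_dl build of this era that exposes YoutubeDL,
# something like the following would exercise ArteTVPlus7IE without
# downloading the video:
#
#     import youtube_dl
#     ydl = youtube_dl.YoutubeDL({'quiet': True})
#     ydl.add_default_info_extractors()
#     info = ydl.extract_info(
#         'http://www.arte.tv/guide/fr/emissions/AJT/arte-journal',
#         download=False)
#     print(info['title'], [f['format_id'] for f in info['formats']])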