-        video_id = mobj.group('id')
-        if video_id is None:
-            webpage = self._download_webpage(url, mobj.group('path'))
-            video_id = self._search_regex(r'videoId = "(\d+)";', webpage, u'video id')
-        info_url = self._VIDEO_INFO_TEMPLATE % video_id
-        info_page = self._download_webpage(info_url,video_id,
-                                           u'Downloading video info')
-
-        self.report_extraction(video_id)
-        doc = xml.etree.ElementTree.fromstring(info_page.encode('utf-8'))
-        video_info = [video for video in doc if video.find('ID').text == video_id][0]
-        infos = video_info.find('INFOS')
-        media = video_info.find('MEDIA')
-        formats = [media.find('VIDEOS/%s' % format)
-                   for format in ['BAS_DEBIT', 'HAUT_DEBIT', 'HD']]
-        video_url = [format.text for format in formats if format is not None][-1]
-
-        return {'id': video_id,
-                'title': u'%s - %s' % (infos.find('TITRAGE/TITRE').text,
-                                       infos.find('TITRAGE/SOUS_TITRE').text),
-                'url': video_url,
-                'ext': 'flv',
-                'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text),
-                'thumbnail': media.find('IMAGES/GRAND').text,
-                'description': infos.find('DESCRIPTION').text,
-                'view_count': int(infos.find('NB_VUES').text),
-                }
+
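+        # the info API needs a per-site id, derived here from the second-level domain of the URL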
+        site_id = self._SITE_ID_MAP[compat_urllib_parse_urlparse(url).netloc.rsplit('.', 2)[-2]]
+
+        # Beware, some subclasses do not define an id group
+        display_id = remove_end(dict_get(mobj.groupdict(), ('display_id', 'id', 'vid')), '.html')
+
+        webpage = self._download_webpage(url, display_id)
+        video_id = self._search_regex(
+            [r'<canal:player[^>]+?videoId=(["\'])(?P<id>\d+)',
+             r'id=["\']canal_video_player(?P<id>\d+)',
+             r'data-video=["\'](?P<id>\d+)'],
+            webpage, 'video id', default=mobj.group('vid'), group='id')
+
+        info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
+        video_data = self._download_json(info_url, video_id, 'Downloading video JSON')
+
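+        # the info endpoint can return a list covering several videos; keep the entry matching our id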
+        if isinstance(video_data, list):
+            video_data = [video for video in video_data if video.get('ID') == video_id][0]
+        media = video_data['MEDIA']
+        infos = video_data['INFOS']
+
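+        # qualities() ranks the plain format ids in ascending order: MOBILE < BAS_DEBIT < HAUT_DEBIT < HD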
+        preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD'])
+
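+        # probe one of the format URLs first: georestricted videos redirect to a /blocage page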
+        fmt_url = next(iter(media['VIDEOS'].values()))
+        if '/geo' in fmt_url.lower():
+            response = self._request_webpage(
+                HEADRequest(fmt_url), video_id,
+                'Checking if the video is georestricted')
+            if '/blocage' in response.geturl():
+                raise ExtractorError(
+                    'The video is not available in your country',
+                    expected=True)
+
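+        # HLS and HDS entries expand into several formats; other entries are direct URLs that need the player's secret token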
+        formats = []
+        for format_id, format_url in media['VIDEOS'].items():
+            if not format_url:
+                continue
+            if format_id == 'HLS':
+                formats.extend(self._extract_m3u8_formats(
+                    format_url, video_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False))
+            elif format_id == 'HDS':
+                formats.extend(self._extract_f4m_formats(
+                    format_url + '?hdcore=2.11.3', video_id, f4m_id=format_id, fatal=False))
+            else:
+                formats.append({
+                    # the secret extracted from the ya() function in http://player.canalplus.fr/common/js/canalPlayer.js
+                    'url': format_url + '?secret=pqzerjlsmdkjfoiuerhsdlfknaes',
+                    'format_id': format_id,
+                    'preference': preference(format_id),
+                })
+        self._sort_formats(formats)
+
+        thumbnails = [{
+            'id': image_id,
+            'url': image_url,
+        } for image_id, image_url in media.get('images', {}).items()]
+
+        titrage = infos['TITRAGE']
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': '%s - %s' % (titrage['TITRE'],
+                                  titrage['SOUS_TITRE']),
+            'upload_date': unified_strdate(infos.get('PUBLICATION', {}).get('DATE')),
+            'thumbnails': thumbnails,
+            'description': infos.get('DESCRIPTION'),
+            'duration': int_or_none(infos.get('DURATION')),
+            'view_count': int_or_none(infos.get('NB_VUES')),
+            'like_count': int_or_none(infos.get('NB_LIKES')),
+            'comment_count': int_or_none(infos.get('NB_COMMENTS')),
+            'formats': formats,
+        }