# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    HEADRequest,
    int_or_none,
    qualities,
    unified_strdate,
    url_basename,
)


class CanalplusIE(InfoExtractor):
    IE_DESC = 'canalplus.fr, piwiplus.fr and d8.tv'
    _VALID_URL = r'https?://(?:www\.(?P<site>canalplus\.fr|piwiplus\.fr|d8\.tv|itele\.fr)/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s?format=json'
    _SITE_ID_MAP = {
        'canalplus.fr': 'cplus',
        'piwiplus.fr': 'teletoon',
        # assumed service ids for the two remaining supported sites
        'd8.tv': 'd8',
        'itele.fr': 'itele',
    }

    _TESTS = [{
        'url': 'http://www.canalplus.fr/c-emissions/pid1830-c-zapping.html?vid=1263092',
        'md5': '12164a6f14ff6df8bd628e8ba9b10b78',
        'info_dict': {
            'title': 'Le Zapping - 13/05/15',
            'description': 'md5:09738c0d06be4b5d06a0940edb0da73f',
            'upload_date': '20150513',
        },
    }, {
        'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190',
        'info_dict': {
            'title': 'Le labyrinthe - Boing super ranger',
            'description': 'md5:4cea7a37153be42c1ba2c1d3064376ff',
            'upload_date': '20140724',
        },
        'skip': 'Only works from France',
    }, {
        'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
        'info_dict': {
            'title': 'Campagne intime - Documentaire exceptionnel',
            'description': 'md5:d2643b799fb190846ae09c61e59a859f',
            'upload_date': '20131108',
        },
        'skip': 'videos get deleted after a while',
    }, {
        'url': 'http://www.itele.fr/france/video/aubervilliers-un-lycee-en-colere-111559',
        'md5': '38b8f7934def74f0d6f3ba6c036a5f82',
        'info_dict': {
            'title': 'Aubervilliers : un lycée en colère - Le 11/02/2015 à 06h45',
            'description': 'md5:8216206ec53426ea6321321f3b3c16db',
            'upload_date': '20150211',
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.groupdict().get('id')

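        # map the matched site to the service id used by the Canal+ REST API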
        site_id = self._SITE_ID_MAP[mobj.group('site') or 'canal']

        # Beware, some subclasses do not define an id group
        display_id = url_basename(mobj.group('path'))

        if video_id is None:
            webpage = self._download_webpage(url, display_id)
            video_id = self._search_regex(
                [r'<canal:player[^>]+?videoId=(["\'])(?P<id>\d+)', r'id=["\']canal_video_player(?P<id>\d+)'],
                webpage, 'video id', group='id')

        info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
        video_data = self._download_json(info_url, video_id, 'Downloading video JSON')

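        # the API may return a list of related videos; keep only the requested one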
        if isinstance(video_data, list):
            video_data = [video for video in video_data if video.get('ID') == video_id][0]
        media = video_data['MEDIA']
        infos = video_data['INFOS']

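        # format ids ranked from lowest to highest quality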
        preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD'])

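        # probe one of the format URLs: geo-restricted videos are served from
        # a /geo path and redirect to a /blocage page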
        fmt_url = next(iter(media.get('VIDEOS')))
        if '/geo' in fmt_url.lower():
            response = self._request_webpage(
                HEADRequest(fmt_url), video_id,
                'Checking if the video is georestricted')
            if '/blocage' in response.geturl():
                raise ExtractorError(
                    'The video is not available in your country',
                    expected=True)

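        # HLS and HDS manifests are expanded into individual formats; other
        # entries are direct URLs signed with the player secret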
        formats = []
        for format_id, format_url in media['VIDEOS'].items():
            if not format_url:
                continue
            if format_id == 'HLS':
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False))
            elif format_id == 'HDS':
                formats.extend(self._extract_f4m_formats(
                    format_url + '?hdcore=2.11.3', video_id, f4m_id=format_id, fatal=False))
            else:
                formats.append({
                    # the secret extracted from ya function in http://player.canalplus.fr/common/js/canalPlayer.js
                    'url': format_url + '?secret=pqzerjlsmdkjfoiuerhsdlfknaes',
                    'format_id': format_id,
                    'preference': preference(format_id),
                })
        self._sort_formats(formats)

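        # the API's 'images' map provides thumbnail URLs keyed by image id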
        thumbnails = [{
            'id': image_id,
            'url': image_url,
        } for image_id, image_url in media.get('images', {}).items()]

        titrage = infos['TITRAGE']

        return {
            'id': video_id,
            'display_id': display_id,
            'title': '%s - %s' % (titrage['TITRE'],
                                  titrage['SOUS_TITRE']),
            'upload_date': unified_strdate(infos.get('PUBLICATION', {}).get('DATE')),
            'thumbnails': thumbnails,
            'description': infos.get('DESCRIPTION'),
            'duration': int_or_none(infos.get('DURATION')),
            'view_count': int_or_none(infos.get('NB_VUES')),
            'like_count': int_or_none(infos.get('NB_LIKES')),
            'comment_count': int_or_none(infos.get('NB_COMMENTS')),
        }