# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from .ooyala import OoyalaIE
from ..utils import (
    clean_html,
    determine_ext,
    int_or_none,
    str_or_none,
    try_get,
    urljoin,
)


class TelecincoIE(InfoExtractor):
    IE_DESC = 'telecinco.es, cuatro.com and mediaset.es'
    _VALID_URL = r'https?://(?:www\.)?(?:telecinco\.es|cuatro\.com|mediaset\.es)/(?:[^/]+/)+(?P<id>.+?)\.html'

    _TESTS = [{
        'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html',
        'info_dict': {
            'id': '1876350223',
            'title': 'Bacalao con kokotxas al pil-pil',
            'description': 'md5:716caf5601e25c3c5ab6605b1ae71529',
        },
        'playlist': [{
            'md5': 'adb28c37238b675dad0f042292f209a7',
            'info_dict': {
                'id': 'JEA5ijCnF6p5W08A1rNKn7',
                'ext': 'mp4',
                'title': 'Con Martín Berasategui, hacer un bacalao al pil-pil es fácil y divertido',
                'duration': 662,
            },
        }]
    }, {
        'url': 'http://www.cuatro.com/deportes/futbol/barcelona/Leo_Messi-Champions-Roma_2_2052780128.html',
        'md5': '9468140ebc300fbb8b9d65dc6e5c4b43',
        'info_dict': {
            'id': 'jn24Od1zGLG4XUZcnUnZB6',
            'ext': 'mp4',
            'title': '¿Quién es este ex futbolista con el que hablan Leo Messi y Luis Suárez?',
            'description': 'md5:a62ecb5f1934fc787107d7b9a2262805',
            'duration': 79,
        },
    }, {
        'url': 'http://www.mediaset.es/12meses/campanas/doylacara/conlatratanohaytrato/Ayudame-dar-cara-trata-trato_2_1986630220.html',
        'md5': 'ae2dc6b7b50b2392076a51c0f70e01f6',
        'info_dict': {
            'id': 'aywerkD2Sv1vGNqq9b85Q2',
            'ext': 'mp4',
            'title': '#DOYLACARA. Con la trata no hay trato',
            'description': 'md5:2771356ff7bfad9179c5f5cd954f1477',
            'duration': 50,
        },
    }, {
        # video in opening's content
        'url': 'https://www.telecinco.es/vivalavida/fiorella-sobrina-edmundo-arrocet-entrevista_18_2907195140.html',
        'info_dict': {
            'id': '2907195140',
            'title': 'La surrealista entrevista a la sobrina de Edmundo Arrocet: "No puedes venir aquí y tomarnos por tontos"',
            'description': 'md5:73f340a7320143d37ab895375b2bf13a',
        },
        'playlist': [{
            'md5': 'adb28c37238b675dad0f042292f209a7',
            'info_dict': {
                'id': 'TpI2EttSDAReWpJ1o0NVh2',
                'ext': 'mp4',
                'title': 'La surrealista entrevista a la sobrina de Edmundo Arrocet: "No puedes venir aquí y tomarnos por tontos"',
                'duration': 1015,
            },
        }],
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.telecinco.es/informativos/nacional/Pablo_Iglesias-Informativos_Telecinco-entrevista-Pedro_Piqueras_2_1945155182.html',
        'only_matching': True,
    }, {
        'url': 'http://www.telecinco.es/espanasinirmaslejos/Espana-gran-destino-turistico_2_1240605043.html',
        'only_matching': True,
    }, {
        # ooyala video
        'url': 'http://www.cuatro.com/chesterinlove/a-carta/chester-chester_in_love-chester_edu_2_2331030022.html',
        'only_matching': True,
    }]

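    # Turns a video "content" dict taken from the page's React state into an
    # info dict; Ooyala-hosted videos are delegated to OoyalaIE.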
    def _parse_content(self, content, url):
        video_id = content['dataMediaId']
        if content.get('dataCmsId') == 'ooyala':
            return self.url_result(
                'ooyala:%s' % video_id, OoyalaIE.ie_key(), video_id)
        config_url = urljoin(url, content['dataConfig'])
        config = self._download_json(
            config_url, video_id, 'Downloading config JSON')
        title = config['info']['title']

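        # The config references a per-player "mmc" metadata JSON; swap the
        # flash/html5 suffix in its URL to request a specific variant.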
        def mmc_url(mmc_type):
            return re.sub(
                r'/(?:flash|html5)\.json', '/%s.json' % mmc_type,
                config['services']['mmc'])

        duration = None
        formats = []
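        # Collect formats from both the flash and html5 mmc variants; either
        # one may be unavailable, so failed downloads are not fatal.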
        for mmc_type in ('flash', 'html5'):
            mmc = self._download_json(
                mmc_url(mmc_type), video_id,
                'Downloading %s mmc JSON' % mmc_type, fatal=False)
            if not mmc:
                continue
            if not duration:
                duration = int_or_none(mmc.get('duration'))
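            # Each location needs a token request to its "gat" URL (carrying
            # the gcp/ogn identifiers) to obtain the actual stream URL.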
            for location in mmc['locations']:
                gat = self._proto_relative_url(location.get('gat'), 'http:')
                gcp = location.get('gcp')
                ogn = location.get('ogn')
                if None in (gat, gcp, ogn):
                    continue
                token_data = {
                    'gcp': gcp,
                    'ogn': ogn,
                    'sta': 0,
                }
                media = self._download_json(
                    gat, video_id, data=json.dumps(token_data).encode('utf-8'),
                    headers={
                        'Content-Type': 'application/json;charset=utf-8',
                        'Referer': url,
                    }, fatal=False) or {}
                stream = media.get('stream') or media.get('file')
                if not stream:
                    continue
                ext = determine_ext(stream)
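                # f4m (HDS) streams get the usual hdcore/plugin query
                # parameters appended before the manifest is parsed.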
                if ext == 'f4m':
                    formats.extend(self._extract_f4m_formats(
                        stream + '&hdcore=3.2.0&plugin=aasp-3.2.0.77.18',
                        video_id, f4m_id='hds', fatal=False))
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        stream, video_id, 'mp4', 'm3u8_native',
                        m3u8_id='hls', fatal=False))
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': content.get('dataPoster') or config.get('poster', {}).get('imageUrl'),
            'duration': duration,
        }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
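        # The article metadata, including any embedded videos, is serialized
        # into window.$REACTBASE_STATE on the page.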
        article = self._parse_json(self._search_regex(
            r'window\.\$REACTBASE_STATE\.article(?:_multisite)?\s*=\s*({.+})',
            webpage, 'article'), display_id)['article']
        title = article.get('title')
        description = clean_html(article.get('leadParagraph')) or ''
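        # Non-'VID' pages are articles that may embed several videos: walk
        # the opening block and the body, appending paragraph text to the
        # description and collecting the videos as playlist entries.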
        if article.get('editorialType') != 'VID':
            entries = []
            body = [article.get('opening')]
            body.extend(try_get(article, lambda x: x['body'], list) or [])
            for p in body:
                if not isinstance(p, dict):
                    continue
                content = p.get('content')
                if not content:
                    continue
                type_ = p.get('type')
                if type_ == 'paragraph':
                    content_str = str_or_none(content)
                    if content_str:
                        description += content_str
                    continue
                if type_ == 'video' and isinstance(content, dict):
                    entries.append(self._parse_content(content, url))
            return self.playlist_result(
                entries, str_or_none(article.get('id')), title, description)
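        # 'VID' pages carry a single video in the opening block.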
        content = article['opening']['content']
        info = self._parse_content(content, url)
        info.update({
            'description': description,
        })
        return info