youtube_dl/extractor/theplatform.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import re
import time
import hmac
import binascii
import hashlib


from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    determine_ext,
    ExtractorError,
    xpath_with_ns,
    unsmuggle_url,
    int_or_none,
    url_basename,
    float_or_none,
)

default_ns = 'http://www.w3.org/2005/SMIL21/Language'
_x = lambda p: xpath_with_ns(p, {'smil': default_ns})
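# Illustrative note: _x expands the 'smil:' prefix into Clark notation so that
# ElementTree can match namespaced elements, e.g.
#   _x('.//smil:ref') -> './/{http://www.w3.org/2005/SMIL21/Language}ref'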


class ThePlatformBaseIE(InfoExtractor):
    def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
        meta = self._download_xml(smil_url, video_id, note=note)
        try:
            error_msg = next(
                n.attrib['abstract']
                for n in meta.findall(_x('.//smil:ref'))
                if n.attrib.get('title') == 'Geographic Restriction' or n.attrib.get('title') == 'Expired')
        except StopIteration:
            pass
        else:
            raise ExtractorError(error_msg, expected=True)
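        # Illustrative example (assumed SMIL shape, inferred only from the checks
        # above, not from theplatform documentation): a blocked item is expected to
        # come back with a <ref> such as
        #   <ref title="Geographic Restriction" abstract="This video is not available in your region"/>
        # and the 'abstract' text is surfaced to the user via ExtractorError.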

        formats = self._parse_smil_formats(
            meta, smil_url, video_id, namespace=default_ns,
            # the f4m parameters below were taken from syfy.com; other sites may use
            # different ones, but these also work for nbc.com
            f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
            transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))

        for _format in formats:
            ext = determine_ext(_format['url'])
            if ext == 'once':
                _format['ext'] = 'mp4'

        self._sort_formats(formats)

        subtitles = self._parse_smil_subtitles(meta, default_ns)

        return formats, subtitles

    def get_metadata(self, path, video_id):
        info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
        info = self._download_json(info_url, video_id)

        subtitles = {}
        captions = info.get('captions')
        if isinstance(captions, list):
            for caption in captions:
                lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
                subtitles[lang] = [{
                    'ext': 'srt' if mime == 'text/srt' else 'ttml',
                    'url': src,
                }]

        return {
            'title': info['title'],
            'subtitles': subtitles,
            'description': info['description'],
            'thumbnail': info['defaultThumbnailUrl'],
            'duration': int_or_none(info.get('duration'), 1000),
        }
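    # Rough sketch of the 'format=preview' JSON consumed above (field names are the
    # ones the code reads; the values are made-up placeholders, not real API output):
    #   {
    #       "title": "Some title",
    #       "description": "Some description",
    #       "defaultThumbnailUrl": "http://example.com/thumb.jpg",
    #       "duration": 83000,  # milliseconds, hence int_or_none(..., 1000)
    #       "captions": [{"lang": "en", "src": "http://example.com/sub.srt", "type": "text/srt"}]
    #   }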


class ThePlatformIE(ThePlatformBaseIE):
    _VALID_URL = r'''(?x)
        (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
           (?:(?P<media>(?:[^/]+/)+select/media/)|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
         |theplatform:)(?P<id>[^/\?&]+)'''

    _TESTS = [{
        # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/
        'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
        'info_dict': {
            'id': 'e9I_cZgTgIPd',
            'ext': 'flv',
            'title': 'Blackberry\'s big, bold Z30',
            'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
            'duration': 247,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        # from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/
        'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT',
        'info_dict': {
            'id': '22d_qsQ6MIRT',
            'ext': 'flv',
            'description': 'md5:ac330c9258c04f9d7512cf26b9595409',
            'title': 'Tesla Model S: A second step towards a cleaner motoring future',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD',
        'info_dict': {
            'id': 'yMBg9E8KFxZD',
            'ext': 'mp4',
            'description': 'md5:644ad9188d655b742f942bf2e06b002d',
            'title': 'HIGHLIGHTS: USA bag first ever series Cup win',
        }
    }, {
        'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
        'only_matching': True,
    }, {
        'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
        'md5': '734f3790fb5fc4903da391beeebc4836',
        'info_dict': {
            'id': 'tdy_or_siri_150701',
            'ext': 'mp4',
            'title': 'iPhone Siri’s sassy response to a math question has people talking',
            'description': 'md5:a565d1deadd5086f3331d57298ec6333',
            'duration': 83.0,
            'thumbnail': 're:^https?://.*\.jpg$',
            'timestamp': 1435752600,
            'upload_date': '20150701',
            'categories': ['Today/Shows/Orange Room', 'Today/Sections/Money', 'Today/Topics/Tech', "Today/Topics/Editor's picks"],
        },
    }]

    @staticmethod
    def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False):
        flags = '10' if include_qs else '00'
        expiration_date = '%x' % (int(time.time()) + life)

        def str_to_hex(str):
            return binascii.b2a_hex(str.encode('ascii')).decode('ascii')

        def hex_to_str(hex):
            return binascii.a2b_hex(hex)

        relative_path = url.split('http://link.theplatform.com/s/')[1].split('?')[0]
        clear_text = hex_to_str(flags + expiration_date + str_to_hex(relative_path))
        checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest()
        sig = flags + expiration_date + checksum + str_to_hex(sig_secret)
        return '%s&sig=%s' % (url, sig)
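    # Hypothetical usage sketch (the key/secret below are placeholders, not real
    # credentials). The appended signature is: flags + hex expiry + an HMAC-SHA1
    # (keyed with sig_key) over the hex-decoded flags/expiry/path string + hex(secret).
    #
    #   signed = ThePlatformIE._sign_url(
    #       'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd?mbr=true',
    #       sig_key='0123456789abcdef', sig_secret='not-a-real-secret')
    #   # -> '...?mbr=true&sig=00<expiry><40-char hmac hex><hex(secret)>'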

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})

        mobj = re.match(self._VALID_URL, url)
        provider_id = mobj.group('provider_id')
        video_id = mobj.group('id')

        if not provider_id:
            provider_id = 'dJ5BDC'

        path = provider_id
        if mobj.group('media'):
            path += '/media'
        path += '/' + video_id

        qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        if 'guid' in qs_dict:
            webpage = self._download_webpage(url, video_id)
            scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
            feed_id = None
            # The feed id is usually found in the last script, but there is no
            # reliable pattern in the script filenames, so try them one by one.
            for script in reversed(scripts):
                feed_script = self._download_webpage(script, video_id, 'Downloading feed script')
                feed_id = self._search_regex(r'defaultFeedId\s*:\s*"([^"]+)"', feed_script, 'default feed id', default=None)
                if feed_id is not None:
                    break
            if feed_id is None:
                raise ExtractorError('Unable to find feed id')
            return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
                provider_id, feed_id, qs_dict['guid'][0]))
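            # For example, the nbcNewsOffsite test URL above (provider 2E2eJC,
            # guid=tdy_or_siri_150701) would be handed off as
            #   http://feed.theplatform.com/f/2E2eJC/<feed_id>?byGuid=tdy_or_siri_150701
            # (with <feed_id> being whatever defaultFeedId was scraped), which
            # ThePlatformFeedIE below then handles.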

        if smuggled_data.get('force_smil_url', False):
            smil_url = url
        elif mobj.group('config'):
            config_url = url + '&form=json'
            config_url = config_url.replace('swf/', 'config/')
            config_url = config_url.replace('onsite/', 'onsite/config/')
            config = self._download_json(config_url, video_id, 'Downloading config')
            if 'releaseUrl' in config:
                release_url = config['releaseUrl']
            else:
                release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
            smil_url = release_url + '&format=SMIL&formats=MPEG4&manifest=f4m'
        else:
            smil_url = 'http://link.theplatform.com/s/%s/meta.smil?format=smil&mbr=true' % path

        sig = smuggled_data.get('sig')
        if sig:
            smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])

        formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)

        ret = self.get_metadata(path, video_id)
        combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': combined_subtitles,
        })

        return ret
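    # Sketch of how a site extractor would typically hand a URL to this extractor;
    # the URL below is a made-up placeholder, and smuggle_url (from ..utils) is the
    # counterpart of the unsmuggle_url call at the top of _real_extract:
    #
    #   from ..utils import smuggle_url
    #   return self.url_result(smuggle_url(
    #       'http://link.theplatform.com/s/<provider>/<media id>?mbr=true',
    #       {'force_smil_url': True}), 'ThePlatform')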


class ThePlatformFeedIE(ThePlatformBaseIE):
    _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&byGuid=%s'
    _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*byGuid=(?P<id>[a-zA-Z0-9_]+)'
    _TEST = {
        # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
        'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
        'md5': '22d2b84f058d3586efcd99e57d59d314',
        'info_dict': {
            'id': 'n_hardball_5biden_140207',
            'ext': 'mp4',
            'title': 'The Biden factor: will Joe run in 2016?',
            'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
            'thumbnail': 're:^https?://.*\.jpg$',
            'upload_date': '20140208',
            'timestamp': 1391824260,
            'duration': 467.0,
            'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        provider_id = mobj.group('provider_id')
        feed_id = mobj.group('feed_id')

        real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, video_id)
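        # For the _TEST URL above this expands (with the scheme chosen by
        # http_scheme()) to:
        #   http(s)://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&byGuid=n_hardball_5biden_140207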
        feed = self._download_json(real_url, video_id)
        entry = feed['entries'][0]

        formats = []
        subtitles = {}
        first_video_id = None
        duration = None
        for item in entry['media$content']:
            smil_url = item['plfile$url'] + '&format=SMIL&Tracking=true&Embedded=true&formats=MPEG4,F4M'
            cur_video_id = url_basename(smil_url)
            if first_video_id is None:
                first_video_id = cur_video_id
                duration = float_or_none(item.get('plfile$duration'))
            cur_formats, cur_subtitles = self._extract_theplatform_smil(smil_url, video_id, 'Downloading SMIL data for %s' % cur_video_id)
            formats.extend(cur_formats)
            subtitles = self._merge_subtitles(subtitles, cur_subtitles)

        self._sort_formats(formats)

        thumbnails = [{
            'url': thumbnail['plfile$url'],
            'width': int_or_none(thumbnail.get('plfile$width')),
            'height': int_or_none(thumbnail.get('plfile$height')),
        } for thumbnail in entry.get('media$thumbnails', [])]

        timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
        categories = [item['media$name'] for item in entry.get('media$categories', [])]

        ret = self.get_metadata('%s/%s' % (provider_id, first_video_id), video_id)
        subtitles = self._merge_subtitles(subtitles, ret['subtitles'])
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'duration': duration,
            'timestamp': timestamp,
            'categories': categories,
        })

        return ret