# Extracted from a git-web blob view (Raphaël G. Git Repositories).
# Upstream youtube-dl version 2017.02.07 — youtube_dl/extractor/srgssr.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..compat import compat_urllib_parse_urlparse
8 from ..utils import (
9 ExtractorError,
10 parse_iso8601,
11 qualities,
12 )
13
14
class SRGSSRIE(InfoExtractor):
    """Extractor for SRG SSR media referenced by urn (srgssr:<bu>:<type>:<id>).

    Handles both the tp.srgssr.ch player URLs and bare ``srgssr:`` urns for
    the five business units (srf, rts, rsi, rtr, swi).
    """
    _VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)'

    # Known server-side block reasons mapped to user-facing explanations.
    _ERRORS = {
        'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. and 6 a.m.',
        'AGERATING18': 'To protect children under the age of 18, this video is only available between 11 p.m. and 5 a.m.',
        # 'ENDDATE': 'For legal reasons, this video was only available for a specified period of time.',
        'GEOBLOCK': 'For legal reasons, this video is only available in Switzerland.',
        'LEGAL': 'The video cannot be transmitted for legal reasons.',
        'STARTDATE': 'This video is not yet available. Please try again later.',
    }

    def _get_tokenized_src(self, url, video_id, format_id):
        """Return *url* with Akamai auth parameters appended, when available.

        The token endpoint is queried with the first two path components of
        the media URL as the ACL; failures are non-fatal and leave the URL
        untouched.
        """
        sp = compat_urllib_parse_urlparse(url).path.split('/')
        token = self._download_json(
            'http://tp.srgssr.ch/akahd/token?acl=/%s/%s/*' % (sp[1], sp[2]),
            video_id, 'Downloading %s token' % format_id, fatal=False) or {}
        auth_params = token.get('token', {}).get('authparams')
        if auth_params:
            url += '?' + auth_params
        return url

    def get_media_data(self, bu, media_type, media_id):
        """Fetch the integration-layer JSON for a media item.

        Raises ExtractorError (expected=True) when the item carries a known
        block reason from ``_ERRORS``.
        """
        media_data = self._download_json(
            'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id),
            media_id)[media_type.capitalize()]

        # Single lookup instead of a truthiness test plus a second lookup;
        # a missing 'block' key yields None, which is never a key of _ERRORS.
        block_reason = media_data.get('block')
        if block_reason in self._ERRORS:
            raise ExtractorError('%s said: %s' % (
                self.IE_NAME, self._ERRORS[block_reason]), expected=True)

        return media_data

    def _real_extract(self, url):
        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()

        media_data = self.get_media_data(bu, media_type, media_id)

        metadata = media_data['AssetMetadatas']['AssetMetadata'][0]
        title = metadata['title']
        description = metadata.get('description')
        created_date = media_data.get('createdDate') or metadata.get('createdDate')
        timestamp = parse_iso8601(created_date)

        thumbnails = [{
            'id': image.get('id'),
            'url': image['url'],
        } for image in media_data.get('Image', {}).get('ImageRepresentations', {}).get('ImageRepresentation', [])]

        preference = qualities(['LQ', 'MQ', 'SD', 'HQ', 'HD'])
        formats = []
        for source in media_data.get('Playlists', {}).get('Playlist', []) + media_data.get('Downloads', {}).get('Download', []):
            # Guard against a missing '@protocol': the original code would
            # raise AttributeError on None.startswith(); an empty string
            # falls through to the plain-format branch instead.
            protocol = source.get('@protocol') or ''
            for asset in source['url']:
                asset_url = asset['text']
                quality = asset['@quality']
                format_id = '%s-%s' % (protocol, quality)
                if protocol.startswith('HTTP-HDS') or protocol.startswith('HTTP-HLS'):
                    # Segmented formats need a per-request Akamai token.
                    asset_url = self._get_tokenized_src(asset_url, media_id, format_id)
                    if protocol.startswith('HTTP-HDS'):
                        formats.extend(self._extract_f4m_formats(
                            asset_url + ('?' if '?' not in asset_url else '&') + 'hdcore=3.4.0',
                            media_id, f4m_id=format_id, fatal=False))
                    elif protocol.startswith('HTTP-HLS'):
                        formats.extend(self._extract_m3u8_formats(
                            asset_url, media_id, 'mp4', 'm3u8_native',
                            m3u8_id=format_id, fatal=False))
                else:
                    formats.append({
                        'format_id': format_id,
                        'url': asset_url,
                        'preference': preference(quality),
                        'ext': 'flv' if protocol == 'RTMP' else None,
                    })
        self._sort_formats(formats)

        return {
            'id': media_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'thumbnails': thumbnails,
            'formats': formats,
        }
99
100
class SRGSSRPlayIE(InfoExtractor):
    """Thin wrapper around SRGSSRIE for the public play-site URLs."""
    IE_DESC = 'srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites'
    _VALID_URL = r'https?://(?:(?:www|play)\.)?(?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/[^/]+/(?P<type>video|audio)/[^?]+\?id=(?P<id>[0-9a-f\-]{36}|\d+)'

    _TESTS = [{
        'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
        'md5': 'da6b5b3ac9fa4761a942331cef20fcb3',
        'info_dict': {
            'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
            'ext': 'mp4',
            'upload_date': '20130701',
            'title': 'Snowden beantragt Asyl in Russland',
            'timestamp': 1372713995,
        }
    }, {
        # No Speichern (Save) button
        'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
        'md5': '0a274ce38fda48c53c01890651985bc6',
        'info_dict': {
            'id': '677f5829-e473-4823-ac83-a1087fe97faa',
            'ext': 'flv',
            'upload_date': '20130710',
            'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
            'description': 'md5:88604432b60d5a38787f152dec89cd56',
            'timestamp': 1373493600,
        },
    }, {
        'url': 'http://www.rtr.ch/play/radio/actualitad/audio/saira-tujetsch-tuttina-cuntinuar-cun-sedrun-muster-turissem?id=63cb0778-27f8-49af-9284-8c7a8c6d15fc',
        'info_dict': {
            'id': '63cb0778-27f8-49af-9284-8c7a8c6d15fc',
            'ext': 'mp3',
            'upload_date': '20151013',
            'title': 'Saira: Tujetsch - tuttina cuntinuar cun Sedrun Mustér Turissem',
            'timestamp': 1444750398,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
        'md5': '67a2a9ae4e8e62a68d0e9820cc9782df',
        'info_dict': {
            'id': '6348260',
            'display_id': '6348260',
            'ext': 'mp4',
            'duration': 1796,
            'title': 'Le 19h30',
            'description': '',
            'uploader': '19h30',
            'upload_date': '20141201',
            'timestamp': 1417458600,
            'thumbnail': r're:^https?://.*\.image',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        bu = mobj.group('bu')
        media_type = mobj.group('type')
        media_id = mobj.group('id')
        # Delegate to SRGSSRIE via a synthetic urn; the business unit is
        # truncated to its three-letter code ('swissinfo' -> 'swi').
        # other info can be extracted from url + '&layout=json'
        return self.url_result(
            'srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR')