youtube_dl/extractor/srgssr.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
    ExtractorError,
    parse_iso8601,
    qualities,
)


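# SRGSSRIE resolves "srgssr:<bu>:<type>:<id>" URNs (and the tp.srgssr.ch popup
# player URLs that carry the same URN) against the SRG SSR integration layer
# and builds the format list from the returned playlists and downloads.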
class SRGSSRIE(InfoExtractor):
    _VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)'
    _GEO_BYPASS = False
    _GEO_COUNTRIES = ['CH']

    _ERRORS = {
        'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. and 6 a.m.',
        'AGERATING18': 'To protect children under the age of 18, this video is only available between 11 p.m. and 5 a.m.',
        # 'ENDDATE': 'For legal reasons, this video was only available for a specified period of time.',
        'GEOBLOCK': 'For legal reasons, this video is only available in Switzerland.',
        'LEGAL': 'The video cannot be transmitted for legal reasons.',
        'STARTDATE': 'This video is not yet available. Please try again later.',
    }

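    # HDS/HLS stream URLs are protected by an Akamai token: fetch the auth
    # parameters from tp.srgssr.ch and append them to the URL as its query string.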
    def _get_tokenized_src(self, url, video_id, format_id):
        sp = compat_urllib_parse_urlparse(url).path.split('/')
        token = self._download_json(
            'http://tp.srgssr.ch/akahd/token?acl=/%s/%s/*' % (sp[1], sp[2]),
            video_id, 'Downloading %s token' % format_id, fatal=False) or {}
        auth_params = token.get('token', {}).get('authparams')
        if auth_params:
            url += '?' + auth_params
        return url

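    # Fetch the media description from the integration layer and turn any
    # blocking reason ('block') into a readable error; GEOBLOCK is reported
    # as a geo restriction.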
    def get_media_data(self, bu, media_type, media_id):
        media_data = self._download_json(
            'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id),
            media_id)[media_type.capitalize()]

        if media_data.get('block') and media_data['block'] in self._ERRORS:
            message = self._ERRORS[media_data['block']]
            if media_data['block'] == 'GEOBLOCK':
                self.raise_geo_restricted(
                    msg=message, countries=self._GEO_COUNTRIES)
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, message), expected=True)

        return media_data

    def _real_extract(self, url):
        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()

        media_data = self.get_media_data(bu, media_type, media_id)

        metadata = media_data['AssetMetadatas']['AssetMetadata'][0]
        title = metadata['title']
        description = metadata.get('description')
        created_date = media_data.get('createdDate') or metadata.get('createdDate')
        timestamp = parse_iso8601(created_date)

        thumbnails = [{
            'id': image.get('id'),
            'url': image['url'],
        } for image in media_data.get('Image', {}).get('ImageRepresentations', {}).get('ImageRepresentation', [])]

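        # Formats come from both the streaming playlists and the direct
        # downloads; HDS/HLS URLs are tokenized first (see _get_tokenized_src),
        # RTMP and progressive HTTP entries are used as-is.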
        preference = qualities(['LQ', 'MQ', 'SD', 'HQ', 'HD'])
        formats = []
        for source in media_data.get('Playlists', {}).get('Playlist', []) + media_data.get('Downloads', {}).get('Download', []):
            protocol = source.get('@protocol')
            for asset in source['url']:
                asset_url = asset['text']
                quality = asset['@quality']
                format_id = '%s-%s' % (protocol, quality)
                if protocol.startswith('HTTP-HDS') or protocol.startswith('HTTP-HLS'):
                    asset_url = self._get_tokenized_src(asset_url, media_id, format_id)
                    if protocol.startswith('HTTP-HDS'):
                        formats.extend(self._extract_f4m_formats(
                            asset_url + ('?' if '?' not in asset_url else '&') + 'hdcore=3.4.0',
                            media_id, f4m_id=format_id, fatal=False))
                    elif protocol.startswith('HTTP-HLS'):
                        formats.extend(self._extract_m3u8_formats(
                            asset_url, media_id, 'mp4', 'm3u8_native',
                            m3u8_id=format_id, fatal=False))
                else:
                    formats.append({
                        'format_id': format_id,
                        'url': asset_url,
                        'preference': preference(quality),
                        'ext': 'flv' if protocol == 'RTMP' else None,
                    })
        self._sort_formats(formats)

        return {
            'id': media_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'thumbnails': thumbnails,
            'formats': formats,
        }


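# SRGSSRPlayIE handles the public play site URLs and simply hands the extracted
# business unit, media type and id over to SRGSSRIE as an srgssr: URN.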
class SRGSSRPlayIE(InfoExtractor):
    IE_DESC = 'srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites'
    _VALID_URL = r'https?://(?:(?:www|play)\.)?(?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/[^/]+/(?P<type>video|audio)/[^?]+\?id=(?P<id>[0-9a-f\-]{36}|\d+)'

    _TESTS = [{
        'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
        'md5': 'da6b5b3ac9fa4761a942331cef20fcb3',
        'info_dict': {
            'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
            'ext': 'mp4',
            'upload_date': '20130701',
            'title': 'Snowden beantragt Asyl in Russland',
            'timestamp': 1372713995,
        }
    }, {
        # No Speichern (Save) button
        'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
        'md5': '0a274ce38fda48c53c01890651985bc6',
        'info_dict': {
            'id': '677f5829-e473-4823-ac83-a1087fe97faa',
            'ext': 'flv',
            'upload_date': '20130710',
            'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
            'description': 'md5:88604432b60d5a38787f152dec89cd56',
            'timestamp': 1373493600,
        },
    }, {
        'url': 'http://www.rtr.ch/play/radio/actualitad/audio/saira-tujetsch-tuttina-cuntinuar-cun-sedrun-muster-turissem?id=63cb0778-27f8-49af-9284-8c7a8c6d15fc',
        'info_dict': {
            'id': '63cb0778-27f8-49af-9284-8c7a8c6d15fc',
            'ext': 'mp3',
            'upload_date': '20151013',
            'title': 'Saira: Tujetsch - tuttina cuntinuar cun Sedrun Mustér Turissem',
            'timestamp': 1444750398,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
        'md5': '67a2a9ae4e8e62a68d0e9820cc9782df',
        'info_dict': {
            'id': '6348260',
            'display_id': '6348260',
            'ext': 'mp4',
            'duration': 1796,
            'title': 'Le 19h30',
            'description': '',
            'uploader': '19h30',
            'upload_date': '20141201',
            'timestamp': 1417458600,
            'thumbnail': r're:^https?://.*\.image',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }]

    def _real_extract(self, url):
        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
        # other info can be extracted from url + '&layout=json'
        return self.url_result('srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR')