Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/vrv.py
debian/README.source: Update steps used to create new release.
[youtubedl] / youtube_dl / extractor / vrv.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import base64
5 import json
6 import hashlib
7 import hmac
8 import random
9 import string
10 import time
11
12 from .common import InfoExtractor
13 from ..compat import (
14 compat_urllib_parse_urlencode,
15 compat_urllib_parse,
16 )
17 from ..utils import (
18 float_or_none,
19 int_or_none,
20 )
21
22
class VRVBaseIE(InfoExtractor):
    """Shared plumbing for VRV extractors: OAuth-signed core-API calls
    and signed CMS metadata lookups. State below is filled lazily."""
    _API_DOMAIN = None
    _API_PARAMS = {}
    _CMS_SIGNING = {}

    def _call_api(self, path, video_id, note, data=None):
        """Perform an OAuth 1.0 (HMAC-SHA1) signed request against the core API.

        A JSON body in `data` turns the request into a POST; otherwise GET.
        """
        base_url = self._API_DOMAIN + '/core/' + path
        nonce = ''.join(random.choice(string.ascii_letters) for _ in range(32))
        encoded_query = compat_urllib_parse_urlencode({
            'oauth_consumer_key': self._API_PARAMS['oAuthKey'],
            'oauth_nonce': nonce,
            'oauth_signature_method': 'HMAC-SHA1',
            'oauth_timestamp': int(time.time()),
            'oauth_version': '1.0',
        })
        headers = self.geo_verification_headers()
        if data:
            # Serialize the body up front; the encoded bytes are what is sent.
            data = json.dumps(data).encode()
            headers['Content-Type'] = 'application/json'
            method = 'POST'
        else:
            method = 'GET'
        # OAuth signature base string: METHOD & enc(url) & enc(query),
        # each component percent-encoded with no characters left "safe".
        base_string = '&'.join([
            method,
            compat_urllib_parse.quote(base_url, ''),
            compat_urllib_parse.quote(encoded_query, ''),
        ])
        # Key is consumer secret + '&' (empty token secret).
        oauth_signature = base64.b64encode(hmac.new(
            (self._API_PARAMS['oAuthSecret'] + '&').encode('ascii'),
            base_string.encode(), hashlib.sha1).digest()).decode()
        encoded_query += '&oauth_signature=' + compat_urllib_parse.quote(oauth_signature, '')
        return self._download_json(
            '?'.join([base_url, encoded_query]), video_id,
            note='Downloading %s JSON metadata' % note, headers=headers, data=data)

    def _call_cms(self, path, video_id, note):
        """Fetch CMS metadata, caching the signing query params on first use."""
        if not self._CMS_SIGNING:
            self._CMS_SIGNING = self._call_api(
                'index', video_id, 'CMS Signing')['cms_signing']
        return self._download_json(
            self._API_DOMAIN + path, video_id, query=self._CMS_SIGNING,
            note='Downloading %s JSON metadata' % note,
            headers=self.geo_verification_headers())

    def _set_api_params(self, webpage, video_id):
        """Parse window.__APP_CONFIG__ once and remember API credentials/domain."""
        if not self._API_PARAMS:
            app_config = self._parse_json(self._search_regex(
                r'window\.__APP_CONFIG__\s*=\s*({.+?})</script>',
                webpage, 'api config'), video_id)
            self._API_PARAMS = app_config['cxApiParams']
            self._API_DOMAIN = self._API_PARAMS.get('apiDomain', 'https://api.vrv.co')

    def _get_cms_resource(self, resource_key, video_id):
        """Resolve a cms: resource key to its CMS href via the core API."""
        resource = self._call_api(
            'cms_resource', video_id, 'resource path',
            data={'resource_key': resource_key})
        return resource['__links__']['cms_resource']['href']
70
71
class VRVIE(VRVBaseIE):
    """Extractor for individual VRV episodes (https://vrv.co/watch/...)."""
    IE_NAME = 'vrv'
    _VALID_URL = r'https?://(?:www\.)?vrv\.co/watch/(?P<id>[A-Z0-9]+)'
    _TESTS = [{
        'url': 'https://vrv.co/watch/GR9PNZ396/Hidden-America-with-Jonah-Ray:BOSTON-WHERE-THE-PAST-IS-THE-PRESENT',
        'info_dict': {
            'id': 'GR9PNZ396',
            'ext': 'mp4',
            'title': 'BOSTON: WHERE THE PAST IS THE PRESENT',
            'description': 'md5:4ec8844ac262ca2df9e67c0983c6b83f',
            'uploader_id': 'seeso',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }]

    def _extract_vrv_formats(self, url, video_id, stream_format, audio_lang, hardsub_lang):
        """Extract the HLS or DASH formats of one stream variant.

        Returns [] for a missing URL or an unknown stream format. Either of
        audio_lang/hardsub_lang may be empty or None: the format_id then
        falls back to just the stream format.
        """
        if not url or stream_format not in ('hls', 'dash'):
            return []
        stream_id_list = []
        if audio_lang:
            stream_id_list.append('audio-%s' % audio_lang)
        if hardsub_lang:
            stream_id_list.append('hardsub-%s' % hardsub_lang)
        # Do NOT assert on the locales: streams legitimately occur with a
        # null audio_locale and no hardsub_locale, and an AssertionError here
        # (also stripped under python -O) would abort the whole extraction.
        format_id = stream_format
        if stream_id_list:
            format_id += '-' + '-'.join(stream_id_list)
        if stream_format == 'hls':
            adaptive_formats = self._extract_m3u8_formats(
                url, video_id, 'mp4', m3u8_id=format_id,
                note='Downloading %s m3u8 information' % format_id,
                fatal=False)
        else:  # 'dash' — guaranteed by the guard above
            adaptive_formats = self._extract_mpd_formats(
                url, video_id, mpd_id=format_id,
                note='Downloading %s MPD information' % format_id,
                fatal=False)
        if audio_lang:
            # Tag audio-carrying formats with their language so format
            # selection by language works.
            for f in adaptive_formats:
                if f.get('acodec') != 'none':
                    f['language'] = audio_lang
        return adaptive_formats

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            url, video_id,
            headers=self.geo_verification_headers())
        # Prefer metadata already embedded in the page; fall back to signed
        # API/CMS calls below when a piece is missing.
        media_resource = self._parse_json(self._search_regex(
            [
                r'window\.__INITIAL_STATE__\s*=\s*({.+?})(?:</script>|;)',
                r'window\.__INITIAL_STATE__\s*=\s*({.+})'
            ], webpage, 'inital state'), video_id).get('watch', {}).get('mediaResource') or {}

        video_data = media_resource.get('json')
        if not video_data:
            self._set_api_params(webpage, video_id)
            episode_path = self._get_cms_resource(
                'cms:/episodes/' + video_id, video_id)
            video_data = self._call_cms(episode_path, video_id, 'video')
        title = video_data['title']

        streams_json = media_resource.get('streams', {}).get('json', {})
        if not streams_json:
            self._set_api_params(webpage, video_id)
            streams_path = video_data['__links__']['streams']['href']
            streams_json = self._call_cms(streams_path, video_id, 'streams')

        audio_locale = streams_json.get('audio_locale')
        formats = []
        for stream_type, streams in streams_json.get('streams', {}).items():
            if stream_type in ('adaptive_hls', 'adaptive_dash'):
                for stream in streams.values():
                    formats.extend(self._extract_vrv_formats(
                        stream.get('url'), video_id, stream_type.split('_')[1],
                        audio_locale, stream.get('hardsub_locale')))
        self._sort_formats(formats)

        subtitles = {}
        for subtitle in streams_json.get('subtitles', {}).values():
            subtitle_url = subtitle.get('url')
            if not subtitle_url:
                continue
            subtitles.setdefault(subtitle.get('locale', 'en-US'), []).append({
                'url': subtitle_url,
                'ext': subtitle.get('format', 'ass'),
            })

        thumbnails = []
        for thumbnail in video_data.get('images', {}).get('thumbnails', []):
            thumbnail_url = thumbnail.get('source')
            if not thumbnail_url:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int_or_none(thumbnail.get('width')),
                'height': int_or_none(thumbnail.get('height')),
            })

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'description': video_data.get('description'),
            'duration': float_or_none(video_data.get('duration_ms'), 1000),
            'uploader_id': video_data.get('channel_id'),
            'series': video_data.get('series_title'),
            'season': video_data.get('season_title'),
            'season_number': int_or_none(video_data.get('season_number')),
            'season_id': video_data.get('season_id'),
            'episode': title,
            'episode_number': int_or_none(video_data.get('episode_number')),
            'episode_id': video_data.get('production_episode_id'),
        }
190
191
class VRVSeriesIE(VRVBaseIE):
    """Playlist extractor for VRV series pages (https://vrv.co/series/...)."""
    IE_NAME = 'vrv:series'
    _VALID_URL = r'https?://(?:www\.)?vrv\.co/series/(?P<id>[A-Z0-9]+)'
    _TEST = {
        'url': 'https://vrv.co/series/G68VXG3G6/The-Perfect-Insider',
        'info_dict': {
            'id': 'G68VXG3G6',
        },
        'playlist_mincount': 11,
    }

    def _real_extract(self, url):
        series_id = self._match_id(url)
        webpage = self._download_webpage(
            url, series_id,
            headers=self.geo_verification_headers())

        # Resolve the seasons listing for this series through the CMS.
        self._set_api_params(webpage, series_id)
        seasons_path = self._get_cms_resource(
            'cms:/seasons?series_id=' + series_id, series_id)
        seasons_data = self._call_cms(seasons_path, series_id, 'seasons')

        # One url_result per episode, walking every season in order.
        entries = [
            self.url_result(
                'https://vrv.co/watch/' + episode['id'],
                'VRV', episode['id'], episode.get('title'))
            for season in seasons_data.get('items', [])
            for episode in self._call_cms(
                season['__links__']['season/episodes']['href'],
                series_id, 'episodes').get('items', [])
        ]

        return self.playlist_result(entries, series_id)