# coding: utf-8
from __future__ import unicode_literals

import itertools
import re
import random

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
    parse_iso8601,
)


class TwitchBaseIE(InfoExtractor):
    _VALID_URL_BASE = r'https?://(?:www\.)?twitch\.tv'

    _API_BASE = 'https://api.twitch.tv'
    _USHER_BASE = 'http://usher.twitch.tv'
    _LOGIN_URL = 'https://secure.twitch.tv/user/login'

    def _handle_error(self, response):
        if not isinstance(response, dict):
            return
        error = response.get('error')
        if error:
            raise ExtractorError(
                '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
                expected=True)

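    # All API JSON requests go through this wrapper: it adds the XHR-style
    # headers, forwards the api_token cookie (when present) as the
    # Twitch-Api-Token header, and raises on API-level error responses.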
    def _download_json(self, url, video_id, note='Downloading JSON metadata'):
        headers = {
            'Referer': 'http://api.twitch.tv/crossdomain/receiver.html?v=2',
            'X-Requested-With': 'XMLHttpRequest',
        }
        for cookie in self._downloader.cookiejar:
            if cookie.name == 'api_token':
                headers['Twitch-Api-Token'] = cookie.value
        request = compat_urllib_request.Request(url, headers=headers)
        response = super(TwitchBaseIE, self)._download_json(request, video_id, note)
        self._handle_error(response)
        return response

    def _real_initialize(self):
        self._login()

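    # Log in through the secure.twitch.tv form: scrape the hidden
    # authenticity_token from the login page, POST the form fields, and report
    # any login_error_message found in the response.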
    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        authenticity_token = self._search_regex(
            r'<input name="authenticity_token" type="hidden" value="([^"]+)"',
            login_page, 'authenticity token')

        login_form = {
            'utf8': '✓'.encode('utf-8'),
            'authenticity_token': authenticity_token,
            'redirect_on_login': '',
            'embed_form': 'false',
            'mp_source_action': '',
            'follow': '',
            'user[login]': username,
            'user[password]': password,
        }

        request = compat_urllib_request.Request(
            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        request.add_header('Referer', self._LOGIN_URL)
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        m = re.search(
            r"id=([\"'])login_error_message\1[^>]*>(?P<msg>[^<]+)", response)
        if m:
            raise ExtractorError(
                'Unable to login: %s' % m.group('msg').strip(), expected=True)


class TwitchItemBaseIE(TwitchBaseIE):
    def _download_info(self, item, item_id):
        return self._extract_info(self._download_json(
            '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
            'Downloading %s info JSON' % self._ITEM_TYPE))

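    # The playlist JSON carries a 'chunks' dict keyed by quality; zip the
    # per-quality lists so each playlist entry bundles the same fragment in
    # every available quality.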
    def _extract_media(self, item_id):
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        response = self._download_json(
            '%s/api/videos/%s%s' % (self._API_BASE, self._ITEM_SHORTCUT, item_id), item_id,
            'Downloading %s playlist JSON' % self._ITEM_TYPE)
        entries = []
        chunks = response['chunks']
        qualities = list(chunks.keys())
        for num, fragment in enumerate(zip(*chunks.values()), start=1):
            formats = []
            for fmt_num, fragment_fmt in enumerate(fragment):
                format_id = qualities[fmt_num]
                fmt = {
                    'url': fragment_fmt['url'],
                    'format_id': format_id,
                    'quality': 1 if format_id == 'live' else 0,
                }
                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
                if m:
                    fmt['height'] = int(m.group('height'))
                formats.append(fmt)
            self._sort_formats(formats)
            entry = dict(info)
            entry['id'] = '%s_%d' % (entry['id'], num)
            entry['title'] = '%s part %d' % (entry['title'], num)
            entry['formats'] = formats
            entries.append(entry)
        return self.playlist_result(entries, info['id'], info['title'])

    def _extract_info(self, info):
        return {
            'id': info['_id'],
            'title': info['title'],
            'description': info['description'],
            'duration': info['length'],
            'thumbnail': info['preview'],
            'uploader': info['channel']['display_name'],
            'uploader_id': info['channel']['name'],
            'timestamp': parse_iso8601(info['recorded_at']),
            'view_count': info['views'],
        }

    def _real_extract(self, url):
        return self._extract_media(self._match_id(url))


class TwitchVideoIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:video'
    _VALID_URL = r'%s/[^/]+/b/(?P<id>[^/]+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'video'
    _ITEM_SHORTCUT = 'a'

    _TEST = {
        'url': 'http://www.twitch.tv/riotgames/b/577357806',
        'info_dict': {
            'id': 'a577357806',
            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
        },
        'playlist_mincount': 12,
    }


class TwitchChapterIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:chapter'
    _VALID_URL = r'%s/[^/]+/c/(?P<id>[^/]+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'chapter'
    _ITEM_SHORTCUT = 'c'

    _TESTS = [{
        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
        'info_dict': {
            'id': 'c5285812',
            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
        'only_matching': True,
    }]


class TwitchVodIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:vod'
    _VALID_URL = r'%s/[^/]+/v/(?P<id>[^/]+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'vod'
    _ITEM_SHORTCUT = 'v'

    _TEST = {
        'url': 'http://www.twitch.tv/ksptv/v/3622000',
        'info_dict': {
            'id': 'v3622000',
            'ext': 'mp4',
            'title': '''KSPTV: Squadcast: "Everyone's on vacation so here's Dahud" Edition!''',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 6951,
            'timestamp': 1419028564,
            'upload_date': '20141219',
            'uploader': 'KSPTV',
            'uploader_id': 'ksptv',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

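    # VOD playback requires an access token; its 'token' and 'sig' values are
    # passed to the usher endpoint as nauth/nauthsig to obtain the HLS formats.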
    def _real_extract(self, url):
        item_id = self._match_id(url)
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        access_token = self._download_json(
            '%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
            'Downloading %s access token' % self._ITEM_TYPE)
        formats = self._extract_m3u8_formats(
            '%s/vod/%s?nauth=%s&nauthsig=%s'
            % (self._USHER_BASE, item_id, access_token['token'], access_token['sig']),
            item_id, 'mp4')
        info['formats'] = formats
        return info


class TwitchPlaylistBaseIE(TwitchBaseIE):
    _PLAYLIST_URL = '%s/kraken/channels/%%s/videos/?offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PAGE_LIMIT = 100

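    # Page through the channel's videos _PAGE_LIMIT at a time until an empty
    # page is returned, then de-duplicate the collected video URLs.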
    def _extract_playlist(self, channel_id):
        info = self._download_json(
            '%s/kraken/channels/%s' % (self._API_BASE, channel_id),
            channel_id, 'Downloading channel info JSON')
        channel_name = info.get('display_name') or info.get('name')
        entries = []
        offset = 0
        limit = self._PAGE_LIMIT
        for counter in itertools.count(1):
            response = self._download_json(
                self._PLAYLIST_URL % (channel_id, offset, limit),
                channel_id, 'Downloading %s videos JSON page %d' % (self._PLAYLIST_TYPE, counter))
            page_entries = self._extract_playlist_page(response)
            if not page_entries:
                break
            entries.extend(page_entries)
            offset += limit
        return self.playlist_result(
            [self.url_result(entry) for entry in set(entries)],
            channel_id, channel_name)

    def _extract_playlist_page(self, response):
        videos = response.get('videos')
        return [video['url'] for video in videos] if videos else []

    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))


class TwitchProfileIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:profile'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_TYPE = 'profile'

    _TEST = {
        'url': 'http://www.twitch.tv/vanillatv/profile',
        'info_dict': {
            'id': 'vanillatv',
            'title': 'VanillaTV',
        },
        'playlist_mincount': 412,
    }


class TwitchPastBroadcastsIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:past_broadcasts'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/past_broadcasts/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = TwitchPlaylistBaseIE._PLAYLIST_URL + '&broadcasts=true'
    _PLAYLIST_TYPE = 'past broadcasts'

    _TEST = {
        'url': 'http://www.twitch.tv/spamfish/profile/past_broadcasts',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 54,
    }


class TwitchBookmarksIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:bookmarks'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/bookmarks/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = '%s/api/bookmark/?user=%%s&offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PLAYLIST_TYPE = 'bookmarks'

    _TEST = {
        'url': 'http://www.twitch.tv/ognos/profile/bookmarks',
        'info_dict': {
            'id': 'ognos',
            'title': 'Ognos',
        },
        'playlist_mincount': 3,
    }

    def _extract_playlist_page(self, response):
        entries = []
        for bookmark in response.get('bookmarks', []):
            video = bookmark.get('video')
            if not video:
                continue
            entries.append(video['url'])
        return entries


class TwitchStreamIE(TwitchBaseIE):
    IE_NAME = 'twitch:stream'
    _VALID_URL = r'%s/(?P<id>[^/]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE

    _TEST = {
        'url': 'http://www.twitch.tv/shroomztv',
        'info_dict': {
            'id': '12772022048',
            'display_id': 'shroomztv',
            'ext': 'mp4',
            'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
            'is_live': True,
            'timestamp': 1421928037,
            'upload_date': '20150122',
            'uploader': 'ShroomzTV',
            'uploader_id': 'shroomztv',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        stream = self._download_json(
            '%s/kraken/streams/%s' % (self._API_BASE, channel_id), channel_id,
            'Downloading stream JSON').get('stream')

        # Fallback on profile extraction if stream is offline
        if not stream:
            return self.url_result(
                'http://www.twitch.tv/%s/profile' % channel_id,
                'TwitchProfile', channel_id)

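        # Live playback needs a channel access token as well; its sig/token
        # pair is sent to the usher HLS endpoint together with a random 'p'
        # value.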
        access_token = self._download_json(
            '%s/api/channels/%s/access_token' % (self._API_BASE, channel_id), channel_id,
            'Downloading channel access token')

        query = {
            'allow_source': 'true',
            'p': random.randint(1000000, 10000000),
            'player': 'twitchweb',
            'segment_preference': '4',
            'sig': access_token['sig'],
            'token': access_token['token'],
        }

        formats = self._extract_m3u8_formats(
            '%s/api/channel/hls/%s.m3u8?%s'
            % (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(query)),
            channel_id, 'mp4')

        # prefer the 'source' stream, the others are limited to 30 fps
        def _sort_source(f):
            if f.get('m3u8_media') is not None and f['m3u8_media'].get('NAME') == 'Source':
                return 1
            return 0
        # ascending sort, so the Source rendition ends up last in the list
        formats = sorted(formats, key=_sort_source)

        view_count = stream.get('viewers')
        timestamp = parse_iso8601(stream.get('created_at'))

        channel = stream['channel']
        title = self._live_title(channel.get('display_name') or channel.get('name'))
        description = channel.get('status')

        thumbnails = []
        for thumbnail_key, thumbnail_url in stream['preview'].items():
            m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
            if not m:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int(m.group('width')),
                'height': int(m.group('height')),
            })

        return {
            'id': compat_str(stream['_id']),
            'display_id': channel_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'uploader': channel.get('display_name'),
            'uploader_id': channel.get('name'),
            'timestamp': timestamp,
            'view_count': view_count,
            'formats': formats,
            'is_live': True,
        }