# coding: utf-8
from __future__ import unicode_literals
+import collections
import itertools
-import re
+import json
import random
+import re
from .common import InfoExtractor
from ..compat import (
- compat_HTTPError,
+ compat_kwargs,
compat_parse_qs,
compat_str,
+ compat_urlparse,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
)
from ..utils import (
clean_html,
ExtractorError,
+ float_or_none,
int_or_none,
- js_to_json,
- orderedSet,
parse_duration,
parse_iso8601,
+ qualities,
+ str_or_none,
+ try_get,
+ unified_timestamp,
update_url_query,
- urlencode_postdata,
+ url_or_none,
urljoin,
)
class TwitchBaseIE(InfoExtractor):
- _VALID_URL_BASE = r'https?://(?:(?:www|go)\.)?twitch\.tv'
+ _VALID_URL_BASE = r'https?://(?:(?:www|go|m)\.)?twitch\.tv'
_API_BASE = 'https://api.twitch.tv'
_USHER_BASE = 'https://usher.ttvnw.net'
- _LOGIN_URL = 'https://www.twitch.tv/login'
- _CLIENT_ID = 'jzkbprff40iqj646a697cyrvl0zt2m6'
+ _LOGIN_FORM_URL = 'https://www.twitch.tv/login'
+ _LOGIN_POST_URL = 'https://passport.twitch.tv/login'
+ _CLIENT_ID = 'kimne78kx3ncx6brgo4mv6wki5h1ko'
_NETRC_MACHINE = 'twitch'
def _handle_error(self, response):
'%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
expected=True)
- def _call_api(self, path, item_id, note):
+ def _call_api(self, path, item_id, *args, **kwargs):
+ headers = kwargs.get('headers', {}).copy()
+ headers.update({
+ 'Accept': 'application/vnd.twitchtv.v5+json; charset=UTF-8',
+ 'Client-ID': self._CLIENT_ID,
+ })
+ kwargs.update({
+ 'headers': headers,
+ 'expected_status': (400, 410),
+ })
response = self._download_json(
- '%s/%s' % (self._API_BASE, path), item_id, note,
- headers={'Client-ID': self._CLIENT_ID})
+ '%s/%s' % (self._API_BASE, path), item_id,
+ *args, **compat_kwargs(kwargs))
self._handle_error(response)
return response
self._login()
def _login(self):
- (username, password) = self._get_login_info()
+ username, password = self._get_login_info()
if username is None:
return
page_url = urlh.geturl()
post_url = self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', page,
- 'post url', default=page_url, group='url')
+ 'post url', default=self._LOGIN_POST_URL, group='url')
post_url = urljoin(page_url, post_url)
- headers = {'Referer': page_url}
-
- try:
- response = self._download_json(
- post_url, None, note,
- data=urlencode_postdata(form),
- headers=headers)
- except ExtractorError as e:
- if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
- response = self._parse_json(
- e.cause.read().decode('utf-8'), None)
- fail(response['message'])
- raise
-
- redirect_url = urljoin(post_url, response['redirect'])
+ headers = {
+ 'Referer': page_url,
+ 'Origin': page_url,
+ 'Content-Type': 'text/plain;charset=UTF-8',
+ }
+
+ response = self._download_json(
+ post_url, None, note, data=json.dumps(form).encode(),
+ headers=headers, expected_status=400)
+ error = response.get('error_description') or response.get('error_code')
+ if error:
+ fail(error)
+
+ if 'Authenticated successfully' in response.get('message', ''):
+ return None, None
+
+ redirect_url = urljoin(
+ post_url,
+ response.get('redirect') or response['redirect_path'])
return self._download_webpage_handle(
redirect_url, None, 'Downloading login redirect page',
headers=headers)
login_page, handle = self._download_webpage_handle(
- self._LOGIN_URL, None, 'Downloading login page')
+ self._LOGIN_FORM_URL, None, 'Downloading login page')
# Some TOR nodes and public proxies are blocked completely
if 'blacklist_message' in login_page:
login_page, handle, 'Logging in', {
'username': username,
'password': password,
+ 'client_id': self._CLIENT_ID,
})
+ # Successful login
+ if not redirect_page:
+ return
+
if re.search(r'(?i)<form[^>]+id="two-factor-submit"', redirect_page) is not None:
# TODO: Add mechanism to request an SMS or phone call
tfa_token = self._get_tfa_info('two-factor authentication token')
def _prefer_source(self, formats):
try:
source = next(f for f in formats if f['format_id'] == 'Source')
- source['preference'] = 10
+ source['quality'] = 10
except StopIteration:
- pass # No Source stream present
+ for f in formats:
+ if '/chunked/' in f['url']:
+ f.update({
+ 'quality': 10,
+ 'format_note': 'Source',
+ })
self._sort_formats(formats)
+ def _download_access_token(self, channel_name):
+ return self._call_api(
+ 'api/channels/%s/access_token' % channel_name, channel_name,
+ 'Downloading access token JSON')
-class TwitchItemBaseIE(TwitchBaseIE):
- def _download_info(self, item, item_id):
- return self._extract_info(self._call_api(
- 'kraken/videos/%s%s' % (item, item_id), item_id,
- 'Downloading %s info JSON' % self._ITEM_TYPE))
-
- def _extract_media(self, item_id):
- info = self._download_info(self._ITEM_SHORTCUT, item_id)
- response = self._call_api(
- 'api/videos/%s%s' % (self._ITEM_SHORTCUT, item_id), item_id,
- 'Downloading %s playlist JSON' % self._ITEM_TYPE)
- entries = []
- chunks = response['chunks']
- qualities = list(chunks.keys())
- for num, fragment in enumerate(zip(*chunks.values()), start=1):
- formats = []
- for fmt_num, fragment_fmt in enumerate(fragment):
- format_id = qualities[fmt_num]
- fmt = {
- 'url': fragment_fmt['url'],
- 'format_id': format_id,
- 'quality': 1 if format_id == 'live' else 0,
- }
- m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
- if m:
- fmt['height'] = int(m.group('height'))
- formats.append(fmt)
- self._sort_formats(formats)
- entry = dict(info)
- entry['id'] = '%s_%d' % (entry['id'], num)
- entry['title'] = '%s part %d' % (entry['title'], num)
- entry['formats'] = formats
- entries.append(entry)
- return self.playlist_result(entries, info['id'], info['title'])
-
- def _extract_info(self, info):
- return {
- 'id': info['_id'],
- 'title': info.get('title') or 'Untitled Broadcast',
- 'description': info.get('description'),
- 'duration': int_or_none(info.get('length')),
- 'thumbnail': info.get('preview'),
- 'uploader': info.get('channel', {}).get('display_name'),
- 'uploader_id': info.get('channel', {}).get('name'),
- 'timestamp': parse_iso8601(info.get('recorded_at')),
- 'view_count': int_or_none(info.get('views')),
- }
-
- def _real_extract(self, url):
- return self._extract_media(self._match_id(url))
-
-
-class TwitchVideoIE(TwitchItemBaseIE):
- IE_NAME = 'twitch:video'
- _VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
- _ITEM_TYPE = 'video'
- _ITEM_SHORTCUT = 'a'
-
- _TEST = {
- 'url': 'http://www.twitch.tv/riotgames/b/577357806',
- 'info_dict': {
- 'id': 'a577357806',
- 'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
- },
- 'playlist_mincount': 12,
- 'skip': 'HTTP Error 404: Not Found',
- }
-
-
-class TwitchChapterIE(TwitchItemBaseIE):
- IE_NAME = 'twitch:chapter'
- _VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
- _ITEM_TYPE = 'chapter'
- _ITEM_SHORTCUT = 'c'
-
- _TESTS = [{
- 'url': 'http://www.twitch.tv/acracingleague/c/5285812',
- 'info_dict': {
- 'id': 'c5285812',
- 'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
- },
- 'playlist_mincount': 3,
- 'skip': 'HTTP Error 404: Not Found',
- }, {
- 'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
- 'only_matching': True,
- }]
+ def _extract_channel_id(self, token, channel_name):
+ return compat_str(self._parse_json(token, channel_name)['channel_id'])
-class TwitchVodIE(TwitchItemBaseIE):
+class TwitchVodIE(TwitchBaseIE):
IE_NAME = 'twitch:vod'
_VALID_URL = r'''(?x)
https?://
(?:
- (?:(?:www|go)\.)?twitch\.tv/(?:[^/]+/v|videos)/|
- player\.twitch\.tv/\?.*?\bvideo=v
+ (?:(?:www|go|m)\.)?twitch\.tv/(?:[^/]+/v(?:ideo)?|videos)/|
+ player\.twitch\.tv/\?.*?\bvideo=v?
)
(?P<id>\d+)
'''
}, {
'url': 'https://www.twitch.tv/videos/6528877',
'only_matching': True,
+ }, {
+ 'url': 'https://m.twitch.tv/beagsandjam/v/247478721',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.twitch.tv/northernlion/video/291940395',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://player.twitch.tv/?video=480452374',
+ 'only_matching': True,
}]
+ def _download_info(self, item_id):
+ return self._extract_info(
+ self._call_api(
+ 'kraken/videos/%s' % item_id, item_id,
+ 'Downloading video info JSON'))
+
+ @staticmethod
+ def _extract_info(info):
+ status = info.get('status')
+ if status == 'recording':
+ is_live = True
+ elif status == 'recorded':
+ is_live = False
+ else:
+ is_live = None
+ _QUALITIES = ('small', 'medium', 'large')
+ quality_key = qualities(_QUALITIES)
+ thumbnails = []
+ preview = info.get('preview')
+ if isinstance(preview, dict):
+ for thumbnail_id, thumbnail_url in preview.items():
+ thumbnail_url = url_or_none(thumbnail_url)
+ if not thumbnail_url:
+ continue
+ if thumbnail_id not in _QUALITIES:
+ continue
+ thumbnails.append({
+ 'url': thumbnail_url,
+ 'preference': quality_key(thumbnail_id),
+ })
+ return {
+ 'id': info['_id'],
+ 'title': info.get('title') or 'Untitled Broadcast',
+ 'description': info.get('description'),
+ 'duration': int_or_none(info.get('length')),
+ 'thumbnails': thumbnails,
+ 'uploader': info.get('channel', {}).get('display_name'),
+ 'uploader_id': info.get('channel', {}).get('name'),
+ 'timestamp': parse_iso8601(info.get('recorded_at')),
+ 'view_count': int_or_none(info.get('views')),
+ 'is_live': is_live,
+ }
+
def _real_extract(self, url):
- item_id = self._match_id(url)
+ vod_id = self._match_id(url)
- info = self._download_info(self._ITEM_SHORTCUT, item_id)
+ info = self._download_info(vod_id)
access_token = self._call_api(
- 'api/vods/%s/access_token' % item_id, item_id,
+ 'api/vods/%s/access_token' % vod_id, vod_id,
'Downloading %s access token' % self._ITEM_TYPE)
formats = self._extract_m3u8_formats(
- '%s/vod/%s?%s' % (
- self._USHER_BASE, item_id,
+ '%s/vod/%s.m3u8?%s' % (
+ self._USHER_BASE, vod_id,
compat_urllib_parse_urlencode({
'allow_source': 'true',
'allow_audio_only': 'true',
'allow_spectre': 'true',
'player': 'twitchweb',
+ 'playlist_include_framerate': 'true',
'nauth': access_token['token'],
'nauthsig': access_token['sig'],
})),
- item_id, 'mp4', entry_protocol='m3u8_native')
+ vod_id, 'mp4', entry_protocol='m3u8_native')
self._prefer_source(formats)
info['formats'] = formats
info['subtitles'] = {
'rechat': [{
'url': update_url_query(
- 'https://rechat.twitch.tv/rechat-messages', {
- 'video_id': 'v%s' % item_id,
- 'start': info['timestamp'],
+ 'https://api.twitch.tv/v5/videos/%s/comments' % vod_id, {
+ 'client_id': self._CLIENT_ID,
}),
'ext': 'json',
}],
return info
def _make_video_result(node):
    """Build a url_transparent result for a GraphQL VOD node.

    Returns None when the node carries no id.
    """
    assert isinstance(node, dict)
    video_id = node.get('id')
    if not video_id:
        return
    result = {
        '_type': 'url_transparent',
        'ie_key': TwitchVodIE.ie_key(),
        'id': video_id,
        'url': 'https://www.twitch.tv/videos/%s' % video_id,
        'title': node.get('title'),
        'thumbnail': node.get('previewThumbnailURL'),
        'duration': float_or_none(node.get('lengthSeconds')),
        'view_count': int_or_none(node.get('viewCount')),
    }
    return result
class TwitchGraphQLBaseIE(TwitchBaseIE):
    """Shared helper for extractors backed by Twitch's GraphQL endpoint."""

    _PAGE_LIMIT = 100

    def _download_gql(self, video_id, op, variables, sha256_hash, note, fatal=True):
        """POST a persisted GraphQL query and return the decoded JSON."""
        payload = {
            'operationName': op,
            'variables': variables,
            'extensions': {
                'persistedQuery': {
                    'version': 1,
                    # Hash identifying the server-side persisted query.
                    'sha256Hash': sha256_hash,
                },
            },
        }
        headers = {
            'Content-Type': 'text/plain;charset=UTF-8',
            'Client-ID': self._CLIENT_ID,
        }
        return self._download_json(
            'https://gql.twitch.tv/gql', video_id, note,
            data=json.dumps(payload).encode(), headers=headers, fatal=fatal)
class TwitchCollectionIE(TwitchGraphQLBaseIE):
    _VALID_URL = r'https?://(?:(?:www|go|m)\.)?twitch\.tv/collections/(?P<id>[^/]+)'

    _TESTS = [{
        'url': 'https://www.twitch.tv/collections/wlDCoH0zEBZZbQ',
        'info_dict': {
            'id': 'wlDCoH0zEBZZbQ',
            'title': 'Overthrow Nook, capitalism for children',
        },
        'playlist_mincount': 13,
    }]

    _OPERATION_NAME = 'CollectionSideBar'
    _SHA256_HASH = '27111f1b382effad0b6def325caef1909c733fe6a4fbabf54f8d491ef2cf2f14'

    def _real_extract(self, url):
        """Resolve a collection page into a playlist of its VODs."""
        collection_id = self._match_id(url)
        collection = self._download_gql(
            collection_id, self._OPERATION_NAME,
            {'collectionID': collection_id}, self._SHA256_HASH,
            'Downloading collection GraphQL')['data']['collection']
        title = collection.get('title')
        entries = []
        for edge in collection['items']['edges']:
            node = edge.get('node') if isinstance(edge, dict) else None
            if not isinstance(node, dict):
                continue
            video = _make_video_result(node)
            if video:
                entries.append(video)
        return self.playlist_result(
            entries, playlist_id=collection_id, playlist_title=title)
class TwitchPlaylistBaseIE(TwitchGraphQLBaseIE):
    """Base class for paginated channel playlists (videos, clips, collections).

    Subclasses supply the persisted-query metadata (_SHA256_HASH,
    _OPERATION_NAME), the kind constants (_ENTRY_KIND, _EDGE_KIND,
    _NODE_KIND) and the hooks _make_variables() / _extract_entry().
    """

    def _entries(self, channel_name, *args):
        # Cursor-based pagination over the GraphQL edge list.
        cursor = None
        variables_common = self._make_variables(channel_name, *args)
        entries_key = '%ss' % self._ENTRY_KIND
        for page_num in itertools.count(1):
            variables = variables_common.copy()
            variables['limit'] = self._PAGE_LIMIT
            if cursor:
                variables['cursor'] = cursor
            page = self._download_gql(
                channel_name, self._OPERATION_NAME, variables,
                self._SHA256_HASH,
                'Downloading %ss GraphQL page %s' % (self._NODE_KIND, page_num),
                fatal=False)
            if not page:
                break
            edges = try_get(
                page, lambda x: x['data']['user'][entries_key]['edges'], list)
            if not edges:
                break
            for edge in edges:
                # Skip malformed or unexpected edge/node payloads.
                if not isinstance(edge, dict):
                    continue
                if edge.get('__typename') != self._EDGE_KIND:
                    continue
                node = edge.get('node')
                if not isinstance(node, dict):
                    continue
                if node.get('__typename') != self._NODE_KIND:
                    continue
                entry = self._extract_entry(node)
                if entry:
                    # Cursor of the last yielded edge drives the next page.
                    cursor = edge.get('cursor')
                    yield entry
            if not cursor or not isinstance(cursor, compat_str):
                break

    # Deprecated kraken v5 API
    def _entries_kraken(self, channel_name, broadcast_type, sort):
        access_token = self._download_access_token(channel_name)
        channel_id = self._extract_channel_id(access_token['token'], channel_name)
        offset = 0
        # NOTE(review): counter_override is never reassigned here —
        # looks like a leftover from the old pagination workaround.
        counter_override = None
        for counter in itertools.count(1):
            response = self._call_api(
                'kraken/channels/%s/videos/' % channel_id,
                channel_id,
                'Downloading video JSON page %s' % (counter_override or counter),
                query={
                    'offset': offset,
                    'limit': self._PAGE_LIMIT,
                    'broadcast_type': broadcast_type,
                    'sort': sort,
                })
            videos = response.get('videos')
            if not isinstance(videos, list):
                break
            for video in videos:
                if not isinstance(video, dict):
                    continue
                video_url = url_or_none(video.get('url'))
                if not video_url:
                    continue
                yield {
                    '_type': 'url_transparent',
                    'ie_key': TwitchVodIE.ie_key(),
                    'id': video.get('_id'),
                    'url': video_url,
                    'title': video.get('title'),
                    'description': video.get('description'),
                    'timestamp': unified_timestamp(video.get('published_at')),
                    'duration': float_or_none(video.get('length')),
                    'view_count': int_or_none(video.get('views')),
                    'language': video.get('language'),
                }
            offset += self._PAGE_LIMIT
            total = int_or_none(response.get('_total'))
            if total and offset >= total:
                break
class TwitchVideosIE(TwitchPlaylistBaseIE):
    """Channel /videos pages, filtered and sorted via query parameters."""

    _VALID_URL = r'https?://(?:(?:www|go|m)\.)?twitch\.tv/(?P<id>[^/]+)/(?:videos|profile)'

    _TESTS = [{
        # All Videos sorted by Date
        'url': 'https://www.twitch.tv/spamfish/videos?filter=all',
        'info_dict': {
            'id': 'spamfish',
            'title': 'spamfish - All Videos sorted by Date',
        },
        'playlist_mincount': 924,
    }, {
        # All Videos sorted by Popular
        'url': 'https://www.twitch.tv/spamfish/videos?filter=all&sort=views',
        'info_dict': {
            'id': 'spamfish',
            'title': 'spamfish - All Videos sorted by Popular',
        },
        'playlist_mincount': 931,
    }, {
        # Past Broadcasts sorted by Date
        'url': 'https://www.twitch.tv/spamfish/videos?filter=archives',
        'info_dict': {
            'id': 'spamfish',
            'title': 'spamfish - Past Broadcasts sorted by Date',
        },
        'playlist_mincount': 27,
    }, {
        # Highlights sorted by Date
        'url': 'https://www.twitch.tv/spamfish/videos?filter=highlights',
        'info_dict': {
            'id': 'spamfish',
            'title': 'spamfish - Highlights sorted by Date',
        },
        'playlist_mincount': 901,
    }, {
        # Uploads sorted by Date
        'url': 'https://www.twitch.tv/esl_csgo/videos?filter=uploads&sort=time',
        'info_dict': {
            'id': 'esl_csgo',
            'title': 'esl_csgo - Uploads sorted by Date',
        },
        'playlist_mincount': 5,
    }, {
        # Past Premieres sorted by Date
        'url': 'https://www.twitch.tv/spamfish/videos?filter=past_premieres',
        'info_dict': {
            'id': 'spamfish',
            'title': 'spamfish - Past Premieres sorted by Date',
        },
        'playlist_mincount': 1,
    }, {
        'url': 'https://www.twitch.tv/spamfish/videos/all',
        'only_matching': True,
    }, {
        'url': 'https://m.twitch.tv/spamfish/videos/all',
        'only_matching': True,
    }, {
        'url': 'https://www.twitch.tv/spamfish/videos',
        'only_matching': True,
    }]

    # Maps a ?filter= value onto a GraphQL broadcastType plus a display label.
    Broadcast = collections.namedtuple('Broadcast', ['type', 'label'])

    _DEFAULT_BROADCAST = Broadcast(None, 'All Videos')
    _BROADCASTS = {
        'archives': Broadcast('ARCHIVE', 'Past Broadcasts'),
        'highlights': Broadcast('HIGHLIGHT', 'Highlights'),
        'uploads': Broadcast('UPLOAD', 'Uploads'),
        'past_premieres': Broadcast('PAST_PREMIERE', 'Past Premieres'),
        'all': _DEFAULT_BROADCAST,
    }

    _DEFAULT_SORTED_BY = 'Date'
    _SORTED_BY = {
        'time': _DEFAULT_SORTED_BY,
        'views': 'Popular',
    }

    _SHA256_HASH = 'a937f1d22e269e39a03b509f65a7490f9fc247d7f83d6ac1421523e3b68042cb'
    _OPERATION_NAME = 'FilterableVideoTower_Videos'
    _ENTRY_KIND = 'video'
    _EDGE_KIND = 'VideoEdge'
    _NODE_KIND = 'Video'

    @classmethod
    def suitable(cls, url):
        # Defer to the more specific clips/collections extractors, whose
        # /videos?filter=... URLs also match this extractor's pattern.
        return (False
                if any(ie.suitable(url) for ie in (
                    TwitchVideosClipsIE,
                    TwitchVideosCollectionsIE))
                else super(TwitchVideosIE, cls).suitable(url))

    @staticmethod
    def _make_variables(channel_name, broadcast_type, sort):
        return {
            'channelOwnerLogin': channel_name,
            'broadcastType': broadcast_type,
            'videoSort': sort.upper(),
        }

    @staticmethod
    def _extract_entry(node):
        return _make_video_result(node)

    def _real_extract(self, url):
        channel_name = self._match_id(url)
        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        filter = qs.get('filter', ['all'])[0]
        sort = qs.get('sort', ['time'])[0]
        broadcast = self._BROADCASTS.get(filter, self._DEFAULT_BROADCAST)
        return self.playlist_result(
            self._entries(channel_name, broadcast.type, sort),
            playlist_id=channel_name,
            playlist_title='%s - %s sorted by %s'
            % (channel_name, broadcast.label,
               self._SORTED_BY.get(sort, self._DEFAULT_SORTED_BY)))
class TwitchVideosClipsIE(TwitchPlaylistBaseIE):
    """Channel clips pages (/clips or /videos?filter=clips)."""

    _VALID_URL = r'https?://(?:(?:www|go|m)\.)?twitch\.tv/(?P<id>[^/]+)/(?:clips|videos/*?\?.*?\bfilter=clips)'

    _TESTS = [{
        # Clips
        'url': 'https://www.twitch.tv/vanillatv/clips?filter=clips&range=all',
        'info_dict': {
            'id': 'vanillatv',
            'title': 'vanillatv - Clips Top All',
        },
        'playlist_mincount': 1,
    }, {
        'url': 'https://www.twitch.tv/dota2ruhub/videos?filter=clips&range=7d',
        'only_matching': True,
    }]

    # Maps a ?range= value onto a GraphQL filter plus a display label.
    Clip = collections.namedtuple('Clip', ['filter', 'label'])

    _DEFAULT_CLIP = Clip('LAST_WEEK', 'Top 7D')
    _RANGE = {
        '24hr': Clip('LAST_DAY', 'Top 24H'),
        '7d': _DEFAULT_CLIP,
        '30d': Clip('LAST_MONTH', 'Top 30D'),
        'all': Clip('ALL_TIME', 'Top All'),
    }

    # NB: values other than 20 result in skipped videos
    _PAGE_LIMIT = 20

    _SHA256_HASH = 'b73ad2bfaecfd30a9e6c28fada15bd97032c83ec77a0440766a56fe0bd632777'
    _OPERATION_NAME = 'ClipsCards__User'
    _ENTRY_KIND = 'clip'
    _EDGE_KIND = 'ClipEdge'
    _NODE_KIND = 'Clip'

    @staticmethod
    def _make_variables(channel_name, filter):
        return {
            'login': channel_name,
            'criteria': {
                'filter': filter,
            },
        }

    @staticmethod
    def _extract_entry(node):
        # Delegate actual extraction to TwitchClipsIE via url_transparent.
        assert isinstance(node, dict)
        clip_url = url_or_none(node.get('url'))
        if not clip_url:
            return
        return {
            '_type': 'url_transparent',
            'ie_key': TwitchClipsIE.ie_key(),
            'id': node.get('id'),
            'url': clip_url,
            'title': node.get('title'),
            'thumbnail': node.get('thumbnailURL'),
            'duration': float_or_none(node.get('durationSeconds')),
            'timestamp': unified_timestamp(node.get('createdAt')),
            'view_count': int_or_none(node.get('viewCount')),
            'language': node.get('language'),
        }

    def _real_extract(self, url):
        channel_name = self._match_id(url)
        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        range = qs.get('range', ['7d'])[0]
        clip = self._RANGE.get(range, self._DEFAULT_CLIP)
        return self.playlist_result(
            self._entries(channel_name, clip.filter),
            playlist_id=channel_name,
            playlist_title='%s - Clips %s' % (channel_name, clip.label))
class TwitchVideosCollectionsIE(TwitchPlaylistBaseIE):
    """Channel collections listings (/videos?filter=collections)."""

    _VALID_URL = r'https?://(?:(?:www|go|m)\.)?twitch\.tv/(?P<id>[^/]+)/videos/*?\?.*?\bfilter=collections'

    _TESTS = [{
        # Collections
        'url': 'https://www.twitch.tv/spamfish/videos?filter=collections',
        'info_dict': {
            'id': 'spamfish',
            'title': 'spamfish - Collections',
        },
        'playlist_mincount': 3,
    }]

    _SHA256_HASH = '07e3691a1bad77a36aba590c351180439a40baefc1c275356f40fc7082419a84'
    _OPERATION_NAME = 'ChannelCollectionsContent'
    _ENTRY_KIND = 'collection'
    _EDGE_KIND = 'CollectionsItemEdge'
    _NODE_KIND = 'Collection'

    @staticmethod
    def _make_variables(channel_name):
        return {
            'ownerLogin': channel_name,
        }

    @staticmethod
    def _extract_entry(node):
        # Each collection becomes a nested playlist handled by
        # TwitchCollectionIE via url_transparent.
        assert isinstance(node, dict)
        collection_id = node.get('id')
        if not collection_id:
            return
        return {
            '_type': 'url_transparent',
            'ie_key': TwitchCollectionIE.ie_key(),
            'id': collection_id,
            'url': 'https://www.twitch.tv/collections/%s' % collection_id,
            'title': node.get('title'),
            'thumbnail': node.get('thumbnailURL'),
            'duration': float_or_none(node.get('lengthSeconds')),
            'timestamp': unified_timestamp(node.get('updatedAt')),
            'view_count': int_or_none(node.get('viewCount')),
        }

    def _real_extract(self, url):
        channel_name = self._match_id(url)
        return self.playlist_result(
            self._entries(channel_name), playlist_id=channel_name,
            playlist_title='%s - Collections' % channel_name)
class TwitchStreamIE(TwitchBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:
- (?:(?:www|go)\.)?twitch\.tv/|
+ (?:(?:www|go|m)\.)?twitch\.tv/|
player\.twitch\.tv/\?.*?\bchannel=
)
(?P<id>[^/#?]+)
}, {
'url': 'https://go.twitch.tv/food',
'only_matching': True,
+ }, {
+ 'url': 'https://m.twitch.tv/food',
+ 'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return (False
if any(ie.suitable(url) for ie in (
- TwitchVideoIE,
- TwitchChapterIE,
TwitchVodIE,
- TwitchProfileIE,
- TwitchAllVideosIE,
- TwitchUploadsIE,
- TwitchPastBroadcastsIE,
- TwitchHighlightsIE))
+ TwitchCollectionIE,
+ TwitchVideosIE,
+ TwitchVideosClipsIE,
+ TwitchVideosCollectionsIE,
+ TwitchClipsIE))
else super(TwitchStreamIE, cls).suitable(url))
def _real_extract(self, url):
- channel_id = self._match_id(url)
+ channel_name = self._match_id(url)
+
+ access_token = self._download_access_token(channel_name)
+
+ token = access_token['token']
+ channel_id = self._extract_channel_id(token, channel_name)
stream = self._call_api(
- 'kraken/streams/%s?stream_type=all' % channel_id, channel_id,
- 'Downloading stream JSON').get('stream')
+ 'kraken/streams/%s?stream_type=all' % channel_id,
+ channel_id, 'Downloading stream JSON').get('stream')
if not stream:
raise ExtractorError('%s is offline' % channel_id, expected=True)
# (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON) that will lead to constructing
# an invalid m3u8 URL. Working around by use of original channel name from stream
# JSON and fallback to lowercase if it's not available.
- channel_id = stream.get('channel', {}).get('name') or channel_id.lower()
-
- access_token = self._call_api(
- 'api/channels/%s/access_token' % channel_id, channel_id,
- 'Downloading channel access token')
+ channel_name = try_get(
+ stream, lambda x: x['channel']['name'],
+ compat_str) or channel_name.lower()
query = {
'allow_source': 'true',
'allow_spectre': 'true',
'p': random.randint(1000000, 10000000),
'player': 'twitchweb',
+ 'playlist_include_framerate': 'true',
'segment_preference': '4',
'sig': access_token['sig'].encode('utf-8'),
- 'token': access_token['token'].encode('utf-8'),
+ 'token': token.encode('utf-8'),
}
formats = self._extract_m3u8_formats(
'%s/api/channel/hls/%s.m3u8?%s'
- % (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
+ % (self._USHER_BASE, channel_name, compat_urllib_parse_urlencode(query)),
channel_id, 'mp4')
self._prefer_source(formats)
})
return {
- 'id': compat_str(stream['_id']),
- 'display_id': channel_id,
+ 'id': str_or_none(stream.get('_id')) or channel_id,
+ 'display_id': channel_name,
'title': title,
'description': description,
'thumbnails': thumbnails,
}
class TwitchClipsIE(TwitchBaseIE):
    """Individual Twitch clips, fetched through the GraphQL API."""

    IE_NAME = 'twitch:clips'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            clips\.twitch\.tv/(?:embed\?.*?\bclip=|(?:[^/]+/)*)|
                            (?:(?:www|go|m)\.)?twitch\.tv/[^/]+/clip/
                        )
                        (?P<id>[^/?#&]+)
                    '''

    _TESTS = [{
        'url': 'https://clips.twitch.tv/FaintLightGullWholeWheat',
        'md5': '761769e1eafce0ffebfb4089cb3847cd',
        'info_dict': {
            'id': '42850523',
            'ext': 'mp4',
            'title': 'EA Play 2016 Live from the Novo Theatre',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1465767393,
            'upload_date': '20160612',
            'creator': 'EA',
            'uploader': 'stereotype_',
            'uploader_id': '43566419',
        },
    }, {
        # multiple formats
        'url': 'https://clips.twitch.tv/rflegendary/UninterestedBeeDAESuppy',
        'only_matching': True,
    }, {
        'url': 'https://www.twitch.tv/sergeynixon/clip/StormyThankfulSproutFutureMan',
        'only_matching': True,
    }, {
        'url': 'https://clips.twitch.tv/embed?clip=InquisitiveBreakableYogurtJebaited',
        'only_matching': True,
    }, {
        'url': 'https://m.twitch.tv/rossbroadcast/clip/ConfidentBraveHumanChefFrank',
        'only_matching': True,
    }, {
        'url': 'https://go.twitch.tv/rossbroadcast/clip/ConfidentBraveHumanChefFrank',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Ad-hoc (non-persisted) GraphQL query for the clip's metadata,
        # thumbnails and direct video quality URLs.
        clip = self._download_json(
            'https://gql.twitch.tv/gql', video_id, data=json.dumps({
                'query': '''{
  clip(slug: "%s") {
    broadcaster {
      displayName
    }
    createdAt
    curator {
      displayName
      id
    }
    durationSeconds
    id
    tiny: thumbnailURL(width: 86, height: 45)
    small: thumbnailURL(width: 260, height: 147)
    medium: thumbnailURL(width: 480, height: 272)
    title
    videoQualities {
      frameRate
      quality
      sourceURL
    }
    viewCount
  }
}''' % video_id,
            }).encode(), headers={
                'Client-ID': self._CLIENT_ID,
            })['data']['clip']

        if not clip:
            raise ExtractorError(
                'This clip is no longer available', expected=True)

        formats = []
        for option in clip.get('videoQualities', []):
            if not isinstance(option, dict):
                continue
            source = url_or_none(option.get('sourceURL'))
            if not source:
                continue
            formats.append({
                'url': source,
                'format_id': option.get('quality'),
                'height': int_or_none(option.get('quality')),
                'fps': int_or_none(option.get('frameRate')),
            })
        self._sort_formats(formats)

        thumbnails = []
        for thumbnail_id in ('tiny', 'small', 'medium'):
            thumbnail_url = clip.get(thumbnail_id)
            if not thumbnail_url:
                continue
            thumb = {
                'id': thumbnail_id,
                'url': thumbnail_url,
            }
            # Thumbnail URLs embed their dimensions as "-WxH." suffix.
            mobj = re.search(r'-(\d+)x(\d+)\.', thumbnail_url)
            if mobj:
                thumb.update({
                    'height': int(mobj.group(2)),
                    'width': int(mobj.group(1)),
                })
            thumbnails.append(thumb)

        return {
            'id': clip.get('id') or video_id,
            'title': clip.get('title') or video_id,
            'formats': formats,
            'duration': int_or_none(clip.get('durationSeconds')),
            'views': int_or_none(clip.get('viewCount')),
            'timestamp': unified_timestamp(clip.get('createdAt')),
            'thumbnails': thumbnails,
            'creator': try_get(clip, lambda x: x['broadcaster']['displayName'], compat_str),
            'uploader': try_get(clip, lambda x: x['curator']['displayName'], compat_str),
            'uploader_id': try_get(clip, lambda x: x['curator']['id'], compat_str),
        }