import re

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_str,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    str_or_none,
    urlencode_postdata,
)
class RoosterTeethIE(InfoExtractor):
    _VALID_URL = r'https?://(?:.+?\.)?roosterteeth\.com/(?:episode|watch)/(?P<id>[^/?#&]+)'
    _LOGIN_URL = 'https://roosterteeth.com/login'
    _NETRC_MACHINE = 'roosterteeth'
    _TESTS = [{
        'url': 'http://roosterteeth.com/episode/million-dollars-but-season-2-million-dollars-but-the-game-announcement',
        'md5': 'e2bd7764732d785ef797700a2489f212',
        'info_dict': {
            'id': '9156',
            'display_id': 'million-dollars-but-season-2-million-dollars-but-the-game-announcement',
            'ext': 'mp4',
            'title': 'Million Dollars, But... The Game Announcement',
            'description': 'md5:168a54b40e228e79f4ddb141e89fe4f5',
            'thumbnail': r're:^https?://.*\.png$',
            'series': 'Million Dollars, But...',
            'episode': 'Million Dollars, But... The Game Announcement',
        },
    }, {
        'url': 'http://achievementhunter.roosterteeth.com/episode/off-topic-the-achievement-hunter-podcast-2016-i-didn-t-think-it-would-pass-31',
        'only_matching': True,
    }, {
        # only available for FIRST members
        'url': 'http://roosterteeth.com/episode/rt-docs-the-world-s-greatest-head-massage-the-world-s-greatest-head-massage-an-asmr-journey-part-one',
        'only_matching': True,
    }, {
        'url': 'https://roosterteeth.com/watch/million-dollars-but-season-2-million-dollars-but-the-game-announcement',
        'only_matching': True,
    }]

    def _login(self):
        # Credentials come from --username/--password or the 'roosterteeth'
        # .netrc machine; anonymous extraction is allowed (FIRST-only videos
        # are then rejected later in _real_extract).
        username, password = self._get_login_info()
        if username is None:
            return

        # NOTE(review): the middle of this method was not visible in the
        # reviewed patch context (only the final _download_webpage call was
        # shown, referencing an otherwise-undefined 'login_form').  The form
        # construction below follows the standard hidden-inputs login pattern
        # used by this codebase — confirm against the full file.
        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='Unable to download login page')

        login_form = self._hidden_inputs(login_page)
        login_form.update({
            'username': username,
            'password': password,
        })

        login_request = self._download_webpage(
            self._LOGIN_URL, None,
            note='Logging in',
            data=urlencode_postdata(login_form),
            headers={
                'Referer': self._LOGIN_URL,
            })

        # A successful login lands on a page carrying a sign-out link.
        if not re.search(r'href=["\']/signout', login_request):
            raise ExtractorError('Unable to log in')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        # The path slug doubles as the episode identifier for the API.
        display_id = self._match_id(url)
        api_episode_url = 'https://svod-be.roosterteeth.com/api/v1/episodes/%s' % display_id

        try:
            m3u8_url = self._download_json(
                api_episode_url + '/videos', display_id,
                'Downloading video JSON metadata')['data'][0]['attributes']['url']
        except ExtractorError as e:
            # A 403 with {'access': False} in the body means the episode is
            # restricted to paying FIRST members.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                if self._parse_json(e.cause.read().decode(), display_id).get('access') is False:
                    self.raise_login_required(
                        '%s is only available for FIRST members' % display_id)
            raise

        formats = self._extract_m3u8_formats(
            m3u8_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls')
        self._sort_formats(formats)

        # Episode metadata is a separate API call from the video manifest.
        episode = self._download_json(
            api_episode_url, display_id,
            'Downloading episode JSON metadata')['data'][0]
        attributes = episode['attributes']
        title = attributes.get('title') or attributes['display_title']
        # Numeric API id becomes the canonical video id; the URL slug stays
        # available as display_id.
        video_id = compat_str(episode['id'])

        thumbnails = []
        for image in episode.get('included', {}).get('images', []):
            if image.get('type') == 'episode_image':
                img_attributes = image.get('attributes') or {}
                # Preserve each available size variant as its own thumbnail.
                for k in ('thumb', 'small', 'medium', 'large'):
                    img_url = img_attributes.get(k)
                    if img_url:
                        thumbnails.append({
                            'id': k,
                            'url': img_url,
                        })

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': attributes.get('description') or attributes.get('caption'),
            'thumbnails': thumbnails,
            'series': attributes.get('show_title'),
            'season_number': int_or_none(attributes.get('season_number')),
            'season_id': attributes.get('season_id'),
            # API exposes no distinct episode title, so reuse the main title.
            'episode': title,
            'episode_number': int_or_none(attributes.get('number')),
            'episode_id': str_or_none(episode.get('uuid')),
            'formats': formats,
            'channel_id': attributes.get('channel_id'),
            'duration': int_or_none(attributes.get('length')),
        }