import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    merge_dicts,
    str_to_int,
    unified_strdate,
    url_or_none,
)
class RedTubeIE(InfoExtractor):
    """Extractor for redtube.com watch pages and embed.redtube.com iframes.

    Matches both the canonical watch URL (``redtube.com/<id>``) and the
    embed form (``embed.redtube.com/?...id=<id>``); both resolve to the
    same numeric video id.
    """
    _VALID_URL = r'https?://(?:(?:www\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.redtube.com/66418',
        'md5': 'fc08071233725f26b8f014dba9590005',
        'info_dict': {
            'id': '66418',
            'ext': 'mp4',
            'title': 'Sucked on a toilet',
            'upload_date': '20110811',
            'duration': 596,
            'view_count': int,
            'age_limit': 18,
        }
    }, {
        'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        """Return all RedTube embed iframe URLs found in *webpage* HTML.

        Used by other extractors to discover RedTube players embedded in
        third-party pages; matches both protocol-relative and absolute
        embed URLs.
        """
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
            webpage)

    def _real_extract(self, url):
        """Extract metadata and formats for a single RedTube video.

        Raises ExtractorError (expected=True) when the video has been
        removed or is private.
        """
        video_id = self._match_id(url)
        # Always fetch the canonical watch page, even when the input URL
        # is an embed URL — the watch page carries the full metadata.
        webpage = self._download_webpage(
            'http://www.redtube.com/%s' % video_id, video_id)

        # Each entry: (page markers that identify the condition, message suffix).
        ERRORS = (
            (('video-deleted-info', '>This video has been removed'), 'has been removed'),
            (('private_video_text', '>This video is private', '>Send a friend request to its owner to be able to view it'), 'is private'),
        )

        for patterns, message in ERRORS:
            if any(p in webpage for p in patterns):
                raise ExtractorError(
                    'Video %s %s' % (video_id, message), expected=True)

        # Prefer structured JSON-LD metadata when the page provides it.
        info = self._search_json_ld(webpage, video_id, default={})

        if not info.get('title'):
            info['title'] = self._html_search_regex(
                (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
                 r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
                webpage, 'title', group='title',
                default=None) or self._og_search_title(webpage)

        formats = []
        # Layout 1: a JS object mapping quality label -> direct URL.
        sources = self._parse_json(
            self._search_regex(
                r'sources\s*:\s*({.+?})', webpage, 'source', default='{}'),
            video_id, fatal=False)
        if sources and isinstance(sources, dict):
            for format_id, format_url in sources.items():
                # Sanitize like the mediaDefinition branch below so
                # malformed or relative values never become formats.
                format_url = url_or_none(format_url)
                if format_url:
                    formats.append({
                        'url': format_url,
                        'format_id': format_id,
                        'height': int_or_none(format_id),
                    })
        # Layout 2: a JS list of {videoUrl, quality, ...} dicts.
        medias = self._parse_json(
            self._search_regex(
                r'mediaDefinition\s*:\s*(\[.+?\])', webpage,
                # The capture is a JSON list, so the inert fallback must
                # parse to a list too (was '{}', which never matched the
                # isinstance check below anyway).
                'media definitions', default='[]'),
            video_id, fatal=False)
        if medias and isinstance(medias, list):
            for media in medias:
                format_url = url_or_none(media.get('videoUrl'))
                if not format_url:
                    continue
                format_id = media.get('quality')
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                    'height': int_or_none(format_id),
                })
        # Legacy layout: a plain <source> tag. Only used as a last resort,
        # and fatal — with no formats at all there is nothing to extract.
        if not formats:
            video_url = self._html_search_regex(
                r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
            formats.append({'url': video_url})
        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(webpage)
        upload_date = unified_strdate(self._search_regex(
            r'<span[^>]+>(?:ADDED|Published on) ([^<]+)<',
            webpage, 'upload date', default=None))
        duration = int_or_none(self._og_search_property(
            'video:duration', webpage, default=None) or self._search_regex(
            r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
        view_count = str_to_int(self._search_regex(
            (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)',
             r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)',
             r'<span[^>]+\bclass=["\']video_view_count[^>]*>\s*([\d,.]+)'),
            webpage, 'view count', default=None))

        # No self-labeling, but they describe themselves as
        # "Home of Videos Porno"
        age_limit = 18

        # JSON-LD values win only where we found nothing; merge_dicts keeps
        # the first non-empty value for each key.
        return merge_dicts(info, {
            'id': video_id,
            'ext': 'mp4',
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'age_limit': age_limit,
            'formats': formats,
        })