X-Git-Url: https://git.rapsys.eu/youtubedl/blobdiff_plain/9dc487f48b50767cf540fa36c3de2c386fd74c04..647c9744516f7b5be3611b67e02201fb0146a638:/youtube_dl/extractor/redtube.py?ds=sidebyside

diff --git a/youtube_dl/extractor/redtube.py b/youtube_dl/extractor/redtube.py
index 721fc3a..10311a8 100644
--- a/youtube_dl/extractor/redtube.py
+++ b/youtube_dl/extractor/redtube.py
@@ -1,41 +1,55 @@
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
     int_or_none,
     str_to_int,
     unified_strdate,
+    url_or_none,
 )
 
 
 class RedTubeIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:(?:www\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
+    _TESTS = [{
         'url': 'http://www.redtube.com/66418',
-        'md5': '7b8c22b5e7098a3e1c09709df1126d2d',
+        'md5': 'fc08071233725f26b8f014dba9590005',
         'info_dict': {
             'id': '66418',
             'ext': 'mp4',
             'title': 'Sucked on a toilet',
-            'upload_date': '20120831',
+            'upload_date': '20110811',
             'duration': 596,
             'view_count': int,
             'age_limit': 18,
         }
-    }
+    }, {
+        'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
+        'only_matching': True,
+    }]
+
+    @staticmethod
+    def _extract_urls(webpage):
+        return re.findall(
+            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
+            webpage)
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
+        webpage = self._download_webpage(
+            'http://www.redtube.com/%s' % video_id, video_id)
 
         if any(s in webpage for s in ['video-deleted-info', '>This video has been removed']):
             raise ExtractorError('Video %s has been removed' % video_id, expected=True)
 
         title = self._html_search_regex(
-            (r'<h1 class="videoTitle[^"]*">(?P<title>.+?)</h1>',
-             r'videoTitle\s*:\s*(["\'])(?P<title>)\1'),
-            webpage, 'title', group='title')
+            (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
+             r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
+            webpage, 'title', group='title',
+            default=None) or self._og_search_title(webpage)
 
         formats = []
         sources = self._parse_json(
@@ -50,7 +64,23 @@ class RedTubeIE(InfoExtractor):
                     'format_id': format_id,
                     'height': int_or_none(format_id),
                 })
-        else:
+        medias = self._parse_json(
+            self._search_regex(
+                r'mediaDefinition\s*:\s*(\[.+?\])', webpage,
+                'media definitions', default='{}'),
+            video_id, fatal=False)
+        if medias and isinstance(medias, list):
+            for media in medias:
+                format_url = url_or_none(media.get('videoUrl'))
+                if not format_url:
+                    continue
+                format_id = media.get('quality')
+                formats.append({
+                    'url': format_url,
+                    'format_id': format_id,
+                    'height': int_or_none(format_id),
+                })
+        if not formats:
             video_url = self._html_search_regex(
                 r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
             formats.append({'url': video_url})
@@ -58,12 +88,14 @@ class RedTubeIE(InfoExtractor):
 
         thumbnail = self._og_search_thumbnail(webpage)
         upload_date = unified_strdate(self._search_regex(
-            r'<span[^>]+class="added-time"[^>]*>ADDED ([^<]+)<',
+            r'<span[^>]+>ADDED ([^<]+)<',
            webpage, 'upload date', fatal=False))
-        duration = int_or_none(self._search_regex(
-            r'videoDuration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
+        duration = int_or_none(self._og_search_property(
+            'video:duration', webpage, default=None) or self._search_regex(
+            r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
         view_count = str_to_int(self._search_regex(
-            r'<span[^>]*>VIEWS</span></td>\s*<td>([\d,.]+)',
+            (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)',
+             r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)'),
             webpage, 'view count', fatal=False))
 
         # No self-labeling, but they describe themselves as
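
A minimal usage sketch, not part of the diff above: the new RedTubeIE._extract_urls staticmethod is the kind of hook a referring extractor can call to collect embed.redtube.com iframe URLs from an arbitrary page. It assumes a youtube-dl checkout with this patch applied; the host-page markup below is an illustrative assumption, not taken from the source.

    # Hedged example; the webpage string is made up for illustration.
    from youtube_dl.extractor.redtube import RedTubeIE

    webpage = '<iframe src="https://embed.redtube.com/?bgcolor=000000&id=1443286"></iframe>'

    # _extract_urls() scans raw HTML and returns every matching embed URL,
    # here ['https://embed.redtube.com/?bgcolor=000000&id=1443286'].
    for embed_url in RedTubeIE._extract_urls(webpage):
        print(embed_url)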