youtube_dl/extractor/redtube.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    str_to_int,
    unified_strdate,
    url_or_none,
)


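# Extractor for redtube.com video pages and for embed.redtube.com player
# embeds (both URL forms are covered by _VALID_URL below).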
class RedTubeIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.redtube.com/66418',
        'md5': 'fc08071233725f26b8f014dba9590005',
        'info_dict': {
            'id': '66418',
            'ext': 'mp4',
            'title': 'Sucked on a toilet',
            'upload_date': '20110811',
            'duration': 596,
            'view_count': int,
            'age_limit': 18,
        }
    }, {
        'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
        'only_matching': True,
    }]

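    # Return the src URLs of any embed.redtube.com iframes found in the
    # given webpage, so embedded players can be picked up on other sites.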
    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
            webpage)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'http://www.redtube.com/%s' % video_id, video_id)

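        # Removed videos still return a regular page; detect the removal
        # markers and bail out early with a clear, expected error.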
        if any(s in webpage for s in ['video-deleted-info', '>This video has been removed']):
            raise ExtractorError('Video %s has been removed' % video_id, expected=True)

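        # Title: try the page heading and the inline player config first,
        # then fall back to the Open Graph title.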
        title = self._html_search_regex(
            (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
             r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
            webpage, 'title', group='title',
            default=None) or self._og_search_title(webpage)

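        # Formats can come from two player configs. The older 'sources'
        # object maps quality labels (heights) to direct video URLs.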
        formats = []
        sources = self._parse_json(
            self._search_regex(
                r'sources\s*:\s*({.+?})', webpage, 'source', default='{}'),
            video_id, fatal=False)
        if sources and isinstance(sources, dict):
            for format_id, format_url in sources.items():
                if format_url:
                    formats.append({
                        'url': format_url,
                        'format_id': format_id,
                        'height': int_or_none(format_id),
                    })
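        # Newer pages expose a 'mediaDefinition' list of objects carrying
        # 'videoUrl' and 'quality' instead.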
        medias = self._parse_json(
            self._search_regex(
                r'mediaDefinition\s*:\s*(\[.+?\])', webpage,
                'media definitions', default='{}'),
            video_id, fatal=False)
        if medias and isinstance(medias, list):
            for media in medias:
                format_url = url_or_none(media.get('videoUrl'))
                if not format_url:
                    continue
                format_id = media.get('quality')
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                    'height': int_or_none(format_id),
                })
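        # Last resort: scrape a bare <source> tag straight from the markup.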
        if not formats:
            video_url = self._html_search_regex(
                r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
            formats.append({'url': video_url})
        self._sort_formats(formats)

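        # Page-level metadata: Open Graph thumbnail, 'ADDED' date, duration
        # and view count.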
        thumbnail = self._og_search_thumbnail(webpage)
        upload_date = unified_strdate(self._search_regex(
            r'<span[^>]+>ADDED ([^<]+)<',
            webpage, 'upload date', fatal=False))
        duration = int_or_none(self._og_search_property(
            'video:duration', webpage, default=None) or self._search_regex(
            r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
        view_count = str_to_int(self._search_regex(
            (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)',
             r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)'),
            webpage, 'view count', fatal=False))

        # No self-labeling, but they describe themselves as
        # "Home of Videos Porno"
        age_limit = 18

        return {
            'id': video_id,
            'ext': 'mp4',
            'title': title,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'age_limit': age_limit,
            'formats': formats,
        }