youtube_dl/extractor/xhamster.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    unified_strdate,
    str_to_int,
    int_or_none,
    parse_duration,
)


class XHamsterIE(InfoExtractor):
    """Information Extractor for xHamster"""
    _VALID_URL = r'http://(?:.+?\.)?xhamster\.com/movies/(?P<id>[0-9]+)/(?P<seo>.+?)\.html(?:\?.*)?'
    _TESTS = [
        {
            'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
            'info_dict': {
                'id': '1509445',
                'ext': 'mp4',
                'title': 'FemaleAgent Shy beauty takes the bait',
                'upload_date': '20121014',
                'uploader_id': 'Ruseful2011',
                'duration': 893,
                'age_limit': 18,
            }
        },
        {
            'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
            'info_dict': {
                'id': '2221348',
                'ext': 'mp4',
                'title': 'Britney Spears Sexy Booty',
                'upload_date': '20130914',
                'uploader_id': 'jojo747400',
                'duration': 200,
                'age_limit': 18,
            }
        }
    ]

    def _real_extract(self, url):
        def extract_video_url(webpage):
            mp4 = re.search(r'<video\s+.*?file="([^"]+)".*?>', webpage)
            if mp4 is None:
                raise ExtractorError('Unable to extract media URL')
            else:
                return mp4.group(1)

        def is_hd(webpage):
            return '<div class=\'icon iconHD\'' in webpage

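        # Rebuild the canonical movie URL from the id and SEO slug, then fetch the page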
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        seo = mobj.group('seo')
        mrss_url = 'http://xhamster.com/movies/%s/%s.html' % (video_id, seo)
        webpage = self._download_webpage(mrss_url, video_id)

        title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>', webpage, 'title')

        # Only a few videos have a description
        mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
        description = mobj.group(1) if mobj else None

        upload_date = self._html_search_regex(r'hint=\'(\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}\'',
            webpage, 'upload date', fatal=False)
        if upload_date:
            upload_date = unified_strdate(upload_date)

        uploader_id = self._html_search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^<]+)',
            webpage, 'uploader id', default='anonymous')

        thumbnail = self._html_search_regex(r'<video\s+.*?poster="([^"]+)".*?>', webpage, 'thumbnail', fatal=False)

        duration = parse_duration(self._html_search_regex(r'<span>Runtime:</span> (\d+:\d+)</div>',
            webpage, 'duration', fatal=False))

        view_count = self._html_search_regex(r'<span>Views:</span> ([^<]+)</div>', webpage, 'view count', fatal=False)
        if view_count:
            view_count = str_to_int(view_count)

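        # Like and dislike counts are exposed together in the rating hint tooltip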
        mobj = re.search(r"hint='(?P<likecount>\d+) Likes / (?P<dislikecount>\d+) Dislikes'", webpage)
        (like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None)

        mobj = re.search(r'</label>Comments \((?P<commentcount>\d+)\)</div>', webpage)
        comment_count = mobj.group('commentcount') if mobj else 0

        age_limit = self._rta_search(webpage)

        hd = is_hd(webpage)

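        # The page exposes a single direct video URL; label it 'hd' or 'sd' based on the HD icon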
        video_url = extract_video_url(webpage)
        formats = [{
            'url': video_url,
            'format_id': 'hd' if hd else 'sd',
            'preference': 1,
        }]

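        # If this page was not the HD variant, retry the canonical URL with ?hd and add the HD format when present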
        if not hd:
            mrss_url = self._search_regex(r'<link rel="canonical" href="([^"]+)', webpage, 'mrss_url')
            webpage = self._download_webpage(mrss_url + '?hd', video_id, note='Downloading HD webpage')
            if is_hd(webpage):
                video_url = extract_video_url(webpage)
                formats.append({
                    'url': video_url,
                    'format_id': 'hd',
                    'preference': 2,
                })

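        # Order the collected formats with the standard youtube-dl sorting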
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'upload_date': upload_date,
            'uploader_id': uploader_id,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': int_or_none(like_count),
            'dislike_count': int_or_none(dislike_count),
            'comment_count': int_or_none(comment_count),
            'age_limit': age_limit,
            'formats': formats,
        }