youtube_dl/extractor/pornhub.py
# coding: utf-8
from __future__ import unicode_literals

import itertools
import os
import re

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_urllib_parse_unquote,
    compat_urllib_parse_unquote_plus,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    orderedSet,
    sanitized_Request,
    str_to_int,
)
from ..aes import (
    aes_decrypt_text
)


class PornHubIE(InfoExtractor):
    _VALID_URL = r'https?://(?:[a-z]+\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '1e19b41231a02eba417839222ac9d58e',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'uploader': 'Babes',
            'duration': 361,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
        },
    }, {
        # non-ASCII title
        'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
        'info_dict': {
            'id': '1331683002',
            'ext': 'mp4',
            'title': '重庆婷婷女王足交',
            'uploader': 'cj397186295',
            'duration': 1753,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
        'only_matching': True,
    }, {
        'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
        'only_matching': True,
    }]

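    # Used by other extractors to detect an embedded PornHub player in an
    # arbitrary webpage; returns the embed URL, or None if no embed is found.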
    @classmethod
    def _extract_url(cls, webpage):
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/\d+)\1', webpage)
        if mobj:
            return mobj.group('url')

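    # Shared helper for the counter fields below; fatal=False makes a missing
    # counter yield None rather than aborting the extraction.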
    def _extract_count(self, pattern, webpage, name):
        return str_to_int(self._search_regex(
            pattern, webpage, '%s count' % name, fatal=False))

    def _real_extract(self, url):
        video_id = self._match_id(url)

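        # Fetch the watch page with the age gate pre-confirmed via cookie.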
        req = sanitized_Request(
            'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

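        # If the page carries a user-facing error message, report it verbatim
        # instead of failing later with a generic extraction error.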
        error_msg = self._html_search_regex(
            r'(?s)<div class="userMessageSection[^"]*".*?>(.*?)</div>',
            webpage, 'error message', default=None)
        if error_msg:
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)

        # video_title from flashvars contains whitespace instead of non-ASCII
        # characters (see http://www.pornhub.com/view_video.php?viewkey=1331683002),
        # so it is no longer relied upon for the title.
        title = self._html_search_meta(
            'twitter:title', webpage, default=None) or self._search_regex(
            (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
             r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
             r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
            webpage, 'title', group='title')

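        # Thumbnail and duration come from the inline flashvars_<id> JSON
        # object when it is present.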
        flashvars = self._parse_json(
            self._search_regex(
                r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
            video_id)
        if flashvars:
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
        else:
            thumbnail, duration = [None] * 2

        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')

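        # Direct video URLs are exposed as player_quality_###p JavaScript
        # variables; when the page marks them as encrypted, they are
        # AES-encrypted with the URL-decoded video title as the password.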
        video_urls = list(map(compat_urllib_parse_unquote, re.findall(r"player_quality_[0-9]{3}p\s*=\s*'([^']+)'", webpage)))
        if webpage.find('"encrypted":true') != -1:
            password = compat_urllib_parse_unquote_plus(
                self._search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
            video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))

        formats = []
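        # Each URL path embeds a quality label (height and nominal bitrate,
        # e.g. '480P-600K'); parse it into format metadata where possible.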
        for video_url in video_urls:
            path = compat_urllib_parse_urlparse(video_url).path
            extension = os.path.splitext(path)[1][1:]
            format = path.split('/')[5].split('_')[:2]
            format = '-'.join(format)

            m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
            if m is None:
                height = None
                tbr = None
            else:
                height = int(m.group('height'))
                tbr = int(m.group('tbr'))

            formats.append({
                'url': video_url,
                'ext': extension,
                'format': format,
                'format_id': format,
                'tbr': tbr,
                'height': height,
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'uploader': video_uploader,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
        }


class PornHubPlaylistBaseIE(InfoExtractor):
    def _extract_entries(self, webpage):
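        # Collect every unique view_video.php?viewkey=... link on the page,
        # together with its title attribute, and delegate each to PornHubIE.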
        return [
            self.url_result(
                'http://www.pornhub.com/%s' % video_url,
                PornHubIE.ie_key(), video_title=title)
            for video_url, title in orderedSet(re.findall(
                r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
                webpage))
        ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        entries = self._extract_entries(webpage)

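        # Playlist title and description live in the inline playlistObject
        # JavaScript variable.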
        playlist = self._parse_json(
            self._search_regex(
                r'playlistObject\s*=\s*({.+?});', webpage, 'playlist'),
            playlist_id)

        return self.playlist_result(
            entries, playlist_id, playlist.get('title'), playlist.get('description'))


class PornHubPlaylistIE(PornHubPlaylistBaseIE):
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/6201671',
        'info_dict': {
            'id': '6201671',
            'title': 'P0p4',
        },
        'playlist_mincount': 35,
    }]


class PornHubUserVideosIE(PornHubPlaylistBaseIE):
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/users/(?P<id>[^/]+)/videos'
    _TESTS = [{
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'info_dict': {
            'id': 'zoe_ph',
        },
        'playlist_mincount': 171,
    }, {
        'url': 'http://www.pornhub.com/users/rushandlia/videos',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        user_id = self._match_id(url)

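        # The video listing is paginated via a ?page=N query parameter; keep
        # requesting pages until a 404 or a page with no entries.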
        entries = []
        for page_num in itertools.count(1):
            try:
                webpage = self._download_webpage(
                    url, user_id, 'Downloading page %d' % page_num,
                    query={'page': page_num})
            except ExtractorError as e:
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                    break
                raise
            page_entries = self._extract_entries(webpage)
            if not page_entries:
                break
            entries.extend(page_entries)

        return self.playlist_result(entries, user_id)