]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/pornhub.py
Start new release.
[youtubedl] / youtube_dl / extractor / pornhub.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import functools
5 import itertools
6 import operator
7 import re
8
9 from .common import InfoExtractor
10 from ..compat import (
11 compat_HTTPError,
12 compat_str,
13 )
14 from ..utils import (
15 ExtractorError,
16 int_or_none,
17 js_to_json,
18 orderedSet,
19 remove_quotes,
20 str_to_int,
21 url_or_none,
22 )
23
24
class PornHubIE(InfoExtractor):
    """Extract a single video from PornHub (pornhub.com / pornhub.net) or Thumbzilla."""
    IE_DESC = 'PornHub and Thumbzilla'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
                            (?:www\.)?thumbzilla\.com/video/
                        )
                        (?P<id>[\da-z]+)
                    '''
    _TESTS = [{
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '1e19b41231a02eba417839222ac9d58e',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'uploader': 'Babes',
            'upload_date': '20130628',
            'duration': 361,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
    }, {
        # non-ASCII title
        'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
        'info_dict': {
            'id': '1331683002',
            'ext': 'mp4',
            'title': '重庆婷婷女王足交',
            'uploader': 'Unknown',
            'upload_date': '20150213',
            'duration': 1753,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # subtitles
        'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5af5fef7c2aa7',
        'info_dict': {
            'id': 'ph5af5fef7c2aa7',
            'ext': 'mp4',
            'title': 'BFFS - Cute Teen Girls Share Cock On the Floor',
            'uploader': 'BFFs',
            'duration': 622,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
            'subtitles': {
                'en': [{
                    "ext": 'srt'
                }]
            },
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
        'only_matching': True,
    }, {
        # removed at the request of cam4.com
        'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
        'only_matching': True,
    }, {
        # removed at the request of the copyright owner
        'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
        'only_matching': True,
    }, {
        # removed by uploader
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
        'only_matching': True,
    }, {
        # private video
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
        'only_matching': True,
    }, {
        'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.net/view_video.php?viewkey=203640933',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Find PornHub embed iframe URLs inside third-party pages (used by
        # the generic extractor to discover embedded videos).
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.(?:com|net)/embed/[\da-z]+)',
            webpage)

    def _extract_count(self, pattern, webpage, name):
        # Extract a human-formatted count such as "1,234" and convert it to
        # an int; returns None if the pattern does not match (fatal=False).
        return str_to_int(self._search_regex(
            pattern, webpage, '%s count' % name, fatal=False))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # Thumbzilla URLs do not capture a host; fall back to pornhub.com.
        host = mobj.group('host') or 'pornhub.com'
        video_id = mobj.group('id')

        # Pretend the age gate has already been passed.
        self._set_cookie(host, 'age_verified', '1')

        def dl_webpage(platform):
            # The site serves different markup depending on the 'platform'
            # cookie (here 'pc' or 'tv'); the TV variant is used as a
            # fallback source of media URLs below.
            self._set_cookie(host, 'platform', platform)
            return self._download_webpage(
                'http://www.%s/view_video.php?viewkey=%s' % (host, video_id),
                video_id, 'Downloading %s webpage' % platform)

        webpage = dl_webpage('pc')

        # Removed/private/copyright-struck videos render an error <div>;
        # surface its text as an expected ExtractorError.
        error_msg = self._html_search_regex(
            r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
            webpage, 'error message', default=None, group='error')
        if error_msg:
            # Collapse whitespace so multi-line HTML reads as one sentence.
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)

        # video_title from flashvars contains whitespace instead of non-ASCII (see
        # http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
        # on that anymore.
        title = self._html_search_meta(
            'twitter:title', webpage, default=None) or self._search_regex(
            (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
             r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
             r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
            webpage, 'title', group='title')

        # (url, height-or-None) pairs gathered from up to three sources;
        # video_urls_set deduplicates across them.
        video_urls = []
        video_urls_set = set()
        subtitles = {}

        # Source 1: the flashvars JS object embedded in the PC page.
        flashvars = self._parse_json(
            self._search_regex(
                r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
            video_id)
        if flashvars:
            # Optional closed captions (SRT).
            subtitle_url = url_or_none(flashvars.get('closedCaptionsFile'))
            if subtitle_url:
                subtitles.setdefault('en', []).append({
                    'url': subtitle_url,
                    'ext': 'srt',
                })
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
            media_definitions = flashvars.get('mediaDefinitions')
            if isinstance(media_definitions, list):
                for definition in media_definitions:
                    # Defensive: skip malformed entries and non-string URLs.
                    if not isinstance(definition, dict):
                        continue
                    video_url = definition.get('videoUrl')
                    if not video_url or not isinstance(video_url, compat_str):
                        continue
                    if video_url in video_urls_set:
                        continue
                    video_urls_set.add(video_url)
                    video_urls.append(
                        (video_url, int_or_none(definition.get('quality'))))
        else:
            thumbnail, duration = [None] * 2

        if not video_urls:
            # Source 2 (fallback): the TV page hides the media URL behind a
            # chain of JS string-concatenation assignments ending in a
            # variable named 'mediastring'; evaluate them with a tiny
            # interpreter instead of executing JS.
            tv_webpage = dl_webpage('tv')

            assignments = self._search_regex(
                r'(var.+?mediastring.+?)</script>', tv_webpage,
                'encoded url').split(';')

            js_vars = {}

            def parse_js_value(inp):
                # Strip /* ... */ comments, recursively resolve '+'
                # concatenations, then substitute known variables or
                # unquote string literals.
                inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
                if '+' in inp:
                    inps = inp.split('+')
                    return functools.reduce(
                        operator.concat, map(parse_js_value, inps))
                inp = inp.strip()
                if inp in js_vars:
                    return js_vars[inp]
                return remove_quotes(inp)

            for assn in assignments:
                assn = assn.strip()
                if not assn:
                    continue
                assn = re.sub(r'var\s+', '', assn)
                vname, value = assn.split('=', 1)
                js_vars[vname] = parse_js_value(value)

            video_url = js_vars['mediastring']
            if video_url not in video_urls_set:
                video_urls.append((video_url, None))
                video_urls_set.add(video_url)

        # Source 3: explicit download-button links on the PC page.
        for mobj in re.finditer(
                r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1',
                webpage):
            video_url = mobj.group('url')
            if video_url not in video_urls_set:
                video_urls.append((video_url, None))
                video_urls_set.add(video_url)

        upload_date = None
        formats = []
        for video_url, height in video_urls:
            # Media URLs appear to embed the upload date as /YYYYMM/DD/ —
            # take it from the first URL that matches.
            if not upload_date:
                upload_date = self._search_regex(
                    r'/(\d{6}/\d{2})/', video_url, 'upload data', default=None)
                if upload_date:
                    upload_date = upload_date.replace('/', '')
            tbr = None
            # URLs like ..._720P_1500K... encode height and total bitrate.
            mobj = re.search(r'(?P<height>\d+)[pP]?_(?P<tbr>\d+)[kK]', video_url)
            if mobj:
                if not height:
                    height = int(mobj.group('height'))
                tbr = int(mobj.group('tbr'))
            formats.append({
                'url': video_url,
                'format_id': '%dp' % height if height else None,
                'height': height,
                'tbr': tbr,
            })
        self._sort_formats(formats)

        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a\b[^>]+\bhref=["\']/(?:(?:user|channel)s|model|pornstar)/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')

        # Tags and categories live in an inline JS object keyed by zone id;
        # js_to_json makes it parseable.
        page_params = self._parse_json(self._search_regex(
            r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})',
            webpage, 'page parameters', group='data', default='{}'),
            video_id, transform_source=js_to_json, fatal=False)
        tags = categories = None
        if page_params:
            tags = page_params.get('tags', '').split(',')
            categories = page_params.get('categories', '').split(',')

        return {
            'id': video_id,
            'uploader': video_uploader,
            'upload_date': upload_date,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
            'tags': tags,
            'categories': categories,
            'subtitles': subtitles,
        }
308
309
class PornHubPlaylistBaseIE(InfoExtractor):
    """Shared logic for PornHub playlist-like pages (playlists, user video lists)."""

    def _extract_entries(self, webpage, host):
        """Return a url_result entry for every video linked from *webpage*."""
        # Only process container div with main playlist content skipping
        # drop-down menu that uses similar pattern for videos (see
        # https://github.com/rg3/youtube-dl/issues/11594).
        container = self._search_regex(
            r'(?s)(<div[^>]+class=["\']container.+)', webpage,
            'container', default=webpage)

        links = re.findall(
            r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
            container)

        entries = []
        # orderedSet drops duplicate links while keeping page order.
        for video_url, title in orderedSet(links):
            entries.append(self.url_result(
                'http://www.%s/%s' % (host, video_url),
                PornHubIE.ie_key(), video_title=title))
        return entries

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        host, playlist_id = mobj.group('host'), mobj.group('id')

        webpage = self._download_webpage(url, playlist_id)
        entries = self._extract_entries(webpage, host)

        # Playlist metadata lives in an inline JS object; fall back to the
        # page heading for the title if it is missing.
        playlist = self._parse_json(
            self._search_regex(
                r'(?:playlistObject|PLAYLIST_VIEW)\s*=\s*({.+?});', webpage,
                'playlist', default='{}'),
            playlist_id, fatal=False)
        title = playlist.get('title') or self._search_regex(
            r'>Videos\s+in\s+(.+?)\s+[Pp]laylist<', webpage, 'title', fatal=False)

        return self.playlist_result(
            entries, playlist_id, title, playlist.get('description'))
347
348
class PornHubPlaylistIE(PornHubPlaylistBaseIE):
    # Numeric playlist pages, e.g. http://www.pornhub.com/playlist/4667351;
    # all extraction logic is inherited from PornHubPlaylistBaseIE.
    _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/4667351',
        'info_dict': {
            'id': '4667351',
            'title': 'Nataly Hot',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'https://de.pornhub.com/playlist/4667351',
        'only_matching': True,
    }]
362
363
class PornHubUserVideosIE(PornHubPlaylistBaseIE):
    """Paged video listings for a user, channel, model or pornstar."""
    _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos'
    _TESTS = [{
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'info_dict': {
            'id': 'zoe_ph',
        },
        'playlist_mincount': 171,
    }, {
        'url': 'http://www.pornhub.com/users/rushandlia/videos',
        'only_matching': True,
    }, {
        # default sorting as Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos',
        'info_dict': {
            'id': 'povd',
        },
        'playlist_mincount': 293,
    }, {
        # Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=ra',
        'only_matching': True,
    }, {
        # Most Recent Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=da',
        'only_matching': True,
    }, {
        # Most Viewed Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=vi',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/model/jayndrea/videos/upload',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos/upload',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        host = mobj.group('host')
        user_id = mobj.group('id')

        videos = []
        page = 1
        # Walk the ?page=N listing until a 404 or an empty page.
        while True:
            try:
                webpage = self._download_webpage(
                    url, user_id, 'Downloading page %d' % page,
                    query={'page': page})
            except ExtractorError as e:
                # A 404 marks the end of pagination; anything else is fatal.
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                    break
                raise
            found = self._extract_entries(webpage, host)
            if not found:
                break
            videos.extend(found)
            page += 1

        return self.playlist_result(videos, user_id)