# youtube_dl/extractor/pornhub.py
# (commit cb59d526f9be7d7f4f033bed697bae363173ec98)
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import functools
5 import itertools
6 import operator
7 import re
8
9 from .common import InfoExtractor
10 from ..compat import (
11 compat_HTTPError,
12 compat_str,
13 compat_urllib_request,
14 )
15 from .openload import PhantomJSwrapper
16 from ..utils import (
17 determine_ext,
18 ExtractorError,
19 int_or_none,
20 orderedSet,
21 remove_quotes,
22 str_to_int,
23 url_or_none,
24 )
25
26
class PornHubBaseIE(InfoExtractor):
    def _download_webpage_handle(self, *args, **kwargs):
        """Fetch a webpage, transparently defeating PornHub's JS challenge.

        PornHub sometimes serves an interstitial page whose inline
        JavaScript sets an RNKEY cookie and reloads the page.  When such a
        page is detected, it is executed with PhantomJS (which persists
        the cookie into our jar) and the original request is retried once.
        """
        fetch = super(PornHubBaseIE, self)._download_webpage_handle

        webpage, urlh = fetch(*args, **kwargs)

        challenge_markers = (
            r'<body\b[^>]+\bonload=["\']go\(\)',
            r'document\.cookie\s*=\s*["\']RNKEY=',
            r'document\.location\.reload\(true\)',
        )
        if not any(re.search(marker, webpage) for marker in challenge_markers):
            return webpage, urlh

        # Recover a plain URL string (the first positional argument may be
        # a Request object) so PhantomJS can replay the page.
        url_or_request = args[0]
        if isinstance(url_or_request, compat_urllib_request.Request):
            url = url_or_request.get_full_url()
        else:
            url = url_or_request
        PhantomJSwrapper(self, required_version='2.0').get(url, html=webpage)
        return fetch(*args, **kwargs)
47
48
class PornHubIE(PornHubBaseIE):
    """Extract a single video from PornHub (pornhub.com/.net) or Thumbzilla.

    Metadata and format URLs come primarily from the page's ``flashvars``
    JSON; when that yields no media, the stripped-down 'tv' variant of the
    page is fetched and its string-concatenation JavaScript is evaluated
    to recover the media URL.
    """
    IE_DESC = 'PornHub and Thumbzilla'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
                            (?:www\.)?thumbzilla\.com/video/
                        )
                        (?P<id>[\da-z]+)
                    '''
    _TESTS = [{
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '1e19b41231a02eba417839222ac9d58e',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'uploader': 'Babes',
            'upload_date': '20130628',
            'duration': 361,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
    }, {
        # non-ASCII title
        'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
        'info_dict': {
            'id': '1331683002',
            'ext': 'mp4',
            'title': '重庆婷婷女王足交',
            'uploader': 'Unknown',
            'upload_date': '20150213',
            'duration': 1753,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # subtitles
        'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5af5fef7c2aa7',
        'info_dict': {
            'id': 'ph5af5fef7c2aa7',
            'ext': 'mp4',
            'title': 'BFFS - Cute Teen Girls Share Cock On the Floor',
            'uploader': 'BFFs',
            'duration': 622,
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'tags': list,
            'categories': list,
            'subtitles': {
                'en': [{
                    "ext": 'srt'
                }]
            },
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
        'only_matching': True,
    }, {
        # removed at the request of cam4.com
        'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
        'only_matching': True,
    }, {
        # removed at the request of the copyright owner
        'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
        'only_matching': True,
    }, {
        # removed by uploader
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
        'only_matching': True,
    }, {
        # private video
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
        'only_matching': True,
    }, {
        'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.net/view_video.php?viewkey=203640933',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        """Return PornHub embed iframe URLs found in an arbitrary webpage."""
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.(?:com|net)/embed/[\da-z]+)',
            webpage)

    def _extract_count(self, pattern, webpage, name):
        # Counts are rendered with thousands separators ("1,234");
        # str_to_int normalizes them and fatal=False tolerates absence.
        return str_to_int(self._search_regex(
            pattern, webpage, '%s count' % name, fatal=False))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # Thumbzilla URLs match no host group; default to pornhub.com.
        host = mobj.group('host') or 'pornhub.com'
        video_id = mobj.group('id')

        self._set_cookie(host, 'age_verified', '1')

        def dl_webpage(platform):
            # The 'platform' cookie selects the page variant ('pc' or 'tv').
            self._set_cookie(host, 'platform', platform)
            return self._download_webpage(
                'https://www.%s/view_video.php?viewkey=%s' % (host, video_id),
                video_id, 'Downloading %s webpage' % platform)

        webpage = dl_webpage('pc')

        # Surface site-side notices (removed/private video etc.) as
        # expected extractor errors instead of failing later on.
        error_msg = self._html_search_regex(
            r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
            webpage, 'error message', default=None, group='error')
        if error_msg:
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)

        # video_title from flashvars contains whitespace instead of non-ASCII (see
        # http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
        # on that anymore.
        title = self._html_search_meta(
            'twitter:title', webpage, default=None) or self._search_regex(
            (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
             r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
             r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
            webpage, 'title', group='title')

        # (url, height) pairs collected from all sources; the set
        # deduplicates URLs across flashvars, tv page and download links.
        video_urls = []
        video_urls_set = set()
        subtitles = {}

        flashvars = self._parse_json(
            self._search_regex(
                r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
            video_id)
        if flashvars:
            subtitle_url = url_or_none(flashvars.get('closedCaptionsFile'))
            if subtitle_url:
                subtitles.setdefault('en', []).append({
                    'url': subtitle_url,
                    'ext': 'srt',
                })
            thumbnail = flashvars.get('image_url')
            duration = int_or_none(flashvars.get('video_duration'))
            media_definitions = flashvars.get('mediaDefinitions')
            if isinstance(media_definitions, list):
                for definition in media_definitions:
                    if not isinstance(definition, dict):
                        continue
                    video_url = definition.get('videoUrl')
                    if not video_url or not isinstance(video_url, compat_str):
                        continue
                    if video_url in video_urls_set:
                        continue
                    video_urls_set.add(video_url)
                    video_urls.append(
                        (video_url, int_or_none(definition.get('quality'))))
        else:
            thumbnail, duration = [None] * 2

        if not video_urls:
            # Fallback: the 'tv' page hides the media URL behind simple
            # variable-assignment/concatenation JavaScript; evaluate it.
            tv_webpage = dl_webpage('tv')

            assignments = self._search_regex(
                r'(var.+?mediastring.+?)</script>', tv_webpage,
                'encoded url').split(';')

            js_vars = {}

            def parse_js_value(inp):
                # Resolve a JS expression built from quoted literals,
                # previously assigned variables and '+' concatenations.
                inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
                if '+' in inp:
                    inps = inp.split('+')
                    return functools.reduce(
                        operator.concat, map(parse_js_value, inps))
                inp = inp.strip()
                if inp in js_vars:
                    return js_vars[inp]
                return remove_quotes(inp)

            for assn in assignments:
                assn = assn.strip()
                if not assn:
                    continue
                assn = re.sub(r'var\s+', '', assn)
                vname, value = assn.split('=', 1)
                js_vars[vname] = parse_js_value(value)

            video_url = js_vars['mediastring']
            if video_url not in video_urls_set:
                video_urls.append((video_url, None))
                video_urls_set.add(video_url)

        # Download buttons may expose additional direct URLs.
        for mobj in re.finditer(
                r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1',
                webpage):
            video_url = mobj.group('url')
            if video_url not in video_urls_set:
                video_urls.append((video_url, None))
                video_urls_set.add(video_url)

        upload_date = None
        formats = []
        for video_url, height in video_urls:
            # CDN URLs embed the upload date as /YYYYMM/DD/.
            if not upload_date:
                upload_date = self._search_regex(
                    r'/(\d{6}/\d{2})/', video_url, 'upload data', default=None)
                if upload_date:
                    upload_date = upload_date.replace('/', '')
            if determine_ext(video_url) == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    video_url, video_id, mpd_id='dash', fatal=False))
                continue
            tbr = None
            # Progressive URLs often encode height/bitrate, e.g. 720P_1500K.
            mobj = re.search(r'(?P<height>\d+)[pP]?_(?P<tbr>\d+)[kK]', video_url)
            if mobj:
                if not height:
                    height = int(mobj.group('height'))
                tbr = int(mobj.group('tbr'))
            formats.append({
                'url': video_url,
                'format_id': '%dp' % height if height else None,
                'height': height,
                'tbr': tbr,
            })
        self._sort_formats(formats)

        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a\b[^>]+\bhref=["\']/(?:(?:user|channel)s|model|pornstar)/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')

        def extract_list(meta_key):
            # Pull link texts out of the '<meta_key>Wrapper' div; returns
            # None when the section is absent from the page.
            div = self._search_regex(
                r'(?s)<div[^>]+\bclass=["\'].*?\b%sWrapper[^>]*>(.+?)</div>'
                % meta_key, webpage, meta_key, default=None)
            if div:
                return re.findall(r'<a[^>]+\bhref=[^>]+>([^<]+)', div)

        return {
            'id': video_id,
            'uploader': video_uploader,
            'upload_date': upload_date,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
            'tags': extract_list('tags'),
            'categories': extract_list('categories'),
            'subtitles': subtitles,
        }
334
335
class PornHubPlaylistBaseIE(PornHubBaseIE):
    """Shared logic for playlist-style PornHub pages (playlists, user
    video listings)."""

    def _extract_entries(self, webpage, host):
        """Return url_result entries for every video linked from *webpage*.

        Deduplicates via orderedSet while preserving page order.
        """
        # Only process container div with main playlist content skipping
        # drop-down menu that uses similar pattern for videos (see
        # https://github.com/ytdl-org/youtube-dl/issues/11594).
        container = self._search_regex(
            r'(?s)(<div[^>]+class=["\']container.+)', webpage,
            'container', default=webpage)

        return [
            self.url_result(
                'http://www.%s/%s' % (host, video_url),
                PornHubIE.ie_key(), video_title=title)
            for video_url, title in orderedSet(re.findall(
                r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
                container))
        ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        host = mobj.group('host')
        playlist_id = mobj.group('id')

        webpage = self._download_webpage(url, playlist_id)

        entries = self._extract_entries(webpage, host)

        # _parse_json(..., fatal=False) returns None when the matched
        # playlist JSON is malformed; fall back to an empty dict so the
        # .get() calls below cannot raise AttributeError.
        playlist = self._parse_json(
            self._search_regex(
                r'(?:playlistObject|PLAYLIST_VIEW)\s*=\s*({.+?});', webpage,
                'playlist', default='{}'),
            playlist_id, fatal=False) or {}
        title = playlist.get('title') or self._search_regex(
            r'>Videos\s+in\s+(.+?)\s+[Pp]laylist<', webpage, 'title', fatal=False)

        return self.playlist_result(
            entries, playlist_id, title, playlist.get('description'))
373
374
class PornHubPlaylistIE(PornHubPlaylistBaseIE):
    """Extractor for numeric pornhub.com/playlist/<id> pages.

    All extraction logic lives in PornHubPlaylistBaseIE; this class only
    supplies the URL pattern and test cases.
    """
    _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/4667351',
        'info_dict': {
            'id': '4667351',
            'title': 'Nataly Hot',
        },
        'playlist_mincount': 2,
    }, {
        # localized subdomains are accepted as well
        'url': 'https://de.pornhub.com/playlist/4667351',
        'only_matching': True,
    }]
388
389
class PornHubUserVideosIE(PornHubPlaylistBaseIE):
    """Extractor for paginated user/channel/model/pornstar video listings."""
    _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos'
    _TESTS = [{
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'info_dict': {
            'id': 'zoe_ph',
        },
        'playlist_mincount': 171,
    }, {
        'url': 'http://www.pornhub.com/users/rushandlia/videos',
        'only_matching': True,
    }, {
        # default sorting as Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos',
        'info_dict': {
            'id': 'povd',
        },
        'playlist_mincount': 293,
    }, {
        # Top Rated Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=ra',
        'only_matching': True,
    }, {
        # Most Recent Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=da',
        'only_matching': True,
    }, {
        # Most Viewed Videos
        'url': 'https://www.pornhub.com/channels/povd/videos?o=vi',
        'only_matching': True,
    }, {
        'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/model/jayndrea/videos/upload',
        'only_matching': True,
    }, {
        'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos/upload',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Walk the listing page by page until a 404 or an empty page."""
        match = re.match(self._VALID_URL, url)
        host = match.group('host')
        user_id = match.group('id')

        entries = []
        page_num = 0
        while True:
            page_num += 1
            try:
                webpage = self._download_webpage(
                    url, user_id, 'Downloading page %d' % page_num,
                    query={'page': page_num})
            except ExtractorError as e:
                # A 404 marks the end of pagination; any other error is fatal.
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                    break
                raise
            page_entries = self._extract_entries(webpage, host)
            if not page_entries:
                break
            entries.extend(page_entries)

        return self.playlist_result(entries, user_id)