from __future__ import unicode_literals

import os
import re

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse_unquote,
    compat_urllib_parse_unquote_plus,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    ExtractorError,
    sanitized_Request,
    str_to_int,
)
from ..aes import (
    aes_decrypt_text
)


class PornHubIE(InfoExtractor):
    _VALID_URL = r'https?://(?:[a-z]+\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'md5': '882f488fa1f0026f023f33576004a2ed',
        'info_dict': {
            'id': '648719015',
            'ext': 'mp4',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'age_limit': 18,
        },
    }, {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
        'only_matching': True,
    }, {
        'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
        'only_matching': True,
    }]

    @classmethod
    def _extract_url(cls, webpage):
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/\d+)\1', webpage)
        if mobj:
            return mobj.group('url')

    def _extract_count(self, pattern, webpage, name):
        return str_to_int(self._search_regex(
            pattern, webpage, '%s count' % name, fatal=False))

    def _real_extract(self, url):
        video_id = self._match_id(url)

        req = sanitized_Request(
            'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id)
        # Pretend the age gate has already been passed
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

        error_msg = self._html_search_regex(
            r'(?s)<div class="userMessageSection[^"]*".*?>(.*?)</div>',
            webpage, 'error message', default=None)
        if error_msg:
            error_msg = re.sub(r'\s+', ' ', error_msg)
            raise ExtractorError(
                'PornHub said: %s' % error_msg,
                expected=True, video_id=video_id)

        video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
        video_uploader = self._html_search_regex(
            r'(?s)From:&nbsp;.+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)
        thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
        if thumbnail:
            thumbnail = compat_urllib_parse_unquote(thumbnail)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
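
        # The player_quality_<res>p JS variables carry the direct video URLs
        # for each resolution (URL-encoded, and AES-encrypted when the page
        # marks them as such).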
        video_urls = list(map(compat_urllib_parse_unquote, re.findall(r"player_quality_[0-9]{3}p\s*=\s*'([^']+)'", webpage)))
        if webpage.find('"encrypted":true') != -1:
            password = compat_urllib_parse_unquote_plus(
                self._search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
            video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
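
        # Each URL path is expected to embed the quality as '<height>P-<tbr>K'
        # (e.g. 720P-1000K), from which height and average bitrate are parsed.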
        formats = []
        for video_url in video_urls:
            path = compat_urllib_parse_urlparse(video_url).path
            extension = os.path.splitext(path)[1][1:]
            format = path.split('/')[5].split('_')[:2]
            format = '-'.join(format)

            m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
            height = int(m.group('height')) if m else None
            tbr = int(m.group('tbr')) if m else None
            formats.append({
                'url': video_url,
                'ext': extension,
                'format_id': format,
                'height': height,
                'tbr': tbr,
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'uploader': video_uploader,
            'title': video_title,
            'thumbnail': thumbnail,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
        }


class PornHubPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.pornhub.com/playlist/6201671',
        'info_dict': {
            'id': '6201671',
        },
        'playlist_mincount': 35,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        entries = [
            self.url_result('http://www.pornhub.com/%s' % video_url, 'PornHub')
            for video_url in set(re.findall(
                r'href="/?(view_video\.php\?viewkey=\d+[^"]*)"', webpage))
        ]
        playlist = self._parse_json(
            self._search_regex(
                r'playlistObject\s*=\s*({.+?});', webpage, 'playlist'),
            playlist_id)

        return self.playlist_result(
            entries, playlist_id, playlist.get('title'), playlist.get('description'))