_VALID_URL = r'''(?x)
https?://
(?:
- (?:[^/]+\.)?pornhub\.com/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
+ (?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
(?:www\.)?thumbzilla\.com/video/
)
(?P<id>[\da-z]+)
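+ # Only the pornhub branch captures (?P<host>...); pornhub.net appears to
+ # serve the same videos, hence the shared pattern (assumption based on
+ # the only_matching test below).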
'ext': 'mp4',
'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
'uploader': 'Babes',
+ 'upload_date': '20130628',
'duration': 361,
'view_count': int,
'like_count': int,
'ext': 'mp4',
'title': '重庆婷婷女王足交',
'uploader': 'Unknown',
+ 'upload_date': '20150213',
'duration': 1753,
'view_count': int,
'like_count': int,
}, {
'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
'only_matching': True,
+ }, {
+ 'url': 'https://www.pornhub.net/view_video.php?viewkey=203640933',
+ 'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
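+ # Used by the generic extractor to discover embedded players; the
+ # pattern below now also matches pornhub.net embeds.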
return re.findall(
- r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/[\da-z]+)',
+ r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.(?:com|net)/embed/[\da-z]+)',
webpage)
def _extract_count(self, pattern, webpage, name):
return str_to_int(self._search_regex(
pattern, webpage, '%s count' % name, fatal=False))
def _real_extract(self, url):
- video_id = self._match_id(url)
+ mobj = re.match(self._VALID_URL, url)
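+ # thumbzilla URLs never capture the host group, so fall back to the
+ # canonical pornhub.com host for cookies and page requests.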
+ host = mobj.group('host') or 'pornhub.com'
+ video_id = mobj.group('id')
- self._set_cookie('pornhub.com', 'age_verified', '1')
+ self._set_cookie(host, 'age_verified', '1')
def dl_webpage(platform):
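+ # Re-fetch the page with a platform cookie (e.g. 'pc') so the site
+ # serves that variant's player markup and video URLs.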
- self._set_cookie('pornhub.com', 'platform', platform)
+ self._set_cookie(host, 'platform', platform)
return self._download_webpage(
- 'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id,
+ 'http://www.%s/view_video.php?viewkey=%s' % (host, video_id),
video_id, 'Downloading %s webpage' % platform)
webpage = dl_webpage('pc')
video_urls.append((video_url, None))
video_urls_set.add(video_url)
+ upload_date = None
formats = []
for video_url, height in video_urls:
+ if not upload_date:
+ upload_date = self._search_regex(
+ r'/(\d{6}/\d{2})/', video_url, 'upload date', default=None)
+ if upload_date:
+ upload_date = upload_date.replace('/', '')
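+ # e.g. a video URL containing '/201306/28/' yields upload_date '20130628'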
tbr = None
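+ # Quality tokens like '720P_4000K' in the URL encode the height and
+ # total bitrate; parse them when present.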
mobj = re.search(r'(?P<height>\d+)[pP]?_(?P<tbr>\d+)[kK]', video_url)
if mobj:
return {
'id': video_id,
'uploader': video_uploader,
+ 'upload_date': upload_date,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
class PornHubPlaylistBaseIE(InfoExtractor):
- def _extract_entries(self, webpage):
+ def _extract_entries(self, webpage, host):
# Only process the container div with the main playlist content,
# skipping the drop-down menu that uses a similar pattern for videos
# (see https://github.com/rg3/youtube-dl/issues/11594).
container = self._search_regex(
r'(?s)(<div[^>]+class=["\']container.+)', webpage,
'container', default=webpage)
return [
self.url_result(
- 'http://www.pornhub.com/%s' % video_url,
+ 'http://www.%s/%s' % (host, video_url),
PornHubIE.ie_key(), video_title=title)
for video_url, title in orderedSet(re.findall(
r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
]
def _real_extract(self, url):
- playlist_id = self._match_id(url)
+ mobj = re.match(self._VALID_URL, url)
+ host = mobj.group('host')
+ playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
- entries = self._extract_entries(webpage)
+ entries = self._extract_entries(webpage, host)
playlist = self._parse_json(
self._search_regex(
class PornHubPlaylistIE(PornHubPlaylistBaseIE):
- _VALID_URL = r'https?://(?:[^/]+\.)?pornhub\.com/playlist/(?P<id>\d+)'
+ _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/playlist/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.pornhub.com/playlist/4667351',
'info_dict': {
class PornHubUserVideosIE(PornHubPlaylistBaseIE):
- _VALID_URL = r'https?://(?:[^/]+\.)?pornhub\.com/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos'
+ _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos'
_TESTS = [{
'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
'info_dict': {
}]
def _real_extract(self, url):
- user_id = self._match_id(url)
+ mobj = re.match(self._VALID_URL, url)
+ host = mobj.group('host')
+ user_id = mobj.group('id')
entries = []
for page_num in itertools.count(1):
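+ # Paginate until the site returns 404 or a page yields no entries.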
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
break
raise
- page_entries = self._extract_entries(webpage)
+ page_entries = self._extract_entries(webpage, host)
if not page_entries:
break
entries.extend(page_entries)