<a[^>]+\bhref=[^>]+>([^<]+)', div)
return {
'id': video_id,
- 'url': video_url,
'uploader': video_uploader,
+ 'upload_date': upload_date,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
@@ -218,25 +325,26 @@ class PornHubIE(InfoExtractor):
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
- # 'formats': formats,
+ 'formats': formats,
'age_limit': 18,
- 'tags': tags,
- 'categories': categories,
+ 'tags': extract_list('tags'),
+ 'categories': extract_list('categories'),
+ 'subtitles': subtitles,
}
-class PornHubPlaylistBaseIE(InfoExtractor):
- def _extract_entries(self, webpage):
+class PornHubPlaylistBaseIE(PornHubBaseIE):
+ def _extract_entries(self, webpage, host):
# Only process container div with main playlist content skipping
# drop-down menu that uses similar pattern for videos (see
- # https://github.com/rg3/youtube-dl/issues/11594).
+ # https://github.com/ytdl-org/youtube-dl/issues/11594).
container = self._search_regex(
r'(?s)(<div[^>]+class=["\']container.+)', webpage,
'container', default=webpage)
return [
self.url_result(
- 'http://www.pornhub.com/%s' % video_url,
+ 'http://www.%s/%s' % (host, video_url),
PornHubIE.ie_key(), video_title=title)
for video_url, title in orderedSet(re.findall(
r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
@@ -244,11 +352,13 @@ class PornHubPlaylistBaseIE(InfoExtractor):
]
def _real_extract(self, url):
- playlist_id = self._match_id(url)
+ mobj = re.match(self._VALID_URL, url)
+ host = mobj.group('host')
+ playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
- entries = self._extract_entries(webpage)
+ entries = self._extract_entries(webpage, host)
playlist = self._parse_json(
self._search_regex(
@@ -263,7 +373,7 @@ class PornHubPlaylistBaseIE(InfoExtractor):
class PornHubPlaylistIE(PornHubPlaylistBaseIE):
- _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
+ _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/playlist/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.pornhub.com/playlist/4667351',
'info_dict': {
@@ -271,11 +381,14 @@ class PornHubPlaylistIE(PornHubPlaylistBaseIE):
'title': 'Nataly Hot',
},
'playlist_mincount': 2,
+ }, {
+ 'url': 'https://de.pornhub.com/playlist/4667351',
+ 'only_matching': True,
}]
class PornHubUserVideosIE(PornHubPlaylistBaseIE):
- _VALID_URL = r'https?://(?:www\.)?pornhub\.com/users/(?P<id>[^/]+)/videos'
+ _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos'
_TESTS = [{
'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
'info_dict': {
@@ -285,10 +398,40 @@ class PornHubUserVideosIE(PornHubPlaylistBaseIE):
}, {
'url': 'http://www.pornhub.com/users/rushandlia/videos',
'only_matching': True,
+ }, {
+ # default sorting as Top Rated Videos
+ 'url': 'https://www.pornhub.com/channels/povd/videos',
+ 'info_dict': {
+ 'id': 'povd',
+ },
+ 'playlist_mincount': 293,
+ }, {
+ # Top Rated Videos
+ 'url': 'https://www.pornhub.com/channels/povd/videos?o=ra',
+ 'only_matching': True,
+ }, {
+ # Most Recent Videos
+ 'url': 'https://www.pornhub.com/channels/povd/videos?o=da',
+ 'only_matching': True,
+ }, {
+ # Most Viewed Videos
+ 'url': 'https://www.pornhub.com/channels/povd/videos?o=vi',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.pornhub.com/model/jayndrea/videos/upload',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos/upload',
+ 'only_matching': True,
}]
def _real_extract(self, url):
- user_id = self._match_id(url)
+ mobj = re.match(self._VALID_URL, url)
+ host = mobj.group('host')
+ user_id = mobj.group('id')
entries = []
for page_num in itertools.count(1):
@@ -300,7 +443,7 @@ class PornHubUserVideosIE(PornHubPlaylistBaseIE):
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
break
raise
- page_entries = self._extract_entries(webpage)
+ page_entries = self._extract_entries(webpage, host)
if not page_entries:
break
entries.extend(page_entries)