+ return self._extract_videos(name, 'https://vimeo.com/groups/%s' % name)
+
+
+class VimeoReviewIE(VimeoBaseInfoExtractor):
+ IE_NAME = 'vimeo:review'
+ IE_DESC = 'Review pages on vimeo'
+ _VALID_URL = r'(?P<url>https://vimeo\.com/[^/]+/review/(?P<id>[^/]+)/[0-9a-f]{10})'
+ _TESTS = [{
+ 'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
+ 'md5': 'c507a72f780cacc12b2248bb4006d253',
+ 'info_dict': {
+ 'id': '75524534',
+ 'ext': 'mp4',
+ 'title': "DICK HARDWICK 'Comedian'",
+ 'uploader': 'Richard Hardwick',
+ 'uploader_id': 'user21297594',
+ }
+ }, {
+ 'note': 'video player needs Referer',
+ 'url': 'https://vimeo.com/user22258446/review/91613211/13f927e053',
+ 'md5': '6295fdab8f4bf6a002d058b2c6dce276',
+ 'info_dict': {
+ 'id': '91613211',
+ 'ext': 'mp4',
+ 'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
+ 'uploader': 'DevWeek Events',
+ 'duration': 2773,
+ 'thumbnail': r're:^https?://.*\.jpg$',
+ 'uploader_id': 'user22258446',
+ }
+ }, {
+ 'note': 'Password protected',
+ 'url': 'https://vimeo.com/user37284429/review/138823582/c4d865efde',
+ 'info_dict': {
+ 'id': '138823582',
+ 'ext': 'mp4',
+ 'title': 'EFFICIENT PICKUP MASTERCLASS MODULE 1',
+ 'uploader': 'TMB',
+ 'uploader_id': 'user37284429',
+ },
+ 'params': {
+ 'videopassword': 'holygrail',
+ },
+ 'skip': 'video gone',
+ }]
+
+ def _real_initialize(self):
+ self._login()
+
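+ # Locate the player config URL for a review page. It is normally embedded in
+ # a data-config-url attribute; some pages instead expose it through the
+ # "vimeo_esi" blob passed to window _extend(). If neither is found, the clip
+ # is assumed to be password protected, so the password is verified and the
+ # lookup retried once (video_password_verified prevents an endless retry loop).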
+ def _get_config_url(self, webpage_url, video_id, video_password_verified=False):
+ webpage = self._download_webpage(webpage_url, video_id)
+ config_url = self._html_search_regex(
+ r'data-config-url=(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
+ 'config URL', default=None, group='url')
+ if not config_url:
+ data = self._parse_json(self._search_regex(
+ r'window\s*=\s*_extend\(window,\s*({.+?})\);', webpage, 'data',
+ default=NO_DEFAULT if video_password_verified else '{}'), video_id)
+ config = data.get('vimeo_esi', {}).get('config', {})
+ config_url = config.get('configUrl') or try_get(config, lambda x: x['clipData']['configUrl'])
+ if config_url is None:
+ self._verify_video_password(webpage_url, video_id, webpage)
+ config_url = self._get_config_url(
+ webpage_url, video_id, video_password_verified=True)
+ return config_url
+
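+ # Fetch and parse the player config, then append the original/source file
+ # (when Vimeo exposes one) as an extra format before sorting.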
+ def _real_extract(self, url):
+ page_url, video_id = re.match(self._VALID_URL, url).groups()
+ config_url = self._get_config_url(url, video_id)
+ config = self._download_json(config_url, video_id)
+ info_dict = self._parse_config(config, video_id)
+ source_format = self._extract_original_format(page_url, video_id)
+ if source_format:
+ info_dict['formats'].append(source_format)
+ self._vimeo_sort_formats(info_dict['formats'])
+ info_dict['id'] = video_id
+ return info_dict
+
+
+class VimeoWatchLaterIE(VimeoChannelIE):
+ IE_NAME = 'vimeo:watchlater'
+ IE_DESC = 'Vimeo watch later list, ":vimeowatchlater" keyword (requires authentication)'
+ _VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
+ _TITLE = 'Watch Later'
+ _LOGIN_REQUIRED = True
+ _TESTS = [{
+ 'url': 'https://vimeo.com/watchlater',
+ 'only_matching': True,
+ }]
+
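+ # The watch-later list is tied to the logged-in account, hence
+ # _LOGIN_REQUIRED above; authentication happens before extraction starts.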
+ def _real_initialize(self):
+ self._login()
+
+ def _page_url(self, base_url, pagenum):
+ url = '%s/page:%d/' % (base_url, pagenum)
+ request = sanitized_Request(url)
+ # Set the header to get a partial HTML page with the video ids;
+ # the normal page doesn't contain them.
+ request.add_header('X-Requested-With', 'XMLHttpRequest')
+ return request
+
+ def _real_extract(self, url):
+ return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
+
+
+class VimeoLikesIE(InfoExtractor):
+ _VALID_URL = r'https://(?:www\.)?vimeo\.com/(?P<id>[^/]+)/likes/?(?:$|[?#]|sort:)'
+ IE_NAME = 'vimeo:likes'
+ IE_DESC = 'Vimeo user likes'
+ _TESTS = [{
+ 'url': 'https://vimeo.com/user755559/likes/',
+ 'playlist_mincount': 293,
+ 'info_dict': {
+ 'id': 'user755559_likes',
+ 'description': 'See all the videos urza likes',
+ 'title': 'Videos urza likes',
+ },
+ }, {
+ 'url': 'https://vimeo.com/stormlapse/likes',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ user_id = self._match_id(url)
+ webpage = self._download_webpage(url, user_id)
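+ # Scrape the number of result pages from the pagination widget; when no
+ # pagination markup is present the profile has a single page of likes.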
+ page_count = self._int(
+ self._search_regex(
+ r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
+ .*?</a></li>\s*<li\s+class="pagination_next">
+ ''', webpage, 'page count', default=1),
+ 'page count', fatal=True)
+ PAGE_SIZE = 12
+ title = self._html_search_regex(
+ r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
+ description = self._html_search_meta('description', webpage)
+
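+ # Pages are fetched lazily; each one appears to list PAGE_SIZE (12) entries,
+ # and every <li> link is yielded as a plain URL result for the matching
+ # Vimeo extractor to resolve.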
+ def _get_page(idx):
+ page_url = 'https://vimeo.com/%s/likes/page:%d/sort:date' % (
+ user_id, idx + 1)
+ webpage = self._download_webpage(
+ page_url, user_id,
+ note='Downloading page %d/%d' % (idx + 1, page_count))
+ video_list = self._search_regex(
+ r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
+ webpage, 'video content')
+ paths = re.findall(
+ r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
+ for path in paths:
+ yield {
+ '_type': 'url',
+ 'url': compat_urlparse.urljoin(page_url, path),
+ }
+
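+ # InAdvancePagedList knows the page count up front, so playlist slicing only
+ # downloads the pages that are actually requested.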
+ pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
+
+ return {
+ '_type': 'playlist',
+ 'id': '%s_likes' % user_id,
+ 'title': title,
+ 'description': description,
+ 'entries': pl,
+ }
+
+
+class VHXEmbedIE(InfoExtractor):
+ IE_NAME = 'vhx:embed'
+ _VALID_URL = r'https?://embed\.vhx\.tv/videos/(?P<id>\d+)'
+
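+ # Minimal wrapper around the VHX API; every request carries the bearer token
+ # scraped from the embed page.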
+ def _call_api(self, video_id, access_token, path='', query=None):
+ return self._download_json(
+ 'https://api.vhx.tv/videos/' + video_id + path, video_id, headers={
+ 'Authorization': 'Bearer ' + access_token,
+ }, query=query)
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+ credentials = self._parse_json(self._search_regex(
+ r'(?s)credentials\s*:\s*({.+?}),', webpage,
+ 'config'), video_id, js_to_json)
+ access_token = credentials['access_token']
+
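+ # Forward only the relevant credential fields to the files endpoint:
+ # 'authUserToken' is sent as 'auth_user_token', and empty or literal
+ # 'undefined' values are skipped.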
+ query = {}
+ for k, v in credentials.items():
+ if k in ('authorization', 'authUserToken', 'ticket') and v and v != 'undefined':
+ if k == 'authUserToken':
+ query['auth_user_token'] = v
+ else:
+ query[k] = v
+ files = self._call_api(video_id, access_token, '/files', query)
+
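+ # Each file entry advertises its delivery method: HLS and DASH manifests are
+ # expanded into individual formats, while anything else is treated as a
+ # progressive HTTP download and given a small preference boost.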
+ formats = []
+ for f in files:
+ href = try_get(f, lambda x: x['_links']['source']['href'])
+ if not href:
+ continue
+ method = f.get('method')
+ if method == 'hls':
+ formats.extend(self._extract_m3u8_formats(
+ href, video_id, 'mp4', 'm3u8_native',
+ m3u8_id='hls', fatal=False))
+ elif method == 'dash':
+ formats.extend(self._extract_mpd_formats(
+ href, video_id, mpd_id='dash', fatal=False))
+ else:
+ fmt = {
+ 'filesize': int_or_none(try_get(f, lambda x: x['size']['bytes'])),
+ 'format_id': 'http',
+ 'preference': 1,
+ 'url': href,
+ 'vcodec': f.get('codec'),
+ }
+ quality = f.get('quality')
+ if quality:
+ fmt.update({
+ 'format_id': 'http-' + quality,
+ 'height': int_or_none(self._search_regex(r'(\d+)p', quality, 'height', default=None)),
+ })
+ formats.append(fmt)
+ self._sort_formats(formats)
+
+ video_data = self._call_api(video_id, access_token)
+ title = video_data.get('title') or video_data['name']
+
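+ # Collect every link variant of each subtitle track under its language code,
+ # falling back to the human-readable label when no srclang is set.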
+ subtitles = {}
+ for subtitle in try_get(video_data, lambda x: x['tracks']['subtitles'], list) or []:
+ lang = subtitle.get('srclang') or subtitle.get('label')
+ for _link in subtitle.get('_links', {}).values():
+ href = _link.get('href')
+ if not href:
+ continue
+ subtitles.setdefault(lang, []).append({
+ 'url': href,
+ })
+
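+ # Thumbnails are keyed by size name; rank them so 'source' is preferred over
+ # 'large', 'medium' and 'small'.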
+ q = qualities(['small', 'medium', 'large', 'source'])
+ thumbnails = []
+ for thumbnail_id, thumbnail_url in video_data.get('thumbnail', {}).items():
+ thumbnails.append({
+ 'id': thumbnail_id,
+ 'url': thumbnail_url,
+ 'preference': q(thumbnail_id),
+ })
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': video_data.get('description'),
+ 'duration': int_or_none(try_get(video_data, lambda x: x['duration']['seconds'])),
+ 'formats': formats,
+ 'subtitles': subtitles,
+ 'thumbnails': thumbnails,
+ 'timestamp': unified_timestamp(video_data.get('created_at')),
+ 'view_count': int_or_none(video_data.get('plays_count')),
+ }