+            'upload_date': video_upload_date,
+            'title': video_title,
+            'thumbnail': video_thumbnail,
+            'description': video_description,
+            'duration': video_duration,
+            'formats': formats,
+            'webpage_url': url,
+            'view_count': view_count,
+            'like_count': like_count,
+            'comment_count': comment_count,
+            'subtitles': subtitles,
+        }
+
+
+class VimeoChannelIE(VimeoBaseInfoExtractor):
+    IE_NAME = 'vimeo:channel'
+    _VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
+    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
+    _TITLE = None
+    _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
+    _TESTS = [{
+        'url': 'https://vimeo.com/channels/tributes',
+        'info_dict': {
+            'id': 'tributes',
+            'title': 'Vimeo Tributes',
+        },
+        'playlist_mincount': 25,
+    }]
+
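+    # Subclasses reuse _extract_videos() and override _page_url(), _TITLE or
+    # _TITLE_RE to adapt the shared pagination logic to their own list pages.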
+    def _page_url(self, base_url, pagenum):
+        return '%s/videos/page:%d/' % (base_url, pagenum)
+
+    def _extract_list_title(self, webpage):
+        return self._TITLE or self._html_search_regex(self._TITLE_RE, webpage, 'list title')
+
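+    # Channel and album listings can be password-protected: if the page embeds a
+    # password form, submit the value passed via --video-password and return the
+    # unlocked page.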
+    def _login_list_password(self, page_url, list_id, webpage):
+        login_form = self._search_regex(
+            r'(?s)<form[^>]+?id="pw_form"(.*?)</form>',
+            webpage, 'login form', default=None)
+        if not login_form:
+            return webpage
+
+        password = self._downloader.params.get('videopassword', None)
+        if password is None:
+            raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True)
+        fields = self._hidden_inputs(login_form)
+        token, vuid = self._extract_xsrft_and_vuid(webpage)
+        fields['token'] = token
+        fields['password'] = password
+        post = urlencode_postdata(encode_dict(fields))
+        password_path = self._search_regex(
+            r'action="([^"]+)"', login_form, 'password URL')
+        password_url = compat_urlparse.urljoin(page_url, password_path)
+        password_request = compat_urllib_request.Request(password_url, post)
+        password_request.add_header('Content-type', 'application/x-www-form-urlencoded')
+        password_request.add_header('Cookie', 'vuid=%s' % vuid)
+        self._set_cookie('vimeo.com', 'xsrft', token)
+
+        return self._download_webpage(
+            password_request, list_id,
+            'Verifying the password', 'Wrong password')
+
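+    # Walk the paginated listing, collecting clip ids until no "next" link
+    # (_MORE_PAGES_INDICATOR) is found, then emit a playlist of url_results.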
+    def _extract_videos(self, list_id, base_url):
+        video_ids = []
+        for pagenum in itertools.count(1):
+            page_url = self._page_url(base_url, pagenum)
+            webpage = self._download_webpage(
+                page_url, list_id,
+                'Downloading page %s' % pagenum)
+
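+            # The password form is only checked on the first page; the session
+            # established by _login_list_password() covers the following pages.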
+            if pagenum == 1:
+                webpage = self._login_list_password(page_url, list_id, webpage)
+
+            video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
+            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
+                break
+
+        entries = [self.url_result('https://vimeo.com/%s' % video_id, 'Vimeo')
+                   for video_id in video_ids]
+        return {'_type': 'playlist',
+                'id': list_id,
+                'title': self._extract_list_title(webpage),
+                'entries': entries,
+                }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        channel_id = mobj.group('id')
+        return self._extract_videos(channel_id, 'https://vimeo.com/channels/%s' % channel_id)
+
+
+class VimeoUserIE(VimeoChannelIE):
+    IE_NAME = 'vimeo:user'
+    _VALID_URL = r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
+    _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
+    _TESTS = [{
+        'url': 'https://vimeo.com/nkistudio/videos',
+        'info_dict': {
+            'title': 'Nki',
+            'id': 'nkistudio',
+        },
+        'playlist_mincount': 66,
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        name = mobj.group('name')
+        return self._extract_videos(name, 'https://vimeo.com/%s' % name)
+
+
+class VimeoAlbumIE(VimeoChannelIE):
+    IE_NAME = 'vimeo:album'
+    _VALID_URL = r'https://vimeo\.com/album/(?P<id>\d+)'
+    _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
+    _TESTS = [{
+        'url': 'https://vimeo.com/album/2632481',
+        'info_dict': {
+            'id': '2632481',
+            'title': 'Staff Favorites: November 2013',
+        },
+        'playlist_mincount': 13,
+    }, {
+        'note': 'Password-protected album',
+        'url': 'https://vimeo.com/album/3253534',
+        'info_dict': {
+            'title': 'test',
+            'id': '3253534',
+        },
+        'playlist_count': 1,
+        'params': {
+            'videopassword': 'youtube-dl',
+        }
+    }]
+
+    def _page_url(self, base_url, pagenum):
+        return '%s/page:%d/' % (base_url, pagenum)
+
+    def _real_extract(self, url):
+        album_id = self._match_id(url)
+        return self._extract_videos(album_id, 'https://vimeo.com/album/%s' % album_id)
+
+
+class VimeoGroupsIE(VimeoAlbumIE):
+    IE_NAME = 'vimeo:group'
+    _VALID_URL = r'https://vimeo\.com/groups/(?P<name>[^/]+)'
+    _TESTS = [{
+        'url': 'https://vimeo.com/groups/rolexawards',
+        'info_dict': {
+            'id': 'rolexawards',
+            'title': 'Rolex Awards for Enterprise',
+        },
+        'playlist_mincount': 73,
+    }]
+
+    def _extract_list_title(self, webpage):
+        return self._og_search_title(webpage)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        name = mobj.group('name')
+        return self._extract_videos(name, 'https://vimeo.com/groups/%s' % name)
+
+
+class VimeoReviewIE(InfoExtractor):
+    IE_NAME = 'vimeo:review'
+    IE_DESC = 'Review pages on vimeo'
+    _VALID_URL = r'https://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
+    _TESTS = [{
+        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
+        'md5': 'c507a72f780cacc12b2248bb4006d253',
+        'info_dict': {
+            'id': '75524534',
+            'ext': 'mp4',
+            'title': "DICK HARDWICK 'Comedian'",
+            'uploader': 'Richard Hardwick',
+        }
+    }, {
+        'note': 'video player needs Referer',
+        'url': 'https://vimeo.com/user22258446/review/91613211/13f927e053',
+        'md5': '6295fdab8f4bf6a002d058b2c6dce276',
+        'info_dict': {
+            'id': '91613211',
+            'ext': 'mp4',
+            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
+            'uploader': 'DevWeek Events',
+            'duration': 2773,
+            'thumbnail': r're:^https?://.*\.jpg$',
+        }
+    }]
+
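+    # Review pages are extracted by delegating to the main Vimeo extractor
+    # through the corresponding player URL for the clip.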
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        player_url = 'https://player.vimeo.com/player/' + video_id
+        return self.url_result(player_url, 'Vimeo', video_id)
+
+
+class VimeoWatchLaterIE(VimeoChannelIE):
+    IE_NAME = 'vimeo:watchlater'
+    IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
+    _VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
+    _TITLE = 'Watch Later'
+    _LOGIN_REQUIRED = True
+    _TESTS = [{
+        'url': 'https://vimeo.com/watchlater',
+        'only_matching': True,
+    }]
+
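+    # The watch later list is tied to the account, so a login is forced up front.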
+    def _real_initialize(self):
+        self._login()
+
+    def _page_url(self, base_url, pagenum):
+        url = '%s/page:%d/' % (base_url, pagenum)
+        request = compat_urllib_request.Request(url)
+        # Set the header to get a partial html page with the ids,
+        # the normal page doesn't contain them.
+        request.add_header('X-Requested-With', 'XMLHttpRequest')
+        return request
+
+    def _real_extract(self, url):
+        return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
+
+
+class VimeoLikesIE(InfoExtractor):
+    _VALID_URL = r'https://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
+    IE_NAME = 'vimeo:likes'
+    IE_DESC = 'Vimeo user likes'
+    _TEST = {
+        'url': 'https://vimeo.com/user755559/likes/',
+        'playlist_mincount': 293,
+        'info_dict': {
+            'id': 'user755559_likes',
+            'description': 'See all the videos urza likes',
+            'title': 'Videos urza likes',
+        },
+    }
+
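+    # The likes page reports the total number of pages in its pagination widget;
+    # that count drives an InAdvancePagedList over 12-item pages.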
+    def _real_extract(self, url):
+        user_id = self._match_id(url)
+        webpage = self._download_webpage(url, user_id)
+        page_count = self._int(
+            self._search_regex(
+                r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
+                    .*?</a></li>\s*<li\s+class="pagination_next">
+                ''', webpage, 'page count'),
+            'page count', fatal=True)
+        PAGE_SIZE = 12
+        title = self._html_search_regex(
+            r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
+        description = self._html_search_meta('description', webpage)
+
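+        # Fetch one likes page (idx is 0-based) and yield a url entry per video.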
+        def _get_page(idx):
+            page_url = 'https://vimeo.com/user%s/likes/page:%d/sort:date' % (
+                user_id, idx + 1)
+            webpage = self._download_webpage(
+                page_url, user_id,
+                note='Downloading page %d/%d' % (idx + 1, page_count))
+            video_list = self._search_regex(
+                r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
+                webpage, 'video content')
+            paths = re.findall(
+                r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
+            for path in paths:
+                yield {
+                    '_type': 'url',
+                    'url': compat_urlparse.urljoin(page_url, path),
+                }
+
+        pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
+
+        return {
+            '_type': 'playlist',
+            'id': 'user%s_likes' % user_id,
+            'title': title,
+            'description': description,
+            'entries': pl,
+        }