+        return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
+
+
+class VimeoLikesIE(InfoExtractor):
+    _VALID_URL = r'https://(?:www\.)?vimeo\.com/(?P<id>[^/]+)/likes/?(?:$|[?#]|sort:)'
+    IE_NAME = 'vimeo:likes'
+    IE_DESC = 'Vimeo user likes'
+    _TESTS = [{
+        'url': 'https://vimeo.com/user755559/likes/',
+        'playlist_mincount': 293,
+        'info_dict': {
+            'id': 'user755559_likes',
+            'description': 'See all the videos urza likes',
+            'title': 'Videos urza likes',
+        },
+    }, {
+        'url': 'https://vimeo.com/stormlapse/likes',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        user_id = self._match_id(url)
+        webpage = self._download_webpage(url, user_id)
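+        # The page count comes from the pagination widget: the numbered link
+        # immediately before the "next" arrow is the last page. Defaults to a
+        # single page when the markup is absent.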
+        page_count = self._int(
+            self._search_regex(
+                r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
+                    .*?</a></li>\s*<li\s+class="pagination_next">
+                ''', webpage, 'page count', default=1),
+            'page count', fatal=True)
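+        # Likes pages hold 12 videos each (observed page size; Vimeo does not
+        # document it).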
+        PAGE_SIZE = 12
+        title = self._html_search_regex(
+            r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
+        description = self._html_search_meta('description', webpage)
+
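+        # Page fetcher for InAdvancePagedList: scrapes one likes page and
+        # yields a url result per video, deferring full extraction to the
+        # matching extractor (normally VimeoIE).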
+        def _get_page(idx):
+            page_url = 'https://vimeo.com/%s/likes/page:%d/sort:date' % (
+                user_id, idx + 1)
+            webpage = self._download_webpage(
+                page_url, user_id,
+                note='Downloading page %d/%d' % (idx + 1, page_count))
+            video_list = self._search_regex(
+                r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
+                webpage, 'video content')
+            paths = re.findall(
+                r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
+            for path in paths:
+                yield {
+                    '_type': 'url',
+                    'url': compat_urlparse.urljoin(page_url, path),
+                }
+
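+        # Knowing the page count up front, InAdvancePagedList fetches pages
+        # lazily, so options like --playlist-end only download what they need.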
+        pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
+
+        return {
+            '_type': 'playlist',
+            'id': '%s_likes' % user_id,
+            'title': title,
+            'description': description,
+            'entries': pl,
+        }
+
+
+class VHXEmbedIE(InfoExtractor):
+    IE_NAME = 'vhx:embed'
+    _VALID_URL = r'https?://embed\.vhx\.tv/videos/(?P<id>\d+)'
+
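+    # Thin wrapper around the VHX (Vimeo OTT) API; every request carries the
+    # bearer token scraped from the embed page.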
+    def _call_api(self, video_id, access_token, path='', query=None):
+        return self._download_json(
+            'https://api.vhx.tv/videos/' + video_id + path, video_id, headers={
+                'Authorization': 'Bearer ' + access_token,
+            }, query=query)
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
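+        # The embed page inlines a JavaScript `credentials` object; js_to_json
+        # relaxes it enough to parse as JSON.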
+        credentials = self._parse_json(self._search_regex(
+            r'(?s)credentials\s*:\s*({.+?}),', webpage,
+            'credentials'), video_id, js_to_json)
+        access_token = credentials['access_token']
+
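+        # Forward any auth fields the page supplied, skipping the literal
+        # string 'undefined' it emits for unset values; the API expects
+        # authUserToken in snake_case.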
+        query = {}
+        for k, v in credentials.items():
+            if k in ('authorization', 'authUserToken', 'ticket') and v and v != 'undefined':
+                if k == 'authUserToken':
+                    query['auth_user_token'] = v
+                else:
+                    query[k] = v
+        files = self._call_api(video_id, access_token, '/files', query)
+
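+        # Each file entry advertises its delivery method: hand HLS and DASH
+        # manifests to the manifest parsers and treat anything else as a
+        # progressive HTTP download.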
+        formats = []
+        for f in files:
+            href = try_get(f, lambda x: x['_links']['source']['href'])
+            if not href:
+                continue
+            method = f.get('method')
+            if method == 'hls':
+                formats.extend(self._extract_m3u8_formats(
+                    href, video_id, 'mp4', 'm3u8_native',
+                    m3u8_id='hls', fatal=False))
+            elif method == 'dash':
+                formats.extend(self._extract_mpd_formats(
+                    href, video_id, mpd_id='dash', fatal=False))
+            else:
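+                # preference 1 ranks progressive files above the
+                # manifest-derived formats (assumed default preference 0).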
+                fmt = {
+                    'filesize': int_or_none(try_get(f, lambda x: x['size']['bytes'])),
+                    'format_id': 'http',
+                    'preference': 1,
+                    'url': href,
+                    'vcodec': f.get('codec'),
+                }
+                quality = f.get('quality')
+                if quality:
+                    fmt.update({
+                        'format_id': 'http-' + quality,
+                        'height': int_or_none(self._search_regex(r'(\d+)p', quality, 'height', default=None)),
+                    })
+                formats.append(fmt)
+        self._sort_formats(formats)
+
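+        # A second API call without the /files path returns the metadata.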
+        video_data = self._call_api(video_id, access_token)
+        title = video_data.get('title') or video_data['name']
+
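+        # Each subtitle track lists one or more download links under _links;
+        # collect every href under the track's language (label as fallback).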
+        subtitles = {}
+        for subtitle in try_get(video_data, lambda x: x['tracks']['subtitles'], list) or []:
+            lang = subtitle.get('srclang') or subtitle.get('label')
+            for _link in subtitle.get('_links', {}).values():
+                href = _link.get('href')
+                if not href:
+                    continue
+                subtitles.setdefault(lang, []).append({
+                    'url': href,
+                })
+
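+        # Rank the named thumbnail sizes so that 'source' is preferred.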
+        q = qualities(['small', 'medium', 'large', 'source'])
+        thumbnails = []
+        for thumbnail_id, thumbnail_url in video_data.get('thumbnail', {}).items():
+            thumbnails.append({
+                'id': thumbnail_id,
+                'url': thumbnail_url,
+                'preference': q(thumbnail_id),
+            })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': video_data.get('description'),
+            'duration': int_or_none(try_get(video_data, lambda x: x['duration']['seconds'])),
+            'formats': formats,
+            'subtitles': subtitles,
+            'thumbnails': thumbnails,
+            'timestamp': unified_timestamp(video_data.get('created_at')),
+            'view_count': int_or_none(video_data.get('plays_count')),
+        }