+ title = self._og_search_title(webpage, default=None)
+ if title is None:
+ title = self._html_search_regex(
+ r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
+ 'title')
+
+ return {
+ 'id': video_id,
+ 'formats': formats,
+ 'uploader': info['owner.screenname'],
+ 'timestamp': timestamp,
+ 'title': title,
+ 'description': description,
+ 'subtitles': video_subtitles,
+ 'thumbnail': info['thumbnail_url'],
+ 'age_limit': age_limit,
+ 'view_count': view_count,
+ 'duration': info['duration']
+ }
+
+ def _check_error(self, info):
+ error = info.get('error')
+ if error:
+ title = error.get('title') or error['message']
+ # See https://developer.dailymotion.com/api#access-error
+ if error.get('code') == 'DM007':
+ self.raise_geo_restricted(msg=title)
+ raise ExtractorError(
+ '%s said: %s' % (self.IE_NAME, title), expected=True)
+
+ def _get_subtitles(self, video_id, webpage):
+ try:
+ sub_list = self._download_webpage(
+ 'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
+ video_id, note=False)
+ except ExtractorError as err:
+ self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
+ return {}
+ info = json.loads(sub_list)
+ if (info['total'] > 0):
+ sub_lang_list = dict((l['language'], [{'url': l['url'], 'ext': 'srt'}]) for l in info['list'])
+ return sub_lang_list
+ self._downloader.report_warning('video doesn\'t have subtitles')
+ return {}
+
+
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
    """Extract all videos of a Dailymotion playlist via the GraphQL API."""
    IE_NAME = 'dailymotion:playlist'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>x[0-9a-z]+)'
    _TESTS = [{
        'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
        'info_dict': {
            'title': 'SPORT',
            'id': 'xv4bw',
        },
        'playlist_mincount': 20,
    }]
    # Videos requested per GraphQL page.
    _PAGE_SIZE = 100

    def _fetch_page(self, playlist_id, authorization, page):
        """Yield url results for one page of the playlist.

        OnDemandPagedList supplies 0-based page numbers; the GraphQL API
        is 1-based, hence the increment.
        (Fixed misspelled parameter name 'authorizaion'; it is only ever
        passed positionally via functools.partial.)
        """
        page += 1
        videos = self._download_json(
            'https://graphql.api.dailymotion.com',
            playlist_id, 'Downloading page %d' % page,
            data=json.dumps({
                'query': '''{
  collection(xid: "%s") {
    videos(first: %d, page: %d) {
      pageInfo {
        hasNextPage
        nextPage
      }
      edges {
        node {
          xid
          url
        }
      }
    }
  }
}''' % (playlist_id, self._PAGE_SIZE, page)
            }).encode(), headers={
                'Authorization': authorization,
                'Origin': 'https://www.dailymotion.com',
            })['data']['collection']['videos']
        for edge in videos['edges']:
            node = edge['node']
            yield self.url_result(
                node['url'], DailymotionIE.ie_key(), node['xid'])

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        # The embedded player config carries the OAuth endpoint and client
        # credentials; fall back to known public defaults when absent.
        api = self._parse_json(self._search_regex(
            r'__PLAYER_CONFIG__\s*=\s*({.+?});',
            webpage, 'player config'), playlist_id)['context']['api']
        auth = self._download_json(
            api.get('auth_url', 'https://graphql.api.dailymotion.com/oauth/token'),
            playlist_id, data=urlencode_postdata({
                'client_id': api.get('client_id', 'f1a362d288c1b98099c7'),
                'client_secret': api.get('client_secret', 'eea605b96e01c796ff369935357eca920c5da4c5'),
                'grant_type': 'client_credentials',
            }))
        authorization = '%s %s' % (auth.get('token_type', 'Bearer'), auth['access_token'])
        entries = OnDemandPagedList(functools.partial(
            self._fetch_page, playlist_id, authorization), self._PAGE_SIZE)
        return self.playlist_result(
            entries, playlist_id,
            self._og_search_title(webpage))
+
+
+class DailymotionUserIE(DailymotionBaseInfoExtractor):
+ IE_NAME = 'dailymotion:user'
+ _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|swf|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
+ _MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
+ _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
+ _TESTS = [{
+ 'url': 'https://www.dailymotion.com/user/nqtv',
+ 'info_dict': {
+ 'id': 'nqtv',
+ 'title': 'Rémi Gaillard',
+ },
+ 'playlist_mincount': 100,
+ }, {
+ 'url': 'http://www.dailymotion.com/user/UnderProject',
+ 'info_dict': {
+ 'id': 'UnderProject',
+ 'title': 'UnderProject',
+ },
+ 'playlist_mincount': 1800,
+ 'expected_warnings': [
+ 'Stopped at duplicated page',
+ ],
+ 'skip': 'Takes too long time',
+ }]
+
+ def _extract_entries(self, id):
+ video_ids = set()
+ processed_urls = set()
+ for pagenum in itertools.count(1):
+ page_url = self._PAGE_TEMPLATE % (id, pagenum)
+ webpage, urlh = self._download_webpage_handle_no_ff(
+ page_url, id, 'Downloading page %s' % pagenum)
+ if urlh.geturl() in processed_urls:
+ self.report_warning('Stopped at duplicated page %s, which is the same as %s' % (
+ page_url, urlh.geturl()), id)
+ break
+
+ processed_urls.add(urlh.geturl())
+
+ for video_id in re.findall(r'data-xid="(.+?)"', webpage):
+ if video_id not in video_ids:
+ yield self.url_result(
+ 'http://www.dailymotion.com/video/%s' % video_id,
+ DailymotionIE.ie_key(), video_id)
+ video_ids.add(video_id)
+
+ if re.search(self._MORE_PAGES_INDICATOR, webpage) is None: