X-Git-Url: https://git.rapsys.eu/youtubedl/blobdiff_plain/685ad32bb6a8afcce0e261d42caf1a1d2bf16aa3..acb27260e91c30efdb413d0a6fa6279adcc98469:/youtube_dl/extractor/dailymotion.py

diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py
index 259806f..936c13c 100644
--- a/youtube_dl/extractor/dailymotion.py
+++ b/youtube_dl/extractor/dailymotion.py
@@ -1,3 +1,6 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 import re
 import json
 import itertools
@@ -8,58 +11,80 @@ from .subtitles import SubtitlesInfoExtractor
 from ..utils import (
     compat_urllib_request,
     compat_str,
-    get_element_by_attribute,
-    get_element_by_id,
-
+    orderedSet,
+    str_to_int,
+    int_or_none,
     ExtractorError,
+    unescapeHTML,
 )
 
+
 class DailymotionBaseInfoExtractor(InfoExtractor):
     @staticmethod
     def _build_request(url):
         """Build a request with the family filter disabled"""
         request = compat_urllib_request.Request(url)
         request.add_header('Cookie', 'family_filter=off')
+        request.add_header('Cookie', 'ff=off')
         return request
 
+
 class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
     """Information Extractor for Dailymotion"""
 
-    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'
-    IE_NAME = u'dailymotion'
+    _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
+    IE_NAME = 'dailymotion'
+
+    _FORMATS = [
+        ('stream_h264_ld_url', 'ld'),
+        ('stream_h264_url', 'standard'),
+        ('stream_h264_hq_url', 'hq'),
+        ('stream_h264_hd_url', 'hd'),
+        ('stream_h264_hd1080_url', 'hd180'),
+    ]
+
     _TESTS = [
         {
-            u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
-            u'file': u'x33vw9.mp4',
-            u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
-            u'info_dict': {
-                u"uploader": u"Amphora Alex and Van .",
-                u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
+            'url': 'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
+            'md5': '392c4b85a60a90dc4792da41ce3144eb',
+            'info_dict': {
+                'id': 'x33vw9',
+                'ext': 'mp4',
+                'uploader': 'Amphora Alex and Van .',
+                'title': 'Tutoriel de Youtubeur"DL DES VIDEO DE YOUTUBE"',
            }
         },
         # Vevo video
         {
-            u'url': u'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
-            u'file': u'USUV71301934.mp4',
-            u'info_dict': {
-                u'title': u'Roar (Official)',
-                u'uploader': u'Katy Perry',
-                u'upload_date': u'20130905',
+            'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
+            'info_dict': {
+                'title': 'Roar (Official)',
+                'id': 'USUV71301934',
+                'ext': 'mp4',
+                'uploader': 'Katy Perry',
+                'upload_date': '20130905',
             },
-            u'params': {
-                u'skip_download': True,
+            'params': {
+                'skip_download': True,
             },
-            u'skip': u'VEVO is only available in some countries',
+            'skip': 'VEVO is only available in some countries',
         },
+        # age-restricted video
+        {
+            'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
+            'md5': '0d667a7b9cebecc3c89ee93099c4159d',
+            'info_dict': {
+                'id': 'xyh2zz',
+                'ext': 'mp4',
+                'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
+                'uploader': 'HotWaves1012',
+                'age_limit': 18,
+            }
+        }
     ]
 
     def _real_extract(self, url):
-        # Extract id and simplified title from URL
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group(1).split('_')[0].split('?')[0]
-
-        video_extension = 'mp4'
+        video_id = self._match_id(url)
         url = 'http://www.dailymotion.com/video/%s' % video_id
 
         # Retrieve video webpage to extract further information
@@ -71,17 +96,14 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 
         # It may just embed a vevo video:
         m_vevo = re.search(
-            r'[\w]*)',
+            r'[\w]*)',
             webpage)
         if m_vevo is not None:
             vevo_id = m_vevo.group('id')
-            self.to_screen(u'Vevo video detected: %s' % vevo_id)
-            return self.url_result(u'vevo:%s' % vevo_id, ie='Vevo')
+            self.to_screen('Vevo video detected: %s' % vevo_id)
+            return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
 
-        video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
-                                             # Looking for official user
-                                             r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
-                                            webpage, 'video uploader')
+        age_limit = self._rta_search(webpage)
 
         video_upload_date = None
         mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
@@ -90,106 +112,137 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 
         embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
         embed_page = self._download_webpage(embed_url, video_id,
-                                            u'Downloading embed page')
+                                            'Downloading embed page')
         info = self._search_regex(r'var info = ({.*?}),$', embed_page,
-            'video info', flags=re.MULTILINE)
+                                  'video info', flags=re.MULTILINE)
         info = json.loads(info)
         if info.get('error') is not None:
             msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
             raise ExtractorError(msg, expected=True)
 
-        # TODO: support choosing qualities
-
-        for key in ['stream_h264_hd1080_url','stream_h264_hd_url',
-                    'stream_h264_hq_url','stream_h264_url',
-                    'stream_h264_ld_url']:
-            if info.get(key):#key in info and info[key]:
-                max_quality = key
-                self.to_screen(u'Using %s' % key)
-                break
-        else:
-            raise ExtractorError(u'Unable to extract video URL')
-        video_url = info[max_quality]
+        formats = []
+        for (key, format_id) in self._FORMATS:
+            video_url = info.get(key)
+            if video_url is not None:
+                m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
+                if m_size is not None:
+                    width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
+                else:
+                    width, height = None, None
+                formats.append({
+                    'url': video_url,
+                    'ext': 'mp4',
+                    'format_id': format_id,
+                    'width': width,
+                    'height': height,
+                })
+        if not formats:
+            raise ExtractorError('Unable to extract video URL')
 
         # subtitles
-        video_subtitles = self.extract_subtitles(video_id)
+        video_subtitles = self.extract_subtitles(video_id, webpage)
         if self._downloader.params.get('listsubtitles', False):
-            self._list_available_subtitles(video_id)
+            self._list_available_subtitles(video_id, webpage)
             return
 
-        return [{
-            'id': video_id,
-            'url': video_url,
-            'uploader': video_uploader,
-            'upload_date': video_upload_date,
-            'title': self._og_search_title(webpage),
-            'ext': video_extension,
-            'subtitles': video_subtitles,
-            'thumbnail': info['thumbnail_url']
-        }]
-
-    def _get_available_subtitles(self, video_id):
+        view_count = str_to_int(self._search_regex(
+            r'video_views_count[^>]+>\s+([\d\.,]+)',
+            webpage, 'view count', fatal=False))
+
+        title = self._og_search_title(webpage, default=None)
+        if title is None:
+            title = self._html_search_regex(
+                r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
+                'title')
+
+        return {
+            'id': video_id,
+            'formats': formats,
+            'uploader': info['owner.screenname'],
+            'upload_date': video_upload_date,
+            'title': title,
+            'subtitles': video_subtitles,
+            'thumbnail': info['thumbnail_url'],
+            'age_limit': age_limit,
+            'view_count': view_count,
+        }
+
+    def _get_available_subtitles(self, video_id, webpage):
         try:
             sub_list = self._download_webpage(
                 'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
                 video_id, note=False)
         except ExtractorError as err:
-            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
+            self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
             return {}
         info = json.loads(sub_list)
         if (info['total'] > 0):
             sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
             return sub_lang_list
-        self._downloader.report_warning(u'video doesn\'t have subtitles')
+        self._downloader.report_warning('video doesn\'t have subtitles')
         return {}
 
 
 class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
-    IE_NAME = u'dailymotion:playlist'
+    IE_NAME = 'dailymotion:playlist'
     _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
-    _MORE_PAGES_INDICATOR = r''
+    _MORE_PAGES_INDICATOR = r'(?s).*?[^/]+)'
-    _MORE_PAGES_INDICATOR = r''
+    IE_NAME = 'dailymotion:user'
+    _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
     _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
+    _TESTS = [{
+        'url': 'https://www.dailymotion.com/user/nqtv',
+        'info_dict': {
+            'id': 'nqtv',
+            'title': 'Rémi Gaillard',
+        },
+        'playlist_mincount': 100,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         user = mobj.group('user')
         webpage = self._download_webpage(url, user)
 
-        full_user = self._html_search_regex(
-            r'(.*?)' % re.escape(user),
+            webpage, 'user'))
 
         return {
             '_type': 'playlist',