youtube_dl/extractor/dailymotion.py
# coding: utf-8
from __future__ import unicode_literals

import base64
import functools
import hashlib
import itertools
import json
import random
import re
import string

from .common import InfoExtractor
from ..compat import compat_struct_pack
from ..utils import (
    determine_ext,
    error_to_compat_str,
    ExtractorError,
    int_or_none,
    mimetype2ext,
    OnDemandPagedList,
    parse_iso8601,
    sanitized_Request,
    str_to_int,
    try_get,
    unescapeHTML,
    update_url_query,
    url_or_none,
    urlencode_postdata,
)


class DailymotionBaseInfoExtractor(InfoExtractor):
    @staticmethod
    def _build_request(url):
        """Build a request with the family filter disabled"""
        request = sanitized_Request(url)
        request.add_header('Cookie', 'family_filter=off; ff=off')
        return request

    def _download_webpage_handle_no_ff(self, url, *args, **kwargs):
        request = self._build_request(url)
        return self._download_webpage_handle(request, *args, **kwargs)

    def _download_webpage_no_ff(self, url, *args, **kwargs):
        request = self._build_request(url)
        return self._download_webpage(request, *args, **kwargs)


class DailymotionIE(DailymotionBaseInfoExtractor):
    _VALID_URL = r'(?i)https?://(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(?:(?:embed|swf|#)/)?video|swf)/(?P<id>[^/?_]+)'
    IE_NAME = 'dailymotion'

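    # Progressive MP4 URL keys exposed by the legacy embed player's
    # "var info = {...}" JSON, mapped to format_ids; only used by the
    # old-player fallback further down in _real_extract.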
    _FORMATS = [
        ('stream_h264_ld_url', 'ld'),
        ('stream_h264_url', 'standard'),
        ('stream_h264_hq_url', 'hq'),
        ('stream_h264_hd_url', 'hd'),
        ('stream_h264_hd1080_url', 'hd180'),
    ]

    _TESTS = [{
        'url': 'http://www.dailymotion.com/video/x5kesuj_office-christmas-party-review-jason-bateman-olivia-munn-t-j-miller_news',
        'md5': '074b95bdee76b9e3654137aee9c79dfe',
        'info_dict': {
            'id': 'x5kesuj',
            'ext': 'mp4',
            'title': 'Office Christmas Party Review – Jason Bateman, Olivia Munn, T.J. Miller',
            'description': 'Office Christmas Party Review - Jason Bateman, Olivia Munn, T.J. Miller',
            'thumbnail': r're:^https?:.*\.(?:jpg|png)$',
            'duration': 187,
            'timestamp': 1493651285,
            'upload_date': '20170501',
            'uploader': 'Deadline',
            'uploader_id': 'x1xm8ri',
            'age_limit': 0,
        },
    }, {
        'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames',
        'md5': '2137c41a8e78554bb09225b8eb322406',
        'info_dict': {
            'id': 'x2iuewm',
            'ext': 'mp4',
            'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
            'description': 'Several come bundled with the Steam Controller.',
            'thumbnail': r're:^https?:.*\.(?:jpg|png)$',
            'duration': 74,
            'timestamp': 1425657362,
            'upload_date': '20150306',
            'uploader': 'IGN',
            'uploader_id': 'xijv66',
            'age_limit': 0,
            'view_count': int,
        },
        'skip': 'video gone',
    }, {
        # Vevo video
        'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
        'info_dict': {
            'title': 'Roar (Official)',
            'id': 'USUV71301934',
            'ext': 'mp4',
            'uploader': 'Katy Perry',
            'upload_date': '20130905',
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'VEVO is only available in some countries',
    }, {
        # age-restricted video
        'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
        'md5': '0d667a7b9cebecc3c89ee93099c4159d',
        'info_dict': {
            'id': 'xyh2zz',
            'ext': 'mp4',
            'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
            'uploader': 'HotWaves1012',
            'age_limit': 18,
        },
        'skip': 'video gone',
    }, {
        # geo-restricted, player v5
        'url': 'http://www.dailymotion.com/video/xhza0o',
        'only_matching': True,
    }, {
        # with subtitles
        'url': 'http://www.dailymotion.com/video/x20su5f_the-power-of-nightmares-1-the-rise-of-the-politics-of-fear-bbc-2004_news',
        'only_matching': True,
    }, {
        'url': 'http://www.dailymotion.com/swf/video/x3n92nf',
        'only_matching': True,
    }, {
        'url': 'http://www.dailymotion.com/swf/x3ss1m_funny-magic-trick-barry-and-stuart_fun',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Look for embedded Dailymotion player
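        # e.g. <iframe src="//www.dailymotion.com/embed/video/x5kesuj"></iframe>
        # (quoted, protocol-relative and HTML-escaped variants are matched too)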
        matches = re.findall(
            r'<(?:(?:embed|iframe)[^>]+?src=|input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=)(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/(?:embed|swf)/video/.+?)\1', webpage)
        return list(map(lambda m: unescapeHTML(m[1]), matches))

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage_no_ff(
            'https://www.dailymotion.com/video/%s' % video_id, video_id)

        age_limit = self._rta_search(webpage)

        description = self._og_search_description(
            webpage, default=None) or self._html_search_meta(
            'description', webpage, 'description')

        view_count_str = self._search_regex(
            (r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:([\s\d,.]+)"',
             r'video_views_count[^>]+>\s+([\s\d\,.]+)'),
            webpage, 'view count', default=None)
        if view_count_str:
            view_count_str = re.sub(r'\s', '', view_count_str)
        view_count = str_to_int(view_count_str)
        comment_count = int_or_none(self._search_regex(
            r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
            webpage, 'comment count', default=None))

        player_v5 = self._search_regex(
            [r'buildPlayer\(({.+?})\);\n',  # See https://github.com/ytdl-org/youtube-dl/issues/7826
             r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
             r'buildPlayer\(({.+?})\);',
             r'var\s+config\s*=\s*({.+?});',
             # New layout regex (see https://github.com/ytdl-org/youtube-dl/issues/13580)
             r'__PLAYER_CONFIG__\s*=\s*({.+?});'],
            webpage, 'player v5', default=None)
        if player_v5:
            player = self._parse_json(player_v5, video_id, fatal=False) or {}
            metadata = try_get(player, lambda x: x['metadata'], dict)
            if not metadata:
                metadata_url = url_or_none(try_get(
                    player, lambda x: x['context']['metadata_template_url1']))
                if metadata_url:
                    metadata_url = metadata_url.replace(':videoId', video_id)
                else:
                    metadata_url = update_url_query(
                        'https://www.dailymotion.com/player/metadata/video/%s'
                        % video_id, {
                            'embedder': url,
                            'integration': 'inline',
                            'GK_PV5_NEON': '1',
                        })
                metadata = self._download_json(
                    metadata_url, video_id, 'Downloading metadata JSON')
            if try_get(metadata, lambda x: x['error']['type']) == 'password_protected':
                password = self._downloader.params.get('videopassword')
                if password:
                    r = int(metadata['id'][1:], 36)
                    us64e = lambda x: base64.urlsafe_b64encode(x).decode().strip('=')
                    t = ''.join(random.choice(string.ascii_letters) for i in range(10))
                    n = us64e(compat_struct_pack('I', r))
                    i = us64e(hashlib.md5(('%s%d%s' % (password, r, t)).encode()).digest())
                    metadata = self._download_json(
                        'http://www.dailymotion.com/player/metadata/video/p' + i + t + n, video_id)

            self._check_error(metadata)

            formats = []
            for quality, media_list in metadata['qualities'].items():
                for media in media_list:
                    media_url = media.get('url')
                    if not media_url:
                        continue
                    type_ = media.get('type')
                    if type_ == 'application/vnd.lumberjack.manifest':
                        continue
                    ext = mimetype2ext(type_) or determine_ext(media_url)
                    if ext == 'm3u8':
                        m3u8_formats = self._extract_m3u8_formats(
                            media_url, video_id, 'mp4', preference=-1,
                            m3u8_id='hls', fatal=False)
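                        # drop any '#...' fragment from the HLS format URLs
                        # before download (presumably CDN/debug hints that are
                        # not part of the playlist location)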
                        for f in m3u8_formats:
                            f['url'] = f['url'].split('#')[0]
                            formats.append(f)
                    elif ext == 'f4m':
                        formats.extend(self._extract_f4m_formats(
                            media_url, video_id, preference=-1, f4m_id='hds', fatal=False))
                    else:
                        f = {
                            'url': media_url,
                            'format_id': 'http-%s' % quality,
                            'ext': ext,
                        }
                        m = re.search(r'H264-(?P<width>\d+)x(?P<height>\d+)', media_url)
                        if m:
                            f.update({
                                'width': int(m.group('width')),
                                'height': int(m.group('height')),
                            })
                        formats.append(f)
            self._sort_formats(formats)

            title = metadata['title']
            duration = int_or_none(metadata.get('duration'))
            timestamp = int_or_none(metadata.get('created_time'))
            thumbnail = metadata.get('poster_url')
            uploader = metadata.get('owner', {}).get('screenname')
            uploader_id = metadata.get('owner', {}).get('id')

            subtitles = {}
            subtitles_data = metadata.get('subtitles', {}).get('data', {})
            if subtitles_data and isinstance(subtitles_data, dict):
                for subtitle_lang, subtitle in subtitles_data.items():
                    subtitles[subtitle_lang] = [{
                        'ext': determine_ext(subtitle_url),
                        'url': subtitle_url,
                    } for subtitle_url in subtitle.get('urls', [])]

            return {
                'id': video_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'timestamp': timestamp,
                'uploader': uploader,
                'uploader_id': uploader_id,
                'age_limit': age_limit,
                'view_count': view_count,
                'comment_count': comment_count,
                'formats': formats,
                'subtitles': subtitles,
            }

        # vevo embed
        vevo_id = self._search_regex(
            r'<link rel="video_src" href="[^"]*?vevo\.com[^"]*?video=(?P<id>[\w]*)',
            webpage, 'vevo embed', default=None)
        if vevo_id:
            return self.url_result('vevo:%s' % vevo_id, 'Vevo')

        # fallback old player
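        # (parses "var info = {...}" from the embed page and reads the
        # progressive stream_h264_* URLs listed in _FORMATS above)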
        embed_page = self._download_webpage_no_ff(
            'https://www.dailymotion.com/embed/video/%s' % video_id,
            video_id, 'Downloading embed page')

        timestamp = parse_iso8601(self._html_search_meta(
            'video:release_date', webpage, 'upload date'))

        info = self._parse_json(
            self._search_regex(
                r'var info = ({.*?}),$', embed_page,
                'video info', flags=re.MULTILINE),
            video_id)

        self._check_error(info)

        formats = []
        for (key, format_id) in self._FORMATS:
            video_url = info.get(key)
            if video_url is not None:
                m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
                if m_size is not None:
                    width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
                else:
                    width, height = None, None
                formats.append({
                    'url': video_url,
                    'ext': 'mp4',
                    'format_id': format_id,
                    'width': width,
                    'height': height,
                })
        self._sort_formats(formats)

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, webpage)

        title = self._og_search_title(webpage, default=None)
        if title is None:
            title = self._html_search_regex(
                r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
                'title')

        return {
            'id': video_id,
            'formats': formats,
            'uploader': info['owner.screenname'],
            'timestamp': timestamp,
            'title': title,
            'description': description,
            'subtitles': video_subtitles,
            'thumbnail': info['thumbnail_url'],
            'age_limit': age_limit,
            'view_count': view_count,
            'duration': info['duration']
        }

    def _check_error(self, info):
        error = info.get('error')
        if error:
            title = error.get('title') or error['message']
            # See https://developer.dailymotion.com/api#access-error
            if error.get('code') == 'DM007':
                self.raise_geo_restricted(msg=title)
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, title), expected=True)

    def _get_subtitles(self, video_id, webpage):
        try:
            sub_list = self._download_webpage(
                'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
            return {}
        info = json.loads(sub_list)
        if info['total'] > 0:
            sub_lang_list = dict((l['language'], [{'url': l['url'], 'ext': 'srt'}]) for l in info['list'])
            return sub_lang_list
        self._downloader.report_warning('video doesn\'t have subtitles')
        return {}


class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
    IE_NAME = 'dailymotion:playlist'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>x[0-9a-z]+)'
    _TESTS = [{
        'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
        'info_dict': {
            'title': 'SPORT',
            'id': 'xv4bw',
        },
        'playlist_mincount': 20,
    }]
    _PAGE_SIZE = 100

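    # Pages are fetched through the GraphQL endpoint; each call below asks for
    # one page of the collection's videos and expects a response shaped
    # roughly like (illustrative, trimmed to the fields actually read):
    #   {"data": {"collection": {"videos": {
    #       "pageInfo": {"hasNextPage": true, "nextPage": 2},
    #       "edges": [{"node": {"xid": "x7...", "url": "https://..."}}]}}}}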
    def _fetch_page(self, playlist_id, authorization, page):
        page += 1
        videos = self._download_json(
            'https://graphql.api.dailymotion.com',
            playlist_id, 'Downloading page %d' % page,
            data=json.dumps({
                'query': '''{
  collection(xid: "%s") {
    videos(first: %d, page: %d) {
      pageInfo {
        hasNextPage
        nextPage
      }
      edges {
        node {
          xid
          url
        }
      }
    }
  }
}''' % (playlist_id, self._PAGE_SIZE, page)
            }).encode(), headers={
                'Authorization': authorization,
                'Origin': 'https://www.dailymotion.com',
            })['data']['collection']['videos']
        for edge in videos['edges']:
            node = edge['node']
            yield self.url_result(
                node['url'], DailymotionIE.ie_key(), node['xid'])

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        api = self._parse_json(self._search_regex(
            r'__PLAYER_CONFIG__\s*=\s*({.+?});',
            webpage, 'player config'), playlist_id)['context']['api']
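        # OAuth2 client_credentials grant against the auth_url advertised in
        # __PLAYER_CONFIG__; the hard-coded client_id/client_secret are only
        # fallbacks (presumably the public web player's own credentials).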
        auth = self._download_json(
            api.get('auth_url', 'https://graphql.api.dailymotion.com/oauth/token'),
            playlist_id, data=urlencode_postdata({
                'client_id': api.get('client_id', 'f1a362d288c1b98099c7'),
                'client_secret': api.get('client_secret', 'eea605b96e01c796ff369935357eca920c5da4c5'),
                'grant_type': 'client_credentials',
            }))
        authorization = '%s %s' % (auth.get('token_type', 'Bearer'), auth['access_token'])
        entries = OnDemandPagedList(functools.partial(
            self._fetch_page, playlist_id, authorization), self._PAGE_SIZE)
        return self.playlist_result(
            entries, playlist_id,
            self._og_search_title(webpage))


class DailymotionUserIE(DailymotionBaseInfoExtractor):
    IE_NAME = 'dailymotion:user'
    _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|swf|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
    _MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
    _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
    _TESTS = [{
        'url': 'https://www.dailymotion.com/user/nqtv',
        'info_dict': {
            'id': 'nqtv',
            'title': 'Rémi Gaillard',
        },
        'playlist_mincount': 100,
    }, {
        'url': 'http://www.dailymotion.com/user/UnderProject',
        'info_dict': {
            'id': 'UnderProject',
            'title': 'UnderProject',
        },
        'playlist_mincount': 1800,
        'expected_warnings': [
            'Stopped at duplicated page',
        ],
        'skip': 'Takes too long',
    }]

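    # Scrapes the old paginated user pages (_PAGE_TEMPLATE) until the
    # "next page" arrow disappears or a page redirects to one already seen,
    # which is treated as the end of the listing (hence the duplicate-page
    # warning below).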
    def _extract_entries(self, id):
        video_ids = set()
        processed_urls = set()
        for pagenum in itertools.count(1):
            page_url = self._PAGE_TEMPLATE % (id, pagenum)
            webpage, urlh = self._download_webpage_handle_no_ff(
                page_url, id, 'Downloading page %s' % pagenum)
            if urlh.geturl() in processed_urls:
                self.report_warning('Stopped at duplicated page %s, which is the same as %s' % (
                    page_url, urlh.geturl()), id)
                break

            processed_urls.add(urlh.geturl())

            for video_id in re.findall(r'data-xid="(.+?)"', webpage):
                if video_id not in video_ids:
                    yield self.url_result(
                        'http://www.dailymotion.com/video/%s' % video_id,
                        DailymotionIE.ie_key(), video_id)
                    video_ids.add(video_id)

            if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                break

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        webpage = self._download_webpage(
            'https://www.dailymotion.com/user/%s' % user, user)
        full_user = unescapeHTML(self._html_search_regex(
            r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
            webpage, 'user'))

        return {
            '_type': 'playlist',
            'id': user,
            'title': full_user,
            'entries': self._extract_entries(user),
        }
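

# Illustrative usage only (not part of the extractor): these IEs are normally
# driven through YoutubeDL rather than instantiated directly, e.g.
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(
#             'https://www.dailymotion.com/video/x5kesuj', download=False)
#         print(info.get('title'))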