# coding: utf-8
from __future__ import unicode_literals

import json
import datetime

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urlparse,
)
from ..utils import (
    determine_ext,
    dict_get,
    ExtractorError,
    int_or_none,
    float_or_none,
    parse_duration,
    parse_iso8601,
    remove_start,
    try_get,
    unified_timestamp,
    urlencode_postdata,
    xpath_text,
)


class NiconicoIE(InfoExtractor):
    IE_NAME = 'niconico'
    IE_DESC = 'ニコニコ動画'

    _TESTS = [{
        'url': 'http://www.nicovideo.jp/watch/sm22312215',
        'md5': 'd1a75c0823e2f629128c43e1212760f9',
        'info_dict': {
            'id': 'sm22312215',
            'ext': 'mp4',
            'title': 'Big Buck Bunny',
            'thumbnail': r're:https?://.*',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': int,  # timestamp is unstable
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
            'duration': 33,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # Files downloaded with and without credentials differ, so omit
        # the md5 field
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
        'info_dict': {
            'id': 'nm14296458',
            'ext': 'swf',
            'title': '【鏡音リン】Dance on media【オリジナル】take2!',
            'description': 'md5:689f066d74610b3b22e0f1739add0f58',
            'thumbnail': r're:https?://.*',
            'uploader': 'りょうた',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
            'duration': 209,
        },
        'skip': 'Requires an account',
    }, {
        # video exists but is marked as "deleted"
        # md5 is unstable
        'url': 'http://www.nicovideo.jp/watch/sm10000',
        'info_dict': {
            'id': 'sm10000',
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'thumbnail': r're:https?://.*',
            'upload_date': '20071224',
            'timestamp': int,  # timestamp field has different value if logged in
            'duration': 304,
            'view_count': int,
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://www.nicovideo.jp/watch/so22543406',
        'info_dict': {
            'id': '1388129933',
            'ext': 'mp4',
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'thumbnail': r're:https?://.*',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',
            'uploader_id': '312',
        },
        'skip': 'The viewing period of the video you were searching for has expired.',
    }, {
        # video not available via `getflv`; "old" HTML5 video
        'url': 'http://www.nicovideo.jp/watch/sm1151009',
        'md5': '8fa81c364eb619d4085354eab075598a',
        'info_dict': {
            'id': 'sm1151009',
            'ext': 'mp4',
            'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
            'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7',
            'thumbnail': r're:https?://.*',
            'duration': 184,
            'timestamp': 1190868283,
            'upload_date': '20070927',
            'uploader': 'denden2',
            'uploader_id': '1392194',
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # "New" HTML5 video
        # md5 is unstable
        'url': 'http://www.nicovideo.jp/watch/sm31464864',
        'info_dict': {
            'id': 'sm31464864',
            'ext': 'mp4',
            'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
            'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
            'timestamp': 1498514060,
            'upload_date': '20170626',
            'uploader': 'ゲスト',
            'uploader_id': '40826363',
            'thumbnail': r're:https?://.*',
            'duration': 198,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # Video without owner
        'url': 'http://www.nicovideo.jp/watch/sm18238488',
        'md5': 'd265680a1f92bdcbbd2a507fc9e78a9e',
        'info_dict': {
            'id': 'sm18238488',
            'ext': 'mp4',
            'title': '【実写版】ミュータントタートルズ',
            'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
            'timestamp': 1341160408,
            'upload_date': '20120701',
            'uploader': None,
            'uploader_id': None,
            'thumbnail': r're:https?://.*',
            'duration': 5271,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
        'only_matching': True,
    }]

    _VALID_URL = r'https?://(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'

    def _real_initialize(self):
        self._login()

    def _login(self):
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if not username:
            return True

        # Log in
        login_ok = True
        login_form_strs = {
            'mail_tel': username,
            'password': password,
        }
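        # A rejected login is signalled by message=cant_login in the query
        # string of the URL the login request gets redirected to.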
        urlh = self._request_webpage(
            'https://account.nicovideo.jp/api/v1/login', None,
            note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata(login_form_strs))
        if urlh is False:
            login_ok = False
        else:
            parts = compat_urlparse.urlparse(urlh.geturl())
            if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login':
                login_ok = False
        if not login_ok:
            self._downloader.report_warning('unable to log in: bad username or password')
        return login_ok

    def _extract_format_for_quality(self, api_data, video_id, audio_quality, video_quality):
        def yesno(boolean):
            return 'yes' if boolean else 'no'

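        # Each audio/video quality pair needs its own session on the DMC
        # endpoint: POST a session description assembled from
        # dmcInfo.session_api and use the content_uri returned in the
        # response as the download URL for this format.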
        session_api_data = api_data['video']['dmcInfo']['session_api']
        session_api_endpoint = session_api_data['urls'][0]

        format_id = '-'.join(map(lambda s: remove_start(s['id'], 'archive_'), [video_quality, audio_quality]))

        session_response = self._download_json(
            session_api_endpoint['url'], video_id,
            query={'_format': 'json'},
            headers={'Content-Type': 'application/json'},
            note='Downloading JSON metadata for %s' % format_id,
            data=json.dumps({
                'session': {
                    'client_info': {
                        'player_id': session_api_data['player_id'],
                    },
                    'content_auth': {
                        'auth_type': session_api_data['auth_types'][session_api_data['protocols'][0]],
                        'content_key_timeout': session_api_data['content_key_timeout'],
                        'service_id': 'nicovideo',
                        'service_user_id': session_api_data['service_user_id']
                    },
                    'content_id': session_api_data['content_id'],
                    'content_src_id_sets': [{
                        'content_src_ids': [{
                            'src_id_to_mux': {
                                'audio_src_ids': [audio_quality['id']],
                                'video_src_ids': [video_quality['id']],
                            }
                        }]
                    }],
                    'content_type': 'movie',
                    'content_uri': '',
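                    # heartbeat_lifetime is presumably how long the server
                    # keeps the session alive between keep-alive pings;
                    # nothing in this extractor sends such pings itself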
                    'keep_method': {
                        'heartbeat': {
                            'lifetime': session_api_data['heartbeat_lifetime']
                        }
                    },
                    'priority': session_api_data['priority'],
                    'protocol': {
                        'name': 'http',
                        'parameters': {
                            'http_parameters': {
                                'parameters': {
                                    'http_output_download_parameters': {
                                        'use_ssl': yesno(session_api_endpoint['is_ssl']),
                                        'use_well_known_port': yesno(session_api_endpoint['is_well_known_port']),
                                    }
                                }
                            }
                        }
                    },
                    'recipe_id': session_api_data['recipe_id'],
                    'session_operation_auth': {
                        'session_operation_auth_by_signature': {
                            'signature': session_api_data['signature'],
                            'token': session_api_data['token'],
                        }
                    },
                    'timing_constraint': 'unlimited'
                }
            }))

        resolution = video_quality.get('resolution', {})

        return {
            'url': session_response['data']['session']['content_uri'],
            'format_id': format_id,
            'ext': 'mp4',  # the Session API is used by the HTML5 player, which always serves mp4
            'abr': float_or_none(audio_quality.get('bitrate'), 1000),
            'vbr': float_or_none(video_quality.get('bitrate'), 1000),
            'height': resolution.get('height'),
            'width': resolution.get('width'),
        }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Get video webpage. We are not actually interested in it for normal
        # cases, but need the cookies in order to be able to download the
        # info webpage
        webpage, handle = self._download_webpage_handle(
            'http://www.nicovideo.jp/watch/' + video_id, video_id)
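        # 'so' (channel) ids redirect to a watch page whose URL carries the
        # canonical numeric id; re-extract the id from the final URL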
        if video_id.startswith('so'):
            video_id = self._match_id(handle.geturl())

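        # The HTML5 watch page embeds its player initialization JSON in the
        # data-api-data attribute; fall back to an empty dict if it is absent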
        api_data = self._parse_json(self._html_search_regex(
            'data-api-data="([^"]+)"', webpage,
            'API data', default='{}'), video_id)

        def _format_id_from_url(video_url):
            return 'economy' if video_url.endswith('low') else 'normal'

        try:
            video_real_url = api_data['video']['smileInfo']['url']
        except KeyError:  # Flash videos
            # Get flv info
            flv_info_webpage = self._download_webpage(
                'http://flapi.nicovideo.jp/api/getflv/' + video_id + '?as3=1',
                video_id, 'Downloading flv info')

            flv_info = compat_urlparse.parse_qs(flv_info_webpage)
            if 'url' not in flv_info:
                if 'deleted' in flv_info:
                    raise ExtractorError('The video has been deleted.',
                                         expected=True)
                elif 'closed' in flv_info:
                    raise ExtractorError('Niconico videos now require logging in',
                                         expected=True)
                elif 'error' in flv_info:
                    raise ExtractorError('%s reports error: %s' % (
                        self.IE_NAME, flv_info['error'][0]), expected=True)
                else:
                    raise ExtractorError('Unable to find video URL')

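            # getthumbinfo supplies the metadata (e.g. movie_type, used below
            # as the file extension) that the getflv response does not include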
            video_info_xml = self._download_xml(
                'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id,
                video_id, note='Downloading video info page')

            def get_video_info(items):
                if not isinstance(items, list):
                    items = [items]
                for item in items:
                    ret = xpath_text(video_info_xml, './/' + item)
                    if ret:
                        return ret

            video_real_url = flv_info['url'][0]

            extension = get_video_info('movie_type')
            if not extension:
                extension = determine_ext(video_real_url)

            formats = [{
                'url': video_real_url,
                'ext': extension,
                'format_id': _format_id_from_url(video_real_url),
            }]
        else:
            formats = []

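            # HTML5 pages expose dmcInfo for videos served through the DMC
            # delivery system; pages without it only offer the smileInfo URL
            # extracted above ("old" HTML5 videos)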
            dmc_info = api_data['video'].get('dmcInfo')
            if dmc_info:  # "New" HTML5 videos
                quality_info = dmc_info['quality']
                for audio_quality in quality_info['audios']:
                    for video_quality in quality_info['videos']:
                        if not audio_quality['available'] or not video_quality['available']:
                            continue
                        formats.append(self._extract_format_for_quality(
                            api_data, video_id, audio_quality, video_quality))

                self._sort_formats(formats)
            else:  # "Old" HTML5 videos
                formats = [{
                    'url': video_real_url,
                    'ext': 'mp4',
                    'format_id': _format_id_from_url(video_real_url),
                }]

        def get_video_info(items):
            return dict_get(api_data['video'], items)

        # Start extracting information
        title = get_video_info('title')
        if not title:
            title = self._og_search_title(webpage, default=None)
        if not title:
            title = self._html_search_regex(
                r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
                webpage, 'video title')

        watch_api_data_string = self._html_search_regex(
            r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
            webpage, 'watch api data', default=None)
        watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
        video_detail = watch_api_data.get('videoDetail', {})

        thumbnail = (
            get_video_info(['thumbnail_url', 'thumbnailURL']) or
            self._html_search_meta('image', webpage, 'thumbnail', default=None) or
            video_detail.get('thumbnail'))

        description = get_video_info('description')

        timestamp = (parse_iso8601(get_video_info('first_retrieve')) or
                     unified_timestamp(get_video_info('postedDateTime')))
        if not timestamp:
            match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
            if match:
                timestamp = parse_iso8601(match.replace('+', ':00+'))
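        # videoDetail.postedAt appears to be local Japanese time (UTC+9) in
        # 'YYYY/MM/DD hh:mm:ss' form, hence the slash fixup and fixed offset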
        if not timestamp and video_detail.get('postedAt'):
            timestamp = parse_iso8601(
                video_detail['postedAt'].replace('/', '-'),
                delimiter=' ', timezone=datetime.timedelta(hours=9))

        view_count = int_or_none(get_video_info(['view_counter', 'viewCount']))
        if not view_count:
            match = self._html_search_regex(
                r'>Views: <strong[^>]*>([^<]+)</strong>',
                webpage, 'view count', default=None)
            if match:
                view_count = int_or_none(match.replace(',', ''))
            view_count = view_count or video_detail.get('viewCount')

        comment_count = (int_or_none(get_video_info('comment_num')) or
                         video_detail.get('commentCount') or
                         try_get(api_data, lambda x: x['thread']['commentCount']))
        if not comment_count:
            match = self._html_search_regex(
                r'>Comments: <strong[^>]*>([^<]+)</strong>',
                webpage, 'comment count', default=None)
            if match:
                comment_count = int_or_none(match.replace(',', ''))

        duration = (parse_duration(
            get_video_info('length') or
            self._html_search_meta(
                'video:duration', webpage, 'video duration', default=None)) or
            video_detail.get('length') or
            get_video_info('duration'))

        webpage_url = get_video_info('watch_url') or url

        # Note: cannot use api_data.get('owner', {}) because owner may be set to "null"
        # in the JSON, which will cause None to be returned instead of {}.
        owner = try_get(api_data, lambda x: x.get('owner'), dict) or {}
        uploader_id = get_video_info(['ch_id', 'user_id']) or owner.get('id')
        uploader = get_video_info(['ch_name', 'user_nickname']) or owner.get('nickname')

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'description': description,
            'uploader': uploader,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'comment_count': comment_count,
            'duration': duration,
            'webpage_url': webpage_url,
        }


class NiconicoPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/mylist/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.nicovideo.jp/mylist/27411728',
        'info_dict': {
            'id': '27411728',
            'title': 'AKB48のオールナイトニッポン',
        },
        'playlist_mincount': 225,
    }

    def _real_extract(self, url):
        list_id = self._match_id(url)
        webpage = self._download_webpage(url, list_id)

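        # The mylist page inlines its entries as a JSON array passed to
        # Mylist.preload(); extract it and hand each video over to NiconicoIE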
        entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);',
                                          webpage, 'entries')
        entries = json.loads(entries_json)
        entries = [{
            '_type': 'url',
            'ie_key': NiconicoIE.ie_key(),
            'url': ('http://www.nicovideo.jp/watch/%s' %
                    entry['item_data']['video_id']),
        } for entry in entries]

        return {
            '_type': 'playlist',
            'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
            'id': list_id,
            'entries': entries,
        }