# coding: utf-8
from __future__ import unicode_literals

import json
import datetime

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urlparse,
)
from ..utils import (
    determine_ext,
    dict_get,
    ExtractorError,
    int_or_none,
    float_or_none,
    parse_duration,
    parse_iso8601,
    remove_start,
    try_get,
    unified_timestamp,
    urlencode_postdata,
    xpath_text,
)


class NiconicoIE(InfoExtractor):
    IE_NAME = 'niconico'
    IE_DESC = 'ニコニコ動画'

    _TESTS = [{
        'url': 'http://www.nicovideo.jp/watch/sm22312215',
        'md5': 'd1a75c0823e2f629128c43e1212760f9',
        'info_dict': {
            'id': 'sm22312215',
            'ext': 'mp4',
            'title': 'Big Buck Bunny',
            'thumbnail': r're:https?://.*',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': 1385182762,
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
            'duration': 33,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # Files downloaded with and without credentials are different, so omit
        # the md5 field
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
        'info_dict': {
            'id': 'nm14296458',
            'ext': 'swf',
            'title': '【鏡音リン】Dance on media【オリジナル】take2!',
            'description': 'md5:689f066d74610b3b22e0f1739add0f58',
            'thumbnail': r're:https?://.*',
            'uploader': 'りょうた',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
            'duration': 209,
        },
        'skip': 'Requires an account',
    }, {
        # video exists but is marked as "deleted"
        # md5 is unstable
        'url': 'http://www.nicovideo.jp/watch/sm10000',
        'info_dict': {
            'id': 'sm10000',
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'thumbnail': r're:https?://.*',
            'upload_date': '20071224',
            'timestamp': int,  # timestamp field has different value if logged in
            'duration': 304,
            'view_count': int,
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://www.nicovideo.jp/watch/so22543406',
        'info_dict': {
            'id': '1388129933',
            'ext': 'mp4',
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'thumbnail': r're:https?://.*',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',
            'uploader_id': '312',
        },
        'skip': 'The viewing period of the video you were searching for has expired.',
    }, {
        # video not available via `getflv`; "old" HTML5 video
        'url': 'http://www.nicovideo.jp/watch/sm1151009',
        'md5': '8fa81c364eb619d4085354eab075598a',
        'info_dict': {
            'id': 'sm1151009',
            'ext': 'mp4',
            'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
            'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7',
            'thumbnail': r're:https?://.*',
            'duration': 184,
            'timestamp': 1190868283,
            'upload_date': '20070927',
            'uploader': 'denden2',
            'uploader_id': '1392194',
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # "New" HTML5 video
        'url': 'http://www.nicovideo.jp/watch/sm31464864',
        'md5': '351647b4917660986dc0fa8864085135',
        'info_dict': {
            'id': 'sm31464864',
            'ext': 'mp4',
            'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
            'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
            'timestamp': 1498514060,
            'upload_date': '20170626',
            'uploader': 'ゲス',
            'uploader_id': '40826363',
            'thumbnail': r're:https?://.*',
            'duration': 198,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
        'only_matching': True,
    }]

    _VALID_URL = r'https?://(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'

    def _real_initialize(self):
        self._login()

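    # Log in by POSTing the credentials to the account API endpoint; a failed
    # login redirects back with message=cant_login in the query string, which
    # is what the check below looks for.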
    def _login(self):
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if not username:
            return True

        # Log in
        login_ok = True
        login_form_strs = {
            'mail_tel': username,
            'password': password,
        }
        urlh = self._request_webpage(
            'https://account.nicovideo.jp/api/v1/login', None,
            note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata(login_form_strs))
        if urlh is False:
            login_ok = False
        else:
            parts = compat_urlparse.urlparse(urlh.geturl())
            if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login':
                login_ok = False
        if not login_ok:
            self._downloader.report_warning('unable to log in: bad username or password')
        return login_ok

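    # "New" HTML5 videos are served through the DMC session API advertised in
    # dmcInfo: a JSON session description is POSTed to the first session URL
    # and the response carries the actual content_uri to download. One format
    # entry is built per (video quality, audio quality) pair.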
    def _extract_format_for_quality(self, api_data, video_id, audio_quality, video_quality):
        def yesno(boolean):
            return 'yes' if boolean else 'no'

        session_api_data = api_data['video']['dmcInfo']['session_api']
        session_api_endpoint = session_api_data['urls'][0]

        format_id = '-'.join(map(lambda s: remove_start(s['id'], 'archive_'), [video_quality, audio_quality]))

        session_response = self._download_json(
            session_api_endpoint['url'], video_id,
            query={'_format': 'json'},
            headers={'Content-Type': 'application/json'},
            note='Downloading JSON metadata for %s' % format_id,
            data=json.dumps({
                'session': {
                    'client_info': {
                        'player_id': session_api_data['player_id'],
                    },
                    'content_auth': {
                        'auth_type': session_api_data['auth_types'][session_api_data['protocols'][0]],
                        'content_key_timeout': session_api_data['content_key_timeout'],
                        'service_id': 'nicovideo',
                        'service_user_id': session_api_data['service_user_id']
                    },
                    'content_id': session_api_data['content_id'],
                    'content_src_id_sets': [{
                        'content_src_ids': [{
                            'src_id_to_mux': {
                                'audio_src_ids': [audio_quality['id']],
                                'video_src_ids': [video_quality['id']],
                            }
                        }]
                    }],
                    'content_type': 'movie',
                    'content_uri': '',
                    'keep_method': {
                        'heartbeat': {
                            'lifetime': session_api_data['heartbeat_lifetime']
                        }
                    },
                    'priority': session_api_data['priority'],
                    'protocol': {
                        'name': 'http',
                        'parameters': {
                            'http_parameters': {
                                'parameters': {
                                    'http_output_download_parameters': {
                                        'use_ssl': yesno(session_api_endpoint['is_ssl']),
                                        'use_well_known_port': yesno(session_api_endpoint['is_well_known_port']),
                                    }
                                }
                            }
                        }
                    },
                    'recipe_id': session_api_data['recipe_id'],
                    'session_operation_auth': {
                        'session_operation_auth_by_signature': {
                            'signature': session_api_data['signature'],
                            'token': session_api_data['token'],
                        }
                    },
                    'timing_constraint': 'unlimited'
                }
            }))

        resolution = video_quality.get('resolution', {})

        return {
            'url': session_response['data']['session']['content_uri'],
            'format_id': format_id,
            'ext': 'mp4',  # Session API is used for HTML5, which always serves mp4
            'abr': float_or_none(audio_quality.get('bitrate'), 1000),
            'vbr': float_or_none(video_quality.get('bitrate'), 1000),
            'height': resolution.get('height'),
            'width': resolution.get('width'),
        }

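    # Extraction outline: fetch the watch page (mainly for cookies), parse the
    # JSON embedded in its data-api-data attribute, then pick a delivery path:
    # smileInfo URL for plain HTML5 videos, the getflv API as a fallback for
    # Flash-era videos, and the DMC session API when dmcInfo is present.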
    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Get video webpage. We are not actually interested in it for normal
        # cases, but need the cookies in order to be able to download the
        # info webpage
        webpage, handle = self._download_webpage_handle(
            'http://www.nicovideo.jp/watch/' + video_id, video_id)
        if video_id.startswith('so'):
            video_id = self._match_id(handle.geturl())

        api_data = self._parse_json(self._html_search_regex(
            'data-api-data="([^"]+)"', webpage,
            'API data', default='{}'), video_id)

        def _format_id_from_url(video_url):
            return 'economy' if video_url.endswith('low') else 'normal'

        try:
            video_real_url = api_data['video']['smileInfo']['url']
        except KeyError:  # Flash videos
            # Get flv info
            flv_info_webpage = self._download_webpage(
                'http://flapi.nicovideo.jp/api/getflv/' + video_id + '?as3=1',
                video_id, 'Downloading flv info')

            flv_info = compat_urlparse.parse_qs(flv_info_webpage)
            if 'url' not in flv_info:
                if 'deleted' in flv_info:
                    raise ExtractorError('The video has been deleted.',
                                         expected=True)
                elif 'closed' in flv_info:
                    raise ExtractorError('Niconico videos now require logging in',
                                         expected=True)
                elif 'error' in flv_info:
                    raise ExtractorError('%s reports error: %s' % (
                        self.IE_NAME, flv_info['error'][0]), expected=True)
                else:
                    raise ExtractorError('Unable to find video URL')

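            # For Flash-era videos the metadata does not come from the watch
            # page JSON, so query the getthumbinfo XML API instead.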
            video_info_xml = self._download_xml(
                'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id,
                video_id, note='Downloading video info page')

            def get_video_info(items):
                if not isinstance(items, list):
                    items = [items]
                for item in items:
                    ret = xpath_text(video_info_xml, './/' + item)
                    if ret:
                        return ret

            video_real_url = flv_info['url'][0]

            extension = get_video_info('movie_type')
            if not extension:
                extension = determine_ext(video_real_url)

            formats = [{
                'url': video_real_url,
                'ext': extension,
                'format_id': _format_id_from_url(video_real_url),
            }]
        else:
            formats = []

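            # Prefer DMC formats when the watch page advertises them; every
            # available audio/video quality combination becomes one format.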
            dmc_info = api_data['video'].get('dmcInfo')
            if dmc_info:  # "New" HTML5 videos
                quality_info = dmc_info['quality']
                for audio_quality in quality_info['audios']:
                    for video_quality in quality_info['videos']:
                        if not audio_quality['available'] or not video_quality['available']:
                            continue
                        formats.append(self._extract_format_for_quality(
                            api_data, video_id, audio_quality, video_quality))

                self._sort_formats(formats)
            else:  # "Old" HTML5 videos
                formats = [{
                    'url': video_real_url,
                    'ext': 'mp4',
                    'format_id': _format_id_from_url(video_real_url),
                }]

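        # From here on, metadata is read from the data-api-data JSON; this
        # helper replaces the XML-based one defined in the Flash fallback above.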
        def get_video_info(items):
            return dict_get(api_data['video'], items)

        # Start extracting information
        title = get_video_info('title')
        if not title:
            title = self._og_search_title(webpage, default=None)
        if not title:
            title = self._html_search_regex(
                r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
                webpage, 'video title')

        watch_api_data_string = self._html_search_regex(
            r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
            webpage, 'watch api data', default=None)
        watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
        video_detail = watch_api_data.get('videoDetail', {})

        thumbnail = (
            get_video_info(['thumbnail_url', 'thumbnailURL']) or
            self._html_search_meta('image', webpage, 'thumbnail', default=None) or
            video_detail.get('thumbnail'))

        description = get_video_info('description')

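        # postedAt appears to be local (JST) time, hence the fixed +9 hour
        # offset applied below when no other timestamp source is available.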
        timestamp = (parse_iso8601(get_video_info('first_retrieve')) or
                     unified_timestamp(get_video_info('postedDateTime')))
        if not timestamp:
            match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
            if match:
                timestamp = parse_iso8601(match.replace('+', ':00+'))
        if not timestamp and video_detail.get('postedAt'):
            timestamp = parse_iso8601(
                video_detail['postedAt'].replace('/', '-'),
                delimiter=' ', timezone=datetime.timedelta(hours=9))

        view_count = int_or_none(get_video_info(['view_counter', 'viewCount']))
        if not view_count:
            match = self._html_search_regex(
                r'>Views: <strong[^>]*>([^<]+)</strong>',
                webpage, 'view count', default=None)
            if match:
                view_count = int_or_none(match.replace(',', ''))
        view_count = view_count or video_detail.get('viewCount')

        comment_count = (int_or_none(get_video_info('comment_num')) or
                         video_detail.get('commentCount') or
                         try_get(api_data, lambda x: x['thread']['commentCount']))
        if not comment_count:
            match = self._html_search_regex(
                r'>Comments: <strong[^>]*>([^<]+)</strong>',
                webpage, 'comment count', default=None)
            if match:
                comment_count = int_or_none(match.replace(',', ''))

        duration = (parse_duration(
            get_video_info('length') or
            self._html_search_meta(
                'video:duration', webpage, 'video duration', default=None)) or
            video_detail.get('length') or
            get_video_info('duration'))

        webpage_url = get_video_info('watch_url') or url

        owner = api_data.get('owner', {})
        uploader_id = get_video_info(['ch_id', 'user_id']) or owner.get('id')
        uploader = get_video_info(['ch_name', 'user_nickname']) or owner.get('nickname')

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'description': description,
            'uploader': uploader,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'comment_count': comment_count,
            'duration': duration,
            'webpage_url': webpage_url,
        }


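# Mylist pages embed their entries as a JSON array passed to Mylist.preload();
# each item's video_id is turned into a watch URL handled by NiconicoIE.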
class NiconicoPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/mylist/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.nicovideo.jp/mylist/27411728',
        'info_dict': {
            'id': '27411728',
            'title': 'AKB48のオールナイトニッポン',
        },
        'playlist_mincount': 225,
    }

    def _real_extract(self, url):
        list_id = self._match_id(url)
        webpage = self._download_webpage(url, list_id)

        entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);',
                                          webpage, 'entries')
        entries = json.loads(entries_json)
        entries = [{
            '_type': 'url',
            'ie_key': NiconicoIE.ie_key(),
            'url': ('http://www.nicovideo.jp/watch/%s' %
                    entry['item_data']['video_id']),
        } for entry in entries]

        return {
            '_type': 'playlist',
            'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
            'id': list_id,
            'entries': entries,
        }