# coding: utf-8
# youtube_dl/extractor/niconico.py (blob df7f528be2d4c8da7cfe826609608754f2f8e088)
from __future__ import unicode_literals

import datetime
import json

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urlparse,
)
from ..utils import (
    determine_ext,
    dict_get,
    ExtractorError,
    float_or_none,
    int_or_none,
    parse_duration,
    parse_iso8601,
    remove_start,
    try_get,
    unified_timestamp,
    urlencode_postdata,
    xpath_text,
)
class NiconicoIE(InfoExtractor):
    IE_NAME = 'niconico'
    IE_DESC = 'ニコニコ動画'

    _TESTS = [{
        'url': 'http://www.nicovideo.jp/watch/sm22312215',
        'md5': 'd1a75c0823e2f629128c43e1212760f9',
        'info_dict': {
            'title': 'Big Buck Bunny',
            'thumbnail': r're:https?://.*',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': int,  # timestamp is unstable
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
        },
        'skip': 'Requires an account',
    }, {
        # Files downloaded with and without credentials differ, so the md5 field is omitted
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
        'info_dict': {
            'title': '【鏡音リン】Dance on media【オリジナル】take2!',
            'description': 'md5:689f066d74610b3b22e0f1739add0f58',
            'thumbnail': r're:https?://.*',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
        },
        'skip': 'Requires an account',
    }, {
        # Video exists but is marked as "deleted"
        'url': 'http://www.nicovideo.jp/watch/sm10000',
        'info_dict': {
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'thumbnail': r're:https?://.*',
            'upload_date': '20071224',
            'timestamp': int,  # timestamp field has a different value if logged in
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://www.nicovideo.jp/watch/so22543406',
        'info_dict': {
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'thumbnail': r're:https?://.*',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',
        },
        'skip': 'The viewing period of the video you were searching for has expired.',
    }, {
        # video not available via `getflv`; "old" HTML5 video
        'url': 'http://www.nicovideo.jp/watch/sm1151009',
        'md5': '8fa81c364eb619d4085354eab075598a',
        'info_dict': {
            'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
            'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7',
            'thumbnail': r're:https?://.*',
            'timestamp': 1190868283,
            'upload_date': '20070927',
            'uploader': 'denden2',
            'uploader_id': '1392194',
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://www.nicovideo.jp/watch/sm31464864',
        'info_dict': {
            'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
            'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
            'timestamp': 1498514060,
            'upload_date': '20170626',
            'uploader_id': '40826363',
            'thumbnail': r're:https?://.*',
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # Video without owner
        'url': 'http://www.nicovideo.jp/watch/sm18238488',
        'md5': 'd265680a1f92bdcbbd2a507fc9e78a9e',
        'info_dict': {
            'title': '【実写版】ミュータントタートルズ',
            'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
            'timestamp': 1341160408,
            'upload_date': '20120701',
            'thumbnail': r're:https?://.*',
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
        'only_matching': True,
    }]

    _VALID_URL = r'https?://(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'
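
    # Illustration only (not part of the original extractor): _VALID_URL accepts
    # the prefixed IDs used in the tests above ('sm22312215', 'nm14296458',
    # 'so22543406') as well as bare numeric IDs, e.g.
    #
    #   import re
    #   re.match(NiconicoIE._VALID_URL,
    #            'http://www.nicovideo.jp/watch/sm22312215').group('id')
    #   # -> 'sm22312215'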

    def _real_initialize(self):
        self._login()

    def _login(self):
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if not username:
            return True

        # Log in
        login_ok = True
        login_form_strs = {
            'mail_tel': username,
            'password': password,
        }
        urlh = self._request_webpage(
            'https://account.nicovideo.jp/api/v1/login', None,
            note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata(login_form_strs))
        parts = compat_urlparse.urlparse(urlh.geturl())
        if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login':
            login_ok = False
        if not login_ok:
            self._downloader.report_warning('unable to log in: bad username or password')
        return login_ok

    def _extract_format_for_quality(self, api_data, video_id, audio_quality, video_quality):
        def yesno(boolean):
            return 'yes' if boolean else 'no'

        session_api_data = api_data['video']['dmcInfo']['session_api']
        session_api_endpoint = session_api_data['urls'][0]

        format_id = '-'.join(map(lambda s: remove_start(s['id'], 'archive_'), [video_quality, audio_quality]))
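        # Illustration with hypothetical quality ids (not taken from the API):
        # a video id 'archive_h264_360p' and an audio id 'archive_aac_64kbps'
        # would produce the format_id 'h264_360p-aac_64kbps'.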

        session_response = self._download_json(
            session_api_endpoint['url'], video_id,
            query={'_format': 'json'},
            headers={'Content-Type': 'application/json'},
            note='Downloading JSON metadata for %s' % format_id,
            data=json.dumps({
                'session': {
                    'client_info': {
                        'player_id': session_api_data['player_id'],
                    },
                    'content_auth': {
                        'auth_type': session_api_data['auth_types'][session_api_data['protocols'][0]],
                        'content_key_timeout': session_api_data['content_key_timeout'],
                        'service_id': 'nicovideo',
                        'service_user_id': session_api_data['service_user_id']
                    },
                    'content_id': session_api_data['content_id'],
                    'content_src_id_sets': [{
                        'content_src_ids': [{
                            'src_id_to_mux': {
                                'audio_src_ids': [audio_quality['id']],
                                'video_src_ids': [video_quality['id']],
                            }
                        }]
                    }],
                    'content_type': 'movie',
                    'content_uri': '',
                    'keep_method': {
                        'heartbeat': {
                            'lifetime': session_api_data['heartbeat_lifetime']
                        }
                    },
                    'priority': session_api_data['priority'],
                    'protocol': {
                        'name': 'http',
                        'parameters': {
                            'http_parameters': {
                                'parameters': {
                                    'http_output_download_parameters': {
                                        'use_ssl': yesno(session_api_endpoint['is_ssl']),
                                        'use_well_known_port': yesno(session_api_endpoint['is_well_known_port']),
                                    }
                                }
                            }
                        }
                    },
                    'recipe_id': session_api_data['recipe_id'],
                    'session_operation_auth': {
                        'session_operation_auth_by_signature': {
                            'signature': session_api_data['signature'],
                            'token': session_api_data['token'],
                        }
                    },
                    'timing_constraint': 'unlimited'
                }
            }).encode())

        resolution = video_quality.get('resolution', {})

        return {
            'url': session_response['data']['session']['content_uri'],
            'format_id': format_id,
            'ext': 'mp4',  # the Session API is used for HTML5 playback, which always serves mp4
            'abr': float_or_none(audio_quality.get('bitrate'), 1000),
            'vbr': float_or_none(video_quality.get('bitrate'), 1000),
            'height': resolution.get('height'),
            'width': resolution.get('width'),
        }
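
    # Sketch of the assumed response shape, inferred from the lookup above
    # (illustration only): the DMC session endpoint replies with JSON roughly like
    #   {"data": {"session": {"content_uri": "https://...", ...}}}
    # so each available (video, audio) pairing yields one format dict.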

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Get video webpage. We are not actually interested in it for normal
        # cases, but need the cookies in order to be able to download the
        # info webpage
        webpage, handle = self._download_webpage_handle(
            'http://www.nicovideo.jp/watch/' + video_id, video_id)
        if video_id.startswith('so'):
            video_id = self._match_id(handle.geturl())

        api_data = self._parse_json(self._html_search_regex(
            'data-api-data="([^"]+)"', webpage,
            'API data', default='{}'), video_id)
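
        # Assumed shape of the embedded JSON, inferred from the keys read below
        # (illustration only):
        #   {"video": {"smileInfo": {"url": ...}, "dmcInfo": {...}},
        #    "owner": {...}, "thread": {"commentCount": ...}}
        # Flash-era watch pages do not embed it, which the KeyError path handles.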

        def _format_id_from_url(video_url):
            return 'economy' if video_url.endswith('low') else 'normal'

        try:
            video_real_url = api_data['video']['smileInfo']['url']
        except KeyError:  # Flash videos
            flv_info_webpage = self._download_webpage(
                'http://flapi.nicovideo.jp/api/getflv/' + video_id + '?as3=1',
                video_id, 'Downloading flv info')

            flv_info = compat_urlparse.parse_qs(flv_info_webpage)
            if 'url' not in flv_info:
                if 'deleted' in flv_info:
                    raise ExtractorError('The video has been deleted.',
                                         expected=True)
                elif 'closed' in flv_info:
                    raise ExtractorError('Niconico videos now require logging in',
                                         expected=True)
                elif 'error' in flv_info:
                    raise ExtractorError('%s reports error: %s' % (
                        self.IE_NAME, flv_info['error'][0]), expected=True)
                else:
                    raise ExtractorError('Unable to find video URL')

            video_info_xml = self._download_xml(
                'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id,
                video_id, note='Downloading video info page')

            def get_video_info(items):
                if not isinstance(items, list):
                    items = [items]
                for item in items:
                    ret = xpath_text(video_info_xml, './/' + item)
                    if ret:
                        return ret

            video_real_url = flv_info['url'][0]

            extension = get_video_info('movie_type')
            if not extension:
                extension = determine_ext(video_real_url)

            formats = [{
                'url': video_real_url,
                'ext': extension,
                'format_id': _format_id_from_url(video_real_url),
            }]
        else:
            formats = []

            dmc_info = api_data['video'].get('dmcInfo')
            if dmc_info:  # "New" HTML5 videos
                quality_info = dmc_info['quality']
                for audio_quality in quality_info['audios']:
                    for video_quality in quality_info['videos']:
                        if not audio_quality['available'] or not video_quality['available']:
                            continue
                        formats.append(self._extract_format_for_quality(
                            api_data, video_id, audio_quality, video_quality))

                self._sort_formats(formats)
            else:  # "Old" HTML5 videos
                formats = [{
                    'url': video_real_url,
                    'ext': 'mp4',
                    'format_id': _format_id_from_url(video_real_url),
                }]

            def get_video_info(items):
                return dict_get(api_data['video'], items)
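
        # At this point `formats` has been filled by one of three paths: the DMC
        # session API ("new" HTML5), the single smileInfo URL ("old" HTML5), or
        # the legacy getflv Flash API, and `get_video_info` is bound to the
        # metadata source that matches the chosen path.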

        # Start extracting information
        title = get_video_info('title')
        if not title:
            title = self._og_search_title(webpage, default=None)
        if not title:
            title = self._html_search_regex(
                r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
                webpage, 'video title')

        watch_api_data_string = self._html_search_regex(
            r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
            webpage, 'watch api data', default=None)
        watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
        video_detail = watch_api_data.get('videoDetail', {})

        thumbnail = (
            get_video_info(['thumbnail_url', 'thumbnailURL']) or
            self._html_search_meta('image', webpage, 'thumbnail', default=None) or
            video_detail.get('thumbnail'))

        description = get_video_info('description')

        timestamp = (parse_iso8601(get_video_info('first_retrieve')) or
                     unified_timestamp(get_video_info('postedDateTime')))
        if not timestamp:
            match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
            if match:
                timestamp = parse_iso8601(match.replace('+', ':00+'))
        if not timestamp and video_detail.get('postedAt'):
            timestamp = parse_iso8601(
                video_detail['postedAt'].replace('/', '-'),
                delimiter=' ', timezone=datetime.timedelta(hours=9))

        view_count = int_or_none(get_video_info(['view_counter', 'viewCount']))
        if not view_count:
            match = self._html_search_regex(
                r'>Views: <strong[^>]*>([^<]+)</strong>',
                webpage, 'view count', default=None)
            if match:
                view_count = int_or_none(match.replace(',', ''))
        view_count = view_count or video_detail.get('viewCount')

        comment_count = (int_or_none(get_video_info('comment_num')) or
                         video_detail.get('commentCount') or
                         try_get(api_data, lambda x: x['thread']['commentCount']))
        if not comment_count:
            match = self._html_search_regex(
                r'>Comments: <strong[^>]*>([^<]+)</strong>',
                webpage, 'comment count', default=None)
            if match:
                comment_count = int_or_none(match.replace(',', ''))

        duration = (parse_duration(
            get_video_info('length') or
            self._html_search_meta(
                'video:duration', webpage, 'video duration', default=None)) or
            video_detail.get('length') or
            get_video_info('duration'))

        webpage_url = get_video_info('watch_url') or url

        # Note: cannot use api_data.get('owner', {}) because owner may be set to "null"
        # in the JSON, which will cause None to be returned instead of {}.
        owner = try_get(api_data, lambda x: x.get('owner'), dict) or {}
        uploader_id = get_video_info(['ch_id', 'user_id']) or owner.get('id')
        uploader = get_video_info(['ch_name', 'user_nickname']) or owner.get('nickname')

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'description': description,
            'uploader': uploader,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'comment_count': comment_count,
            'duration': duration,
            'webpage_url': webpage_url,
        }


class NiconicoPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/mylist/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.nicovideo.jp/mylist/27411728',
        'info_dict': {
            'id': '27411728',
            'title': 'AKB48のオールナイトニッポン',
        },
        'playlist_mincount': 225,
    }

    def _real_extract(self, url):
        list_id = self._match_id(url)
        webpage = self._download_webpage(url, list_id)

        entries_json = self._search_regex(
            r'Mylist\.preload\(\d+, (\[.*\])\);', webpage, 'entries')
        entries = json.loads(entries_json)
        entries = [{
            '_type': 'url',
            'ie_key': NiconicoIE.ie_key(),
            'url': ('http://www.nicovideo.jp/watch/%s' %
                    entry['item_data']['video_id']),
        } for entry in entries]

        return {
            '_type': 'playlist',
            'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
            'id': list_id,
            'entries': entries,
        }
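

# Usage sketch (not part of this module): both extractors are normally driven
# through youtube-dl's public API, e.g.
#
#   from youtube_dl import YoutubeDL
#   with YoutubeDL() as ydl:
#       info = ydl.extract_info(
#           'http://www.nicovideo.jp/watch/sm22312215', download=False)
#       print(info['title'], info.get('uploader'))
#
# Note that most of the test URLs above also require account credentials
# (see _NETRC_MACHINE and the 'Requires an account' skips).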