from __future__ import unicode_literals

import re
import json
import datetime

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    parse_duration,
    parse_iso8601,
    xpath_text,
)
class NiconicoIE(InfoExtractor):
        'url': 'http://www.nicovideo.jp/watch/sm22312215',
        'md5': 'd1a75c0823e2f629128c43e1212760f9',
            'title': 'Big Buck Bunny',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': 1385182762,
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
        # Files downloaded with and without credentials are different, so omit
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
            'title': '【鏡音リン】Dance on media【オリジナル】take2!',
            'description': 'md5:689f066d74610b3b22e0f1739add0f58',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
        # video exists but is marked as "deleted"
        'url': 'http://www.nicovideo.jp/watch/sm10000',
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'upload_date': '20071224',
            'timestamp': 1198527840,  # timestamp field has different value if logged in
        'url': 'http://www.nicovideo.jp/watch/so22543406',
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',
    _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'
    # Whether the downloader used authentication to download the video
    _AUTHENTICATED = False
    def _real_initialize(self):
        (username, password) = self._get_login_info()
        # No authentication to be performed
            'password': password,
        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # cannot handle non-ASCII unicode strings
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8')
        request = compat_urllib_request.Request(
            'https://secure.nicovideo.jp/secure/login', login_data)
        login_results = self._download_webpage(
            request, None, note='Logging in', errnote='Unable to log in')
        if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
        self._AUTHENTICATED = True
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Get video webpage. We are not actually interested in it for normal
        # cases, but need the cookies in order to be able to download the
        # requested video.
        webpage, handle = self._download_webpage_handle(
            'http://www.nicovideo.jp/watch/' + video_id, video_id)
        if video_id.startswith('so'):
            video_id = self._match_id(handle.geturl())
        video_info = self._download_xml(
            'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
            note='Downloading video info page')
        if self._AUTHENTICATED:
            flv_info_webpage = self._download_webpage(
                'http://flapi.nicovideo.jp/api/getflv/' + video_id + '?as3=1',
                video_id, 'Downloading flv info')
            # Get external player info
            ext_player_info = self._download_webpage(
                'http://ext.nicovideo.jp/thumb_watch/' + video_id, video_id)
            thumb_play_key = self._search_regex(
                r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey')
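            # The extracted thumbPlayKey is POSTed back to thumb_watch to obtain the flv info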
            flv_info_data = compat_urllib_parse.urlencode({
            flv_info_request = compat_urllib_request.Request(
                'http://ext.nicovideo.jp/thumb_watch', flv_info_data,
                {'Content-Type': 'application/x-www-form-urlencoded'})
            flv_info_webpage = self._download_webpage(
                flv_info_request, video_id,
                note='Downloading flv info', errnote='Unable to download flv info')
        flv_info = compat_urlparse.parse_qs(flv_info_webpage)
        if 'url' not in flv_info:
            if 'deleted' in flv_info:
                raise ExtractorError('The video has been deleted.', expected=True)
            raise ExtractorError('Unable to find video URL')
        video_real_url = flv_info['url'][0]
        # Start extracting information
        title = xpath_text(video_info, './/title')
        if not title:
            title = self._og_search_title(webpage, default=None)
        if not title:
            title = self._html_search_regex(
                r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
                webpage, 'video title')
        watch_api_data_string = self._html_search_regex(
            r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
            webpage, 'watch api data', default=None)
        watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
        video_detail = watch_api_data.get('videoDetail', {})
        extension = xpath_text(video_info, './/movie_type')
        if not extension:
            extension = determine_ext(video_real_url)
        video_format = extension.upper()
        thumbnail = (
            xpath_text(video_info, './/thumbnail_url') or
            self._html_search_meta('image', webpage, 'thumbnail', default=None) or
            video_detail.get('thumbnail'))
        description = xpath_text(video_info, './/description')
        timestamp = parse_iso8601(xpath_text(video_info, './/first_retrieve'))
        if not timestamp:
            match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
            if match:
                timestamp = parse_iso8601(match.replace('+', ':00+'))
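        # Last resort: postedAt from the watch API JSON, given in local Japan time (hence the +9 hour offset)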
        if not timestamp and video_detail.get('postedAt'):
            timestamp = parse_iso8601(
                video_detail['postedAt'].replace('/', '-'),
                delimiter=' ', timezone=datetime.timedelta(hours=9))
        view_count = int_or_none(xpath_text(video_info, './/view_counter'))
        if not view_count:
            match = self._html_search_regex(
                r'>Views: <strong[^>]*>([^<]+)</strong>',
                webpage, 'view count', default=None)
            if match:
                view_count = int_or_none(match.replace(',', ''))
        view_count = view_count or video_detail.get('viewCount')
        comment_count = int_or_none(xpath_text(video_info, './/comment_num'))
        if not comment_count:
            match = self._html_search_regex(
                r'>Comments: <strong[^>]*>([^<]+)</strong>',
                webpage, 'comment count', default=None)
            if match:
                comment_count = int_or_none(match.replace(',', ''))
        comment_count = comment_count or video_detail.get('commentCount')
        duration = (parse_duration(
            xpath_text(video_info, './/length') or
            self._html_search_meta(
                'video:duration', webpage, 'video duration', default=None)) or
            video_detail.get('length'))
        webpage_url = xpath_text(video_info, './/watch_url') or url
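        # Channel uploads expose ch_id/ch_name, user uploads expose user_id/user_nickname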
        if video_info.find('.//ch_id') is not None:
            uploader_id = video_info.find('.//ch_id').text
            uploader = video_info.find('.//ch_name').text
        elif video_info.find('.//user_id') is not None:
            uploader_id = video_info.find('.//user_id').text
            uploader = video_info.find('.//user_nickname').text
        else:
            uploader_id = uploader = None
            'url': video_real_url,
            'format': video_format,
            'thumbnail': thumbnail,
            'description': description,
            'uploader': uploader,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'comment_count': comment_count,
            'duration': duration,
            'webpage_url': webpage_url,
class NiconicoPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)'
        'url': 'http://www.nicovideo.jp/mylist/27411728',
            'title': 'AKB48のオールナイトニッポン',
        'playlist_mincount': 225,
    def _real_extract(self, url):
        list_id = self._match_id(url)
        webpage = self._download_webpage(url, list_id)
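        # The mylist page embeds its entries as a JSON array inside a Mylist.preload(...) call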
        entries_json = self._search_regex(
            r'Mylist\.preload\(\d+, (\[.*\])\);', webpage, 'entries')
        entries = json.loads(entries_json)
            'ie_key': NiconicoIE.ie_key(),
            'url': ('http://www.nicovideo.jp/watch/%s' %
                    entry['item_data']['video_id']),
        } for entry in entries]
            'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),