from __future__ import unicode_literals

import datetime
import json
import re

from .common import InfoExtractor
from ..compat import compat_urllib_parse, compat_urlparse
from ..utils import (
    determine_ext, encode_dict, ExtractorError, int_or_none,
    parse_duration, parse_iso8601, sanitized_Request, xpath_text,
)


class NiconicoIE(InfoExtractor):
        'url': 'http://www.nicovideo.jp/watch/sm22312215',
        'md5': 'd1a75c0823e2f629128c43e1212760f9',
            'title': 'Big Buck Bunny',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': 1385182762,
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
        # Files downloaded with and without credentials differ, so omit the md5 checksum
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
            'title': '【鏡音リン】Dance on media【オリジナル】take2!',
            'description': 'md5:689f066d74610b3b22e0f1739add0f58',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
        # video exists but is marked as "deleted"
        'url': 'http://www.nicovideo.jp/watch/sm10000',
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'upload_date': '20071224',
            'timestamp': 1198527840,  # timestamp field has different value if logged in
        'url': 'http://www.nicovideo.jp/watch/so22543406',
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',

    _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'
    # Tracks whether the downloader used authentication to download the video
    _AUTHENTICATED = False

    def _real_initialize(self):
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            return

        login_form_strs = {
            'mail': username,
            'password': password,
        }
        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
        request = sanitized_Request(
            'https://secure.nicovideo.jp/secure/login', login_data)
        login_results = self._download_webpage(
            request, None, note='Logging in', errnote='Unable to log in')
        if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
            return
        self._AUTHENTICATED = True

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Get video webpage. We are not actually interested in it for normal
        # cases, but need the cookies in order to be able to download the
        # video.
        webpage, handle = self._download_webpage_handle(
            'http://www.nicovideo.jp/watch/' + video_id, video_id)
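
        # IDs with a 'so' prefix (channel videos) redirect to a numeric ID,
        # so re-extract the real ID from the final URL.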
        if video_id.startswith('so'):
            video_id = self._match_id(handle.geturl())
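
        # The getthumbinfo API returns an XML document with the video metadata
        # (title, description, thumbnail URL, view/comment counters) parsed below.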
        video_info = self._download_xml(
            'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
            note='Downloading video info page')

        if self._AUTHENTICATED:
            flv_info_webpage = self._download_webpage(
                'http://flapi.nicovideo.jp/api/getflv/' + video_id + '?as3=1',
                video_id, 'Downloading flv info')
        else:
            # Get external player info
            ext_player_info = self._download_webpage(
                'http://ext.nicovideo.jp/thumb_watch/' + video_id, video_id)
            thumb_play_key = self._search_regex(
                r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey')
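
            # Without authentication, the thumbPlayKey obtained above has to be
            # sent back to the thumb_watch endpoint to get the flv info.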
            flv_info_data = compat_urllib_parse.urlencode({
                'k': thumb_play_key,
                'v': video_id,
            })
            flv_info_request = sanitized_Request(
                'http://ext.nicovideo.jp/thumb_watch', flv_info_data,
                {'Content-Type': 'application/x-www-form-urlencoded'})
            flv_info_webpage = self._download_webpage(
                flv_info_request, video_id,
                note='Downloading flv info', errnote='Unable to download flv info')

        flv_info = compat_urlparse.parse_qs(flv_info_webpage)
        if 'url' not in flv_info:
            if 'deleted' in flv_info:
                raise ExtractorError('The video has been deleted.',
                                     expected=True)
            raise ExtractorError('Unable to find video URL')

        video_real_url = flv_info['url'][0]

        # Start extracting information
        title = xpath_text(video_info, './/title')
        if not title:
            title = self._og_search_title(webpage, default=None)
        if not title:
            title = self._html_search_regex(
                r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
                webpage, 'video title')

        watch_api_data_string = self._html_search_regex(
            r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
            webpage, 'watch api data', default=None)
        watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
        video_detail = watch_api_data.get('videoDetail', {})

        extension = xpath_text(video_info, './/movie_type')
        if not extension:
            extension = determine_ext(video_real_url)

        thumbnail = (
            xpath_text(video_info, './/thumbnail_url') or
            self._html_search_meta('image', webpage, 'thumbnail', default=None) or
            video_detail.get('thumbnail'))

        description = xpath_text(video_info, './/description')

        timestamp = parse_iso8601(xpath_text(video_info, './/first_retrieve'))
        if not timestamp:
            match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
            if match:
                timestamp = parse_iso8601(match.replace('+', ':00+'))
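        # postedAt from the watch API is parsed with an explicit UTC+9 (JST)
        # offset below.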
        if not timestamp and video_detail.get('postedAt'):
            timestamp = parse_iso8601(
                video_detail['postedAt'].replace('/', '-'),
                delimiter=' ', timezone=datetime.timedelta(hours=9))

        view_count = int_or_none(xpath_text(video_info, './/view_counter'))
        if not view_count:
            match = self._html_search_regex(
                r'>Views: <strong[^>]*>([^<]+)</strong>',
                webpage, 'view count', default=None)
            if match:
                view_count = int_or_none(match.replace(',', ''))
        view_count = view_count or video_detail.get('viewCount')

        comment_count = int_or_none(xpath_text(video_info, './/comment_num'))
        if not comment_count:
            match = self._html_search_regex(
                r'>Comments: <strong[^>]*>([^<]+)</strong>',
                webpage, 'comment count', default=None)
            if match:
                comment_count = int_or_none(match.replace(',', ''))
        comment_count = comment_count or video_detail.get('commentCount')

        duration = (parse_duration(
            xpath_text(video_info, './/length') or
            self._html_search_meta(
                'video:duration', webpage, 'video duration', default=None)) or
            video_detail.get('length'))

        webpage_url = xpath_text(video_info, './/watch_url') or url

        if video_info.find('.//ch_id') is not None:
            uploader_id = video_info.find('.//ch_id').text
            uploader = video_info.find('.//ch_name').text
        elif video_info.find('.//user_id') is not None:
            uploader_id = video_info.find('.//user_id').text
            uploader = video_info.find('.//user_nickname').text
        else:
            uploader_id = uploader = None

        return {
            'id': video_id,
            'title': title,
            'url': video_real_url,
            'ext': extension,
            # URLs ending in 'low' are niconico's reduced-quality "economy mode" streams
            'format_id': 'economy' if video_real_url.endswith('low') else 'normal',
            'thumbnail': thumbnail,
            'description': description,
            'uploader': uploader,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'comment_count': comment_count,
            'duration': duration,
            'webpage_url': webpage_url,
        }


class NiconicoPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)'

        'url': 'http://www.nicovideo.jp/mylist/27411728',
            'title': 'AKB48のオールナイトニッポン',
        'playlist_mincount': 225,

    def _real_extract(self, url):
        list_id = self._match_id(url)
        webpage = self._download_webpage(url, list_id)
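
        # The mylist page embeds its entries as a JSON array inside a
        # Mylist.preload(...) call, which is extracted and parsed below.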
        entries_json = self._search_regex(
            r'Mylist\.preload\(\d+, (\[.*\])\);', webpage, 'entries')
        entries = json.loads(entries_json)
        entries = [{
            '_type': 'url',
            'ie_key': NiconicoIE.ie_key(),
            'url': ('http://www.nicovideo.jp/watch/%s' %
                    entry['item_data']['video_id']),
        } for entry in entries]

        return {
            '_type': 'playlist',
            'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
            'id': list_id,
            'entries': entries,
        }