from __future__ import unicode_literals
import re
-import itertools
-import json
-import xml.etree.ElementTree as ET
from .common import InfoExtractor
+from ..compat import compat_str
from ..utils import (
int_or_none,
- unified_strdate,
+ unescapeHTML,
ExtractorError,
+ xpath_text,
)


class BiliBiliIE(InfoExtractor):
- _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>[0-9]+)/'
+ _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>\d+)(?:/index_(?P<page_num>\d+)\.html)?'
_TESTS = [{
'url': 'http://www.bilibili.tv/video/av1074402/',
'md5': '2c301e4dab317596e837c3e7633e7d86',
'info_dict': {
- 'id': '1074402_part1',
+ 'id': '1554319',
'ext': 'flv',
'title': '【金坷垃】金泡沫',
'duration': 308,
'upload_date': '20140420',
'thumbnail': 're:^https?://.+\.jpg',
+ 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
+ 'timestamp': 1397983878,
+ 'uploader': '菊子桑',
},
}, {
'url': 'http://www.bilibili.com/video/av1041170/',
'info_dict': {
'id': '1041170',
'title': '【BD1080P】刀语【诸神&异域】',
+ 'description': '这是个神奇的故事~每个人不留弹幕不给走哦~切利哦!~',
+ 'uploader': '枫叶逝去',
+ 'timestamp': 1396501299,
},
'playlist_count': 9,
}]

def _real_extract(self, url):
- video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
-
- if '(此视频不存在或被删除)' in webpage:
- raise ExtractorError(
- 'The video does not exist or was deleted', expected=True)
-
- if '>你没有权限浏览! 由于版权相关问题 我们不对您所在的地区提供服务<' in webpage:
- raise ExtractorError(
- 'The video is not available in your region due to copyright reasons',
- expected=True)
-
- video_code = self._search_regex(
- r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code')
-
- title = self._html_search_meta(
- 'media:title', video_code, 'title', fatal=True)
- duration_str = self._html_search_meta(
- 'duration', video_code, 'duration')
- if duration_str is None:
- duration = None
- else:
- duration_mobj = re.match(
- r'^T(?:(?P<hours>[0-9]+)H)?(?P<minutes>[0-9]+)M(?P<seconds>[0-9]+)S$',
- duration_str)
- duration = (
- int_or_none(duration_mobj.group('hours'), default=0) * 3600 +
- int(duration_mobj.group('minutes')) * 60 +
- int(duration_mobj.group('seconds')))
- upload_date = unified_strdate(self._html_search_meta(
- 'uploadDate', video_code, fatal=False))
- thumbnail = self._html_search_meta(
- 'thumbnailUrl', video_code, 'thumbnail', fatal=False)
-
- cid = self._search_regex(r'cid=(\d+)', webpage, 'cid')
-
- entries = []
-
- lq_page = self._download_webpage(
- 'http://interface.bilibili.com/v_cdn_play?appkey=1&cid=%s' % cid,
- video_id,
- note='Downloading LQ video info'
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ page_num = mobj.group('page_num') or '1'
+
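+ # The view API resolves an av id and page number into clip metadata
+ # (title, uploader, cid, ...) consumed by the CDN interface below.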
+ view_data = self._download_json(
+ 'http://api.bilibili.com/view?type=json&appkey=8e9fc618fbd41e28&id=%s&page=%s' % (video_id, page_num),
+ video_id)
+ if 'error' in view_data:
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, view_data['error']), expected=True)
+
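+ # the cid identifies the actual media stream behind this page of the av entry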
+ cid = view_data['cid']
+ title = unescapeHTML(view_data['title'])
+
+ doc = self._download_xml(
+ 'http://interface.bilibili.com/v_cdn_play?appkey=8e9fc618fbd41e28&cid=%s' % cid,
+ cid,
+ 'Downloading page %s/%s' % (page_num, view_data['pages'])
)
- try:
- err_info = json.loads(lq_page)
- raise ExtractorError(
- 'BiliBili said: ' + err_info['error_text'], expected=True)
- except ValueError:
- pass
- lq_doc = ET.fromstring(lq_page)
- lq_durls = lq_doc.findall('./durl')
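+ # errors are reported inside the XML body as <result>error</result>
+ # alongside a human-readable <message>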
+ if xpath_text(doc, './result') == 'error':
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, xpath_text(doc, './message')), expected=True)
- hq_doc = self._download_xml(
- 'http://interface.bilibili.com/playurl?appkey=1&cid=%s' % cid,
- video_id,
- note='Downloading HQ video info',
- fatal=False,
- )
- if hq_doc is not False:
- hq_durls = hq_doc.findall('./durl')
- assert len(lq_durls) == len(hq_durls)
- else:
- hq_durls = itertools.repeat(None)
+ entries = []
- i = 1
- for lq_durl, hq_durl in zip(lq_durls, hq_durls):
+ for durl in doc.findall('./durl'):
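+ # the file size may appear as either <filesize> or <size>, so try both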
+ size = xpath_text(durl, ['./filesize', './size'])
formats = [{
- 'format_id': 'lq',
- 'quality': 1,
- 'url': lq_durl.find('./url').text,
- 'filesize': int_or_none(
- lq_durl.find('./size'), get_attr='text'),
+ 'url': durl.find('./url').text,
+ 'filesize': int_or_none(size),
+ 'ext': 'flv',
}]
- if hq_durl is not None:
- formats.append({
- 'format_id': 'hq',
- 'quality': 2,
- 'ext': 'flv',
- 'url': hq_durl.find('./url').text,
- 'filesize': int_or_none(
- hq_durl.find('./size'), get_attr='text'),
- })
- self._sort_formats(formats)
+ backup_urls = durl.find('./backup_url')
+ if backup_urls is not None:
+ for backup_url in backup_urls.findall('./url'):
+ formats.append({'url': backup_url.text})
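+ # keep the primary URL last: when formats are left unsorted, youtube-dl
+ # treats later entries as preferable, so the backups act as fallbacks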
+ formats.reverse()
entries.append({
- 'id': '%s_part%d' % (video_id, i),
+ 'id': '%s_part%s' % (cid, xpath_text(durl, './order')),
'title': title,
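+ # <length> is given in milliseconds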
+ 'duration': int_or_none(xpath_text(durl, './length'), 1000),
'formats': formats,
- 'duration': duration,
- 'upload_date': upload_date,
- 'thumbnail': thumbnail,
})
- i += 1
-
- return {
- '_type': 'multi_video',
- 'entries': entries,
- 'id': video_id,
- 'title': title
+ info = {
+ 'id': compat_str(cid),
+ 'title': title,
+ 'description': view_data.get('description'),
+ 'thumbnail': view_data.get('pic'),
+ 'uploader': view_data.get('author'),
+ 'timestamp': int_or_none(view_data.get('created')),
+ 'view_count': int_or_none(view_data.get('play')),
+ 'duration': int_or_none(xpath_text(doc, './timelength'), 1000),
}
+
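+ # a single-page video is returned directly; multi-page videos become
+ # a multi_video playlist keyed by the original av id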
+ if len(entries) == 1:
+ entries[0].update(info)
+ return entries[0]
+ else:
+ info.update({
+ '_type': 'multi_video',
+ 'id': video_id,
+ 'entries': entries,
+ })
+ return info