diff --git a/youtube_dl/extractor/bilibili.py b/youtube_dl/extractor/bilibili.py
index 2103ed73aad860738bba5108ee27a86fd921d29c..b17047b399b6630fe2334aa24f0a5e97aed8506f 100644
--- a/youtube_dl/extractor/bilibili.py
+++ b/youtube_dl/extractor/bilibili.py
 # coding: utf-8
 from __future__ import unicode_literals
 
+import calendar
+import datetime
 import re
-import itertools
-import json
-import xml.etree.ElementTree as ET
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_etree_fromstring,
+    compat_str,
+    compat_parse_qs,
+    compat_xml_parse_error,
+)
 from ..utils import (
-    int_or_none,
-    unified_strdate,
     ExtractorError,
+    int_or_none,
+    float_or_none,
+    xpath_text,
 )
 
 
 class BiliBiliIE(InfoExtractor):
-    _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>[0-9]+)/'
+    _VALID_URL = r'https?://www\.bilibili\.(?:tv|com)/video/av(?P<id>\d+)'
 
     _TESTS = [{
         'url': 'http://www.bilibili.tv/video/av1074402/',
-        'md5': '2c301e4dab317596e837c3e7633e7d86',
+        'md5': '5f7d29e1a2872f3df0cf76b1f87d3788',
         'info_dict': {
-            'id': '1074402_part1',
+            'id': '1554319',
             'ext': 'flv',
             'title': '【金坷垃】金泡沫',
-            'duration': 308,
+            'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
+            'duration': 308.067,
+            'timestamp': 1398012660,
             'upload_date': '20140420',
             'thumbnail': 're:^https?://.+\.jpg',
+            'uploader': '菊子桑',
+            'uploader_id': '156160',
         },
     }, {
         'url': 'http://www.bilibili.com/video/av1041170/',
         'info_dict': {
             'id': '1041170',
             'title': '【BD1080P】刀语【诸神&异域】',
+            'description': '这是个神奇的故事~每个人不留弹幕不给走哦~切利哦!~',
         },
         'playlist_count': 9,
+    }, {
+        'url': 'http://www.bilibili.com/video/av4808130/',
+        'info_dict': {
+            'id': '4808130',
+            'title': '【长篇】哆啦A梦443【钉铛】',
+            'description': '(2016.05.27)来组合客人的脸吧&amp;寻母六千里锭 抱歉,又轮到周日上班现在才到家 封面www.pixiv.net/member_illust.php?mode=medium&amp;illust_id=56912929',
+        },
+        'playlist': [{
+            'md5': '55cdadedf3254caaa0d5d27cf20a8f9c',
+            'info_dict': {
+                'id': '4808130_part1',
+                'ext': 'flv',
+                'title': '【长篇】哆啦A梦443【钉铛】',
+                'description': '(2016.05.27)来组合客人的脸吧&amp;寻母六千里锭 抱歉,又轮到周日上班现在才到家 封面www.pixiv.net/member_illust.php?mode=medium&amp;illust_id=56912929',
+                'timestamp': 1464564180,
+                'upload_date': '20160529',
+                'uploader': '喜欢拉面',
+                'uploader_id': '151066',
+            },
+        }, {
+            'md5': '926f9f67d0c482091872fbd8eca7ea3d',
+            'info_dict': {
+                'id': '4808130_part2',
+                'ext': 'flv',
+                'title': '【长篇】哆啦A梦443【钉铛】',
+                'description': '(2016.05.27)来组合客人的脸吧&amp;寻母六千里锭 抱歉,又轮到周日上班现在才到家 封面www.pixiv.net/member_illust.php?mode=medium&amp;illust_id=56912929',
+                'timestamp': 1464564180,
+                'upload_date': '20160529',
+                'uploader': '喜欢拉面',
+                'uploader_id': '151066',
+            },
+        }, {
+            'md5': '4b7b225b968402d7c32348c646f1fd83',
+            'info_dict': {
+                'id': '4808130_part3',
+                'ext': 'flv',
+                'title': '【长篇】哆啦A梦443【钉铛】',
+                'description': '(2016.05.27)来组合客人的脸吧&amp;寻母六千里锭 抱歉,又轮到周日上班现在才到家 封面www.pixiv.net/member_illust.php?mode=medium&amp;illust_id=56912929',
+                'timestamp': 1464564180,
+                'upload_date': '20160529',
+                'uploader': '喜欢拉面',
+                'uploader_id': '151066',
+            },
+        }, {
+            'md5': '7b795e214166501e9141139eea236e91',
+            'info_dict': {
+                'id': '4808130_part4',
+                'ext': 'flv',
+                'title': '【长篇】哆啦A梦443【钉铛】',
+                'description': '(2016.05.27)来组合客人的脸吧&amp;寻母六千里锭 抱歉,又轮到周日上班现在才到家 封面www.pixiv.net/member_illust.php?mode=medium&amp;illust_id=56912929',
+                'timestamp': 1464564180,
+                'upload_date': '20160529',
+                'uploader': '喜欢拉面',
+                'uploader_id': '151066',
+            },
+        }],
+    }, {
+        # Missing upload time
+        'url': 'http://www.bilibili.com/video/av1867637/',
+        'info_dict': {
+            'id': '2880301',
+            'ext': 'flv',
+            'title': '【HDTV】【喜剧】岳父岳母真难当 (2014)【法国票房冠军】',
+            'description': '一个信奉天主教的法国旧式传统资产阶级家庭中有四个女儿。三个女儿却分别找了阿拉伯、犹太、中国丈夫,老夫老妻唯独期盼剩下未嫁的小女儿能找一个信奉天主教的法国白人,结果没想到小女儿找了一位非裔黑人……【这次应该不会跳帧了】',
+            'uploader': '黑夜为猫',
+            'uploader_id': '610729',
+        },
+        'params': {
+            # Just to test metadata extraction
+            'skip_download': True,
+        },
+        'expected_warnings': ['upload time'],
     }]
 
+    # BiliBili blocks keys from time to time. The current key is extracted from
+    # the Android client
+    # TODO: find the sign algorithm used in the flash player
+    _APP_KEY = '86385cdc024c0f6c'
+
     def _real_extract(self, url):
-        video_id = self._match_id(url)
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
         webpage = self._download_webpage(url, video_id)
 
-        if self._search_regex(r'(此视频不存在或被删除)', webpage, 'error message', default=None):
-            raise ExtractorError('The video does not exist or was deleted', expected=True)
-        video_code = self._search_regex(
-            r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code')
-
-        title = self._html_search_meta(
-            'media:title', video_code, 'title', fatal=True)
-        duration_str = self._html_search_meta(
-            'duration', video_code, 'duration')
-        if duration_str is None:
-            duration = None
-        else:
-            duration_mobj = re.match(
-                r'^T(?:(?P<hours>[0-9]+)H)?(?P<minutes>[0-9]+)M(?P<seconds>[0-9]+)S$',
-                duration_str)
-            duration = (
-                int_or_none(duration_mobj.group('hours'), default=0) * 3600 +
-                int(duration_mobj.group('minutes')) * 60 +
-                int(duration_mobj.group('seconds')))
-        upload_date = unified_strdate(self._html_search_meta(
-            'uploadDate', video_code, fatal=False))
-        thumbnail = self._html_search_meta(
-            'thumbnailUrl', video_code, 'thumbnail', fatal=False)
-
-        cid = self._search_regex(r'cid=(\d+)', webpage, 'cid')
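+        # the player parameters (including cid) are embedded either in an
+        # EmbedPlayer() call or in the secure iframe URL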
+        params = compat_parse_qs(self._search_regex(
+            [r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
+             r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
+            webpage, 'player parameters'))
+        cid = params['cid'][0]
 
-        entries = []
+        info_xml_str = self._download_webpage(
+            'http://interface.bilibili.com/v_cdn_play',
+            cid, query={'appkey': self._APP_KEY, 'cid': cid},
+            note='Downloading video info page')
 
-        lq_page = self._download_webpage(
-            'http://interface.bilibili.com/v_cdn_play?appkey=1&cid=%s' % cid,
-            video_id,
-            note='Downloading LQ video info'
-        )
+        err_msg = None
+        durls = None
+        info_xml = None
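+        # the endpoint answers with XML on success and a JSON object carrying
+        # error_text on failure, so try XML first and fall back to JSON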
         try:
-            err_info = json.loads(lq_page)
-            raise ExtractorError(
-                'BiliBili said: ' + err_info['error_text'], expected=True)
-        except ValueError:
-            pass
-
-        lq_doc = ET.fromstring(lq_page)
-        lq_durls = lq_doc.findall('./durl')
-
-        hq_doc = self._download_xml(
-            'http://interface.bilibili.com/playurl?appkey=1&cid=%s' % cid,
-            video_id,
-            note='Downloading HQ video info',
-            fatal=False,
-        )
-        if hq_doc is not False:
-            hq_durls = hq_doc.findall('./durl')
-            assert len(lq_durls) == len(hq_durls)
+            info_xml = compat_etree_fromstring(info_xml_str.encode('utf-8'))
+        except compat_xml_parse_error:
+            info_json = self._parse_json(info_xml_str, video_id, fatal=False)
+            err_msg = (info_json or {}).get('error_text')
         else:
-            hq_durls = itertools.repeat(None)
+            err_msg = xpath_text(info_xml, './message')
+
+        if info_xml is not None:
+            durls = info_xml.findall('./durl')
+        if not durls:
+            if err_msg:
+                raise ExtractorError('%s said: %s' % (self.IE_NAME, err_msg), expected=True)
+            else:
+                raise ExtractorError('No videos found!')
+
+        entries = []
 
-        i = 1
-        for lq_durl, hq_durl in zip(lq_durls, hq_durls):
+        for durl in durls:
+            size = xpath_text(durl, ['./filesize', './size'])
             formats = [{
-                'format_id': 'lq',
-                'quality': 1,
-                'url': lq_durl.find('./url').text,
-                'filesize': int_or_none(
-                    lq_durl.find('./size'), get_attr='text'),
+                'url': durl.find('./url').text,
+                'filesize': int_or_none(size),
             }]
-            if hq_durl:
+            for backup_url in durl.findall('./backup_url/url'):
                 formats.append({
-                    'format_id': 'hq',
-                    'quality': 2,
-                    'ext': 'flv',
-                    'url': hq_durl.find('./url').text,
-                    'filesize': int_or_none(
-                        hq_durl.find('./size'), get_attr='text'),
+                    'url': backup_url.text,
+                    # backup URLs have lower priorities
+                    'preference': -2 if 'hd.mp4' in backup_url.text else -3,
                 })
+
             self._sort_formats(formats)
 
             entries.append({
-                'id': '%s_part%d' % (video_id, i),
-                'title': title,
+                'id': '%s_part%s' % (cid, xpath_text(durl, './order')),
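+                # <length> is reported in milliseconds; scale by 1000 to get seconds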
+                'duration': int_or_none(xpath_text(durl, './length'), 1000),
                 'formats': formats,
-                'duration': duration,
-                'upload_date': upload_date,
-                'thumbnail': thumbnail,
             })
 
-            i += 1
-
-        return {
-            '_type': 'multi_video',
-            'entries': entries,
-            'id': video_id,
-            'title': title
+        title = self._html_search_regex('<h1[^>]+title="([^"]+)">', webpage, 'title')
+        description = self._html_search_meta('description', webpage)
+        datetime_str = self._html_search_regex(
+            r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time', fatal=False)
+        timestamp = None
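+        # the datetime attribute has minute precision and is interpreted as UTC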
+        if datetime_str:
+            timestamp = calendar.timegm(datetime.datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M').timetuple())
+
+        # TODO 'view_count' requires deobfuscating JavaScript
+        info = {
+            'id': compat_str(cid),
+            'title': title,
+            'description': description,
+            'timestamp': timestamp,
+            'thumbnail': self._html_search_meta('thumbnailUrl', webpage),
+            'duration': float_or_none(xpath_text(info_xml, './timelength'), scale=1000),
         }
+
+        uploader_mobj = re.search(
+            r'<a[^>]+href="https?://space\.bilibili\.com/(?P<id>\d+)"[^>]+title="(?P<name>[^"]+)"',
+            webpage)
+        if uploader_mobj:
+            info.update({
+                'uploader': uploader_mobj.group('name'),
+                'uploader_id': uploader_mobj.group('id'),
+            })
+
+        for entry in entries:
+            entry.update(info)
+
+        if len(entries) == 1:
+            return entries[0]
+        else:
+            for idx, entry in enumerate(entries):
+                entry['id'] = '%s_part%d' % (video_id, (idx + 1))
+
+            return {
+                '_type': 'multi_video',
+                'id': video_id,
+                'title': title,
+                'description': description,
+                'entries': entries,
+            }