diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py
index 7a3891b89b736fb05f4c09d441d7eb56e68d8dcd..c89de5ba4a46bb261987d8dbee5f55b3d05492da 100644
--- a/youtube_dl/extractor/tudou.py
+++ b/youtube_dl/extractor/tudou.py
@@ -1,5 +1,7 @@
 # coding: utf-8
 
+from __future__ import unicode_literals
+
 import re
 import json
 
@@ -7,72 +9,79 @@ from .common import InfoExtractor
 
 
 class TudouIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs|albumplay)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?'
+    _VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:listplay|programs(?:/view)?|albumplay)/.*?/(?P<id>[^/?#]+?)(?:\.html)?/?(?:$|[?#])'
     _TESTS = [{
-        u'url': u'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
-        u'file': u'159448201.f4v',
-        u'md5': u'140a49ed444bd22f93330985d8475fcb',
-        u'info_dict': {
-            u"title": u"卡马乔国足开大脚长传冲吊集锦"
+        'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
+        'md5': '140a49ed444bd22f93330985d8475fcb',
+        'info_dict': {
+            'id': '159448201',
+            'ext': 'f4v',
+            'title': '卡马乔国足开大脚长传冲吊集锦',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        }
+    }, {
+        'url': 'http://www.tudou.com/programs/view/ajX3gyhL0pc/',
+        'info_dict': {
+            'id': '117049447',
+            'ext': 'f4v',
+            'title': 'La Sylphide-Bolshoi-Ekaterina Krysanova & Vyacheslav Lopatin 2012',
+            'thumbnail': 're:^https?://.*\.jpg$',
         }
-    },
-    {
-        u'url': u'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html',
-        u'file': u'todo.mp4',
-        u'md5': u'todo.mp4',
-        u'info_dict': {
-            u'title': u'todo.mp4',
-        },
-        u'add_ie': [u'Youku'],
-        u'skip': u'Only works from China'
     }]
 
-    def _url_for_id(self, id, quality = None):
-        info_url = "http://v2.tudou.com/f?id="+str(id)
+    def _url_for_id(self, id, quality=None):
+        info_url = "http://v2.tudou.com/f?id=" + str(id)
         if quality:
             info_url += '&hd' + quality
         webpage = self._download_webpage(info_url, id, "Opening the info webpage")
-        final_url = self._html_search_regex('>(.+?)</f>',webpage, 'video url')
+        final_url = self._html_search_regex('>(.+?)</f>', webpage, 'video url')
         return final_url
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(2)
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         m = re.search(r'vcode:\s*[\'"](.+?)[\'"]', webpage)
         if m and m.group(1):
             return {
                 '_type': 'url',
-                'url': u'youku:' + m.group(1),
+                'url': 'youku:' + m.group(1),
                 'ie_key': 'Youku'
             }
 
         title = self._search_regex(
-            r",kw:\s*['\"](.+?)[\"']", webpage, u'title')
+            r",kw:\s*['\"](.+?)[\"']", webpage, 'title')
         thumbnail_url = self._search_regex(
-            r",pic:\s*[\"'](.+?)[\"']", webpage, u'thumbnail URL', fatal=False)
+            r",pic:\s*[\"'](.+?)[\"']", webpage, 'thumbnail URL', fatal=False)
 
         segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')
         segments = json.loads(segs_json)
         # It looks like the keys are the arguments that have to be passed as
         # the hd field in the request url, we pick the higher
-        quality = sorted(segments.keys())[-1]
+        # Also, filter non-number qualities (see issue #3643).
+        quality = sorted(filter(lambda k: k.isdigit(), segments.keys()),
+                         key=lambda k: int(k))[-1]
         parts = segments[quality]
         result = []
         len_parts = len(parts)
         if len_parts > 1:
-            self.to_screen(u'%s: found %s parts' % (video_id, len_parts))
+            self.to_screen('%s: found %s parts' % (video_id, len_parts))
         for part in parts:
             part_id = part['k']
             final_url = self._url_for_id(part_id, quality)
             ext = (final_url.split('?')[0]).split('.')[-1]
-            part_info = {'id': part_id,
-                          'url': final_url,
-                          'ext': ext,
-                          'title': title,
-                          'thumbnail': thumbnail_url,
-                          }
+            part_info = {
+                'id': '%s' % part_id,
+                'url': final_url,
+                'ext': ext,
+                'title': title,
+                'thumbnail': thumbnail_url,
+            }
             result.append(part_info)
 
-        return result
+        return {
+            '_type': 'multi_video',
+            'entries': result,
+            'id': video_id,
+            'title': title,
+        }
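
For context, a minimal standalone sketch of the two behavioural changes in this patch: the tightened _VALID_URL now captures the video id in a named group (consumed via _match_id), and the quality selection filters out non-numeric keys from the decoded `segs` JSON before picking the highest one numerically (the filter referenced for issue #3643). The `segments` dict below is hypothetical sample data, not a real tudou.com response.

    import re

    # Pattern copied from the patched _VALID_URL; checked here outside the extractor machinery.
    VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:listplay|programs(?:/view)?|albumplay)/.*?/(?P<id>[^/?#]+?)(?:\.html)?/?(?:$|[?#])'
    m = re.match(VALID_URL, 'http://www.tudou.com/programs/view/ajX3gyhL0pc/')
    assert m.group('id') == 'ajX3gyhL0pc'

    # Hypothetical `segs` payload; a non-numeric key such as 'caption' would win a plain
    # lexicographic sort, which is what the added isdigit() filter guards against.
    segments = {
        '2': [{'k': 111}],
        '4': [{'k': 222}, {'k': 333}],
        'caption': [],
    }
    quality = sorted(filter(lambda k: k.isdigit(), segments.keys()),
                     key=lambda k: int(k))[-1]
    assert quality == '4'
    print(quality, len(segments[quality]))  # picks '4', which has 2 parts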