diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py
index 9ca860ab084f523c7dc5ed7b16b28ff0e91c3324..1405b73f76ad5166d45d9a9eb9687c49fa8a0bde 100644
--- a/youtube_dl/extractor/tudou.py
+++ b/youtube_dl/extractor/tudou.py
@@ -1,16 +1,34 @@
+# coding: utf-8
+
 import re
+import json
 
 from .common import InfoExtractor
 
 
 class TudouIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs)/(?:view|(.+?))/(?:([^/]+)|([^/]+)\.html)'
+    _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?'
+    _TEST = {
+        u'url': u'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
+        u'file': u'159448201.f4v',
+        u'md5': u'140a49ed444bd22f93330985d8475fcb',
+        u'info_dict': {
+            u"title": u"卡马乔国足开大脚长传冲吊集锦"
+        }
+    }
+
+    def _url_for_id(self, id, quality=None):
+        info_url = "http://v2.tudou.com/f?id=" + str(id)
+        if quality:
+            info_url += '&hd' + quality
+        webpage = self._download_webpage(info_url, id, "Opening the info webpage")
+        final_url = self._html_search_regex('>(.+?)</f>', webpage, 'video url')
+        return final_url
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(2).replace('.html','')
+        video_id = mobj.group(2)
         webpage = self._download_webpage(url, video_id)
-        video_id = re.search('"k":(.+?),',webpage).group(1)
         title = re.search(",kw:\"(.+)\"",webpage)
         if title is None:
             title = re.search(",kw: \'(.+)\'",webpage)
@@ -19,14 +37,27 @@ class TudouIE(InfoExtractor):
         if thumbnail_url is None:
             thumbnail_url = re.search(",pic:\"(.+?)\"",webpage)
         thumbnail_url = thumbnail_url.group(1)
-        info_url = "http://v2.tudou.com/f?id="+str(video_id)
-        webpage = self._download_webpage(info_url, video_id, "Opening the info webpage")
-        final_url = re.search('\>(.+?)\<\/f\>',webpage).group(1)
-        ext = (final_url.split('?')[0]).split('.')[-1]
-        return [{
-            'id':        video_id,
-            'url':       final_url,
-            'ext':       ext,
-            'title':     title,
-            'thumbnail': thumbnail_url,
-        }]
+
+        segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')
+        segments = json.loads(segs_json)
+        # It looks like the keys are the values that have to be passed as the
+        # 'hd' parameter in the request url; we pick the highest one
+        quality = sorted(segments.keys())[-1]
+        parts = segments[quality]
+        result = []
+        len_parts = len(parts)
+        if len_parts > 1:
+            self.to_screen(u'%s: found %s parts' % (video_id, len_parts))
+        for part in parts:
+            part_id = part['k']
+            final_url = self._url_for_id(part_id, quality)
+            ext = (final_url.split('?')[0]).split('.')[-1]
+            part_info = {'id': part_id,
+                         'url': final_url,
+                         'ext': ext,
+                         'title': title,
+                         'thumbnail': thumbnail_url,
+                         }
+            result.append(part_info)
+
+        return result
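
The quality-selection comment in the hunk above relies on the scraped `segs` value being a JSON object that maps quality codes (the string appended after `&hd` in the info URL) to lists of segment dicts whose `k` key is the part id. A minimal standalone sketch of that selection logic, using a made-up payload purely for illustration (the real string is scraped from the Tudou watch page; only the id 159448201 is taken from the test above), might look like:

    import json

    # Hypothetical 'segs' payload for illustration only; the real value is the
    # string matched by r'segs: \'(.*)\'' on the watch page.
    segs_json = '{"2": [{"k": 159448201}], "5": [{"k": 159448203}, {"k": 159448204}]}'

    segments = json.loads(segs_json)
    quality = sorted(segments.keys())[-1]   # highest quality code, here "5"
    for part in segments[quality]:
        part_id = part['k']
        # The extractor would now fetch http://v2.tudou.com/f?id=<part_id>&hd<quality>
        print(quality, part_id)

Note that `sorted()` compares the keys as strings, which is also what the extractor does; this assumes the quality codes sort correctly lexicographically (e.g. single-character codes).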