# coding: utf-8

from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor


class TudouIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs|albumplay)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?'
    _TESTS = [{
        'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
        'md5': '140a49ed444bd22f93330985d8475fcb',
        'info_dict': {
            'id': '159448201',
            'ext': 'f4v',
            'title': '卡马乔国足开大脚长传冲吊集锦',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }, {
        'url': 'http://www.tudou.com/programs/view/ajX3gyhL0pc/',
        'info_dict': {
            'id': '117049447',
            'ext': 'f4v',
            'title': 'La Sylphide-Bolshoi-Ekaterina Krysanova & Vyacheslav Lopatin 2012',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }, {
        'url': 'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html',
        'info_dict': {
            'title': 'todo.mp4',
        },
        'add_ie': ['Youku'],
        'skip': 'Only works from China'
    }]

    def _url_for_id(self, id, quality=None):
        # Query Tudou's info endpoint for the given part id; the response
        # wraps the final video URL in an <f> element.
        info_url = "http://v2.tudou.com/f?id=" + str(id)
        if quality:
            info_url += '&hd' + quality
        webpage = self._download_webpage(info_url, id, "Opening the info webpage")
        final_url = self._html_search_regex('>(.+?)</f>', webpage, 'video url')
        return final_url

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(2)
        webpage = self._download_webpage(url, video_id)

        # Some Tudou pages merely embed a Youku video; in that case hand
        # extraction over to the Youku extractor.
        m = re.search(r'vcode:\s*[\'"](.+?)[\'"]', webpage)
        if m and m.group(1):
            return {
                '_type': 'url',
                'url': 'youku:' + m.group(1),
                'ie_key': 'Youku'
            }

        title = self._search_regex(
            r",kw:\s*['\"](.+?)[\"']", webpage, 'title')
        thumbnail_url = self._search_regex(
            r",pic:\s*[\"'](.+?)[\"']", webpage, 'thumbnail URL', fatal=False)

        segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')
        segments = json.loads(segs_json)
        # It looks like the keys are the arguments that have to be passed as
        # the hd field in the request URL; we pick the highest one.
        # Also, filter out non-numeric qualities (see issue #3643).
        quality = sorted(filter(lambda k: k.isdigit(), segments.keys()),
                         key=lambda k: int(k))[-1]
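        # For reference, the decoded `segments` mapping is expected to look
        # roughly like the following (hypothetical sample, not taken from a
        # real response; only the quality keys and the 'k' part ids are
        # relied upon by this extractor):
        #     {"2": [{"k": 111111111}], "5": [{"k": 222222222}]}
        # With that input the sort above would select quality '5', and the
        # loop below would resolve each part's URL from its 'k' id.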
        parts = segments[quality]
        result = []
        len_parts = len(parts)
        if len_parts > 1:
            self.to_screen('%s: found %s parts' % (video_id, len_parts))
        # Build one info dict per part; multi-part videos are returned as a
        # list of entries.
        for part in parts:
            part_id = part['k']
            final_url = self._url_for_id(part_id, quality)
            # Derive the extension from the URL path, ignoring the query string.
            ext = (final_url.split('?')[0]).split('.')[-1]
            part_info = {
                'id': '%s' % part_id,
                'url': final_url,
                'ext': ext,
                'title': title,
                'thumbnail': thumbnail_url,
            }
            result.append(part_info)

        return result