# encoding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str


class SohuIE(InfoExtractor):
    _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'

    _TEST = {
        'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
        'md5': 'bde8d9a6ffd82c63a1eefaef4eeefec7',
        'info_dict': {
            'id': '382479172',
            'ext': 'mp4',
            'title': 'MV:Far East Movement《The Illest》',
        },
        'skip': 'Only available from China',
    }

    def _real_extract(self, url):

        def _fetch_data(vid_id, mytv=False):
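            # my.tv.sohu.com videos are served from a different metadata
            # endpoint; the returned JSON is consumed the same way below.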
            if mytv:
                base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
            else:
                base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='

            return self._download_json(
                base_data_url + vid_id, video_id,
                'Downloading JSON data for %s' % vid_id)

        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        mytv = mobj.group('mytv') is not None

        webpage = self._download_webpage(url, video_id)
        raw_title = self._html_search_regex(
            r'(?s)<title>(.+?)</title>',
            webpage, 'video title')
        title = raw_title.partition('-')[0].strip()

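        # The numeric video id embedded in the page's JavaScript is needed
        # to query the metadata endpoint.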
        vid = self._html_search_regex(
            r'var vid ?= ?["\'](\d+)["\']',
            webpage, 'video path')
        vid_data = _fetch_data(vid, mytv)

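        # Each quality level has its own vid; the one matching the page vid
        # corresponds to the metadata already fetched above.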
        formats_json = {}
        for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'):
            vid_id = vid_data['data'].get('%sVid' % format_id)
            if not vid_id:
                continue
            vid_id = compat_str(vid_id)
            formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv)

        part_count = vid_data['data']['totalBlocks']

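        # Videos are split into parts ("blocks"); build a format list for
        # every part and collect the parts as playlist entries.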
        playlist = []
        for i in range(part_count):
            formats = []
            for format_id, format_data in formats_json.items():
                allot = format_data['allot']
                prot = format_data['prot']

                data = format_data['data']
                clips_url = data['clipsURL']
                su = data['su']

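                # Query the host named in 'allot' for the pieces of the
                # final download URL of this clip.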
                part_str = self._download_webpage(
                    'http://%s/?prot=%s&file=%s&new=%s' %
                    (allot, prot, clips_url[i], su[i]),
                    video_id,
                    'Downloading %s video URL part %d of %d'
                    % (format_id, i + 1, part_count))

                part_info = part_str.split('|')
                video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])

                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'filesize': data['clipsBytes'][i],
                    'width': data['width'],
                    'height': data['height'],
                    'fps': data['fps'],
                })
            self._sort_formats(formats)

            playlist.append({
                'id': '%s_part%d' % (video_id, i + 1),
                'title': title,
                'duration': vid_data['data']['clipsDuration'][i],
                'formats': formats,
            })

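        # A single-part video is returned directly; multi-part videos are
        # returned as a playlist of the individual parts.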
        if len(playlist) == 1:
            info = playlist[0]
            info['id'] = video_id
        else:
            info = {
                '_type': 'playlist',
                'entries': playlist,
                'id': video_id,
            }

        return info