from __future__ import unicode_literals
import base64
+import itertools
import random
+import re
import string
import time
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
compat_ord,
+ compat_str,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
- sanitized_Request,
+ get_element_by_attribute,
+ try_get,
)
'params': {
'videopassword': '100600',
},
+ }, {
+ # /play/get.json contains streams with "channel_type":"tail"
+ 'url': 'http://v.youku.com/v_show/id_XOTUxMzg4NDMy.html',
+ 'info_dict': {
+ 'id': 'XOTUxMzg4NDMy',
+ 'title': '我的世界☆明月庄主☆车震猎杀☆杀人艺术Minecraft',
+ },
+ 'playlist_count': 6,
}]
def construct_video_urls(self, data):
fileid_dict = {}
for stream in data['stream']:
+ if stream.get('channel_type') == 'tail':
+ continue
format = stream.get('stream_type')
- fileid = stream['stream_fileid']
+ fileid = try_get(
+ stream, lambda x: x['segs'][0]['fileid'],
+ compat_str) or stream['stream_fileid']
fileid_dict[format] = fileid
def get_fileid(format, n):
# generate video_urls
video_urls_dict = {}
for stream in data['stream']:
+ if stream.get('channel_type') == 'tail':
+ continue
format = stream.get('stream_type')
video_urls = []
for dt in stream['segs']:
'_00' + \
'/st/' + self.parse_ext_l(format) + \
'/fileid/' + get_fileid(format, n) + '?' + \
- compat_urllib_parse.urlencode(param)
+ compat_urllib_parse_urlencode(param)
video_urls.append(video_url)
video_urls_dict[format] = video_urls
headers = {
'Referer': req_url,
}
+ headers.update(self.geo_verification_headers())
self._set_cookie('youku.com', 'xreferrer', 'http://www.youku.com')
- req = sanitized_Request(req_url, headers=headers)
- cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
- if cn_verification_proxy:
- req.add_header('Ytdl-request-proxy', cn_verification_proxy)
-
- raw_data = self._download_json(req, video_id, note=note)
+ raw_data = self._download_json(req_url, video_id, note=note, headers=headers)
return raw_data['data']
# which one has all
} for i in range(max(len(v.get('segs')) for v in data['stream']))]
for stream in data['stream']:
+ if stream.get('channel_type') == 'tail':
+ continue
fm = stream.get('stream_type')
video_urls = video_urls_dict[fm]
for video_url, seg, entry in zip(video_urls, stream['segs'], entries):
'format_id': self.get_format_name(fm),
'ext': self.parse_ext_l(fm),
'filesize': int(seg['size']),
+ 'width': stream.get('width'),
+ 'height': stream.get('height'),
})
return {
'title': title,
'entries': entries,
}
+
+
+class YoukuShowIE(InfoExtractor):
+ # Playlist extractor for Youku "show" (series) pages; every episode link
+ # found is delegated to the single-video Youku extractor via url_result.
+ _VALID_URL = r'https?://(?:www\.)?youku\.com/show_page/id_(?P<id>[0-9a-z]+)\.html'
+ IE_NAME = 'youku:show'
+
+ _TEST = {
+ 'url': 'http://www.youku.com/show_page/id_zc7c670be07ff11e48b3f.html',
+ 'info_dict': {
+ 'id': 'zc7c670be07ff11e48b3f',
+ 'title': '花千骨 未删减版',
+ 'description': 'md5:578d4f2145ae3f9128d9d4d863312910',
+ },
+ 'playlist_count': 50,
+ }
+
+ # Episodes served per paginated listing page; a shorter page marks the end.
+ _PAGE_SIZE = 40
+
+ def _find_videos_in_page(self, webpage):
+ # Scrape <li><a href="..." ... title="..."> episode anchors from one HTML
+ # page and wrap each (url, title) pair as a YoukuIE url_result entry.
+ videos = re.findall(
+ r'<li><a[^>]+href="(?P<url>https?://v\.youku\.com/[^"]+)"[^>]+title="(?P<title>[^"]+)"', webpage)
+ return [
+ self.url_result(video_url, YoukuIE.ie_key(), title)
+ for video_url, title in videos]
+
+ def _real_extract(self, url):
+ show_id = self._match_id(url)
+ webpage = self._download_webpage(url, show_id)
+
+ # Episodes embedded directly in the main show page come first.
+ entries = self._find_videos_in_page(webpage)
+
+ # Title/description are best-effort (fatal=False): missing metadata
+ # must not abort playlist extraction.
+ playlist_title = self._html_search_regex(
+ r'<span[^>]+class="name">([^<]+)</span>', webpage, 'playlist title', fatal=False)
+ detail_div = get_element_by_attribute('class', 'detail', webpage) or ''
+ playlist_description = self._html_search_regex(
+ r'<span[^>]+style="display:none"[^>]*>([^<]+)</span>',
+ detail_div, 'playlist description', fatal=False)
+
+ # Fetch additional paginated listings; 'divid' selects the reload offset
+ # (idx * _PAGE_SIZE + 1 — assumes the main page held the first
+ # _PAGE_SIZE episodes; TODO confirm against the site). Stop on the
+ # first page returning fewer than _PAGE_SIZE entries.
+ for idx in itertools.count(1):
+ episodes_page = self._download_webpage(
+ 'http://www.youku.com/show_episode/id_%s.html' % show_id,
+ show_id, query={'divid': 'reload_%d' % (idx * self._PAGE_SIZE + 1)},
+ note='Downloading episodes page %d' % idx)
+ new_entries = self._find_videos_in_page(episodes_page)
+ entries.extend(new_entries)
+ if len(new_entries) < self._PAGE_SIZE:
+ break
+
+ return self.playlist_result(entries, show_id, playlist_title, playlist_description)