Initiate new release.

diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py
index 900eb2abac2bcc346eb8b07782ac017a25a44c24..73ebe57598a281a4debcb6ea671ebf081b63c610 100644
--- a/youtube_dl/extractor/youku.py
+++ b/youtube_dl/extractor/youku.py
@@ -2,18 +2,22 @@
 from __future__ import unicode_literals
 
 import base64
+import itertools
 import random
+import re
 import string
 import time
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
     compat_ord,
+    compat_str,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     ExtractorError,
-    sanitized_Request,
+    get_element_by_attribute,
+    try_get,
 )
 
 
@@ -64,6 +68,14 @@ class YoukuIE(InfoExtractor):
         'params': {
             'videopassword': '100600',
         },
+    }, {
+        # /play/get.json contains streams with "channel_type":"tail"
+        'url': 'http://v.youku.com/v_show/id_XOTUxMzg4NDMy.html',
+        'info_dict': {
+            'id': 'XOTUxMzg4NDMy',
+            'title': '我的世界☆明月庄主☆车震猎杀☆杀人艺术Minecraft',
+        },
+        'playlist_count': 6,
     }]
 
     def construct_video_urls(self, data):
@@ -92,8 +104,12 @@ class YoukuIE(InfoExtractor):
 
         fileid_dict = {}
         for stream in data['stream']:
+            if stream.get('channel_type') == 'tail':
+                continue
             format = stream.get('stream_type')
-            fileid = stream['stream_fileid']
+            fileid = try_get(
+                stream, lambda x: x['segs'][0]['fileid'],
+                compat_str) or stream['stream_fileid']
             fileid_dict[format] = fileid
 
         def get_fileid(format, n):
@@ -117,6 +133,8 @@ class YoukuIE(InfoExtractor):
         # generate video_urls
         video_urls_dict = {}
         for stream in data['stream']:
+            if stream.get('channel_type') == 'tail':
+                continue
             format = stream.get('stream_type')
             video_urls = []
             for dt in stream['segs']:
@@ -138,7 +156,7 @@ class YoukuIE(InfoExtractor):
                     '_00' + \
                     '/st/' + self.parse_ext_l(format) + \
                     '/fileid/' + get_fileid(format, n) + '?' + \
-                    compat_urllib_parse.urlencode(param)
+                    compat_urllib_parse_urlencode(param)
                 video_urls.append(video_url)
             video_urls_dict[format] = video_urls
 
@@ -203,14 +221,10 @@ class YoukuIE(InfoExtractor):
             headers = {
                 'Referer': req_url,
             }
+            headers.update(self.geo_verification_headers())
             self._set_cookie('youku.com', 'xreferrer', 'http://www.youku.com')
-            req = sanitized_Request(req_url, headers=headers)
 
-            cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
-            if cn_verification_proxy:
-                req.add_header('Ytdl-request-proxy', cn_verification_proxy)
-
-            raw_data = self._download_json(req, video_id, note=note)
+            raw_data = self._download_json(req_url, video_id, note=note, headers=headers)
 
             return raw_data['data']
 
@@ -253,6 +267,8 @@ class YoukuIE(InfoExtractor):
             # which one has all
         } for i in range(max(len(v.get('segs')) for v in data['stream']))]
         for stream in data['stream']:
+            if stream.get('channel_type') == 'tail':
+                continue
             fm = stream.get('stream_type')
             video_urls = video_urls_dict[fm]
             for video_url, seg, entry in zip(video_urls, stream['segs'], entries):
@@ -261,6 +277,8 @@ class YoukuIE(InfoExtractor):
                     'format_id': self.get_format_name(fm),
                     'ext': self.parse_ext_l(fm),
                     'filesize': int(seg['size']),
+                    'width': stream.get('width'),
+                    'height': stream.get('height'),
                 })
 
         return {
@@ -269,3 +287,52 @@ class YoukuIE(InfoExtractor):
             'title': title,
             'entries': entries,
         }
+
+
+class YoukuShowIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?youku\.com/show_page/id_(?P<id>[0-9a-z]+)\.html'
+    IE_NAME = 'youku:show'
+
+    _TEST = {
+        'url': 'http://www.youku.com/show_page/id_zc7c670be07ff11e48b3f.html',
+        'info_dict': {
+            'id': 'zc7c670be07ff11e48b3f',
+            'title': '花千骨 未删减版',
+            'description': 'md5:578d4f2145ae3f9128d9d4d863312910',
+        },
+        'playlist_count': 50,
+    }
+
+    _PAGE_SIZE = 40
+
+    def _find_videos_in_page(self, webpage):
+        videos = re.findall(
+            r'<li><a[^>]+href="(?P<url>https?://v\.youku\.com/[^"]+)"[^>]+title="(?P<title>[^"]+)"', webpage)
+        return [
+            self.url_result(video_url, YoukuIE.ie_key(), title)
+            for video_url, title in videos]
+
+    def _real_extract(self, url):
+        show_id = self._match_id(url)
+        webpage = self._download_webpage(url, show_id)
+
+        entries = self._find_videos_in_page(webpage)
+
+        playlist_title = self._html_search_regex(
+            r'<span[^>]+class="name">([^<]+)</span>', webpage, 'playlist title', fatal=False)
+        detail_div = get_element_by_attribute('class', 'detail', webpage) or ''
+        playlist_description = self._html_search_regex(
+            r'<span[^>]+style="display:none"[^>]*>([^<]+)</span>',
+            detail_div, 'playlist description', fatal=False)
+
+        for idx in itertools.count(1):
+            episodes_page = self._download_webpage(
+                'http://www.youku.com/show_episode/id_%s.html' % show_id,
+                show_id, query={'divid': 'reload_%d' % (idx * self._PAGE_SIZE + 1)},
+                note='Downloading episodes page %d' % idx)
+            new_entries = self._find_videos_in_page(episodes_page)
+            entries.extend(new_entries)
+            if len(new_entries) < self._PAGE_SIZE:
+                break
+
+        return self.playlist_result(entries, show_id, playlist_title, playlist_description)
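
For context, the episode paging that the new YoukuShowIE relies on boils down to the loop sketched below. This is only a minimal standalone illustration of that scheme, assuming each show_episode fragment lists at most 40 episodes; fetch_page() is a hypothetical stand-in for InfoExtractor._download_webpage() and is not part of youtube-dl.

# Minimal sketch of YoukuShowIE's paging: request show_episode fragments
# (divid=reload_41, reload_81, ...) until one returns fewer than PAGE_SIZE
# links. fetch_page() is a hypothetical HTTP helper, not a youtube-dl API.
import itertools
import re

PAGE_SIZE = 40
EPISODE_RE = re.compile(
    r'<li><a[^>]+href="(?P<url>https?://v\.youku\.com/[^"]+)"[^>]+title="(?P<title>[^"]+)"')


def fetch_page(show_id, divid=None):
    """Hypothetical helper returning the HTML of one episode-list fragment."""
    raise NotImplementedError


def collect_episodes(show_id, first_page_html):
    entries = EPISODE_RE.findall(first_page_html)  # (url, title) tuples
    for idx in itertools.count(1):
        # reload_41 presumably starts at episode 41, reload_81 at 81, and so on.
        fragment = fetch_page(show_id, divid='reload_%d' % (idx * PAGE_SIZE + 1))
        new_entries = EPISODE_RE.findall(fragment)
        entries.extend(new_entries)
        if len(new_entries) < PAGE_SIZE:  # a short page is taken as the last one
            break
    return entries

Stopping on the first short page avoids having to know the total episode count up front, which is the same design choice the extractor itself makes.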