diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py
index 15f31529822bcba124cfb12bcb9e56566b3bfba7..6816dacb665e2253a132cfe678999a1129860a0b 100644
--- a/youtube_dl/extractor/viki.py
+++ b/youtube_dl/extractor/viki.py
@@ -2,22 +2,22 @@ from __future__ import unicode_literals
 
 import re
 
+from ..compat import compat_urlparse
 from ..utils import (
     ExtractorError,
     unescapeHTML,
     unified_strdate,
     US_RATINGS,
 )
-from .subtitles import SubtitlesInfoExtractor
+from .common import InfoExtractor
 
 
-class VikiIE(SubtitlesInfoExtractor):
+class VikiIE(InfoExtractor):
     IE_NAME = 'viki'
 
     _VALID_URL = r'^https?://(?:www\.)?viki\.com/videos/(?P<id>[0-9]+v)'
     _TEST = {
         'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14',
-        'md5': 'a21454021c2646f5433514177e2caa5f',
         'info_dict': {
             'id': '1023585v',
             'ext': 'mp4',
@@ -31,8 +31,7 @@ class VikiIE(SubtitlesInfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
         title = self._og_search_title(webpage)
@@ -71,9 +70,6 @@ class VikiIE(SubtitlesInfoExtractor):
 
         # subtitles
         video_subtitles = self.extract_subtitles(video_id, info_webpage)
-        if self._downloader.params.get('listsubtitles', False):
-            self._list_available_subtitles(video_id, info_webpage)
-            return
 
         return {
             'id': video_id,
@@ -87,12 +83,15 @@ class VikiIE(SubtitlesInfoExtractor):
             'upload_date': upload_date,
         }
 
-    def _get_available_subtitles(self, video_id, info_webpage):
+    def _get_subtitles(self, video_id, info_webpage):
         res = {}
-        for sturl_html in re.findall(r'<track src="([^"]+)"/>', info_webpage):
+        for sturl_html in re.findall(r'<track src="([^"]+)"', info_webpage):
             sturl = unescapeHTML(sturl_html)
             m = re.search(r'/(?P<lang>[a-z]+)\.vtt', sturl)
             if not m:
                 continue
-            res[m.group('lang')] = sturl
+            res[m.group('lang')] = [{
+                'url': compat_urlparse.urljoin('http://www.viki.com', sturl),
+                'ext': 'vtt',
+            }]
         return res
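
For context on the subtitles interface this diff migrates to: InfoExtractor.extract_subtitles() only invokes _get_subtitles() when subtitle writing or listing was requested, and each per-language value is now a list of format dicts rather than a bare URL string. A minimal sketch of the shape the converted method returns is below; the language code and track path are illustrative examples, not values from a real Viki page.

    # Shape of the mapping _get_subtitles() now builds: language code ->
    # list of subtitle variants, each carrying a resolved URL and extension.
    # The concrete URL below is a made-up example.
    subtitles = {
        'en': [{
            'url': 'http://www.viki.com/player/subtitles/1023585v/en.vtt',  # hypothetical track URL
            'ext': 'vtt',
        }],
    }

Joining each track against 'http://www.viki.com' with compat_urlparse.urljoin leaves already-absolute track URLs untouched while resolving the relative paths embedded in the player markup.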