]> Raphaël G. Git Repositories - youtubedl/blobdiff - youtube_dl/extractor/ustream.py
debian/changelog: Finalize release.
[youtubedl] / youtube_dl / extractor / ustream.py
index cfe3a6840af834a1f4740c7c4f7290c63b591096..488b10df96e298c683cd02287e2da0c49f21a1cc 100644 (file)
@@ -1,36 +1,88 @@
+from __future__ import unicode_literals
+
+import json
 import re
 
 from .common import InfoExtractor
 import re
 
 from .common import InfoExtractor
+from ..utils import (
+    compat_urlparse,
+    get_meta_content,
+)
 
 
class UstreamIE(InfoExtractor):
    """Extractor for single recorded Ustream videos.

    Handles three URL shapes: /recorded/<id> (extracted directly),
    /embed/recorded/<id> and /embed/<id> (both redirected to the
    corresponding desktop /recorded/ URL via url_result).
    """
    _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<videoID>\d+)'
    IE_NAME = 'ustream'
    _TEST = {
        'url': 'http://www.ustream.tv/recorded/20274954',
        'md5': '088f151799e8f572f84eb62f17d73e5c',
        'info_dict': {
            'id': '20274954',
            'ext': 'flv',
            'uploader': 'Young Americans for Liberty',
            'title': 'Young Americans for Liberty February 7, 2012 2:28 AM',
        },
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        # some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990)
        if m.group('type') == 'embed/recorded':
            # video_id is already the recorded id; hand off to the desktop URL.
            desktop_url = 'http://www.ustream.tv/recorded/' + video_id
            return self.url_result(desktop_url, 'Ustream')
        if m.group('type') == 'embed':
            # Embed pages carry the real recorded id in a JS array literal.
            webpage = self._download_webpage(url, video_id)
            desktop_video_id = self._html_search_regex(
                r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id')
            desktop_url = 'http://www.ustream.tv/recorded/' + desktop_video_id
            return self.url_result(desktop_url, 'Ustream')

        # Direct FLV download URL derived from the recorded id.
        video_url = 'http://tcdn.ustream.tv/video/%s' % video_id
        webpage = self._download_webpage(url, video_id)

        self.report_extraction(video_id)

        video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
            webpage, 'title')

        # Uploader and thumbnail are optional (fatal=False): missing values
        # degrade the metadata but must not abort the extraction.
        uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
            webpage, 'uploader', fatal=False, flags=re.DOTALL)

        thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
            webpage, 'thumbnail', fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'flv',
            'title': video_title,
            'uploader': uploader,
            'thumbnail': thumbnail,
        }
+
+
class UstreamChannelIE(InfoExtractor):
    """Extractor that turns a Ustream channel page into a playlist of
    its recorded videos, collected by paging the social-stream JSON feed."""
    _VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
    IE_NAME = 'ustream:channel'

    def _real_extract(self, url):
        slug = re.match(self._VALID_URL, url).group('slug')
        webpage = self._download_webpage(url, slug)
        channel_id = get_meta_content('ustream:channel_id', webpage)

        BASE = 'http://www.ustream.tv'
        next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
        video_ids = []
        # Follow the feed's nextUrl chain until the server stops paging.
        while next_url:
            page = self._download_webpage(
                compat_urlparse.urljoin(BASE, next_url), channel_id)
            reply = json.loads(page)
            video_ids += re.findall(r'data-content-id="(\d.*)"', reply['data'])
            next_url = reply['nextUrl']

        entries = [
            self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream')
            for vid in video_ids
        ]
        return self.playlist_result(entries, channel_id)