diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py
index b80508efed09a7ccece8e6980706e7083d3b96e9..c193e66cad7275cffb6ee96e051d567b9262e773 100644
--- a/youtube_dl/extractor/bambuser.py
+++ b/youtube_dl/extractor/bambuser.py
@@ -1,34 +1,36 @@
+from __future__ import unicode_literals
+
 import re
 import json
 import itertools
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
 )
 
 
 class BambuserIE(InfoExtractor):
-    IE_NAME = u'bambuser'
+    IE_NAME = 'bambuser'
     _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
     _API_KEY = '005f64509e19a868399060af746a00aa'
 
     _TEST = {
-        u'url': u'http://bambuser.com/v/4050584',
+        'url': 'http://bambuser.com/v/4050584',
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
-        #u'md5': u'fba8f7693e48fd4e8641b3fd5539a641',
-        u'info_dict': {
-            u'id': u'4050584',
-            u'ext': u'flv',
-            u'title': u'Education engineering days - lightning talks',
-            u'duration': 3741,
-            u'uploader': u'pixelversity',
-            u'uploader_id': u'344706',
+        # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+        'info_dict': {
+            'id': '4050584',
+            'ext': 'flv',
+            'title': 'Education engineering days - lightning talks',
+            'duration': 3741,
+            'uploader': 'pixelversity',
+            'uploader_id': '344706',
         },
-        u'params': {
+        'params': {
             # It doesn't respect the 'Range' header, it would download the whole video
             # caused the travis builds to fail: https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59
-            u'skip_download': True,
+            'skip_download': True,
         },
     }
 
@@ -36,7 +38,7 @@ class BambuserIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
-            '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
+                    '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
         info_json = self._download_webpage(info_url, video_id)
         info = json.loads(info_json)['result']
 
@@ -48,15 +50,22 @@ class BambuserIE(InfoExtractor):
             'duration': int(info['length']),
             'view_count': int(info['views_total']),
             'uploader': info['username'],
-            'uploader_id': info['uid'],
+            'uploader_id': info['owner']['uid'],
         }
 
 
 class BambuserChannelIE(InfoExtractor):
-    IE_NAME = u'bambuser:channel'
-    _VALID_URL = r'http://bambuser.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
+    IE_NAME = 'bambuser:channel'
+    _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
     # The maximum number we can get with each request
     _STEP = 50
+    _TEST = {
+        'url': 'http://bambuser.com/channel/pixelversity',
+        'info_dict': {
+            'title': 'pixelversity',
+        },
+        'playlist_mincount': 60,
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -64,17 +73,18 @@ class BambuserChannelIE(InfoExtractor):
         urls = []
         last_id = ''
         for i in itertools.count(1):
-            req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
+            req_url = (
+                'http://bambuser.com/xhr-api/index.php?username={user}'
                 '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                 '&method=broadcast&format=json&vid_older_than={last}'
-                ).format(user=user, count=self._STEP, last=last_id)
+            ).format(user=user, count=self._STEP, last=last_id)
             req = compat_urllib_request.Request(req_url)
             # Without setting this header, we wouldn't get any result
             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
-            info_json = self._download_webpage(req, user,
-                u'Downloading page %d' % i)
-            results = json.loads(info_json)['result']
-            if len(results) == 0:
+            data = self._download_json(
+                req, user, 'Downloading page %d' % i)
+            results = data['result']
+            if not results:
                 break
             last_id = results[-1]['vid']
             urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)