diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py
index ccd31c4c7093d54e86df50a42600d08e12e55005..0eb1930c2d24bb01acd57292d4fe7e1b9a330bff 100644
--- a/youtube_dl/extractor/bambuser.py
+++ b/youtube_dl/extractor/bambuser.py
@@ -1,12 +1,16 @@
 from __future__ import unicode_literals
 
 import re
-import json
 import itertools
 
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
-    compat_urllib_request,
+    ExtractorError,
+    float_or_none,
+    int_or_none,
+    sanitized_Request,
+    urlencode_postdata,
 )
 
 
@@ -14,11 +18,13 @@ class BambuserIE(InfoExtractor):
     IE_NAME = 'bambuser'
     _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
     _API_KEY = '005f64509e19a868399060af746a00aa'
+    _LOGIN_URL = 'https://bambuser.com/user'
+    _NETRC_MACHINE = 'bambuser'
 
     _TEST = {
         'url': 'http://bambuser.com/v/4050584',
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
-        #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+        # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
         'info_dict': {
             'id': '4050584',
             'ext': 'flv',
@@ -26,6 +32,9 @@ class BambuserIE(InfoExtractor):
             'duration': 3741,
             'uploader': 'pixelversity',
             'uploader_id': '344706',
+            'timestamp': 1382976692,
+            'upload_date': '20131028',
+            'view_count': int,
         },
         'params': {
             # It doesn't respect the 'Range' header, it would download the whole video
@@ -34,23 +43,60 @@ class BambuserIE(InfoExtractor):
         },
     }
 
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+
+        login_form = {
+            'form_id': 'user_login',
+            'op': 'Log in',
+            'name': username,
+            'pass': password,
+        }
+
+        request = sanitized_Request(
+            self._LOGIN_URL, urlencode_postdata(login_form))
+        request.add_header('Referer', self._LOGIN_URL)
+        response = self._download_webpage(
+            request, None, 'Logging in as %s' % username)
+
+        login_error = self._html_search_regex(
+            r'(?s)<div class="messages error">(.+?)</div>',
+            response, 'login error', default=None)
+        if login_error:
+            raise ExtractorError(
+                'Unable to login: %s' % login_error, expected=True)
+
+    def _real_initialize(self):
+        self._login()
+
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
-            '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
-        info_json = self._download_webpage(info_url, video_id)
-        info = json.loads(info_json)['result']
+        video_id = self._match_id(url)
+
+        info = self._download_json(
+            'http://player-c.api.bambuser.com/getVideo.json?api_key=%s&vid=%s'
+            % (self._API_KEY, video_id), video_id)
+
+        error = info.get('error')
+        if error:
+            raise ExtractorError(
+                '%s returned error: %s' % (self.IE_NAME, error), expected=True)
+
+        result = info['result']
 
         return {
             'id': video_id,
-            'title': info['title'],
-            'url': info['url'],
-            'thumbnail': info.get('preview'),
-            'duration': int(info['length']),
-            'view_count': int(info['views_total']),
-            'uploader': info['username'],
-            'uploader_id': info['uid'],
+            'title': result['title'],
+            'url': result['url'],
+            'thumbnail': result.get('preview'),
+            'duration': int_or_none(result.get('length')),
+            'uploader': result.get('username'),
+            'uploader_id': compat_str(result.get('owner', {}).get('uid')),
+            'timestamp': int_or_none(result.get('created')),
+            'fps': float_or_none(result.get('framerate')),
+            'view_count': int_or_none(result.get('views_total')),
+            'comment_count': int_or_none(result.get('comment_count')),
         }
 
 
@@ -59,6 +105,13 @@ class BambuserChannelIE(InfoExtractor):
     _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
     # The maximum number we can get with each request
     _STEP = 50
+    _TEST = {
+        'url': 'http://bambuser.com/channel/pixelversity',
+        'info_dict': {
+            'title': 'pixelversity',
+        },
+        'playlist_mincount': 60,
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -66,17 +119,18 @@ class BambuserChannelIE(InfoExtractor):
         urls = []
         last_id = ''
         for i in itertools.count(1):
-            req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
+            req_url = (
+                'http://bambuser.com/xhr-api/index.php?username={user}'
                 '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                 '&method=broadcast&format=json&vid_older_than={last}'
-                ).format(user=user, count=self._STEP, last=last_id)
-            req = compat_urllib_request.Request(req_url)
+            ).format(user=user, count=self._STEP, last=last_id)
+            req = sanitized_Request(req_url)
             # Without setting this header, we wouldn't get any result
             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
-            info_json = self._download_webpage(req, user,
-                'Downloading page %d' % i)
-            results = json.loads(info_json)['result']
-            if len(results) == 0:
+            data = self._download_json(
+                req, user, 'Downloading page %d' % i)
+            results = data['result']
+            if not results:
                 break
             last_id = results[-1]['vid']
             urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)