+from __future__ import unicode_literals
+
import re
import json
import itertools
class BambuserIE(InfoExtractor):
- IE_NAME = u'bambuser'
+ IE_NAME = 'bambuser'
_VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
_API_KEY = '005f64509e19a868399060af746a00aa'
_TEST = {
- u'url': u'http://bambuser.com/v/4050584',
- u'md5': u'fba8f7693e48fd4e8641b3fd5539a641',
- u'info_dict': {
- u'id': u'4050584',
- u'ext': u'flv',
- u'title': u'Education engineering days - lightning talks',
- u'duration': 3741,
- u'uploader': u'pixelversity',
- u'uploader_id': u'344706',
+ 'url': 'http://bambuser.com/v/4050584',
+ # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
+ # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+ 'info_dict': {
+ 'id': '4050584',
+ 'ext': 'flv',
+ 'title': 'Education engineering days - lightning talks',
+ 'duration': 3741,
+ 'uploader': 'pixelversity',
+ 'uploader_id': '344706',
+ },
+ 'params': {
+ # The server doesn't respect the 'Range' header; it would download the
+ # whole video, which caused the Travis builds to fail: https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59
+ 'skip_download': True,
},
}
class BambuserChannelIE(InfoExtractor):
- IE_NAME = u'bambuser:channel'
- _VALID_URL = r'http://bambuser.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
+ IE_NAME = 'bambuser:channel'
+ _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
# The maximum number we can get with each request
_STEP = 50
+ _TEST = {
+ 'url': 'http://bambuser.com/channel/pixelversity',
+ 'info_dict': {
+ 'title': 'pixelversity',
+ },
+ 'playlist_mincount': 60,
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
req = compat_urllib_request.Request(req_url)
# Without setting this header, we wouldn't get any result
req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
- info_json = self._download_webpage(req, user,
- u'Downloading page %d' % i)
- results = json.loads(info_json)['result']
- if len(results) == 0:
+ data = self._download_json(
+ req, user, 'Downloading page %d' % i)
+ results = data['result']
+ if not results:
break
last_id = results[-1]['vid']
urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)