# youtube_dl/extractor/bambuser.py
from __future__ import unicode_literals

import itertools
import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse,
)
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    sanitized_Request,
)


class BambuserIE(InfoExtractor):
    IE_NAME = 'bambuser'
    _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
    _API_KEY = '005f64509e19a868399060af746a00aa'
    _LOGIN_URL = 'https://bambuser.com/user'
    _NETRC_MACHINE = 'bambuser'

    _TEST = {
        'url': 'http://bambuser.com/v/4050584',
        # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
        # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
        'info_dict': {
            'id': '4050584',
            'ext': 'flv',
            'title': 'Education engineering days - lightning talks',
            'uploader': 'pixelversity',
            'uploader_id': '344706',
            'timestamp': 1382976692,
            'upload_date': '20131028',
        },
        'params': {
            # It does not respect the 'Range' header, so it would download the whole
            # video, which caused the travis builds to fail:
            # https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59
            'skip_download': True,
        },
    }

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_form = {
            'form_id': 'user_login',
            'op': 'Log in',
            'name': username,
            'pass': password,
        }

        request = sanitized_Request(
            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        request.add_header('Referer', self._LOGIN_URL)
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        login_error = self._html_search_regex(
            r'(?s)<div class="messages error">(.+?)</div>',
            response, 'login error', default=None)
        if login_error:
            raise ExtractorError(
                'Unable to login: %s' % login_error, expected=True)

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Ask the player API for the broadcast metadata
        info = self._download_json(
            'http://player-c.api.bambuser.com/getVideo.json?api_key=%s&vid=%s'
            % (self._API_KEY, video_id), video_id)

        error = info.get('error')
        if error:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error), expected=True)

        result = info['result']

        return {
            'id': video_id,
            'title': result['title'],
            'url': result['url'],
            'thumbnail': result.get('preview'),
            'duration': int_or_none(result.get('length')),
            'uploader': result.get('username'),
            'uploader_id': compat_str(result.get('owner', {}).get('uid')),
            'timestamp': int_or_none(result.get('created')),
            'fps': float_or_none(result.get('framerate')),
            'view_count': int_or_none(result.get('views_total')),
            'comment_count': int_or_none(result.get('comment_count')),
        }


class BambuserChannelIE(InfoExtractor):
    IE_NAME = 'bambuser:channel'
    _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
    # The maximum number of results we can get with each request
    _STEP = 50
    _TEST = {
        'url': 'http://bambuser.com/channel/pixelversity',
        'info_dict': {
            'title': 'pixelversity',
        },
        'playlist_mincount': 60,
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')

        urls = []
        last_id = ''
        for i in itertools.count(1):
            req_url = (
                'http://bambuser.com/xhr-api/index.php?username={user}'
                '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                '&method=broadcast&format=json&vid_older_than={last}'
            ).format(user=user, count=self._STEP, last=last_id)
            req = sanitized_Request(req_url)
            # Without setting this header, we wouldn't get any results
            req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
            data = self._download_json(
                req, user, 'Downloading page %d' % i)
            results = data['result']
            if not results:
                break
            # Page through the channel by asking only for broadcasts older than
            # the last one already seen
            last_id = results[-1]['vid']
            urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)

        return {
            '_type': 'playlist',
            'title': user,
            'entries': urls,
        }