youtube_dl/extractor/bambuser.py
from __future__ import unicode_literals

import re
import itertools

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
    compat_str,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    float_or_none,
)


class BambuserIE(InfoExtractor):
    IE_NAME = 'bambuser'
    _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
    _API_KEY = '005f64509e19a868399060af746a00aa'
    _LOGIN_URL = 'https://bambuser.com/user'
    _NETRC_MACHINE = 'bambuser'

    _TEST = {
        'url': 'http://bambuser.com/v/4050584',
        # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
        # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
        'info_dict': {
            'id': '4050584',
            'ext': 'flv',
            'title': 'Education engineering days - lightning talks',
            'duration': 3741,
            'uploader': 'pixelversity',
            'uploader_id': '344706',
            'timestamp': 1382976692,
            'upload_date': '20131028',
            'view_count': int,
        },
        'params': {
            # The server does not respect the 'Range' header and would send
            # the whole video, which made the Travis builds fail:
            # https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59
            'skip_download': True,
        },
    }

    def _login(self):
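        # Credentials come from the --username/--password options or, with
        # --netrc, from the machine named by _NETRC_MACHINE.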
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_form = {
            'form_id': 'user_login',
            'op': 'Log in',
            'name': username,
            'pass': password,
        }

        request = compat_urllib_request.Request(
            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        request.add_header('Referer', self._LOGIN_URL)
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        login_error = self._html_search_regex(
            r'(?s)<div class="messages error">(.+?)</div>',
            response, 'login error', default=None)
        if login_error:
            raise ExtractorError(
                'Unable to login: %s' % login_error, expected=True)

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        video_id = self._match_id(url)

        info = self._download_json(
            'http://player-c.api.bambuser.com/getVideo.json?api_key=%s&vid=%s'
            % (self._API_KEY, video_id), video_id)

        error = info.get('error')
        if error:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error), expected=True)

        result = info['result']
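        # Sketch of the getVideo.json 'result' object as this extractor
        # assumes it; only the keys read below are relied upon:
        #   title, url, preview, length, username, owner.uid, created,
        #   framerate, views_total, comment_count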

        return {
            'id': video_id,
            'title': result['title'],
            'url': result['url'],
            'thumbnail': result.get('preview'),
            'duration': int_or_none(result.get('length')),
            'uploader': result.get('username'),
            'uploader_id': compat_str(result.get('owner', {}).get('uid')),
            'timestamp': int_or_none(result.get('created')),
            'fps': float_or_none(result.get('framerate')),
            'view_count': int_or_none(result.get('views_total')),
            'comment_count': int_or_none(result.get('comment_count')),
        }


class BambuserChannelIE(InfoExtractor):
    IE_NAME = 'bambuser:channel'
    _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
    # The maximum number of results the API returns per request
    _STEP = 50
    _TEST = {
        'url': 'http://bambuser.com/channel/pixelversity',
        'info_dict': {
            'title': 'pixelversity',
        },
        'playlist_mincount': 60,
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        urls = []
        last_id = ''
        for i in itertools.count(1):
            req_url = (
                'http://bambuser.com/xhr-api/index.php?username={user}'
                '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                '&method=broadcast&format=json&vid_older_than={last}'
            ).format(user=user, count=self._STEP, last=last_id)
            req = compat_urllib_request.Request(req_url)
            # Without this Referer header the API does not return any results
            req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
            data = self._download_json(
                req, user, 'Downloading page %d' % i)
            results = data['result']
            if not results:
                break
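            # Page by using the oldest vid seen so far as the 'vid_older_than'
            # cursor for the next request.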
            last_id = results[-1]['vid']
            urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)

        return {
            '_type': 'playlist',
            'title': user,
            'entries': urls,
        }
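

# Minimal manual smoke test, not part of the extractor itself: a sketch that
# assumes the Bambuser endpoints above are still reachable. Run it with
# `python -m youtube_dl.extractor.bambuser` so the relative imports resolve.
if __name__ == '__main__':
    import youtube_dl

    with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
        # URL taken from BambuserIE._TEST above.
        info = ydl.extract_info('http://bambuser.com/v/4050584', download=False)
        print(info.get('title'), info.get('uploader'), info.get('duration'))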