Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/facebook.py
Update the changelog.
[youtubedl] / youtube_dl / extractor / facebook.py
1 from __future__ import unicode_literals
2
3 import json
4 import re
5 import socket
6
7 from .common import InfoExtractor
8 from ..compat import (
9 compat_etree_fromstring,
10 compat_http_client,
11 compat_urllib_error,
12 compat_urllib_parse_unquote,
13 compat_urllib_parse_unquote_plus,
14 )
15 from ..utils import (
16 error_to_compat_str,
17 ExtractorError,
18 limit_length,
19 sanitized_Request,
20 urlencode_postdata,
21 get_element_by_id,
22 clean_html,
23 )
24
25
class FacebookIE(InfoExtractor):
    """Extractor for videos hosted on facebook.com.

    Handles direct video pages (video.php, photo.php, video/embed,
    story.php), ``/videos/`` and ``/posts/`` permalinks, group permalinks,
    and the internal ``facebook:<id>`` scheme used to re-queue video ids
    discovered inside posts.
    """
    _VALID_URL = r'''(?x)
                (?:
                    https?://
                        (?:\w+\.)?facebook\.com/
                        (?:[^#]*?\#!/)?
                        (?:
                            (?:
                                video/video\.php|
                                photo\.php|
                                video\.php|
                                video/embed|
                                story\.php
                            )\?(?:.*?)(?:v|video_id|story_fbid)=|
                            [^/]+/videos/(?:[^/]+/)?|
                            [^/]+/posts/|
                            groups/[^/]+/permalink/
                        )|
                    facebook:
                )
                (?P<id>[0-9]+)
                '''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'

    # Spoof a desktop Chrome UA: Facebook serves markup variants depending
    # on the requesting browser.
    _CHROME_USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36'

    _VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'

    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
            'uploader': 'Tennis on Facebook',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
            'uploader': 'Asif Nawab Butt',
        },
        'expected_warnings': [
            'title'
        ]
    }, {
        'note': 'Video with DASH manifest',
        'url': 'https://www.facebook.com/video.php?v=957955867617029',
        'md5': '54706e4db4f5ad58fbad82dde1f1213f',
        'info_dict': {
            'id': '957955867617029',
            'ext': 'mp4',
            'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
            'uploader': 'Demy de Zeeuw',
        },
    }, {
        'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
        'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
        'info_dict': {
            'id': '544765982287235',
            'ext': 'mp4',
            'title': '"What are you doing running in the snow?"',
            'uploader': 'FailArmy',
        }
    }, {
        'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
        'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3',
        'info_dict': {
            'id': '1035862816472149',
            'ext': 'mp4',
            'title': 'What the Flock Is Going On In New Zealand Credit: ViralHog',
            'uploader': 'S. Saint',
        },
    }, {
        'note': 'swf params escaped',
        'url': 'https://www.facebook.com/barackobama/posts/10153664894881749',
        'md5': '97ba073838964d12c70566e0085c2b91',
        'info_dict': {
            'id': '10153664894881749',
            'ext': 'mp4',
            'title': 'Facebook video #10153664894881749',
        },
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
        'only_matching': True,
    }, {
        'url': 'facebook:544765982287235',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/groups/164828000315060/permalink/764967300301124/',
        'only_matching': True,
    }]

    def _login(self):
        """Log in with the credentials configured for this extractor, if any.

        Login failures are reported as warnings rather than raised, since
        many videos are accessible without authentication.  Only an explicit
        error message found in the response page is fatal.
        """
        (useremail, password) = self._get_login_info()
        if useremail is None:
            # No credentials configured — anonymous access.
            return

        login_page_req = sanitized_Request(self._LOGIN_URL)
        # Force an English page so the regexes below match reliably.
        self._set_cookie('facebook.com', 'locale', 'en_US')
        login_page = self._download_webpage(
            login_page_req, None,
            note='Downloading login page',
            errnote='Unable to download login page')
        # 'lsd' and 'lgnrnd' are hidden anti-CSRF fields that must be echoed
        # back in the login POST.
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(
            r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(
                request, None,
                note='Logging in', errnote='unable to fetch login page')
            # If the response still contains a login form, authentication
            # did not succeed.
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                error = self._html_search_regex(
                    r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
                    login_results, 'login error', default=None, group='error')
                if error:
                    raise ExtractorError('Unable to login: %s' % error, expected=True)
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            # Facebook may interpose a "remember browser" checkpoint page;
            # if its tokens are present, decline saving the device.
            fb_dtsg = self._search_regex(
                r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
            h = self._search_regex(
                r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)

            if not fb_dtsg or not h:
                # No checkpoint tokens — assume login completed normally.
                return

            check_form = {
                'fb_dtsg': fb_dtsg,
                'h': h,
                'name_action_selected': 'dont_save',
            }
            check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(
                check_req, None, note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            # Network-level failure: warn and continue anonymously.
            self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
            return

    def _real_initialize(self):
        """Attempt login once before any extraction takes place."""
        self._login()

    def _extract_from_url(self, url, video_id, fatal_if_no_video=True):
        """Download *url* and extract video formats and metadata.

        Returns ``(webpage, info_dict)`` on success.  If no video data can
        be found and *fatal_if_no_video* is False, returns
        ``(webpage, False)`` so the caller can fall back to treating the
        page as a post containing video links; otherwise raises
        ExtractorError.
        """
        req = sanitized_Request(url)
        req.add_header('User-Agent', self._CHROME_USER_AGENT)
        webpage = self._download_webpage(req, video_id)

        video_data = None

        # First source of video data: parameters passed to the Flash player
        # via swf.addParam(...) in an inline script.
        BEFORE = '{swf.addParam(param[0], param[1]);});'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(?:\n|\\\\n)(.*?)' + re.escape(AFTER), webpage)
        if m:
            # The JSON is embedded inside a JS string, so backslash escapes
            # must be undone before parsing.
            swf_params = m.group(1).replace('\\\\', '\\').replace('\\"', '"')
            data = dict(json.loads(swf_params))
            params_raw = compat_urllib_parse_unquote(data['params'])
            video_data = json.loads(params_raw)['video_data']

        def video_data_list2dict(video_data):
            # Group the flat list of stream entries by their stream_type so
            # the result matches the shape of the swf-params video_data.
            ret = {}
            for item in video_data:
                format_id = item['stream_type']
                ret.setdefault(format_id, []).append(item)
            return ret

        if not video_data:
            # Second source: the VideoConfig instance inside the
            # handleServerJS(...) bootstrap payload.
            server_js_data = self._parse_json(self._search_regex(
                r'handleServerJS\(({.+})\);', webpage, 'server js data', default='{}'), video_id)
            for item in server_js_data.get('instances', []):
                if item[1][0] == 'VideoConfig':
                    video_data = video_data_list2dict(item[2][0]['videoData'])
                    break

        if not video_data:
            if not fatal_if_no_video:
                return webpage, False
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')

        formats = []
        for format_id, f in video_data.items():
            # Normalize single-entry dicts to one-element lists; skip
            # anything that is still not a non-empty list.
            if f and isinstance(f, dict):
                f = [f]
            if not f or not isinstance(f, list):
                continue
            for quality in ('sd', 'hd'):
                for src_type in ('src', 'src_no_ratelimit'):
                    src = f[0].get('%s_%s' % (quality, src_type))
                    if src:
                        # Deprioritize 'progressive' streams; prefer HD.
                        preference = -10 if format_id == 'progressive' else 0
                        if quality == 'hd':
                            preference += 5
                        formats.append({
                            'format_id': '%s_%s_%s' % (format_id, quality, src_type),
                            'url': src,
                            'preference': preference,
                        })
            dash_manifest = f[0].get('dash_manifest')
            if dash_manifest:
                # The manifest is URL-encoded XML embedded in the JSON.
                formats.extend(self._parse_mpd_formats(
                    compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
        if not formats:
            raise ExtractorError('Cannot find video formats')

        self._sort_formats(formats)

        # Title: page header, then photo caption, then a generic fallback.
        video_title = self._html_search_regex(
            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
            default=None)
        if not video_title:
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', default=None)
        video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id
        uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))

        info_dict = {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'uploader': uploader,
        }

        return webpage, info_dict

    def _real_extract(self, url):
        """Extract a single video, or a playlist of the videos in a post."""
        video_id = self._match_id(url)

        # 'facebook:<id>' has no fetchable URL of its own; map it to the
        # canonical video page.
        real_url = self._VIDEO_PAGE_TEMPLATE % video_id if url.startswith('facebook:') else url
        webpage, info_dict = self._extract_from_url(real_url, video_id, fatal_if_no_video=False)

        if info_dict:
            return info_dict

        if '/posts/' in url:
            # A post may embed several videos; re-queue each id through the
            # 'facebook:' scheme and return them as a playlist.
            entries = [
                self.url_result('facebook:%s' % vid, FacebookIE.ie_key())
                for vid in self._parse_json(
                    self._search_regex(
                        r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])',
                        webpage, 'video ids', group='ids'),
                    video_id)]

            return self.playlist_result(entries, video_id)
        else:
            # Last resort: retry against the canonical video page, this time
            # failing hard if no video data is found.
            _, info_dict = self._extract_from_url(
                self._VIDEO_PAGE_TEMPLATE % video_id,
                video_id, fatal_if_no_video=True)
            return info_dict