]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/facebook.py
Tidy up the changelog.
[youtubedl] / youtube_dl / extractor / facebook.py
1 from __future__ import unicode_literals
2
3 import json
4 import re
5 import socket
6
7 from .common import InfoExtractor
8 from ..compat import (
9 compat_http_client,
10 compat_str,
11 compat_urllib_error,
12 compat_urllib_parse_unquote,
13 compat_urllib_request,
14 )
15 from ..utils import (
16 ExtractorError,
17 limit_length,
18 urlencode_postdata,
19 get_element_by_id,
20 clean_html,
21 )
22
23
class FacebookIE(InfoExtractor):
    """Extractor for Facebook-hosted videos.

    Matches the several URL layouts Facebook serves videos under
    (``video.php``/``photo.php`` query URLs, ``/videos/`` permalinks and
    ``video/embed``) and extracts the SD/HD progressive formats from the
    player parameters embedded in the watch page.  Logging in with a
    Facebook account (``--username``/``--password`` or a .netrc entry for
    machine ``facebook``) is supported for restricted videos.
    """
    _VALID_URL = r'''(?x)
                https?://(?:\w+\.)?facebook\.com/
                (?:[^#]*?\#!/)?
                (?:
                    (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
                    (?:v|video_id)=|
                    [^/]+/videos/(?:[^/]+/)?
                )
                (?P<id>[0-9]+)
                (?:.*)'''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'
    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
            'uploader': 'Tennis on Facebook',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
            'uploader': 'Asif Nawab Butt',
        },
        'expected_warnings': [
            'title'
        ]
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
        'only_matching': True,
    }]

    def _login(self):
        """Log in to Facebook with the configured credentials, if any.

        Failures are reported as downloader warnings rather than raised,
        so extraction of public videos still proceeds without a session.
        """
        (useremail, password) = self._get_login_info()
        if useremail is None:
            # No credentials configured: stay anonymous.
            return

        # Force an English locale so the token regexes below match
        # regardless of the server-chosen language.
        login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
        login_page_req.add_header('Cookie', 'locale=en_US')
        login_page = self._download_webpage(login_page_req, None,
            note='Downloading login page',
            errnote='Unable to download login page')
        # CSRF-style hidden form tokens that must be echoed back.
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(request, None,
                note='Logging in', errnote='unable to fetch login page')
            # If the response still contains the login form, authentication
            # failed (bad credentials or rate limiting).
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            # Facebook may interpose a security checkpoint after login;
            # answer it with "don't save this browser" to finish.
            check_form = {
                'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
                'h': self._search_regex(
                    r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
                'name_action_selected': 'dont_save',
            }
            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(check_req, None,
                note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to log in in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning('unable to log in: %s' % compat_str(err))
            return

    def _real_initialize(self):
        """Perform the optional login before any extraction starts."""
        self._login()

    def _real_extract(self, url):
        """Extract video formats, title and uploader for the given URL."""
        video_id = self._match_id(url)
        # Normalize every supported URL shape to the canonical watch page.
        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
        webpage = self._download_webpage(url, video_id)

        # The player configuration sits between these two literal JS
        # fragments; grab the JSON pairs in between.
        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if not m:
            # No player data: surface Facebook's own error message if the
            # page shows an interstitial (removed/private video).
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')
        data = dict(json.loads(m.group(1)))
        # 'params' is a URL-encoded JSON blob holding the stream URLs.
        params_raw = compat_urllib_parse_unquote(data['params'])
        params = json.loads(params_raw)

        formats = []
        for format_id, f in params['video_data'].items():
            if not f or not isinstance(f, list):
                continue
            for quality in ('sd', 'hd'):
                for src_type in ('src', 'src_no_ratelimit'):
                    src = f[0].get('%s_%s' % (quality, src_type))
                    if src:
                        formats.append({
                            'format_id': '%s_%s_%s' % (format_id, quality, src_type),
                            'url': src,
                            # Deprioritize 'progressive' URLs, which tend
                            # to be throttled.
                            'preference': -10 if format_id == 'progressive' else 0,
                        })
        if not formats:
            raise ExtractorError('Cannot find video formats')

        video_title = self._html_search_regex(
            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
            default=None)
        if not video_title:
            # Fall back to the photo-page caption, truncated to a sane
            # length for use as a title.
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', fatal=False)
            video_title = limit_length(video_title, 80)
        if not video_title:
            # Last resort: synthesize a title from the video id.
            video_title = 'Facebook video #%s' % video_id
        uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'uploader': uploader,
        }