]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/facebook.py
debian/control: Add recommends on aria2 | wget | curl to use external downloaders.
[youtubedl] / youtube_dl / extractor / facebook.py
1 from __future__ import unicode_literals
2
3 import json
4 import re
5 import socket
6
7 from .common import InfoExtractor
8 from ..compat import (
9 compat_http_client,
10 compat_str,
11 compat_urllib_error,
12 compat_urllib_parse,
13 compat_urllib_request,
14 )
15 from ..utils import (
16 ExtractorError,
17 int_or_none,
18 limit_length,
19 urlencode_postdata,
20 )
21
22
class FacebookIE(InfoExtractor):
    """Extractor for videos hosted on facebook.com.

    Matches the classic video URLs (video.php, photo.php, video/embed,
    video/video.php) and extracts the direct HD or SD stream URL from the
    Flash player parameters embedded in the page.  Optionally logs in first
    (credentials via --username/--password or a 'facebook' .netrc entry) so
    that videos restricted to logged-in users can be fetched.
    """
    _VALID_URL = r'''(?x)
        https?://(?:\w+\.)?facebook\.com/
        (?:[^#]*?\#!/)?
        (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
        (?:v|video_id)=(?P<id>[0-9]+)
        (?:.*)'''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'
    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
        }
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }]

    def _login(self):
        """Log in to Facebook with the configured credentials, if any.

        Best-effort: on any failure (bad credentials, rate limit, network
        error, checkpoint challenge) a warning is reported and extraction
        proceeds unauthenticated rather than aborting.
        """
        (useremail, password) = self._get_login_info()
        if useremail is None:
            # No credentials configured; skip authentication entirely.
            return

        # The login form requires the 'lsd' and 'lgnrnd' hidden tokens, so
        # fetch the login page first (forcing an English locale so the regex
        # based scraping below stays stable).
        login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
        login_page_req.add_header('Cookie', 'locale=en_US')
        login_page = self._download_webpage(login_page_req, None,
            note='Downloading login page',
            errnote='Unable to download login page')
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(request, None,
                note='Logging in', errnote='unable to fetch login page')
            # If the response still contains the login form, authentication
            # failed (wrong credentials or rate limiting).
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            # Facebook may interpose a "save this browser?" checkpoint page;
            # answer it with 'dont_save' to complete the login.
            check_form = {
                'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
                'h': self._search_regex(
                    r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
                'name_action_selected': 'dont_save',
            }
            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(check_req, None,
                note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to log in in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning('unable to log in: %s' % compat_str(err))
            return

    def _real_initialize(self):
        # Authenticate (if credentials are configured) before any extraction.
        self._login()

    def _real_extract(self, url):
        """Extract the direct video URL and metadata for a Facebook video.

        Returns an info dict with id, title, url, and (when available)
        duration and thumbnail.  Raises ExtractorError when the video page
        reports an error or the player data cannot be located.
        """
        video_id = self._match_id(url)
        # Normalize to the canonical video page regardless of which URL
        # variant matched.
        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
        webpage = self._download_webpage(url, video_id)

        # The player configuration is a JSON array sandwiched between these
        # two exact script fragments; they are matched literally.
        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if not m:
            # No player data: surface Facebook's own error message if the
            # page shows an interstitial (removed/private video), otherwise
            # fail generically.
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')
        data = dict(json.loads(m.group(1)))
        # 'params' is a URL-encoded JSON blob containing the stream URLs.
        params_raw = compat_urllib_parse.unquote(data['params'])
        params = json.loads(params_raw)
        video_data = params['video_data'][0]
        # Prefer the HD stream; fall back to SD.  Use .get() for both so a
        # missing key yields a clean ExtractorError instead of a raw
        # KeyError (previously video_data['sd_src'] could raise KeyError,
        # making the error branch below unreachable).
        video_url = video_data.get('hd_src') or video_data.get('sd_src')
        if not video_url:
            raise ExtractorError('Cannot find video URL')

        video_title = self._html_search_regex(
            r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title',
            fatal=False)
        if not video_title:
            # Fall back to the photo-page caption, truncated to keep
            # filenames manageable.
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', default=None)
            video_title = limit_length(video_title, 80)
        if not video_title:
            # Last resort: synthesize a title from the video id.
            video_title = 'Facebook video #%s' % video_id

        return {
            'id': video_id,
            'title': video_title,
            'url': video_url,
            'duration': int_or_none(video_data.get('video_duration')),
            'thumbnail': video_data.get('thumbnail_src'),
        }