]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/instagram.py
7e0e838f05a5e4a527cd9cf89e84f884ac1b2498
[youtubedl] / youtube_dl / extractor / instagram.py
1 from __future__ import unicode_literals
2
3 import itertools
4 import hashlib
5 import json
6 import re
7
8 from .common import InfoExtractor
9 from ..compat import (
10 compat_str,
11 compat_HTTPError,
12 )
13 from ..utils import (
14 ExtractorError,
15 get_element_by_attribute,
16 int_or_none,
17 lowercase_escape,
18 std_headers,
19 try_get,
20 url_or_none,
21 )
22
23
class InstagramIE(InfoExtractor):
    """Extract video(s) from a single Instagram post URL (/p/<shortcode>).

    Single-video posts yield a regular video result; multi-video
    ("sidecar") posts are returned as a playlist of their child videos.
    """
    _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': 'naomipq',
            'uploader': 'Naomi Leonor Phan-Quang',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
    }, {
        # missing description
        'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
        'info_dict': {
            'id': 'BA-pQFBG8HZ',
            'ext': 'mp4',
            'title': 'Video by britneyspears',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1453760977,
            'upload_date': '20160125',
            'uploader_id': 'britneyspears',
            'uploader': 'Britney Spears',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # multi video post
        'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
        'playlist': [{
            'info_dict': {
                'id': 'BQ0dSaohpPW',
                'ext': 'mp4',
                'title': 'Video 1',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dTpOhuHT',
                'ext': 'mp4',
                'title': 'Video 2',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dT7RBFeF',
                'ext': 'mp4',
                'title': 'Video 3',
            },
        }],
        'info_dict': {
            'id': 'BQ0eAlwhDrw',
            'title': 'Post by instagram',
            'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
        },
    }, {
        'url': 'https://instagram.com/p/-Cmh1cukG2/',
        'only_matching': True,
    }, {
        'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_embed_url(webpage):
        """Find an Instagram post embedded inside an arbitrary HTML page.

        Returns the iframe embed URL when an
        <iframe src="...instagram.com/p/<id>/embed..."> is present;
        otherwise falls back to the first link inside a
        <blockquote class="instagram-media"> embed snippet. Returns None
        when neither form is found.
        """
        # Preferred form: a direct iframe embed.
        mobj = re.search(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
            webpage)
        if mobj:
            return mobj.group('url')

        # Fallback: blockquote-style embed code produced by Instagram's
        # "Embed" button; take the first anchor inside it.
        blockquote_el = get_element_by_attribute(
            'class', 'instagram-media', webpage)
        if blockquote_el is None:
            return

        mobj = re.search(
            r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
        if mobj:
            return mobj.group('link')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        # Canonicalize to the matched prefix, dropping query string/fragment.
        url = mobj.group('url')

        webpage = self._download_webpage(url, video_id)

        # All metadata fields start as None; they are filled from the
        # embedded JSON when available and from regex/og-tag fallbacks below.
        (video_url, description, thumbnail, timestamp, uploader,
         uploader_id, like_count, comment_count, comments, height,
         width) = [None] * 11

        # Instagram embeds page data as JSON in window._sharedData.
        # default='{}' + fatal=False means absence degrades gracefully to
        # the regex/og-tag fallbacks further down.
        shared_data = self._parse_json(
            self._search_regex(
                r'window\._sharedData\s*=\s*({.+?});',
                webpage, 'shared data', default='{}'),
            video_id, fatal=False)
        if shared_data:
            # Two known layouts: the newer GraphQL shape
            # (graphql.shortcode_media) and the older plain 'media' object.
            media = try_get(
                shared_data,
                (lambda x: x['entry_data']['PostPage'][0]['graphql']['shortcode_media'],
                 lambda x: x['entry_data']['PostPage'][0]['media']),
                dict)
            if media:
                video_url = media.get('video_url')
                height = int_or_none(media.get('dimensions', {}).get('height'))
                width = int_or_none(media.get('dimensions', {}).get('width'))
                description = try_get(
                    media, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
                    compat_str) or media.get('caption')
                thumbnail = media.get('display_src')
                # 'taken_at_timestamp' is the GraphQL key, 'date' the legacy one.
                timestamp = int_or_none(media.get('taken_at_timestamp') or media.get('date'))
                uploader = media.get('owner', {}).get('full_name')
                uploader_id = media.get('owner', {}).get('username')

                def get_count(key, kind):
                    # Counts also exist under two layouts:
                    # edge_media_<key>.count (GraphQL) or <kind>s.count (legacy).
                    return int_or_none(try_get(
                        media, (lambda x: x['edge_media_%s' % key]['count'],
                                lambda x: x['%ss' % kind]['count'])))
                like_count = get_count('preview_like', 'like')
                comment_count = get_count('to_comment', 'comment')

                # Only comments with non-empty text are kept.
                comments = [{
                    'author': comment.get('user', {}).get('username'),
                    'author_id': comment.get('user', {}).get('id'),
                    'id': comment.get('id'),
                    'text': comment.get('text'),
                    'timestamp': int_or_none(comment.get('created_at')),
                } for comment in media.get(
                    'comments', {}).get('nodes', []) if comment.get('text')]
                if not video_url:
                    # No top-level video: this may be a sidecar (multi-video)
                    # post; collect its video children into a playlist.
                    edges = try_get(
                        media, lambda x: x['edge_sidecar_to_children']['edges'],
                        list) or []
                    if edges:
                        entries = []
                        for edge_num, edge in enumerate(edges, start=1):
                            node = try_get(edge, lambda x: x['node'], dict)
                            if not node:
                                continue
                            # Image children have no video_url and are skipped.
                            node_video_url = url_or_none(node.get('video_url'))
                            if not node_video_url:
                                continue
                            entries.append({
                                'id': node.get('shortcode') or node['id'],
                                'title': 'Video %d' % edge_num,
                                'url': node_video_url,
                                'thumbnail': node.get('display_url'),
                                'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])),
                                'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])),
                                'view_count': int_or_none(node.get('video_view_count')),
                            })
                        return self.playlist_result(
                            entries, video_id,
                            'Post by %s' % uploader_id if uploader_id else None,
                            description)

        # Fallbacks for when _sharedData was missing or incomplete.
        if not video_url:
            video_url = self._og_search_video_url(webpage, secure=False)

        formats = [{
            'url': video_url,
            'width': width,
            'height': height,
        }]

        if not uploader_id:
            uploader_id = self._search_regex(
                r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
                webpage, 'uploader id', fatal=False)

        if not description:
            description = self._search_regex(
                r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
            if description is not None:
                # The raw JSON string may contain \uXXXX escapes.
                description = lowercase_escape(description)

        if not thumbnail:
            thumbnail = self._og_search_thumbnail(webpage)

        return {
            'id': video_id,
            'formats': formats,
            'ext': 'mp4',
            'title': 'Video by %s' % uploader_id,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'uploader': uploader,
            'like_count': like_count,
            'comment_count': comment_count,
            'comments': comments,
        }
228
229
class InstagramUserIE(InfoExtractor):
    """Extract all videos from an Instagram user profile as a playlist.

    Pages through the profile's timeline via Instagram's GraphQL query
    endpoint, yielding url_result entries that defer full extraction to
    InstagramIE.
    """
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
    IE_DESC = 'Instagram user profile'
    IE_NAME = 'instagram:user'
    _TEST = {
        'url': 'https://instagram.com/porsche',
        'info_dict': {
            'id': 'porsche',
            'title': 'porsche',
        },
        'playlist_count': 5,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 5,
        }
    }

    # Class-level cache of the X-Instagram-GIS signature recipe that last
    # succeeded, so later pages (and later profiles in the same run) skip
    # the trial-and-error loop in _entries.
    _gis_tmpl = None

    def _entries(self, data):
        """Yield video entries for the profile described by *data*.

        *data* is the parsed window._sharedData object from the profile
        page; it must contain the user id and CSRF token. Fetches the
        timeline 12 posts at a time until pagination ends.
        """
        def get_count(suffix):
            # NOTE: closes over `node`, which is (re)bound in the loop
            # below before this is called.
            return int_or_none(try_get(
                node, lambda x: x['edge_media_' + suffix]['count']))

        uploader_id = data['entry_data']['ProfilePage'][0]['graphql']['user']['id']
        csrf_token = data['config']['csrf_token']
        # Hard-coded fallback used when the page carries no rhx_gis value;
        # presumably a constant observed to be accepted at the time — may rot.
        rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8'

        self._set_cookie('instagram.com', 'ig_pr', '1')

        cursor = ''
        for page_num in itertools.count(1):
            variables = json.dumps({
                'id': uploader_id,
                'first': 12,
                'after': cursor,
            })

            # Each request must carry an X-Instagram-GIS header: the MD5 of
            # '<template>:<variables>'. The template recipe has changed
            # server-side over time, so try the known variants in order
            # until one is not rejected with HTTP 403.
            if self._gis_tmpl:
                gis_tmpls = [self._gis_tmpl]
            else:
                gis_tmpls = [
                    '%s' % rhx_gis,
                    '',
                    '%s:%s' % (rhx_gis, csrf_token),
                    '%s:%s:%s' % (rhx_gis, csrf_token, std_headers['User-Agent']),
                ]

            for gis_tmpl in gis_tmpls:
                try:
                    media = self._download_json(
                        'https://www.instagram.com/graphql/query/', uploader_id,
                        'Downloading JSON page %d' % page_num, headers={
                            'X-Requested-With': 'XMLHttpRequest',
                            'X-Instagram-GIS': hashlib.md5(
                                ('%s:%s' % (gis_tmpl, variables)).encode('utf-8')).hexdigest(),
                        }, query={
                            'query_hash': '42323d64886122307be10013ad2dcc44',
                            'variables': variables,
                        })['data']['user']['edge_owner_to_timeline_media']
                    # Remember the recipe that worked for subsequent pages.
                    self._gis_tmpl = gis_tmpl
                    break
                except ExtractorError as e:
                    # 403 means the signature was rejected: try the next
                    # recipe, unless this was the last one. Anything else
                    # (or exhausting all recipes) propagates.
                    if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                        if gis_tmpl != gis_tmpls[-1]:
                            continue
                    raise

            edges = media.get('edges')
            if not edges or not isinstance(edges, list):
                break

            for edge in edges:
                node = edge.get('node')
                if not node or not isinstance(node, dict):
                    continue
                # Skip non-video posts (photos, etc.).
                if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
                    continue
                video_id = node.get('shortcode')
                if not video_id:
                    continue

                # Defer full extraction to InstagramIE via the post URL.
                info = self.url_result(
                    'https://instagram.com/p/%s/' % video_id,
                    ie=InstagramIE.ie_key(), video_id=video_id)

                description = try_get(
                    node, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
                    compat_str)
                thumbnail = node.get('thumbnail_src') or node.get('display_src')
                timestamp = int_or_none(node.get('taken_at_timestamp'))

                comment_count = get_count('to_comment')
                like_count = get_count('preview_like')
                view_count = int_or_none(node.get('video_view_count'))

                info.update({
                    'description': description,
                    'thumbnail': thumbnail,
                    'timestamp': timestamp,
                    'comment_count': comment_count,
                    'like_count': like_count,
                    'view_count': view_count,
                })

                yield info

            # Stop when pagination metadata is missing, exhausted, or the
            # continuation cursor is absent/malformed.
            page_info = media.get('page_info')
            if not page_info or not isinstance(page_info, dict):
                break

            has_next_page = page_info.get('has_next_page')
            if not has_next_page:
                break

            cursor = page_info.get('end_cursor')
            if not cursor or not isinstance(cursor, compat_str):
                break

    def _real_extract(self, url):
        username = self._match_id(url)

        webpage = self._download_webpage(url, username)

        # Fatal here (no default=): without sharedData there is nothing
        # to paginate from.
        data = self._parse_json(
            self._search_regex(
                r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data'),
            username)

        return self.playlist_result(
            self._entries(data), username, username)