youtube_dl/extractor/instagram.py
from __future__ import unicode_literals

import itertools
import hashlib
import json
import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_HTTPError,
)
from ..utils import (
    ExtractorError,
    get_element_by_attribute,
    int_or_none,
    lowercase_escape,
    std_headers,
    try_get,
    url_or_none,
)


class InstagramIE(InfoExtractor):
    _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/(?:p|tv)/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': 'naomipq',
            'uploader': 'Naomi Leonor Phan-Quang',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
    }, {
        # missing description
        'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
        'info_dict': {
            'id': 'BA-pQFBG8HZ',
            'ext': 'mp4',
            'title': 'Video by britneyspears',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1453760977,
            'upload_date': '20160125',
            'uploader_id': 'britneyspears',
            'uploader': 'Britney Spears',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # multi video post
        'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
        'playlist': [{
            'info_dict': {
                'id': 'BQ0dSaohpPW',
                'ext': 'mp4',
                'title': 'Video 1',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dTpOhuHT',
                'ext': 'mp4',
                'title': 'Video 2',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dT7RBFeF',
                'ext': 'mp4',
                'title': 'Video 3',
            },
        }],
        'info_dict': {
            'id': 'BQ0eAlwhDrw',
            'title': 'Post by instagram',
            'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
        },
    }, {
        'url': 'https://instagram.com/p/-Cmh1cukG2/',
        'only_matching': True,
    }, {
        'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/tv/aye83DjauH/',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_embed_url(webpage):
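        # Look for an embedded Instagram player: first an <iframe> whose src
        # points at instagram.com/p/.../embed, then the permalink inside the
        # embed widget's <blockquote class="instagram-media">.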
        mobj = re.search(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
            webpage)
        if mobj:
            return mobj.group('url')

        blockquote_el = get_element_by_attribute(
            'class', 'instagram-media', webpage)
        if blockquote_el is None:
            return

        mobj = re.search(
            r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
        if mobj:
            return mobj.group('link')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        url = mobj.group('url')

        webpage = self._download_webpage(url, video_id)

        (video_url, description, thumbnail, timestamp, uploader,
         uploader_id, like_count, comment_count, comments, height,
         width) = [None] * 11

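        # Instagram ships the post metadata as JSON assigned to
        # window._sharedData; prefer parsing that over scraping individual
        # fields out of the markup (regex fallbacks below).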
        shared_data = self._parse_json(
            self._search_regex(
                r'window\._sharedData\s*=\s*({.+?});',
                webpage, 'shared data', default='{}'),
            video_id, fatal=False)
        if shared_data:
            media = try_get(
                shared_data,
                (lambda x: x['entry_data']['PostPage'][0]['graphql']['shortcode_media'],
                 lambda x: x['entry_data']['PostPage'][0]['media']),
                dict)
            if media:
                video_url = media.get('video_url')
                height = int_or_none(media.get('dimensions', {}).get('height'))
                width = int_or_none(media.get('dimensions', {}).get('width'))
                description = try_get(
                    media, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
                    compat_str) or media.get('caption')
                thumbnail = media.get('display_src')
                timestamp = int_or_none(media.get('taken_at_timestamp') or media.get('date'))
                uploader = media.get('owner', {}).get('full_name')
                uploader_id = media.get('owner', {}).get('username')

                def get_count(key, kind):
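                    # counts are exposed either under the GraphQL
                    # edge_media_*['count'] keys or the legacy likes/comments
                    # objects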
                    return int_or_none(try_get(
                        media, (lambda x: x['edge_media_%s' % key]['count'],
                                lambda x: x['%ss' % kind]['count'])))
                like_count = get_count('preview_like', 'like')
                comment_count = get_count('to_comment', 'comment')

                comments = [{
                    'author': comment.get('user', {}).get('username'),
                    'author_id': comment.get('user', {}).get('id'),
                    'id': comment.get('id'),
                    'text': comment.get('text'),
                    'timestamp': int_or_none(comment.get('created_at')),
                } for comment in media.get(
                    'comments', {}).get('nodes', []) if comment.get('text')]
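                # A post without a top-level video_url may be a multi-video
                # (sidecar) post; return its child videos as a playlist.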
                if not video_url:
                    edges = try_get(
                        media, lambda x: x['edge_sidecar_to_children']['edges'],
                        list) or []
                    if edges:
                        entries = []
                        for edge_num, edge in enumerate(edges, start=1):
                            node = try_get(edge, lambda x: x['node'], dict)
                            if not node:
                                continue
                            node_video_url = url_or_none(node.get('video_url'))
                            if not node_video_url:
                                continue
                            entries.append({
                                'id': node.get('shortcode') or node['id'],
                                'title': 'Video %d' % edge_num,
                                'url': node_video_url,
                                'thumbnail': node.get('display_url'),
                                'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])),
                                'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])),
                                'view_count': int_or_none(node.get('video_view_count')),
                            })
                        return self.playlist_result(
                            entries, video_id,
                            'Post by %s' % uploader_id if uploader_id else None,
                            description)

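        # Fall back to Open Graph metadata when _sharedData did not yield a
        # video URL.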
        if not video_url:
            video_url = self._og_search_video_url(webpage, secure=False)

        formats = [{
            'url': video_url,
            'width': width,
            'height': height,
        }]

        if not uploader_id:
            uploader_id = self._search_regex(
                r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
                webpage, 'uploader id', fatal=False)

        if not description:
            description = self._search_regex(
                r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
            if description is not None:
                description = lowercase_escape(description)

        if not thumbnail:
            thumbnail = self._og_search_thumbnail(webpage)

        return {
            'id': video_id,
            'formats': formats,
            'ext': 'mp4',
            'title': 'Video by %s' % uploader_id,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'uploader': uploader,
            'like_count': like_count,
            'comment_count': comment_count,
            'comments': comments,
        }


class InstagramPlaylistIE(InfoExtractor):
    # A superclass for handling any kind of GraphQL-based query that results
    # in a playlist.
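    # Subclasses must define _QUERY_HASH and implement _parse_timeline_from()
    # and _query_vars_for().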

    _gis_tmpl = None  # used to cache GIS request type

    def _parse_graphql(self, webpage, item_id):
        # Reads a webpage and returns its GraphQL data.
        return self._parse_json(
            self._search_regex(
                r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data'),
            item_id)

    def _extract_graphql(self, data, url):
        # Parses GraphQL queries containing videos and generates a playlist.
        def get_count(suffix):
            return int_or_none(try_get(
                node, lambda x: x['edge_media_' + suffix]['count']))

        uploader_id = self._match_id(url)
        csrf_token = data['config']['csrf_token']
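        # rhx_gis is combined with the query variables to build the
        # X-Instagram-GIS signature below; a hard-coded fallback is used when
        # the page no longer exposes it.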
        rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8'

        cursor = ''
        for page_num in itertools.count(1):
            variables = {
                'first': 12,
                'after': cursor,
            }
            variables.update(self._query_vars_for(data))
            variables = json.dumps(variables)

            if self._gis_tmpl:
                gis_tmpls = [self._gis_tmpl]
            else:
                gis_tmpls = [
                    '%s' % rhx_gis,
                    '',
                    '%s:%s' % (rhx_gis, csrf_token),
                    '%s:%s:%s' % (rhx_gis, csrf_token, std_headers['User-Agent']),
                ]

            # try every known way of generating the GIS signature: use the
            # first one that works and cache it for future requests
            for gis_tmpl in gis_tmpls:
                try:
                    json_data = self._download_json(
                        'https://www.instagram.com/graphql/query/', uploader_id,
                        'Downloading JSON page %d' % page_num, headers={
                            'X-Requested-With': 'XMLHttpRequest',
                            'X-Instagram-GIS': hashlib.md5(
                                ('%s:%s' % (gis_tmpl, variables)).encode('utf-8')).hexdigest(),
                        }, query={
                            'query_hash': self._QUERY_HASH,
                            'variables': variables,
                        })
                    media = self._parse_timeline_from(json_data)
                    self._gis_tmpl = gis_tmpl
                    break
                except ExtractorError as e:
                    # if it's an error caused by a bad query, and there are
                    # more GIS templates to try, ignore it and keep trying
                    if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                        if gis_tmpl != gis_tmpls[-1]:
                            continue
                    raise

            edges = media.get('edges')
            if not edges or not isinstance(edges, list):
                break

            for edge in edges:
                node = edge.get('node')
                if not node or not isinstance(node, dict):
                    continue
                if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
                    continue
                video_id = node.get('shortcode')
                if not video_id:
                    continue

                info = self.url_result(
                    'https://instagram.com/p/%s/' % video_id,
                    ie=InstagramIE.ie_key(), video_id=video_id)

                description = try_get(
                    node, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
                    compat_str)
                thumbnail = node.get('thumbnail_src') or node.get('display_src')
                timestamp = int_or_none(node.get('taken_at_timestamp'))

                comment_count = get_count('to_comment')
                like_count = get_count('preview_like')
                view_count = int_or_none(node.get('video_view_count'))

                info.update({
                    'description': description,
                    'thumbnail': thumbnail,
                    'timestamp': timestamp,
                    'comment_count': comment_count,
                    'like_count': like_count,
                    'view_count': view_count,
                })

                yield info

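            # follow GraphQL cursor-based pagination until there is no next
            # page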
            page_info = media.get('page_info')
            if not page_info or not isinstance(page_info, dict):
                break

            has_next_page = page_info.get('has_next_page')
            if not has_next_page:
                break

            cursor = page_info.get('end_cursor')
            if not cursor or not isinstance(cursor, compat_str):
                break

    def _real_extract(self, url):
        user_or_tag = self._match_id(url)
        webpage = self._download_webpage(url, user_or_tag)
        data = self._parse_graphql(webpage, user_or_tag)

        self._set_cookie('instagram.com', 'ig_pr', '1')

        return self.playlist_result(
            self._extract_graphql(data, url), user_or_tag, user_or_tag)


class InstagramUserIE(InstagramPlaylistIE):
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
    IE_DESC = 'Instagram user profile'
    IE_NAME = 'instagram:user'
    _TEST = {
        'url': 'https://instagram.com/porsche',
        'info_dict': {
            'id': 'porsche',
            'title': 'porsche',
        },
        'playlist_count': 5,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 5,
        }
    }

    _QUERY_HASH = '42323d64886122307be10013ad2dcc44'

    @staticmethod
    def _parse_timeline_from(data):
        # extracts the media timeline data from a GraphQL result
        return data['data']['user']['edge_owner_to_timeline_media']

    @staticmethod
    def _query_vars_for(data):
        # returns a dictionary of variables to add to the timeline query based
        # on the GraphQL of the original page
        return {
            'id': data['entry_data']['ProfilePage'][0]['graphql']['user']['id']
        }


class InstagramTagIE(InstagramPlaylistIE):
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/explore/tags/(?P<id>[^/]+)'
    IE_DESC = 'Instagram hashtag search'
    IE_NAME = 'instagram:tag'
    _TEST = {
        'url': 'https://instagram.com/explore/tags/lolcats',
        'info_dict': {
            'id': 'lolcats',
            'title': 'lolcats',
        },
        'playlist_count': 50,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 50,
        }
    }

    _QUERY_HASH = 'f92f56d47dc7a55b606908374b43a314'

    @staticmethod
    def _parse_timeline_from(data):
        # extracts the media timeline data from a GraphQL result
        return data['data']['hashtag']['edge_hashtag_to_media']

    @staticmethod
    def _query_vars_for(data):
        # returns a dictionary of variables to add to the timeline query based
        # on the GraphQL of the original page
        return {
            'tag_name':
                data['entry_data']['TagPage'][0]['graphql']['hashtag']['name']
        }