from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    get_element_by_attribute,
    int_or_none,
    limit_length,
    lowercase_escape,
    try_get,
)


class InstagramIE(InfoExtractor):
    _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': 'naomipq',
            'uploader': 'Naomi Leonor Phan-Quang',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
    }, {
        # missing description
        'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
        'info_dict': {
            'id': 'BA-pQFBG8HZ',
            'ext': 'mp4',
            'title': 'Video by britneyspears',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1453760977,
            'upload_date': '20160125',
            'uploader_id': 'britneyspears',
            'uploader': 'Britney Spears',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # multi video post
        'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
        'playlist': [{
            'info_dict': {
                'id': 'BQ0dSaohpPW',
                'ext': 'mp4',
                'title': 'Video 1',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dTpOhuHT',
                'ext': 'mp4',
                'title': 'Video 2',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dT7RBFeF',
                'ext': 'mp4',
                'title': 'Video 3',
            },
        }],
        'info_dict': {
            'id': 'BQ0eAlwhDrw',
            'title': 'Post by instagram',
            'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
        },
    }, {
        'url': 'https://instagram.com/p/-Cmh1cukG2/',
        'only_matching': True,
    }, {
        'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
        'only_matching': True,
    }]

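    # Locate an embedded Instagram player in an arbitrary webpage, either as
    # an <iframe> pointing at an /embed URL or as an embed <blockquote>
    # (class "instagram-media") containing a permalink.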
    @staticmethod
    def _extract_embed_url(webpage):
        mobj = re.search(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
            webpage)
        if mobj:
            return mobj.group('url')

        blockquote_el = get_element_by_attribute(
            'class', 'instagram-media', webpage)
        if blockquote_el is None:
            return

        mobj = re.search(
            r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
        if mobj:
            return mobj.group('link')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        url = mobj.group('url')

        webpage = self._download_webpage(url, video_id)

        (video_url, description, thumbnail, timestamp, uploader,
         uploader_id, like_count, comment_count, comments, height,
         width) = [None] * 11

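        # Post metadata is embedded in the page as a JSON blob assigned to
        # window._sharedData; parsing is non-fatal so extraction can still
        # fall back to the regex/Open Graph lookups further down.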
        shared_data = self._parse_json(
            self._search_regex(
                r'window\._sharedData\s*=\s*({.+?});',
                webpage, 'shared data', default='{}'),
            video_id, fatal=False)
        if shared_data:
            media = try_get(
                shared_data, lambda x: x['entry_data']['PostPage'][0]['media'], dict)
            if media:
                video_url = media.get('video_url')
                height = int_or_none(media.get('dimensions', {}).get('height'))
                width = int_or_none(media.get('dimensions', {}).get('width'))
                description = media.get('caption')
                thumbnail = media.get('display_src')
                timestamp = int_or_none(media.get('date'))
                uploader = media.get('owner', {}).get('full_name')
                uploader_id = media.get('owner', {}).get('username')
                like_count = int_or_none(media.get('likes', {}).get('count'))
                comment_count = int_or_none(media.get('comments', {}).get('count'))
                comments = [{
                    'author': comment.get('user', {}).get('username'),
                    'author_id': comment.get('user', {}).get('id'),
                    'id': comment.get('id'),
                    'text': comment.get('text'),
                    'timestamp': int_or_none(comment.get('created_at')),
                } for comment in media.get(
                    'comments', {}).get('nodes', []) if comment.get('text')]
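                # Multi-video posts have no top-level video_url; their parts
                # are listed under edge_sidecar_to_children and are returned
                # as a playlist instead of a single video.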
                if not video_url:
                    edges = try_get(
                        media, lambda x: x['edge_sidecar_to_children']['edges'],
                        list) or []
                    if edges:
                        entries = []
                        for edge_num, edge in enumerate(edges, start=1):
                            node = try_get(edge, lambda x: x['node'], dict)
                            if not node:
                                continue
                            node_video_url = try_get(node, lambda x: x['video_url'], compat_str)
                            if not node_video_url:
                                continue
                            entries.append({
                                'id': node.get('shortcode') or node['id'],
                                'title': 'Video %d' % edge_num,
                                'url': node_video_url,
                                'thumbnail': node.get('display_url'),
                                'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])),
                                'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])),
                                'view_count': int_or_none(node.get('video_view_count')),
                            })
                        return self.playlist_result(
                            entries, video_id,
                            'Post by %s' % uploader_id if uploader_id else None,
                            description)

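        # Fallbacks for pages where window._sharedData was missing or incomplete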
        if not video_url:
            video_url = self._og_search_video_url(webpage, secure=False)

        formats = [{
            'url': video_url,
            'width': width,
            'height': height,
        }]

        if not uploader_id:
            uploader_id = self._search_regex(
                r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
                webpage, 'uploader id', fatal=False)

        if not description:
            description = self._search_regex(
                r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
            if description is not None:
                description = lowercase_escape(description)

        if not thumbnail:
            thumbnail = self._og_search_thumbnail(webpage)

        return {
            'id': video_id,
            'formats': formats,
            'ext': 'mp4',
            'title': 'Video by %s' % uploader_id,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'uploader': uploader,
            'like_count': like_count,
            'comment_count': comment_count,
            'comments': comments,
        }


class InstagramUserIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
    IE_DESC = 'Instagram user profile'
    IE_NAME = 'instagram:user'
    _TEST = {
        'url': 'https://instagram.com/porsche',
        'info_dict': {
            'id': 'porsche',
            'title': 'porsche',
        },
        'playlist_mincount': 2,
        'playlist': [{
            'info_dict': {
                'id': '614605558512799803_462752227',
                'ext': 'mp4',
                'title': '#Porsche Intelligent Performance.',
                'thumbnail': r're:^https?://.*\.jpg',
                'uploader': 'Porsche',
                'uploader_id': 'porsche',
                'timestamp': 1387486713,
                'upload_date': '20131219',
            },
        }],
        'params': {
            'extract_flat': True,
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        uploader_id = mobj.group('username')

        entries = []
        page_count = 0
        media_url = 'http://instagram.com/%s/media' % uploader_id
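        # Page through the profile's /media JSON endpoint, advancing max_id to
        # the id of the last item on each page until no items come back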
        while True:
            page = self._download_json(
                media_url, uploader_id,
                note='Downloading page %d ' % (page_count + 1),
            )
            page_count += 1

            for it in page['items']:
                if it.get('type') != 'video':
                    continue
                like_count = int_or_none(it.get('likes', {}).get('count'))
                user = it.get('user', {})

                formats = [{
                    'format_id': k,
                    'height': v.get('height'),
                    'width': v.get('width'),
                    'url': v['url'],
                } for k, v in it['videos'].items()]
                self._sort_formats(formats)

                thumbnails_el = it.get('images', {})
                thumbnail = thumbnails_el.get('thumbnail', {}).get('url')

                # In some cases caption is null, which corresponds to None
                # in Python, so it.get('caption', {}) would give None
                # instead of a dict
                title = (it.get('caption') or {}).get('text', it['id'])

                entries.append({
                    'id': it['id'],
                    'title': limit_length(title, 80),
                    'formats': formats,
                    'thumbnail': thumbnail,
                    'webpage_url': it.get('link'),
                    'uploader': user.get('full_name'),
                    'uploader_id': user.get('username'),
                    'like_count': like_count,
                    'timestamp': int_or_none(it.get('created_time')),
                })

            if not page['items']:
                break
            max_id = page['items'][-1]['id'].split('_')[0]
            media_url = (
                'http://instagram.com/%s/media?max_id=%s' % (
                    uploader_id, max_id))

        return {
            '_type': 'playlist',
            'entries': entries,
            'id': uploader_id,
            'title': uploader_id,
        }