from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    float_or_none,
)

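# Handles bare v.redd.it links, which carry only a video id; the streams come
# from the HLS/DASH manifests that Reddit's video host publishes per clip.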
class RedditIE(InfoExtractor):
    _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)'
    _TEST = {
        # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
        'url': 'https://v.redd.it/zv89llsvexdz',
        'md5': '655d06ace653ea3b87bccfb1b27ec99d',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'zv89llsvexdz',
        },
        'params': {
            'format': 'bestvideo',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

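        # Reddit-hosted video exposes both an HLS and a DASH manifest at
        # predictable URLs; fatal=False keeps a missing variant from aborting
        # the extraction.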
        formats = self._extract_m3u8_formats(
            'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id,
            'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)

        formats.extend(self._extract_mpd_formats(
            'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id,
            mpd_id='dash', fatal=False))

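        # A bare v.redd.it page carries no title, so the video id is reused.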
        return {
            'id': video_id,
            'title': video_id,
            'formats': formats,
        }

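# Handles reddit.com/r/<subreddit>/comments/... submission pages, where the
# actual media (v.redd.it, imgur, streamable, youtube, ...) is referenced by
# the post.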
class RedditRIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

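        # Appending '.json' to a comments page URL returns the post data as
        # JSON; the first listing's first child is the submission itself.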
        data = self._download_json(
            url + '.json', video_id)[0]['data']['children'][0]['data']

        video_url = data['url']

        # Avoid recursing into the same reddit URL
        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
            raise ExtractorError('No media found', expected=True)

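        # Map Reddit's over_18 flag onto an age limit, leaving it unset when
        # the field is absent.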
        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

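        # url_transparent hands the submitted URL to whichever extractor
        # matches it, while keeping the reddit metadata gathered here.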
        return {
            '_type': 'url_transparent',
            'url': video_url,
            'title': data.get('title'),
            'thumbnail': data.get('thumbnail'),
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }