]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/reddit.py
Prepare to upload
[youtubedl] / youtube_dl / extractor / reddit.py
1 from __future__ import unicode_literals
2
3 import re
4
5 from .common import InfoExtractor
6 from ..utils import (
7 ExtractorError,
8 int_or_none,
9 float_or_none,
10 )
11
12
class RedditIE(InfoExtractor):
    """Extract playable formats from a bare v.redd.it video link.

    Reddit hosts its native video under https://v.redd.it/<id> and
    exposes both an HLS and a DASH manifest at fixed paths; this
    extractor probes both (non-fatally, since either may be absent).
    """
    _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)'
    _TEST = {
        # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
        'url': 'https://v.redd.it/zv89llsvexdz',
        'md5': '0a070c53eba7ec4534d95a5a1259e253',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'zv89llsvexdz',
        },
        'params': {
            'format': 'bestvideo',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Collect formats from both manifest flavours; each probe is
        # fatal=False so a missing manifest does not abort extraction.
        formats = []
        formats.extend(self._extract_m3u8_formats(
            'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id,
            'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
        formats.extend(self._extract_mpd_formats(
            'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id,
            mpd_id='dash', fatal=False))

        self._sort_formats(formats)

        # v.redd.it exposes no title metadata, so the id doubles as title.
        info = {
            'id': video_id,
            'title': video_id,
            'formats': formats,
        }
        return info
47
48
class RedditRIE(InfoExtractor):
    """Extract media from a reddit.com comments page.

    Fetches the post's JSON (``<post url>/.json``), pulls the linked
    media URL out of it, and delegates to the matching extractor via a
    ``url_transparent`` result so post metadata (title, uploader,
    timestamps, vote counts) is merged in.
    """
    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # imgur @ old reddit
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # One regex pass yields both the canonical post URL and the id;
        # the original additionally called self._match_id(url), which
        # re-ran the same pattern only to produce the value we already
        # have — that redundant second extraction is removed.
        mobj = re.match(self._VALID_URL, url)
        url, video_id = mobj.group('url', 'id')

        # Appending /.json to a post URL returns its listing as JSON;
        # the first child of the first listing is the post itself.
        data = self._download_json(
            url + '/.json', video_id)[0]['data']['children'][0]['data']

        video_url = data['url']

        # Avoid recursing into the same reddit URL
        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
            raise ExtractorError('No media found', expected=True)

        # Map reddit's tri-state over_18 flag (True/False/missing) onto
        # age_limit 18/0/None respectively.
        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        return {
            '_type': 'url_transparent',
            'url': video_url,
            'title': data.get('title'),
            'thumbnail': data.get('thumbnail'),
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }
129 }