# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_str,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    float_or_none,
    parse_resolution,
    str_or_none,
    try_get,
    unified_timestamp,
    url_or_none,
    urljoin,
)


class PuhuTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[^/?#&]+)-izle'
    IE_NAME = 'puhutv'
    _TESTS = [{
        # film
        'url': 'https://puhutv.com/sut-kardesler-izle',
        'md5': 'fbd8f2d8e7681f8bcd51b592475a6ae7',
        'info_dict': {
            'id': '5085',
            'display_id': 'sut-kardesler',
            'ext': 'mp4',
            'title': 'Süt Kardeşler',
            'description': 'md5:405fd024df916ca16731114eb18e511a',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 4832.44,
            'creator': 'Arzu Film',
            'timestamp': 1469778212,
            'upload_date': '20160729',
            'release_year': 1976,
            'view_count': int,
            'tags': ['Aile', 'Komedi', 'Klasikler'],
        },
    }, {
        # episode, geo restricted, bypassable with --geo-verification-proxy
        'url': 'https://puhutv.com/jet-sosyete-1-bolum-izle',
        'only_matching': True,
    }, {
        # 4k, with subtitles
        'url': 'https://puhutv.com/dip-1-bolum-izle',
        'only_matching': True,
    }]
    _SUBTITLE_LANGS = {
        'English': 'en',
        'Deutsch': 'de',
        'عربى': 'ar'
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

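        # Page metadata comes from the slug API; its 'data' object carries
        # the title, content and season details used below.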
        info = self._download_json(
            urljoin(url, '/api/slug/%s-izle' % display_id),
            display_id)['data']

        video_id = compat_str(info['id'])
        title = info.get('name') or info['title']['name']
        if info.get('display_name'):
            title = '%s %s' % (title, info.get('display_name'))

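        # Stream URLs live in a separate per-asset videos endpoint. A 403
        # here means the video is geo restricted, so report it as such
        # rather than as a generic HTTP error.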
        try:
            videos = self._download_json(
                'https://puhutv.com/api/assets/%s/videos' % video_id,
                display_id, 'Downloading video JSON',
                headers=self.geo_verification_headers())
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                self.raise_geo_restricted()
            raise

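        # Collect formats: playlist-style HLS entries are expanded via the
        # m3u8 helper, while single-rendition HLS and progressive MP4 URLs
        # are added as individual formats.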
        formats = []
        for video in videos['data']['videos']:
            media_url = url_or_none(video.get('url'))
            if not media_url:
                continue
            playlist = video.get('is_playlist')
            if video.get('stream_type') == 'hls' and playlist is True:
                formats.extend(self._extract_m3u8_formats(
                    media_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
                continue
            quality = int_or_none(video.get('quality'))
            f = {
                'url': media_url,
                'ext': 'mp4',
                'height': quality
            }
            video_format = video.get('video_format')
            if video_format == 'hls' and playlist is False:
                format_id = 'hls'
                f['protocol'] = 'm3u8_native'
            elif video_format == 'mp4':
                format_id = 'http'
            else:
                continue
            if quality:
                format_id += '-%sp' % quality
            f['format_id'] = format_id
            formats.append(f)
        self._sort_formats(formats)

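        # The remaining metadata is optional, so pull it defensively with
        # try_get and the *_or_none helpers.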
        description = try_get(
            info, lambda x: x['title']['description'],
            compat_str) or info.get('description')
        timestamp = unified_timestamp(info.get('created_at'))
        creator = try_get(
            info, lambda x: x['title']['producer']['name'], compat_str)

        duration = float_or_none(
            try_get(info, lambda x: x['content']['duration_in_ms'], int),
            scale=1000)
        view_count = try_get(info, lambda x: x['content']['watch_count'], int)

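        # Thumbnail keys appear to encode the image resolution, so feed them
        # through parse_resolution to derive width/height hints.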
        images = try_get(
            info, lambda x: x['content']['images']['wide'], dict) or {}
        thumbnails = []
        for image_id, image_url in images.items():
            if not isinstance(image_url, compat_str):
                continue
            if not image_url.startswith(('http', '//')):
                image_url = 'https://%s' % image_url
            t = parse_resolution(image_id)
            t.update({
                'id': image_id,
                'url': image_url
            })
            thumbnails.append(t)

        release_year = try_get(info, lambda x: x['title']['released_at'], int)

        season_number = int_or_none(info.get('season_number'))
        season_id = str_or_none(info.get('season_id'))
        episode_number = int_or_none(info.get('episode_number'))

        tags = []
        for genre in try_get(info, lambda x: x['title']['genres'], list) or []:
            if not isinstance(genre, dict):
                continue
            genre_name = genre.get('name')
            if genre_name and isinstance(genre_name, compat_str):
                tags.append(genre_name)

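        # Subtitles are labelled with a human-readable language name; map the
        # known names to ISO 639-1 codes and keep the raw label otherwise.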
        subtitles = {}
        for subtitle in try_get(
                info, lambda x: x['content']['subtitles'], list) or []:
            if not isinstance(subtitle, dict):
                continue
            lang = subtitle.get('language')
            sub_url = url_or_none(subtitle.get('url'))
            if not lang or not isinstance(lang, compat_str) or not sub_url:
                continue
            subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = [{
                'url': sub_url
            }]

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'season_id': season_id,
            'season_number': season_number,
            'episode_number': episode_number,
            'release_year': release_year,
            'timestamp': timestamp,
            'creator': creator,
            'view_count': view_count,
            'duration': duration,
            'tags': tags,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'formats': formats
        }


class PuhuTVSerieIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[^/?#&]+)-detay'
    IE_NAME = 'puhutv:serie'
    _TESTS = [{
        'url': 'https://puhutv.com/deniz-yildizi-detay',
        'info_dict': {
            'title': 'Deniz Yıldızı',
            'id': 'deniz-yildizi',
        },
        'playlist_mincount': 205,
    }, {
        # a film detail page that shares the same URL layout as serie pages
        'url': 'https://puhutv.com/kaybedenler-kulubu-detay',
        'only_matching': True,
    }]

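    # Season episode listings are paginated (40 entries per request); keep
    # fetching pages until the API stops reporting 'hasMore'.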
    def _extract_entries(self, seasons):
        for season in seasons:
            season_id = season.get('id')
            if not season_id:
                continue
            page = 1
            has_more = True
            while has_more is True:
                season = self._download_json(
                    'https://galadriel.puhutv.com/seasons/%s' % season_id,
                    season_id, 'Downloading page %s' % page, query={
                        'page': page,
                        'per': 40,
                    })
                episodes = season.get('episodes')
                if isinstance(episodes, list):
                    for ep in episodes:
                        slug_path = str_or_none(ep.get('slugPath'))
                        if not slug_path:
                            continue
                        video_id = str_or_none(int_or_none(ep.get('id')))
                        yield self.url_result(
                            'https://puhutv.com/%s' % slug_path,
                            ie=PuhuTVIE.ie_key(), video_id=video_id,
                            video_title=ep.get('name') or ep.get('eventLabel'))
                page += 1
                has_more = season.get('hasMore')

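    # The -detay slug API mirrors the -izle one; its 'data' object either
    # lists seasons (a serie) or describes a single film asset.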
    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        info = self._download_json(
            urljoin(url, '/api/slug/%s-detay' % playlist_id),
            playlist_id)['data']

        seasons = info.get('seasons')
        if seasons:
            return self.playlist_result(
                self._extract_entries(seasons), playlist_id, info.get('name'))

        # Films reuse the -detay URL layout; delegate to PuhuTVIE via the
        # film's -izle URL
        video_id = info.get('slug') or info['assets'][0]['slug']
        return self.url_result(
            'https://puhutv.com/%s-izle' % video_id,
            PuhuTVIE.ie_key(), video_id)