# youtube_dl/extractor/dramafever.py
from __future__ import unicode_literals

import itertools
import json

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_urlparse,
)
from ..utils import (
    clean_html,
    ExtractorError,
    int_or_none,
    parse_age_limit,
    parse_duration,
    unified_timestamp,
    url_or_none,
)


class DramaFeverBaseIE(InfoExtractor):
    _NETRC_MACHINE = 'dramafever'

    _CONSUMER_SECRET = 'DA59dtVXYLxajktV'

    _consumer_secret = None
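
    # The site's API calls require a consumer key.  _get_consumer_secret()
    # below scrapes it from main.js and falls back to the hard-coded
    # _CONSUMER_SECRET when the script cannot be downloaded or parsed.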
    def _get_consumer_secret(self):
        mainjs = self._download_webpage(
            'http://www.dramafever.com/static/51afe95/df2014/scripts/main.js',
            None, 'Downloading main.js', fatal=False)
        if not mainjs:
            return self._CONSUMER_SECRET
        return self._search_regex(
            r"var\s+cs\s*=\s*'([^']+)'", mainjs,
            'consumer secret', default=self._CONSUMER_SECRET)
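
    # Initialization fetches the consumer secret once, then attempts to log
    # in with credentials from --username/--password or the 'dramafever'
    # .netrc entry (see _NETRC_MACHINE above).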
    def _real_initialize(self):
        self._consumer_secret = self._get_consumer_secret()
        self._login()

    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return

        login_form = {
            'username': username,
            'password': password,
        }

        try:
            response = self._download_json(
                'https://www.dramafever.com/api/users/login', None, 'Logging in',
                data=json.dumps(login_form).encode('utf-8'), headers={
                    'x-consumer-key': self._consumer_secret,
                })
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code in (403, 404):
                # Login errors come back as 403/404 responses with a JSON body
                response = self._parse_json(
                    e.cause.read().decode('utf-8'), None)
            else:
                raise

        # A successful login carries a result/guid payload
        if response.get('result') or response.get('guid') or response.get('user_guid'):
            return

        errors = response.get('errors')
        if errors and isinstance(errors, list):
            error = errors[0]
            message = error.get('message') or error['reason']
            raise ExtractorError('Unable to login: %s' % message, expected=True)
        raise ExtractorError('Unable to log in')
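

# DramaFeverIE handles single episodes; the video id is the series and
# episode number joined with a dot, e.g. /drama/4274/1/Heirs/ -> '4274.1'.
# A minimal sketch of how the extractor would typically be driven through
# youtube-dl's public API (illustrative only; the options shown are
# assumptions, not part of this module):
#
#   from youtube_dl import YoutubeDL
#
#   with YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info(
#           'https://www.dramafever.com/drama/4274/1/Heirs/', download=False)
#       print(info['id'], info['title'])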
class DramaFeverIE(DramaFeverBaseIE):
    IE_NAME = 'dramafever'
    _VALID_URL = r'https?://(?:www\.)?dramafever\.com/(?:[^/]+/)?drama/(?P<id>[0-9]+/[0-9]+)(?:/|$)'
    _TESTS = [{
        'url': 'https://www.dramafever.com/drama/4274/1/Heirs/',
        'info_dict': {
            'id': '4274.1',
            'title': 'Heirs - Episode 1',
            'description': 'md5:362a24ba18209f6276e032a651c50bc2',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1381354993,
            'upload_date': '20131009',
            'episode': 'Episode 1',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.dramafever.com/drama/4826/4/Mnet_Asian_Music_Awards_2015/?ap=1',
        'info_dict': {
            'id': '4826.4',
            'title': 'Mnet Asian Music Awards 2015',
            'description': 'md5:3ff2ee8fedaef86e076791c909cf2e91',
            'episode': 'Mnet Asian Music Awards 2015 - Part 3',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1450213200,
            'upload_date': '20151215',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.dramafever.com/zh-cn/drama/4972/15/Doctor_Romantic/',
        'only_matching': True,
    }]
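
    # All metadata endpoints live under /api/5/ and authenticate with the
    # consumer secret sent as the x-consumer-key header.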
    def _call_api(self, path, video_id, note, fatal=False):
        return self._download_json(
            'https://www.dramafever.com/api/5/' + path,
            video_id, note=note, headers={
                'x-consumer-key': self._consumer_secret,
            }, fatal=fatal)
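
    # Subtitles come back as a JSON list of {url, code/language} objects;
    # malformed entries are skipped rather than failing the extraction
    # (the API call itself is non-fatal).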
    def _get_subtitles(self, video_id):
        subtitles = {}
        subs = self._call_api(
            'video/%s/subtitles/webvtt/' % video_id, video_id,
            'Downloading subtitles JSON', fatal=False)
        if not subs or not isinstance(subs, list):
            return subtitles
        for sub in subs:
            if not isinstance(sub, dict):
                continue
            sub_url = url_or_none(sub.get('url'))
            if not sub_url:
                continue
            subtitles.setdefault(
                sub.get('code') or sub.get('language') or 'en', []).append({
                    'url': sub_url
                })
        return subtitles
    def _real_extract(self, url):
        video_id = self._match_id(url).replace('/', '.')

        series_id, episode_number = video_id.split('.')

        video = self._call_api(
            'series/%s/episodes/%s/' % (series_id, episode_number), video_id,
            'Downloading video JSON')

        formats = []

        # Progressive download assets keyed by format id
        download_assets = video.get('download_assets')
        if download_assets and isinstance(download_assets, dict):
            for format_id, format_dict in download_assets.items():
                if not isinstance(format_dict, dict):
                    continue
                format_url = url_or_none(format_dict.get('url'))
                if not format_url:
                    continue
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                    'filesize': int_or_none(video.get('filesize')),
                })

        # HLS stream, if available
        stream = self._call_api(
            'video/%s/stream/' % video_id, video_id, 'Downloading stream JSON',
            fatal=False)
        if stream:
            stream_url = stream.get('stream_url')
            if stream_url:
                formats.extend(self._extract_m3u8_formats(
                    stream_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
        self._sort_formats(formats)
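
        # Episode metadata; the title is prefixed with the series name when
        # the API provides one.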
        title = video.get('title') or 'Episode %s' % episode_number
        description = video.get('description')
        thumbnail = video.get('thumbnail')
        timestamp = unified_timestamp(video.get('release_date'))
        duration = parse_duration(video.get('duration'))
        age_limit = parse_age_limit(video.get('tv_rating'))
        series = video.get('series_title')
        season_number = int_or_none(video.get('season'))

        if series:
            title = '%s - %s' % (series, title)

        subtitles = self.extract_subtitles(video_id)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'timestamp': timestamp,
            'age_limit': age_limit,
            'series': series,
            'season_number': season_number,
            'episode_number': int_or_none(episode_number),
            'formats': formats,
            'subtitles': subtitles,
        }
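

# DramaFeverSeriesIE expands a series page into a playlist by paging through
# the /api/4/episode/series/ endpoint and handing each episode URL back to
# DramaFeverIE via url_result().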
class DramaFeverSeriesIE(DramaFeverBaseIE):
    IE_NAME = 'dramafever:series'
    _VALID_URL = r'https?://(?:www\.)?dramafever\.com/(?:[^/]+/)?drama/(?P<id>[0-9]+)(?:/(?:(?!\d+(?:/|$)).+)?)?$'
    _TESTS = [{
        'url': 'http://www.dramafever.com/drama/4512/Cooking_with_Shin/',
        'info_dict': {
            'id': '4512',
            'title': 'Cooking with Shin',
            'description': 'md5:84a3f26e3cdc3fb7f500211b3593b5c1',
        },
    }, {
        'url': 'http://www.dramafever.com/drama/124/IRIS/',
        'info_dict': {
            'id': '124',
            'title': 'IRIS',
            'description': 'md5:b3a30e587cf20c59bd1c01ec0ee1b862',
        },
        'playlist_count': 20,
    }]

    _PAGE_SIZE = 60  # max is 60 (see http://api.drama9.com/#get--api-4-episode-series-)

    def _real_extract(self, url):
        series_id = self._match_id(url)

        series = self._download_json(
            'http://www.dramafever.com/api/4/series/query/?cs=%s&series_id=%s'
            % (self._consumer_secret, series_id),
            series_id, 'Downloading series JSON')['series'][series_id]

        title = clean_html(series['name'])
        description = clean_html(series.get('description') or series.get('description_short'))

        entries = []
        for page_num in itertools.count(1):
            episodes = self._download_json(
                'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_size=%d&page_number=%d'
                % (self._consumer_secret, series_id, self._PAGE_SIZE, page_num),
                series_id, 'Downloading episodes JSON page #%d' % page_num)
            for episode in episodes.get('value', []):
                episode_url = episode.get('episode_url')
                if not episode_url:
                    continue
                entries.append(self.url_result(
                    compat_urlparse.urljoin(url, episode_url),
                    'DramaFever', episode.get('guid')))
            if page_num == episodes['num_pages']:
                break

        return self.playlist_result(entries, series_id, title, description)