from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urlparse,
)
from ..utils import (
    determine_ext,
    extract_attributes,
    ExtractorError,
    sanitized_Request,
    urlencode_postdata,
)


class AnimeOnDemandIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?anime-on-demand\.de/anime/(?P<id>\d+)'
    _LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in'
    _APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply'
    _NETRC_MACHINE = 'animeondemand'
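    # The _NETRC_MACHINE above lets users keep their credentials in a .netrc
    # entry ("machine animeondemand login <user> password <pass>") instead of
    # passing --username/--password on every invocation.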
    _TESTS = [{
        # jap, OmU
        'url': 'https://www.anime-on-demand.de/anime/161',
        'info_dict': {
            'id': '161',
            'title': 'Grimgar, Ashes and Illusions (OmU)',
            'description': 'md5:6681ce3c07c7189d255ac6ab23812d31',
        },
        'playlist_mincount': 4,
    }, {
        # Film wording is used instead of Episode, ger/jap, Dub/OmU
        'url': 'https://www.anime-on-demand.de/anime/39',
        'only_matching': True,
    }, {
        # Episodes without titles, jap, OmU
        'url': 'https://www.anime-on-demand.de/anime/162',
        'only_matching': True,
    }, {
        # ger/jap, Dub/OmU, account required
        'url': 'https://www.anime-on-demand.de/anime/169',
        'only_matching': True,
    }, {
        # Full length film, non-series, ger/jap, Dub/OmU, account required
        'url': 'https://www.anime-on-demand.de/anime/185',
        'only_matching': True,
    }]
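
    # Login flow: fetch the sign-in page, merge its hidden form fields with
    # the user's credentials, POST them back, then verify that a logout link
    # is present in the response.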

    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')
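
        # The sign-in page itself announces geo-blocking, so check for the
        # notice before attempting to authenticate.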
        if '>Our licensing terms allow the distribution of animes only to German-speaking countries of Europe' in login_page:
            self.raise_geo_restricted(
                '%s is only available in German-speaking countries of Europe' % self.IE_NAME)

        login_form = self._form_hidden_inputs('new_user', login_page)

        login_form.update({
            'user[login]': username,
            'user[password]': password,
        })
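
        # Prefer the form's own action attribute as the POST target, falling
        # back to _LOGIN_URL; relative URLs are resolved just below.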
        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
            'post url', default=self._LOGIN_URL, group='url')

        if not post_url.startswith('http'):
            post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

        request = sanitized_Request(
            post_url, urlencode_postdata(login_form))
        request.add_header('Referer', self._LOGIN_URL)

        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')):
            error = self._search_regex(
                r'<p class="alert alert-danger">(.+?)</p>',
                response, 'error', default=None)
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')
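
    # youtube-dl invokes _real_initialize() once per extractor instance
    # before any extraction, so login (when credentials exist) runs once.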
    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        anime_id = self._match_id(url)

        webpage = self._download_webpage(url, anime_id)

        if 'data-playlist=' not in webpage:
            self._download_webpage(
                self._APPLY_HTML5_URL, anime_id,
                'Activating HTML5 beta', 'Unable to apply HTML5 beta')
            webpage = self._download_webpage(url, anime_id)
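
        # With the HTML5 player activated the page exposes the data-playlist
        # attributes and the csrf-token <meta> tag used below.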
        csrf_token = self._html_search_meta(
            'csrf-token', webpage, 'csrf token', fatal=True)
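        # The token is echoed back further down as an X-CSRF-Token header on
        # the JSON playlist requests.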

        anime_title = self._html_search_regex(
            r'(?s)<h1[^>]+itemprop="name"[^>]*>(.+?)</h1>',
            webpage, 'anime name')
        anime_description = self._html_search_regex(
            r'(?s)<div[^>]+itemprop="description"[^>]*>(.+?)</div>',
            webpage, 'anime description', default=None)
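
        # Each watchable unit is backed by one or more "streamstarter"
        # <input> elements whose data attributes reference JSON playlists;
        # extract_info() turns those playlists into youtube-dl format dicts.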
        entries = []

        def extract_info(html, video_id, num=None):
            title, description = [None] * 2
            formats = []

            for input_ in re.findall(
                    r'<input[^>]+class=["\'].*?streamstarter_html5[^>]+>', html):
                attributes = extract_attributes(input_)

                playlist_urls = []
                for playlist_key in ('data-playlist', 'data-otherplaylist'):
                    playlist_url = attributes.get(playlist_key)
                    if isinstance(playlist_url, compat_str) and re.match(
                            r'/?[\da-zA-Z]+', playlist_url):
                        playlist_urls.append(attributes[playlist_key])
                if not playlist_urls:
                    continue

                lang = attributes.get('data-lang')
                lang_note = attributes.get('value')

                for playlist_url in playlist_urls:
                    kind = self._search_regex(
                        r'videomaterialurl/\d+/([^/]+)/',
                        playlist_url, 'media kind', default=None)
                    format_id_list = []
                    if lang:
                        format_id_list.append(lang)
                    if kind:
                        format_id_list.append(kind)
                    if not format_id_list and num is not None:
                        format_id_list.append(compat_str(num))
                    format_id = '-'.join(format_id_list)
                    format_note = ', '.join(filter(None, (kind, lang_note)))
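                    # format_id above combines the stream language and media
                    # kind when known, falling back to the episode number.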
                    # Fetch the playlist JSON the way the site's own player
                    # does: via XHR with the page's CSRF token attached.
                    request = sanitized_Request(
                        compat_urlparse.urljoin(url, playlist_url),
                        headers={
                            'X-Requested-With': 'XMLHttpRequest',
                            'X-CSRF-Token': csrf_token,
                            'Referer': url,
                            'Accept': 'application/json, text/javascript, */*; q=0.01',
                        })
                    playlist = self._download_json(
                        request, video_id, 'Downloading %s playlist JSON' % format_id,
                        fatal=False)
                    if not playlist:
                        continue
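                    # The JSON wraps its entries in a "playlist" list plus a
                    # "startvideo" index selecting the entry to use.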
                    start_video = playlist.get('startvideo', 0)
                    playlist = playlist.get('playlist')
                    if not playlist or not isinstance(playlist, list):
                        continue
                    playlist = playlist[start_video]
                    title = playlist.get('title')
                    if not title:
                        continue
                    description = playlist.get('description')

                    for source in playlist.get('sources', []):
                        file_ = source.get('file')
                        if not file_:
                            continue
                        ext = determine_ext(file_)
                        format_id_list = [lang, kind]
                        if ext == 'm3u8':
                            format_id_list.append('hls')
                        elif source.get('type') == 'video/dash' or ext == 'mpd':
                            format_id_list.append('dash')
                        format_id = '-'.join(filter(None, format_id_list))
                        if ext == 'm3u8':
                            file_formats = self._extract_m3u8_formats(
                                file_, video_id, 'mp4',
                                entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False)
                        elif source.get('type') == 'video/dash' or ext == 'mpd':
                            # DASH is skipped: the continue below short-circuits
                            # before the _extract_mpd_formats call, which is
                            # left in place but never reached.
                            continue
                            file_formats = self._extract_mpd_formats(
                                file_, video_id, mpd_id=format_id, fatal=False)
                        else:
                            continue
                        for f in file_formats:
                            f.update({
                                'language': lang,
                                'format_note': format_note,
                            })
                        formats.extend(file_formats)

            return {
                'title': title,
                'description': description,
                'formats': formats,
            }

        def extract_entries(html, video_id, common_info, num=None):
            info = extract_info(html, video_id, num)

            if info['formats']:
                self._sort_formats(info['formats'])
                f = common_info.copy()
                f.update(info)
                entries.append(f)

            # Extract teaser/trailer only when full episode is not available
            if not info['formats']:
                m = re.search(
                    r'data-dialog-header=(["\'])(?P<title>.+?)\1[^>]+href=(["\'])(?P<href>.+?)\3[^>]*>(?P<kind>Teaser|Trailer)<',
                    html)
                if m:
                    f = common_info.copy()
                    f.update({
                        'id': '%s-%s' % (f['id'], m.group('kind').lower()),
                        'title': m.group('title'),
                        'url': compat_urlparse.urljoin(url, m.group('href')),
                    })
                    entries.append(f)

        def extract_episodes(html):
            for num, episode_html in enumerate(re.findall(
                    r'(?s)<h3[^>]+class="episodebox-title".+?>Episodeninhalt<', html), 1):
                episodebox_title = self._search_regex(
                    (r'class="episodebox-title"[^>]+title=(["\'])(?P<title>.+?)\1',
                     r'class="episodebox-title"[^>]+>(?P<title>.+?)<'),
                    episode_html, 'episodebox title', default=None, group='title')
                if not episodebox_title:
                    continue

                episode_number = int(self._search_regex(
                    r'(?:Episode|Film)\s*(\d+)',
                    episodebox_title, 'episode number', default=num))
                episode_title = self._search_regex(
                    r'(?:Episode|Film)\s*\d+\s*-\s*(.+)',
                    episodebox_title, 'episode title', default=None)

                video_id = 'episode-%d' % episode_number

                common_info = {
                    'id': video_id,
                    'series': anime_title,
                    'episode': episode_title,
                    'episode_number': episode_number,
                }

                extract_entries(episode_html, video_id, common_info)

        def extract_film(html, video_id):
            common_info = {
                'id': anime_id,
                'title': anime_title,
                'description': anime_description,
            }
            extract_entries(html, video_id, common_info)
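
        # A series page yields one entry per episode box; when none are
        # found, treat the page as a single full-length film instead.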
        extract_episodes(webpage)

        if not entries:
            extract_film(webpage, anime_id)

        return self.playlist_result(entries, anime_id, anime_title, anime_description)