# youtube_dl/extractor/animeondemand.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urlparse,
)
from ..utils import (
    determine_ext,
    extract_attributes,
    ExtractorError,
    sanitized_Request,
    urlencode_postdata,
)
  19 class AnimeOnDemandIE(InfoExtractor
): 
  20     _VALID_URL 
= r
'https?://(?:www\.)?anime-on-demand\.de/anime/(?P<id>\d+)' 
  21     _LOGIN_URL 
= 'https://www.anime-on-demand.de/users/sign_in' 
  22     _APPLY_HTML5_URL 
= 'https://www.anime-on-demand.de/html5apply' 
  23     _NETRC_MACHINE 
= 'animeondemand' 
  25         'url': 'https://www.anime-on-demand.de/anime/161', 
  28             'title': 'Grimgar, Ashes and Illusions (OmU)', 
  29             'description': 'md5:6681ce3c07c7189d255ac6ab23812d31', 
  31         'playlist_mincount': 4, 
  33         # Film wording is used instead of Episode 
  34         'url': 'https://www.anime-on-demand.de/anime/39', 
  35         'only_matching': True, 
  37         # Episodes without titles 
  38         'url': 'https://www.anime-on-demand.de/anime/162', 
  39         'only_matching': True, 
  41         # ger/jap, Dub/OmU, account required 
  42         'url': 'https://www.anime-on-demand.de/anime/169', 
  43         'only_matching': True, 
  47         (username
, password
) = self
._get
_login
_info
() 
  51         login_page 
= self
._download
_webpage
( 
  52             self
._LOGIN
_URL
, None, 'Downloading login page') 
  54         if '>Our licensing terms allow the distribution of animes only to German-speaking countries of Europe' in login_page
: 
  55             self
.raise_geo_restricted( 
  56                 '%s is only available in German-speaking countries of Europe' % self
.IE_NAME
) 
  58         login_form 
= self
._form
_hidden
_inputs
('new_user', login_page
) 
  61             'user[login]': username
, 
  62             'user[password]': password
, 
  65         post_url 
= self
._search
_regex
( 
  66             r
'<form[^>]+action=(["\'])(?P
<url
>.+?
)\
1', login_page, 
  67             'post url
', default=self._LOGIN_URL, group='url
') 
  69         if not post_url.startswith('http
'): 
  70             post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url) 
  72         request = sanitized_Request( 
  73             post_url, urlencode_postdata(login_form)) 
  74         request.add_header('Referer
', self._LOGIN_URL) 
  76         response = self._download_webpage( 
  77             request, None, 'Logging 
in as %s' % username) 
  79         if all(p not in response for p in ('>Logout
<', 'href
="/users/sign_out"')): 
  80             error = self._search_regex( 
  81                 r'<p 
class="alert alert-danger">(.+?
)</p
>', 
  82                 response, 'error
', default=None) 
  84                 raise ExtractorError('Unable to login
: %s' % error, expected=True) 
  85             raise ExtractorError('Unable to log 
in') 
  87     def _real_initialize(self): 
  90     def _real_extract(self, url): 
  91         anime_id = self._match_id(url) 
  93         webpage = self._download_webpage(url, anime_id) 
  95         if 'data
-playlist
=' not in webpage: 
  96             self._download_webpage( 
  97                 self._APPLY_HTML5_URL, anime_id, 
  98                 'Activating HTML5 beta
', 'Unable to 
apply HTML5 beta
') 
  99             webpage = self._download_webpage(url, anime_id) 
 101         csrf_token = self._html_search_meta( 
 102             'csrf
-token
', webpage, 'csrf token
', fatal=True) 
 104         anime_title = self._html_search_regex( 
 105             r'(?s
)<h1
[^
>]+itemprop
="name"[^
>]*>(.+?
)</h1
>', 
 106             webpage, 'anime name
') 
 107         anime_description = self._html_search_regex( 
 108             r'(?s
)<div
[^
>]+itemprop
="description"[^
>]*>(.+?
)</div
>', 
 109             webpage, 'anime description
', default=None) 
 113         for num, episode_html in enumerate(re.findall( 
 114                 r'(?s
)<h3
[^
>]+class="episodebox-title".+?
>Episodeninhalt
<', webpage), 1): 
 115             episodebox_title = self._search_regex( 
 116                 (r'class="episodebox-title"[^
>]+title
=(["\'])(?P<title>.+?)\1', 
 117                  r'class="episodebox
-title
"[^>]+>(?P<title>.+?)<'), 
 118                 episode_html, 'episodebox title', default=None, group='title') 
 119             if not episodebox_title: 
 122             episode_number = int(self._search_regex( 
 123                 r'(?:Episode|Film)\s*(\d+)', 
 124                 episodebox_title, 'episode number', default=num)) 
 125             episode_title = self._search_regex( 
 126                 r'(?:Episode|Film)\s*\d+\s*-\s*(.+)', 
 127                 episodebox_title, 'episode title', default=None) 
 129             video_id = 'episode-%d' % episode_number 
 133                 'series': anime_title, 
 134                 'episode': episode_title, 
 135                 'episode_number': episode_number, 
 140             for input_ in re.findall( 
 141                     r'<input[^>]+class=["\'].*?streamstarter_html5
[^
>]+>', episode_html): 
 142                 attributes = extract_attributes(input_) 
 144                 for playlist_key in ('data
-playlist
', 'data
-otherplaylist
'): 
 145                     playlist_url = attributes.get(playlist_key) 
 146                     if isinstance(playlist_url, compat_str) and re.match( 
 147                             r'/?
[\da
-zA
-Z
]+', playlist_url): 
 148                         playlist_urls.append(attributes[playlist_key]) 
 149                 if not playlist_urls: 
 152                 lang = attributes.get('data
-lang
') 
 153                 lang_note = attributes.get('value
') 
 155                 for playlist_url in playlist_urls: 
 156                     kind = self._search_regex( 
 157                         r'videomaterialurl
/\d
+/([^
/]+)/', 
 158                         playlist_url, 'media kind
', default=None) 
 161                         format_id_list.append(lang) 
 163                         format_id_list.append(kind) 
 164                     if not format_id_list: 
 165                         format_id_list.append(compat_str(num)) 
 166                     format_id = '-'.join(format_id_list) 
 167                     format_note = ', '.join(filter(None, (kind, lang_note))) 
 168                     request = sanitized_Request( 
 169                         compat_urlparse.urljoin(url, playlist_url), 
 171                             'X
-Requested
-With
': 'XMLHttpRequest
', 
 172                             'X
-CSRF
-Token
': csrf_token, 
 174                             'Accept
': 'application
/json
, text
/javascript
, */*; q
=0.01', 
 176                     playlist = self._download_json( 
 177                         request, video_id, 'Downloading 
%s playlist JSON
' % format_id, 
 181                     start_video = playlist.get('startvideo
', 0) 
 182                     playlist = playlist.get('playlist
') 
 183                     if not playlist or not isinstance(playlist, list): 
 185                     playlist = playlist[start_video] 
 186                     title = playlist.get('title
') 
 189                     description = playlist.get('description
') 
 190                     for source in playlist.get('sources
', []): 
 191                         file_ = source.get('file') 
 194                         ext = determine_ext(file_) 
 195                         format_id_list = [lang, kind] 
 197                             format_id_list.append('hls
') 
 198                         elif source.get('type') == 'video
/dash
' or ext == 'mpd
': 
 199                             format_id_list.append('dash
') 
 200                         format_id = '-'.join(filter(None, format_id_list)) 
 202                             file_formats = self._extract_m3u8_formats( 
 203                                 file_, video_id, 'mp4
', 
 204                                 entry_protocol='m3u8_native
', m3u8_id=format_id, fatal=False) 
 205                         elif source.get('type') == 'video
/dash
' or ext == 'mpd
': 
 207                             file_formats = self._extract_mpd_formats( 
 208                                 file_, video_id, mpd_id=format_id, fatal=False) 
 211                         for f in file_formats: 
 214                                 'format_note
': format_note, 
 216                         formats.extend(file_formats) 
 219                 self._sort_formats(formats) 
 220                 f = common_info.copy() 
 223                     'description
': description, 
 228             # Extract teaser only when full episode is not available 
 231                     r'data
-dialog
-header
=(["\'])(?P<title>.+?)\1[^>]+href=(["\'])(?P
<href
>.+?
)\
3[^
>]*>Teaser
<', 
 234                     f = common_info.copy() 
 236                         'id': '%s-teaser
' % f['id'], 
 237                         'title
': m.group('title
'), 
 238                         'url
': compat_urlparse.urljoin(url, m.group('href
')), 
 242         return self.playlist_result(entries, anime_id, anime_title, anime_description)