# encoding: utf-8
from __future__ import unicode_literals

import re
import json
import base64
import zlib
import xml.etree.ElementTree

from hashlib import sha1
from math import pow, sqrt, floor

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_parse_unquote,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
    bytes_to_intlist,
    intlist_to_bytes,
    unified_strdate,
    urlencode_postdata,
)
from ..aes import (
    aes_cbc_decrypt,
)


class CrunchyrollIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
    _NETRC_MACHINE = 'crunchyroll'
    _TESTS = [{
        'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
        'info_dict': {
            'id': '645513',
            'ext': 'flv',
            'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!',
            'description': 'md5:2d17137920c64f2f49981a7797d275ef',
            'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
            'uploader': 'Yomiuri Telecasting Corporation (YTV)',
            'upload_date': '20131013',
            'url': 're:(?!.*&)',
        },
        'params': {
            # rtmp stream, so skip the actual download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.com/media-589804/culture-japan-1',
        'info_dict': {
            'id': '589804',
            'ext': 'flv',
            'title': 'Culture Japan Episode 1 – Rebuilding Japan after the 3.11',
            'description': 'md5:fe2743efedb49d279552926d0bd0cd9e',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Danny Choo Network',
            'upload_date': '20120213',
        },
        'params': {
            # rtmp stream, so skip the actual download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697',
        'only_matching': True,
    }]

    _FORMAT_IDS = {
        '360': ('60', '106'),
        '480': ('61', '106'),
        '720': ('62', '106'),
        '1080': ('80', '108'),
    }

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return
        login_url = 'https://www.crunchyroll.com/?a=formhandler'
        data = urlencode_postdata({
            'formname': 'RpcApiUser_Login',
            'name': username,
            'password': password,
        })
        login_request = compat_urllib_request.Request(login_url, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        self._download_webpage(login_request, None, False, 'Wrong login info')

    def _real_initialize(self):
        self._login()

    def _decrypt_subtitles(self, data, iv, id):
        data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
        iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
        id = int(id)

        def obfuscate_key_aux(count, modulo, start):
            output = list(start)
            for _ in range(count):
                output.append(output[-1] + output[-2])
            # cut off start values
            output = output[2:]
            output = list(map(lambda x: x % modulo + 33, output))
            return output

        def obfuscate_key(key):
            num1 = int(floor(pow(2, 25) * sqrt(6.9)))
            num2 = (num1 ^ key) << 5
            num3 = key ^ num1
            num4 = num3 ^ (num3 >> 3) ^ num2
            prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2)))
            shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest())
            # Extend 160 Bit hash to 256 Bit
            return shaHash + [0] * 12

        key = obfuscate_key(id)

        decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
        return zlib.decompress(decrypted_data)
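    # Explanatory note (inferred from the code above, not from any Crunchyroll
    # documentation): the AES-256-CBC key is derived solely from the numeric
    # subtitle id. obfuscate_key_aux(20, 97, (1, 2)) builds a Fibonacci-like
    # sequence seeded with (1, 2), drops the seed, and maps each term through
    # "x % 97 + 33" to get a byte prefix; SHA-1 over that prefix plus the mixed
    # integer num4 yields 20 bytes, which are zero-padded to the 32 bytes
    # (256 bits) expected by aes_cbc_decrypt.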

    def _convert_subtitles_to_srt(self, sub_root):
        output = ''

        for i, event in enumerate(sub_root.findall('./events/event'), 1):
            start = event.attrib['start'].replace('.', ',')
            end = event.attrib['end'].replace('.', ',')
            text = event.attrib['text'].replace('\\N', '\n')
            output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
        return output
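    # For reference, each <event> becomes one SRT cue. Assuming an event with
    # start="0:00:01.15", end="0:00:03.40" and text="Hello\NWorld" (values are
    # illustrative, not taken from a real subtitle script), the loop above
    # appends:
    #
    #   1
    #   0:00:01,15 --> 0:00:03,40
    #   Hello
    #   World
    #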

    def _convert_subtitles_to_ass(self, sub_root):
        def ass_bool(strvalue):
            assvalue = '0'
            if strvalue == '1':
                assvalue = '-1'
            return assvalue

        output = '[Script Info]\n'
        output += 'Title: %s\n' % sub_root.attrib["title"]
        output += 'ScriptType: v4.00+\n'
        output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
        output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
        output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
        output += """ScaledBorderAndShadow: yes

[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
        for style in sub_root.findall('./styles/style'):
            output += 'Style: ' + style.attrib["name"]
            output += ',' + style.attrib["font_name"]
            output += ',' + style.attrib["font_size"]
            output += ',' + style.attrib["primary_colour"]
            output += ',' + style.attrib["secondary_colour"]
            output += ',' + style.attrib["outline_colour"]
            output += ',' + style.attrib["back_colour"]
            output += ',' + ass_bool(style.attrib["bold"])
            output += ',' + ass_bool(style.attrib["italic"])
            output += ',' + ass_bool(style.attrib["underline"])
            output += ',' + ass_bool(style.attrib["strikeout"])
            output += ',' + style.attrib["scale_x"]
            output += ',' + style.attrib["scale_y"]
            output += ',' + style.attrib["spacing"]
            output += ',' + style.attrib["angle"]
            output += ',' + style.attrib["border_style"]
            output += ',' + style.attrib["outline"]
            output += ',' + style.attrib["shadow"]
            output += ',' + style.attrib["alignment"]
            output += ',' + style.attrib["margin_l"]
            output += ',' + style.attrib["margin_r"]
            output += ',' + style.attrib["margin_v"]
            output += ',' + style.attrib["encoding"]
            output += '\n'
        output += """
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""
        for event in sub_root.findall('./events/event'):
            output += 'Dialogue: 0'
            output += ',' + event.attrib["start"]
            output += ',' + event.attrib["end"]
            output += ',' + event.attrib["style"]
            output += ',' + event.attrib["name"]
            output += ',' + event.attrib["margin_l"]
            output += ',' + event.attrib["margin_r"]
            output += ',' + event.attrib["margin_v"]
            output += ',' + event.attrib["effect"]
            output += ',' + event.attrib["text"]
            output += '\n'

        return output

    def _extract_subtitles(self, subtitle):
        sub_root = xml.etree.ElementTree.fromstring(subtitle)
        return [{
            'ext': 'srt',
            'data': self._convert_subtitles_to_srt(sub_root),
        }, {
            'ext': 'ass',
            'data': self._convert_subtitles_to_ass(sub_root),
        }]

    def _get_subtitles(self, video_id, webpage):
        subtitles = {}
        for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
            sub_page = self._download_webpage(
                'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
                video_id, note='Downloading subtitles for ' + sub_name)
            id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
            iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
            data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
            if not id or not iv or not data:
                continue
            subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
            lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
            if not lang_code:
                continue
            subtitles[lang_code] = self._extract_subtitles(subtitle)
        return subtitles
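    # Judging from the regexes above, the RpcApiSubtitle_GetXml response looks
    # roughly like this (shape inferred, values illustrative):
    #
    #   <subtitle id='123456' ...>
    #     <iv>base64 IV</iv>
    #     <data>base64 AES-encrypted, zlib-compressed payload</data>
    #   </subtitle>
    #
    # and the decrypted payload is itself XML carrying a lang_code attribute
    # plus the <styles> and <events> consumed by the converters above.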

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')

        if mobj.group('prefix') == 'm':
            mobile_webpage = self._download_webpage(url, video_id, 'Downloading mobile webpage')
            webpage_url = self._search_regex(r'<link rel="canonical" href="([^"]+)" />', mobile_webpage, 'webpage_url')
        else:
            webpage_url = 'http://www.' + mobj.group('url')

        webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage')
        note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, 'trailer-notice', default='')
        if note_m:
            raise ExtractorError(note_m)

        mobj = re.search(r'Page\.messaging_box_controller\.addItems\(\[(?P<msg>{.+?})\]\)', webpage)
        if mobj:
            msg = json.loads(mobj.group('msg'))
            if msg.get('type') == 'error':
                raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True)

        video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL)
        video_title = re.sub(r' {2,}', ' ', video_title)
        video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='')
        if not video_description:
            video_description = None
        video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
        if video_upload_date:
            video_upload_date = unified_strdate(video_upload_date)
        video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, 'video_uploader', fatal=False, flags=re.DOTALL)

        playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
        playerdata_req = compat_urllib_request.Request(playerdata_url)
        playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
        playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
        playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')

        stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, 'stream_id')
        video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, 'thumbnail', fatal=False)

        formats = []
        for fmt in re.findall(r'showmedia\.([0-9]{3,4})p', webpage):
            stream_quality, stream_format = self._FORMAT_IDS[fmt]
            video_format = fmt + 'p'
            streamdata_req = compat_urllib_request.Request(
                'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
                % (stream_id, stream_format, stream_quality),
                compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8'))
            streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            streamdata = self._download_xml(
                streamdata_req, video_id,
                note='Downloading media info for %s' % video_format)
            stream_info = streamdata.find('./{default}preload/stream_info')
            video_url = stream_info.find('./host').text
            video_play_path = stream_info.find('./file').text
            formats.append({
                'url': video_url,
                'play_path': video_play_path,
                'ext': 'flv',
                'format': video_format,
                'format_id': video_format,
            })

        subtitles = self.extract_subtitles(video_id, webpage)

        return {
            'id': video_id,
            'title': video_title,
            'description': video_description,
            'thumbnail': video_thumbnail,
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'subtitles': subtitles,
            'formats': formats,
        }


class CrunchyrollShowPlaylistIE(InfoExtractor):
    IE_NAME = "crunchyroll:playlist"
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'

    _TESTS = [{
        'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
        'info_dict': {
            'id': 'a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
            'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi',
        },
        'playlist_count': 13,
    }]

    def _real_extract(self, url):
        show_id = self._match_id(url)

        webpage = self._download_webpage(url, show_id)
        title = self._html_search_regex(
            r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
            webpage, 'title')
        episode_paths = re.findall(
            r'(?s)<li id="showview_videos_media_[0-9]+"[^>]+>.*?<a href="([^"]+)"',
            webpage)
        entries = [
            self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll')
            for ep in episode_paths
        ]

        return {
            '_type': 'playlist',
            'id': show_id,
            'title': title,
            'entries': entries,
        }
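

# A minimal local sanity check for these extractors (illustrative only; the
# YoutubeDL wiring below is an assumption about how a caller would use them,
# not part of this module):
#
#   import youtube_dl
#
#   with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info(
#           'http://www.crunchyroll.com/media-589804/culture-japan-1')
#       print(info['title'], sorted(info.get('subtitles', {})))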