# coding: utf-8
from __future__ import unicode_literals

import json
import re
import zlib

from hashlib import sha1
from math import pow, sqrt, floor

from .common import InfoExtractor
from .vrv import VRVIE
from ..aes import aes_cbc_decrypt
from ..compat import (
    compat_b64decode,
    compat_etree_Element,
    compat_etree_fromstring,
    compat_str,
    compat_urllib_parse_urlencode,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    bytes_to_intlist,
    extract_attributes,
    ExtractorError,
    float_or_none,
    int_or_none,
    intlist_to_bytes,
    lowercase_escape,
    merge_dicts,
    remove_end,
    sanitized_Request,
    urlencode_postdata,
    xpath_text,
)


class CrunchyrollBaseIE(InfoExtractor):
    _LOGIN_URL = 'https://www.crunchyroll.com/login'
    _LOGIN_FORM = 'login_form'
    _NETRC_MACHINE = 'crunchyroll'
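
    # Legacy "RpcApi" helper: every call is an urlencoded POST to
    # https://www.crunchyroll.com/xml/ with req set to 'RpcApi' + method name
    # plus the method parameters. The response is parsed as XML; since the
    # request is made with fatal=False, a failed call does not raise but yields
    # a non-Element value, which callers check via isinstance(..., compat_etree_Element).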
    def _call_rpc_api(self, method, video_id, note=None, data=None):
        data = data or {}
        data['req'] = 'RpcApi' + method
        data = compat_urllib_parse_urlencode(data).encode('utf-8')
        return self._download_xml(
            'https://www.crunchyroll.com/xml/',
            video_id, note, fatal=False, data=data, headers={
                'Content-Type': 'application/x-www-form-urlencoded',
            })

    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        def is_logged(webpage):
            return 'href="/logout"' in webpage

        # Already logged in
        if is_logged(login_page):
            return

        login_form_str = self._search_regex(
            r'(?P<form><form[^>]+?id=(["\'])%s\2[^>]*>)' % self._LOGIN_FORM,
            login_page, 'login form', group='form')

        post_url = extract_attributes(login_form_str).get('action')
        if not post_url:
            post_url = self._LOGIN_URL
        elif not post_url.startswith('http'):
            post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

        login_form = self._form_hidden_inputs(self._LOGIN_FORM, login_page)

        login_form.update({
            'login_form[name]': username,
            'login_form[password]': password,
        })

        response = self._download_webpage(
            post_url, None, 'Logging in', 'Wrong login info',
            data=urlencode_postdata(login_form),
            headers={'Content-Type': 'application/x-www-form-urlencoded'})

        # Successful login
        if is_logged(response):
            return

        error = self._html_search_regex(
            '(?s)<ul[^>]+class=["\']messages["\'][^>]*>(.+?)</ul>',
            response, 'error message', default=None)
        if error:
            raise ExtractorError('Unable to login: %s' % error, expected=True)

        raise ExtractorError('Unable to log in')

    def _real_initialize(self):
        self._login()

    @staticmethod
    def _add_skip_wall(url):
        parsed_url = compat_urlparse.urlparse(url)
        qs = compat_urlparse.parse_qs(parsed_url.query)
        # Always force skip_wall to bypass maturity wall, namely 18+ confirmation message:
        # > This content may be inappropriate for some people.
        # > Are you sure you want to continue?
        # since it's not disabled by default in crunchyroll account's settings.
        # See https://github.com/ytdl-org/youtube-dl/issues/7202.
        qs['skip_wall'] = ['1']
        return compat_urlparse.urlunparse(
            parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
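
    # For example, _add_skip_wall('http://www.crunchyroll.com/media-589804/culture-japan-1')
    # returns the same URL with '?skip_wall=1' appended, so the 18+ confirmation
    # interstitial is never served.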


class CrunchyrollIE(CrunchyrollBaseIE, VRVIE):
    IE_NAME = 'crunchyroll'
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|(?:[^/]*/){1,2}[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
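    # Matches both 'media-<id>' style URLs (e.g. /media-589804/culture-japan-1)
    # and show/episode slugs ending in a numeric id (e.g.
    # /wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513);
    # the trailing numeric id is captured as video_id.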
    _TESTS = [{
        'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
        'info_dict': {
            'id': '645513',
            'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!',
            'description': 'md5:2d17137920c64f2f49981a7797d275ef',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Yomiuri Telecasting Corporation (YTV)',
            'upload_date': '20131013',
            'url': 're:(?!.*&)',
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Video gone',
    }, {
        'url': 'http://www.crunchyroll.com/media-589804/culture-japan-1',
        'info_dict': {
            'id': '589804',
            'title': 'Culture Japan Episode 1 – Rebuilding Japan after the 3.11',
            'description': 'md5:2fbc01f90b87e8e9137296f37b461c12',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Danny Choo Network',
            'upload_date': '20120213',
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Video gone',
    }, {
        'url': 'http://www.crunchyroll.com/rezero-starting-life-in-another-world-/episode-5-the-morning-of-our-promise-is-still-distant-702409',
        'info_dict': {
            'id': '702409',
            'description': compat_str,
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Re:Zero Partners',
            'timestamp': 1462098900,
            'upload_date': '20160501',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.com/konosuba-gods-blessing-on-this-wonderful-world/episode-1-give-me-deliverance-from-this-judicial-injustice-727589',
        'info_dict': {
            'id': '727589',
            'description': compat_str,
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Kadokawa Pictures Inc.',
            'timestamp': 1484130900,
            'upload_date': '20170111',
            'series': compat_str,
            'season': "KONOSUBA -God's blessing on this wonderful world! 2",
            'episode': 'Give Me Deliverance From This Judicial Injustice!',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697',
        'only_matching': True,
    }, {
        # geo-restricted (US), 18+ maturity wall, non-premium available
        'url': 'http://www.crunchyroll.com/cosplay-complex-ova/episode-1-the-birth-of-the-cosplay-club-565617',
        'only_matching': True,
    }, {
        # A description with double quotes
        'url': 'http://www.crunchyroll.com/11eyes/episode-1-piros-jszaka-red-night-535080',
        'info_dict': {
            'id': '535080',
            'description': compat_str,
            'uploader': 'Marvelous AQL Inc.',
            'timestamp': 1255512600,
            'upload_date': '20091014',
        },
        'params': {
            # Just test metadata extraction
            'skip_download': True,
        },
    }, {
        # make sure we can extract an uploader name that's not a link
        'url': 'http://www.crunchyroll.com/hakuoki-reimeiroku/episode-1-dawn-of-the-divine-warriors-606899',
        'info_dict': {
            'id': '606899',
            'title': 'Hakuoki Reimeiroku Episode 1 – Dawn of the Divine Warriors',
            'description': 'Ryunosuke was left to die, but Serizawa-san asked him a simple question "Do you want to live?"',
            'uploader': 'Geneon Entertainment',
            'upload_date': '20120717',
        },
        'params': {
            # just test metadata extraction
            'skip_download': True,
        },
        'skip': 'Video gone',
    }, {
        # A video with a vastly different season name compared to the series name
        'url': 'http://www.crunchyroll.com/nyarko-san-another-crawling-chaos/episode-1-test-590532',
        'info_dict': {
            'id': '590532',
            'description': compat_str,
            'uploader': 'TV TOKYO',
            'timestamp': 1330956000,
            'upload_date': '20120305',
            'series': 'Nyarko-san: Another Crawling Chaos',
            'season': 'Haiyoru! Nyaruani (ONA)',
        },
        'params': {
            # Just test metadata extraction
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.com/media-723735',
        'only_matching': True,
    }, {
        'url': 'https://www.crunchyroll.com/en-gb/mob-psycho-100/episode-2-urban-legends-encountering-rumors-780921',
        'only_matching': True,
    }]
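
    # Maps the page's "<res>p" labels to the (video_quality, video_format) id
    # pair expected by the legacy RpcApi calls below.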
    _FORMAT_IDS = {
        '360': ('60', '106'),
        '480': ('61', '106'),
        '720': ('62', '106'),
        '1080': ('80', '108'),
    }

    def _download_webpage(self, url_or_request, *args, **kwargs):
        request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
                   else sanitized_Request(url_or_request))
        # Accept-Language must be set explicitly to accept any language and avoid issues
        # similar to https://github.com/ytdl-org/youtube-dl/issues/6797.
        # Along with the IP address, Crunchyroll uses Accept-Language to decide whether
        # georestriction should be imposed (it appears to take only the first language,
        # ignoring priorities, and requires it to correspond to the IP's country). This
        # also breaks Crunchyroll in browsers that don't place the locale language first
        # in the header. Accepting any language ('*') works around the issue.
        request.add_header('Accept-Language', '*')
        return super(CrunchyrollBaseIE, self)._download_webpage(request, *args, **kwargs)
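
    # Legacy subtitle handling: the RpcApi returns each subtitle script as a
    # base64 blob containing zlib-compressed XML encrypted with AES-CBC; the key
    # is derived from the subtitle id by the obfuscation scheme below.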
    def _decrypt_subtitles(self, data, iv, id):
        data = bytes_to_intlist(compat_b64decode(data))
        iv = bytes_to_intlist(compat_b64decode(iv))
        id = int(id)

        def obfuscate_key_aux(count, modulo, start):
            output = list(start)
            for _ in range(count):
                output.append(output[-1] + output[-2])
            # cut off start values
            output = output[2:]
            output = list(map(lambda x: x % modulo + 33, output))
            return output

        def obfuscate_key(key):
            num1 = int(floor(pow(2, 25) * sqrt(6.9)))
            num2 = (num1 ^ key) << 5
            num3 = key ^ num1
            num4 = num3 ^ (num3 >> 3) ^ num2
            prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2)))
            shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest())
            # Extend 160 Bit hash to 256 Bit
            return shaHash + [0] * 12

        key = obfuscate_key(id)

        decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
        return zlib.decompress(decrypted_data)
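
    # The decrypted XML is converted to both SRT and ASS text. Each <event>
    # element becomes one numbered SRT cue, i.e. (timestamps illustrative):
    #   1
    #   0:00:01,18 --> 0:00:05,27
    #   subtitle text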
    def _convert_subtitles_to_srt(self, sub_root):
        output = ''

        for i, event in enumerate(sub_root.findall('./events/event'), 1):
            start = event.attrib['start'].replace('.', ',')
            end = event.attrib['end'].replace('.', ',')
            text = event.attrib['text'].replace('\\N', '\n')
            output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
        return output

    def _convert_subtitles_to_ass(self, sub_root):
        def ass_bool(strvalue):
            assvalue = '0'
            if strvalue == '1':
                assvalue = '-1'
            return assvalue

        output = '[Script Info]\n'
        output += 'Title: %s\n' % sub_root.attrib['title']
        output += 'ScriptType: v4.00+\n'
        output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style']
        output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x']
        output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y']
        output += """
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
        for style in sub_root.findall('./styles/style'):
            output += 'Style: ' + style.attrib['name']
            output += ',' + style.attrib['font_name']
            output += ',' + style.attrib['font_size']
            output += ',' + style.attrib['primary_colour']
            output += ',' + style.attrib['secondary_colour']
            output += ',' + style.attrib['outline_colour']
            output += ',' + style.attrib['back_colour']
            output += ',' + ass_bool(style.attrib['bold'])
            output += ',' + ass_bool(style.attrib['italic'])
            output += ',' + ass_bool(style.attrib['underline'])
            output += ',' + ass_bool(style.attrib['strikeout'])
            output += ',' + style.attrib['scale_x']
            output += ',' + style.attrib['scale_y']
            output += ',' + style.attrib['spacing']
            output += ',' + style.attrib['angle']
            output += ',' + style.attrib['border_style']
            output += ',' + style.attrib['outline']
            output += ',' + style.attrib['shadow']
            output += ',' + style.attrib['alignment']
            output += ',' + style.attrib['margin_l']
            output += ',' + style.attrib['margin_r']
            output += ',' + style.attrib['margin_v']
            output += ',' + style.attrib['encoding']
            output += '\n'
        output += """
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""
        for event in sub_root.findall('./events/event'):
            output += 'Dialogue: 0'
            output += ',' + event.attrib['start']
            output += ',' + event.attrib['end']
            output += ',' + event.attrib['style']
            output += ',' + event.attrib['name']
            output += ',' + event.attrib['margin_l']
            output += ',' + event.attrib['margin_r']
            output += ',' + event.attrib['margin_v']
            output += ',' + event.attrib['effect']
            output += ',' + event.attrib['text']
            output += '\n'

        return output

    def _extract_subtitles(self, subtitle):
        sub_root = compat_etree_fromstring(subtitle)
        return [{
            'ext': 'srt',
            'data': self._convert_subtitles_to_srt(sub_root),
        }, {
            'ext': 'ass',
            'data': self._convert_subtitles_to_ass(sub_root),
        }]
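
    # Legacy subtitle discovery: the player page lists scripts as ssid/title
    # attribute pairs; each one is fetched via RpcApiSubtitle_GetXml, decrypted
    # and converted above, and keyed by the lang_code found in the script itself.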
    def _get_subtitles(self, video_id, webpage):
        subtitles = {}
        for sub_id, sub_name in re.findall(r'\bssid=([0-9]+)"[^>]+?\btitle="([^"]+)', webpage):
            sub_doc = self._call_rpc_api(
                'Subtitle_GetXml', video_id,
                'Downloading subtitles for ' + sub_name, data={
                    'subtitle_script_id': sub_id,
                })
            if not isinstance(sub_doc, compat_etree_Element):
                continue
            sid = sub_doc.get('id')
            iv = xpath_text(sub_doc, 'iv', 'subtitle iv')
            data = xpath_text(sub_doc, 'data', 'subtitle data')
            if not sid or not iv or not data:
                continue
            subtitle = self._decrypt_subtitles(data, iv, sid).decode('utf-8')
            lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
            if not lang_code:
                continue
            subtitles[lang_code] = self._extract_subtitles(subtitle)
        return subtitles
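
    # Extraction flow: fetch the page with skip_wall forced, surface trailer,
    # age-gate and error notices, then read the embedded vilos.config.media
    # JSON. Formats come from its VRV streams when present, otherwise from the
    # legacy RpcApi endpoints; subtitles and episode metadata are merged from
    # the JSON, the RpcApi metadata call and the page markup.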
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')

        if mobj.group('prefix') == 'm':
            mobile_webpage = self._download_webpage(url, video_id, 'Downloading mobile webpage')
            webpage_url = self._search_regex(r'<link rel="canonical" href="([^"]+)" />', mobile_webpage, 'webpage_url')
        else:
            webpage_url = 'http://www.' + mobj.group('url')

        webpage = self._download_webpage(
            self._add_skip_wall(webpage_url), video_id,
            headers=self.geo_verification_headers())
        note_m = self._html_search_regex(
            r'<div class="showmedia-trailer-notice">(.+?)</div>',
            webpage, 'trailer-notice', default='')
        if note_m:
            raise ExtractorError(note_m)

        mobj = re.search(r'Page\.messaging_box_controller\.addItems\(\[(?P<msg>{.+?})\]\)', webpage)
        if mobj:
            msg = json.loads(mobj.group('msg'))
            if msg.get('type') == 'error':
                raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True)

        if 'To view this, please log in to verify you are 18 or older.' in webpage:
            self.raise_login_required()

        media = self._parse_json(self._search_regex(
            r'vilos\.config\.media\s*=\s*({.+?});',
            webpage, 'vilos media', default='{}'), video_id)
        media_metadata = media.get('metadata') or {}

        language = self._search_regex(
            r'(?:vilos\.config\.player\.language|LOCALE)\s*=\s*(["\'])(?P<lang>(?:(?!\1).)+)\1',
            webpage, 'language', default=None, group='lang')

        video_title = self._html_search_regex(
            (r'(?s)<h1[^>]*>((?:(?!<h1).)*?<(?:span[^>]+itemprop=["\']title["\']|meta[^>]+itemprop=["\']position["\'])[^>]*>(?:(?!<h1).)+?)</h1>',
             r'<title>(.+?),\s+-\s+.+? Crunchyroll'),
            webpage, 'video_title', default=None)
        if not video_title:
            video_title = re.sub(r'^Watch\s+', '', self._og_search_description(webpage))
        video_title = re.sub(r' {2,}', ' ', video_title)
        video_description = (self._parse_json(self._html_search_regex(
            r'<script[^>]*>\s*.+?\[media_id=%s\].+?({.+?"description"\s*:.+?})\);' % video_id,
            webpage, 'description', default='{}'), video_id) or media_metadata).get('description')
        if video_description:
            video_description = lowercase_escape(video_description.replace(r'\r\n', '\n'))
        video_uploader = self._html_search_regex(
            # try looking for both an uploader that's a link and one that's not
            [r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', r'<div>\s*Publisher:\s*<span>\s*(.+?)\s*</span>\s*</div>'],
            webpage, 'video_uploader', default=False)

        formats = []
        for stream in media.get('streams', []):
            audio_lang = stream.get('audio_lang')
            hardsub_lang = stream.get('hardsub_lang')
            vrv_formats = self._extract_vrv_formats(
                stream.get('url'), video_id, stream.get('format'),
                audio_lang, hardsub_lang)
            for f in vrv_formats:
                if not hardsub_lang:
                    f['preference'] = 1
                language_preference = 0
                if audio_lang == language:
                    language_preference += 1
                if hardsub_lang == language:
                    language_preference += 1
                if language_preference:
                    f['language_preference'] = language_preference
            formats.extend(vrv_formats)
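        # Fallback for pages that expose no vilos streams: probe the "<res>p"
        # tokens on the page and query the legacy RpcApi config/stream-info
        # endpoints for HLS or RTMP sources.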
        if not formats:
            available_fmts = []
            for a, fmt in re.findall(r'(<a[^>]+token=["\']showmedia\.([0-9]{3,4})p["\'][^>]+>)', webpage):
                attrs = extract_attributes(a)
                href = attrs.get('href')
                if href and '/freetrial' in href:
                    continue
                available_fmts.append(fmt)
            if not available_fmts:
                for p in (r'token=["\']showmedia\.([0-9]{3,4})p"', r'showmedia\.([0-9]{3,4})p'):
                    available_fmts = re.findall(p, webpage)
                    if available_fmts:
                        break
            if not available_fmts:
                available_fmts = self._FORMAT_IDS.keys()
            video_encode_ids = []

            for fmt in available_fmts:
                stream_quality, stream_format = self._FORMAT_IDS[fmt]
                video_format = fmt + 'p'
                stream_infos = []
                streamdata = self._call_rpc_api(
                    'VideoPlayer_GetStandardConfig', video_id,
                    'Downloading media info for %s' % video_format, data={
                        'media_id': video_id,
                        'video_format': stream_format,
                        'video_quality': stream_quality,
                    })
                if isinstance(streamdata, compat_etree_Element):
                    stream_info = streamdata.find('./{default}preload/stream_info')
                    if stream_info is not None:
                        stream_infos.append(stream_info)
                stream_info = self._call_rpc_api(
                    'VideoEncode_GetStreamInfo', video_id,
                    'Downloading stream info for %s' % video_format, data={
                        'media_id': video_id,
                        'video_format': stream_format,
                        'video_encode_quality': stream_quality,
                    })
                if isinstance(stream_info, compat_etree_Element):
                    stream_infos.append(stream_info)
                for stream_info in stream_infos:
                    video_encode_id = xpath_text(stream_info, './video_encode_id')
                    if video_encode_id in video_encode_ids:
                        continue
                    video_encode_ids.append(video_encode_id)

                    video_file = xpath_text(stream_info, './file')
                    if not video_file:
                        continue
                    if video_file.startswith('http'):
                        formats.extend(self._extract_m3u8_formats(
                            video_file, video_id, 'mp4', entry_protocol='m3u8_native',
                            m3u8_id='hls', fatal=False))
                        continue

                    video_url = xpath_text(stream_info, './host')
                    if not video_url:
                        continue
                    metadata = stream_info.find('./metadata')
                    format_info = {
                        'format': video_format,
                        'height': int_or_none(xpath_text(metadata, './height')),
                        'width': int_or_none(xpath_text(metadata, './width')),
                    }

                    if '.fplive.net/' in video_url:
                        video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
                        parsed_video_url = compat_urlparse.urlparse(video_url)
                        direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace(
                            netloc='v.lvlt.crcdn.net',
                            path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_file.split(':')[-1])))
                        if self._is_valid_url(direct_video_url, video_id, video_format):
                            format_info.update({
                                'format_id': 'http-' + video_format,
                                'url': direct_video_url,
                            })
                            formats.append(format_info)
                            continue

                    format_info.update({
                        'format_id': 'rtmp-' + video_format,
                        'url': video_url,
                        'play_path': video_file,
                        'ext': 'flv',
                    })
                    formats.append(format_info)
        self._sort_formats(formats, ('preference', 'language_preference', 'height', 'width', 'tbr', 'fps'))

        metadata = self._call_rpc_api(
            'VideoPlayer_GetMediaMetadata', video_id,
            note='Downloading media info', data={
                'media_id': video_id,
            })

        subtitles = {}
        for subtitle in media.get('subtitles', []):
            subtitle_url = subtitle.get('url')
            if not subtitle_url:
                continue
            subtitles.setdefault(subtitle.get('language', 'enUS'), []).append({
                'url': subtitle_url,
                'ext': subtitle.get('format', 'ass'),
            })
        if not subtitles:
            subtitles = self.extract_subtitles(video_id, webpage)

        # the webpage provides more accurate data than series_title from XML
        series = self._html_search_regex(
            r'(?s)<h\d[^>]+\bid=["\']showmedia_about_episode_num[^>]+>(.+?)</h\d',
            webpage, 'series', fatal=False)

        season = episode = episode_number = duration = thumbnail = None

        if isinstance(metadata, compat_etree_Element):
            season = xpath_text(metadata, 'series_title')
            episode = xpath_text(metadata, 'episode_title')
            episode_number = int_or_none(xpath_text(metadata, 'episode_number'))
            duration = float_or_none(media_metadata.get('duration'), 1000)
            thumbnail = xpath_text(metadata, 'episode_image_url')

        if not episode:
            episode = media_metadata.get('title')
        if not episode_number:
            episode_number = int_or_none(media_metadata.get('episode_number'))
        if not thumbnail:
            thumbnail = media_metadata.get('thumbnail', {}).get('url')

        season_number = int_or_none(self._search_regex(
            r'(?s)<h\d[^>]+id=["\']showmedia_about_episode_num[^>]+>.+?</h\d>\s*<h4>\s*Season (\d+)',
            webpage, 'season number', default=None))

        info = self._search_json_ld(webpage, video_id, default={})

        return merge_dicts({
            'id': video_id,
            'title': video_title,
            'description': video_description,
            'duration': duration,
            'thumbnail': thumbnail,
            'uploader': video_uploader,
            'series': series,
            'season': season,
            'season_number': season_number,
            'episode': episode,
            'episode_number': episode_number,
            'subtitles': subtitles,
            'formats': formats,
        }, info)
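

# Show overview pages (e.g. /a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi)
# are handled as playlists; the negative lookahead in _VALID_URL keeps site
# sections such as /news or /store from being treated as shows.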
class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
    IE_NAME = 'crunchyroll:playlist'
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login|media-\d+))(?P<id>[\w\-]+))/?(?:\?|$)'
    _TESTS = [{
        'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
        'info_dict': {
            'id': 'a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
            'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
        },
        'playlist_count': 13,
    }, {
        # geo-restricted (US), 18+ maturity wall, non-premium available
        'url': 'http://www.crunchyroll.com/cosplay-complex-ova',
        'info_dict': {
            'id': 'cosplay-complex-ova',
            'title': 'Cosplay Complex OVA'
        },
        'skip': 'Georestricted',
    }, {
        # geo-restricted (US), 18+ maturity wall, non-premium will be available since 2015.11.14
        'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        show_id = self._match_id(url)

        webpage = self._download_webpage(
            self._add_skip_wall(url), show_id,
            headers=self.geo_verification_headers())
        title = self._html_search_meta('name', webpage, default=None)

        episode_paths = re.findall(
            r'(?s)<li id="showview_videos_media_(\d+)"[^>]+>.*?<a href="([^"]+)"',
            webpage)
        entries = [
            self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll', ep_id)
            for ep_id, ep in episode_paths
        ]
        entries.reverse()

        return {
            '_type': 'playlist',
            'id': show_id,
            'title': title,
            'entries': entries,
        }