-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('videoID')
-
-        webpage = self._download_webpage(url, video_id)
-
-        json_data = self._search_regex(r'window\.gon.*?gon\.show=(.+?);$',
-            webpage, u'json data', flags=re.MULTILINE)
-
-        try:
-            data = json.loads(json_data)
-        except ValueError as e:
-            raise ExtractorError(u'Invalid JSON: ' + str(e))
-
-        video_url = data['akamai_url'] + '&cbr=256'
-        url_parts = compat_urllib_parse_urlparse(video_url)
-        video_ext = url_parts.path.rpartition('.')[2]
-        info = {
-            'id': video_id,
-            'url': video_url,
-            'ext': video_ext,
-            'title': data['title'],
-            'description': data.get('teaser_text'),
-            'location': data.get('country_of_origin'),
-            'uploader': data.get('host', {}).get('name'),
-            'uploader_id': data.get('host', {}).get('slug'),
-            'thumbnail': data.get('image', {}).get('large_url_2x'),
-            'duration': data.get('duration'),
+        mobj = re.match(self._VALID_URL, url)
+        show_id = mobj.group('show_id')
+        episode_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, episode_id)
+
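+        # The page embeds its state as a JSON blob assigned to __INITIAL_STATE__;
+        # extract it and index into it by show and episode ID.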
+        episode = self._parse_json(
+            self._search_regex(
+                r'__INITIAL_STATE__\s*=\s*({.+?})\s*</script>',
+                webpage, 'json data'),
+            episode_id)['episodes'][show_id][episode_id]
+
+        title = episode['title']
+
+        show_title = episode.get('showTitle')
+        if show_title:
+            title = '%s - %s' % (show_title, title)
+
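+        # A single audio URL is provided; its cbr query parameter selects the audio
+        # bitrate, so expose each hardcoded bitrate as a separate audio-only format.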
+        formats = [{
+            'url': update_url_query(episode['audioURL'], query={'cbr': abr}),
+            'format_id': compat_str(abr),
+            'abr': abr,
+            'vcodec': 'none',
+        } for abr in (96, 128, 256)]
+
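+        # All remaining metadata is optional, so use .get() and the tolerant helpers.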
+        description = clean_html(episode.get('longTeaser'))
+        thumbnail = self._proto_relative_url(episode.get('imageURL', {}).get('landscape'))
+        duration = int_or_none(episode.get('duration'))
+        timestamp = unified_timestamp(episode.get('publishedAt'))
+
+        return {
+            'id': episode_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'timestamp': timestamp,
+            'formats': formats,