-        for m_video, m_name in zip(m_videos,m_names):
-            talk_url='http://www.ted.com%s' % m_name.group('talk_url')
-            playlist_entries.append(self.url_result(talk_url, 'TED'))
-        return self.playlist_result(playlist_entries, playlist_id = playlist_id, playlist_title = playlist_title)
-
-    def _talk_info(self, url, video_id=0):
-        """Return the video for the talk in the url"""
-        m = re.match(self._VALID_URL, url,re.VERBOSE)
-        video_name = m.group('name')
-        webpage = self._download_webpage(url, video_id, 'Downloading \"%s\" page' % video_name)
-        self.report_extraction(video_name)
-        # If the url includes the language we get the title translated
-        title = self._html_search_regex(r'<span id="altHeadline" >(?P<title>.*)</span>',
-                                        webpage, 'title')
-        json_data = self._search_regex(r'<script.*?>var talkDetails = ({.*?})</script>',
-                                       webpage, 'json data')
-        info = json.loads(json_data)
-        desc = self._html_search_regex(r'<div class="talk-intro">.*?<p.*?>(.*?)</p>',
-                                       webpage, 'description', flags = re.DOTALL)
-
-        thumbnail = self._search_regex(r'</span>[\s.]*</div>[\s.]*<img src="(.*?)"',
-                                       webpage, 'thumbnail')
-        info = {
-            'id': info['id'],
-            'url': info['htmlStreams'][-1]['file'],
-            'ext': 'mp4',
-            'title': title,
-            'thumbnail': thumbnail,
-            'description': desc,
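+        # Collect every talk link tagged with data-ga-context="playlist" and
+        # queue each one as its own entry handled by this extractor.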
+        for entry in re.findall(r'(?s)<[^>]+data-ga-context=["\']playlist["\'][^>]*>', webpage):
+            attrs = extract_attributes(entry)
+            entry_url = compat_urlparse.urljoin(url, attrs['href'])
+            playlist_entries.append(self.url_result(entry_url, self.ie_key()))
+
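+        # Derive the playlist id from the canonical URL exposed via og:url;
+        # without that tag there is no reliable id to report.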
+        final_url = self._og_search_url(webpage, fatal=False)
+        playlist_id = (
+            re.match(self._VALID_URL, final_url).group('playlist_id')
+            if final_url else None)
+
+        return self.playlist_result(
+            playlist_entries, playlist_id=playlist_id,
+            playlist_title=self._og_search_title(webpage, fatal=False),
+            playlist_description=self._og_search_description(webpage))
+
+    def _talk_info(self, url, video_name):
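+        """Return the info for the talk at url."""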
+        webpage = self._download_webpage(url, video_name)
+
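+        # The talk metadata is embedded in the page as JSON; newer page
+        # layouts wrap it in __INITIAL_DATA__, so unwrap that when present.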
+        info = self._extract_info(webpage)
+
+        data = try_get(info, lambda x: x['__INITIAL_DATA__'], dict) or info
+        talk_info = data['talks'][0]
+
+        title = talk_info['title'].strip()
+
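+        # Direct download URLs may live under downloads.nativeDownloads or
+        # directly on the talk object, so probe both locations.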
+        native_downloads = try_get(
+            talk_info,
+            (lambda x: x['downloads']['nativeDownloads'],
+             lambda x: x['nativeDownloads']),
+            dict) or {}
+
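+        # One progressive format per native download; _NATIVE_FORMATS (defined
+        # on the extractor) supplies extra metadata for the known format ids.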
+        formats = [{
+            'url': format_url,
+            'format_id': format_id,
+            'format': format_id,
+        } for (format_id, format_url) in native_downloads.items() if format_url is not None]
+        if formats:
+            for f in formats:
+                finfo = self._NATIVE_FORMATS.get(f['format_id'])
+                if finfo:
+                    f.update(finfo)
+
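+        # Talks hosted on an external service (e.g. YouTube) are handed off to
+        # the matching extractor instead of being parsed here.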
+        player_talk = talk_info['player_talks'][0]
+
+        external = player_talk.get('external')
+        if isinstance(external, dict):
+            service = external.get('service')
+            if isinstance(service, compat_str):
+                ext_url = None
+                if service.lower() == 'youtube':
+                    ext_url = external.get('code')
+
+                return self.url_result(ext_url or external['uri'])
+
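+        # The remaining streams are grouped by delivery method; handle
+        # progressive H.264, RTMP and HLS resources separately below.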
+        resources_ = player_talk.get('resources') or talk_info.get('resources')
+
+        http_url = None
+        for format_id, resources in resources_.items():
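+            # Progressive H.264 renditions. Remember one URL that carries an
+            # explicit "<bitrate>k" token so HTTP counterparts of the HLS
+            # variants can be synthesized from it further down.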
+            if format_id == 'h264':
+                for resource in resources:
+                    h264_url = resource.get('file')
+                    if not h264_url:
+                        continue
+                    bitrate = int_or_none(resource.get('bitrate'))
+                    formats.append({
+                        'url': h264_url,
+                        'format_id': '%s-%sk' % (format_id, bitrate),
+                        'tbr': bitrate,
+                    })
+                    if re.search(r'\d+k', h264_url):
+                        http_url = h264_url
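+            # RTMP renditions share a single streamer URL and only differ in
+            # play path, resolution and bitrate.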
+            elif format_id == 'rtmp':
+                streamer = talk_info.get('streamer')
+                if not streamer:
+                    continue
+                for resource in resources:
+                    formats.append({
+                        'format_id': '%s-%s' % (format_id, resource.get('name')),
+                        'url': streamer,
+                        'play_path': resource['file'],
+                        'ext': 'flv',
+                        'width': int_or_none(resource.get('width')),
+                        'height': int_or_none(resource.get('height')),
+                        'tbr': int_or_none(resource.get('bitrate')),
+                    })
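+            # HLS: expand the m3u8 playlist into individual variant formats.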
+            elif format_id == 'hls':
+                if not isinstance(resources, dict):
+                    continue
+                stream_url = url_or_none(resources.get('stream'))
+                if not stream_url:
+                    continue
+                formats.extend(self._extract_m3u8_formats(
+                    stream_url, video_name, 'mp4', m3u8_id=format_id,
+                    fatal=False))
+
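+        # If a bitrate-tagged progressive URL was found above, clone each HLS
+        # video variant into a plain HTTP format by swapping in its bitrate,
+        # keeping only URLs that actually resolve.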
+        m3u8_formats = list(filter(
+            lambda f: f.get('protocol') == 'm3u8' and f.get('vcodec') != 'none',
+            formats))
+        if http_url:
+            for m3u8_format in m3u8_formats:
+                bitrate = self._search_regex(r'(\d+k)', m3u8_format['url'], 'bitrate', default=None)
+                if not bitrate:
+                    continue
+                bitrate_url = re.sub(r'\d+k', bitrate, http_url)
+                if not self._is_valid_url(
+                        bitrate_url, video_name, '%s bitrate' % bitrate):
+                    continue
+                f = m3u8_format.copy()
+                f.update({
+                    'url': bitrate_url,
+                    'format_id': m3u8_format['format_id'].replace('hls', 'http'),
+                    'protocol': 'http',
+                })
+                if f.get('acodec') == 'none':
+                    del f['acodec']
+                formats.append(f)
+
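+        # Audio-only download, when TED offers one.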
+        audio_download = talk_info.get('audioDownload')
+        if audio_download:
+            formats.append({
+                'url': audio_download,
+                'format_id': 'audio',
+                'vcodec': 'none',
+            })
+
+        self._sort_formats(formats)
+
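+        # Info dict ids are expected to be strings; the JSON id may be numeric.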
+        video_id = compat_str(talk_info['id'])
+
+        return {
+            'id': video_id,
+            'title': title,
+            'uploader': player_talk.get('speaker') or talk_info.get('speaker'),
+            'thumbnail': player_talk.get('thumb') or talk_info.get('thumb'),
+            'description': self._og_search_description(webpage),
+            'subtitles': self._get_subtitles(video_id, talk_info),
+            'formats': formats,
+            'duration': float_or_none(talk_info.get('duration')),
+            'view_count': int_or_none(data.get('viewed_count')),
+            'comment_count': int_or_none(
+                try_get(data, lambda x: x['comments']['count'])),
+            'tags': try_get(talk_info, lambda x: x['tags'], list),
+        }
+
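+    # Subtitles are built from TED's per-language subtitle endpoint rather
+    # than scraped from the page.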
+    def _get_subtitles(self, video_id, talk_info):
+        sub_lang_list = {}
+        for language in try_get(
+                talk_info,
+                (lambda x: x['downloads']['languages'],
+                 lambda x: x['languages']), list):
+            lang_code = language.get('languageCode') or language.get('ianaCode')
+            if not lang_code:
+                continue
+            sub_lang_list[lang_code] = [
+                {
+                    'url': 'http://www.ted.com/talks/subtitles/id/%s/lang/%s/format/%s' % (video_id, lang_code, ext),
+                    'ext': ext,