import xml.etree.ElementTree
from .common import InfoExtractor
-from ..utils import (
- compat_urllib_parse,
- find_xpath_attr,
- fix_xml_ampersands,
- compat_urlparse,
+from ..compat import (
+ compat_parse_qs,
compat_str,
+ compat_urllib_parse,
+ compat_urllib_parse_urlparse,
compat_urllib_request,
- compat_parse_qs,
-
+ compat_urlparse,
+)
+from ..utils import (
determine_ext,
ExtractorError,
- unsmuggle_url,
+ find_xpath_attr,
+ fix_xml_ampersands,
unescapeHTML,
+ unsmuggle_url,
)
class BrightcoveIE(InfoExtractor):
- _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
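+ # also accept bare "brightcove:<query>" pseudo-URLs (query string only) in addition to brightcove.com links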
+ _VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
_FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
_TESTS = [
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
'info_dict': {
'title': 'Sealife',
+ 'id': '3550319591001',
},
'playlist_mincount': 7,
},
"""
# Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
- object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>',
+ object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>',
lambda m: m.group(1) + '/>', object_str)
# Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
object_str = object_str.replace('<--', '<!--')
+ # remove namespace to simplify extraction
+ object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
object_str = fix_xml_ampersands(object_str)
- object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
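+ # the object HTML may still be unparseable even after the fixups above; skip such embeds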
+ try:
+ object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
+ except xml.etree.ElementTree.ParseError:
+ return
fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
if fv_el is not None:
(?:
[^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] |
[^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
- ).+?</object>''',
+ ).+?>\s*</object>''',
webpage)
- return [cls._build_brighcove_url(m) for m in matches]
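+ # _build_brighcove_url may return None for embeds it cannot parse; drop those entries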
+ return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
webpage = self._download_webpage(req, video_id)
error_msg = self._html_search_regex(
- r"<h1>We're sorry.</h1>\s*<p>(.*?)</p>", webpage,
+ r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
'error message', default=None)
if error_msg is not None:
raise ExtractorError(
playlist_info = json_data['videoList']
videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]
- return self.playlist_result(videos, playlist_id=playlist_info['id'],
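+ # the playlist id in the JSON may be numeric; coerce it to a string for the info dict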
+ return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'],
playlist_title=playlist_info['mediaCollectionDTO']['displayName'])
def _extract_video_info(self, video_info):
formats = []
for rend in renditions:
url = rend['defaultURL']
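+ # skip renditions that come without a defaultURL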
+ if not url:
+ continue
+ ext = None
if rend['remote']:
- # This type of renditions are served through akamaihd.net,
- # but they don't use f4m manifests
- url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
- ext = 'flv'
- else:
+ url_comp = compat_urllib_parse_urlparse(url)
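+ # remote renditions may point at an HLS manifest; expand it into its variant formats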
+ if url_comp.path.endswith('.m3u8'):
+ formats.extend(
+ self._extract_m3u8_formats(url, info['id'], 'mp4'))
+ continue
+ elif 'akamaihd.net' in url_comp.netloc:
+ # This type of rendition is served through
+ # akamaihd.net, but it doesn't use f4m manifests
+ url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
+ ext = 'flv'
+ if ext is None:
ext = determine_ext(url)
size = rend.get('size')
formats.append({