from .common import InfoExtractor
from ..compat import (
- compat_etree_fromstring,
compat_str,
compat_urlparse,
+ compat_HTTPError,
)
from ..utils import (
ExtractorError,
class VevoBaseIE(InfoExtractor):
- def _extract_json(self, webpage, video_id, item):
+ def _extract_json(self, webpage, video_id):
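+ # Returns the whole parsed window.__INITIAL_STORE__ object; callers pick out the sub-tree they need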
return self._parse_json(
self._search_regex(
r'window\.__INITIAL_STORE__\s*=\s*({.+?});\s*</script>',
webpage, 'initial store'),
- video_id)['default'][item]
+ video_id)
class VevoIE(VevoBaseIE):
# no genres available
'url': 'http://www.vevo.com/watch/INS171400764',
'only_matching': True,
+ }, {
+ # Another video available only via the webpage; uses the streams/streamsV3 formats
+ # Geo-restricted to Netherlands/Germany
+ 'url': 'http://www.vevo.com/watch/boostee/pop-corn-clip-officiel/FR1A91600909',
+ 'only_matching': True,
}]
- _SMIL_BASE_URL = 'http://smil.lvl3.vevo.com'
- _SOURCE_TYPES = {
- 0: 'youtube',
- 1: 'brightcove',
- 2: 'http',
- 3: 'hls_ios',
- 4: 'hls',
- 5: 'smil', # http
- 7: 'f4m_cc',
- 8: 'f4m_ak',
- 9: 'f4m_l3',
- 10: 'ism',
- 13: 'smil', # rtmp
- 18: 'dash',
- }
_VERSIONS = {
0: 'youtube', # only in AuthenticateVideo videoVersions
1: 'level3',
4: 'amazon',
}
- def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
- formats = []
- els = smil.findall('.//{http://www.w3.org/2001/SMIL20/Language}video')
- for el in els:
- src = el.attrib['src']
- m = re.match(r'''(?xi)
- (?P<ext>[a-z0-9]+):
- (?P<path>
- [/a-z0-9]+ # The directory and main part of the URL
- _(?P<tbr>[0-9]+)k
- _(?P<width>[0-9]+)x(?P<height>[0-9]+)
- _(?P<vcodec>[a-z0-9]+)
- _(?P<vbr>[0-9]+)
- _(?P<acodec>[a-z0-9]+)
- _(?P<abr>[0-9]+)
- \.[a-z0-9]+ # File extension
- )''', src)
- if not m:
- continue
-
- format_url = self._SMIL_BASE_URL + m.group('path')
- formats.append({
- 'url': format_url,
- 'format_id': 'smil_' + m.group('tbr'),
- 'vcodec': m.group('vcodec'),
- 'acodec': m.group('acodec'),
- 'tbr': int(m.group('tbr')),
- 'vbr': int(m.group('vbr')),
- 'abr': int(m.group('abr')),
- 'ext': m.group('ext'),
- 'width': int(m.group('width')),
- 'height': int(m.group('height')),
- })
- return formats
-
def _initialize_api(self, video_id):
req = sanitized_Request(
'http://www.vevo.com/auth', data=b'')
webpage = self._download_webpage(
req, None,
note='Retrieving oauth token',
errnote='Unable to retrieve oauth token')
- if 'THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION' in webpage:
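+ # Match the region notice case-insensitively in case its casing varies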
+ if re.search(r'(?i)THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION', webpage):
self.raise_geo_restricted(
'%s said: This page is currently unavailable in your region' % self.IE_NAME)
auth_info = self._parse_json(webpage, video_id)
self._api_url_template = self.http_scheme() + '//apiv2.vevo.com/%s?token=' + auth_info['access_token']
def _call_api(self, path, *args, **kwargs):
- return self._download_json(self._api_url_template % path, *args, **kwargs)
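+ # Surface the API's own error messages as an expected ExtractorError instead of a bare HTTP error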
+ try:
+ data = self._download_json(self._api_url_template % path, *args, **kwargs)
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError):
+ errors = self._parse_json(e.cause.read().decode(), None)['errors']
+ error_message = ', '.join([error['message'] for error in errors])
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
+ raise
+ return data
def _real_extract(self, url):
video_id = self._match_id(url)
- json_url = 'http://api.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
- response = self._download_json(
- json_url, video_id, 'Downloading video info',
- 'Unable to download info', fatal=False) or {}
- video_info = response.get('video') or {}
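+ # apiv2.vevo.com requires the OAuth token that _initialize_api fetches from http://www.vevo.com/auth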
+ self._initialize_api(video_id)
+
+ video_info = self._call_api(
+ 'video/%s' % video_id, video_id, 'Downloading api video info',
+ 'Failed to download video info')
+
+ video_versions = self._call_api(
+ 'video/%s/streams' % video_id, video_id,
+ 'Downloading video versions info',
+ 'Failed to download video versions info',
+ fatal=False)
+
+ # Some videos are only available via webpage (e.g.
+ # https://github.com/rg3/youtube-dl/issues/9366)
+ if not video_versions:
+ webpage = self._download_webpage(url, video_id)
+ json_data = self._extract_json(webpage, video_id)
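+ # The initial store exposes streams either under 'default' or in the 'apollo' data cache, keyed '<video_id>.streams...'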
+ if 'streams' in json_data.get('default', {}):
+ video_versions = json_data['default']['streams'][video_id][0]
+ else:
+ video_versions = [
+ value
+ for key, value in json_data['apollo']['data'].items()
+ if key.startswith('%s.streams' % video_id)]
+
+ uploader = None
artist = None
featured_artist = None
- uploader = None
- view_count = None
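+ # The main (non-featured) artist doubles as the uploader; a 'Featured' role is tracked separately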
+ artists = video_info.get('artists')
+ for curr_artist in artists or []:
+ if curr_artist.get('role') == 'Featured':
+ featured_artist = curr_artist['name']
+ else:
+ artist = uploader = curr_artist['name']
+
formats = []
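+ # Each video version maps to a source/CDN in _VERSIONS; unknown version ids fall back to 'generic'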
+ for video_version in video_versions:
+ version = self._VERSIONS.get(video_version.get('version'), 'generic')
+ version_url = video_version.get('url')
+ if not version_url:
+ continue
- if not video_info:
- try:
- self._initialize_api(video_id)
- except ExtractorError:
- ytid = response.get('errorInfo', {}).get('ytid')
- if ytid:
- self.report_warning(
- 'Video is geoblocked, trying with the YouTube video %s' % ytid)
- return self.url_result(ytid, 'Youtube', ytid)
-
- raise
-
- video_info = self._call_api(
- 'video/%s' % video_id, video_id, 'Downloading api video info',
- 'Failed to download video info')
-
- video_versions = self._call_api(
- 'video/%s/streams' % video_id, video_id,
- 'Downloading video versions info',
- 'Failed to download video versions info',
- fatal=False)
-
- # Some videos are only available via webpage (e.g.
- # https://github.com/rg3/youtube-dl/issues/9366)
- if not video_versions:
- webpage = self._download_webpage(url, video_id)
- video_versions = self._extract_json(webpage, video_id, 'streams')[video_id][0]
-
- timestamp = parse_iso8601(video_info.get('releaseDate'))
- artists = video_info.get('artists')
- for curr_artist in artists:
- if curr_artist.get('role') == 'Featured':
- featured_artist = curr_artist['name']
- else:
- artist = uploader = curr_artist['name']
- view_count = int_or_none(video_info.get('views', {}).get('total'))
-
- for video_version in video_versions:
- version = self._VERSIONS.get(video_version['version'])
- version_url = video_version.get('url')
- if not version_url:
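+ # Pick the handler from the stream URL: skip Smooth Streaming (.ism), extract DASH (.mpd) and HLS (.m3u8) manifests, and treat everything else as progressive HTTP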
+ if '.ism' in version_url:
+ continue
+ elif '.mpd' in version_url:
+ formats.extend(self._extract_mpd_formats(
+ version_url, video_id, mpd_id='dash-%s' % version,
+ note='Downloading %s MPD information' % version,
+ errnote='Failed to download %s MPD information' % version,
+ fatal=False))
+ elif '.m3u8' in version_url:
+ formats.extend(self._extract_m3u8_formats(
+ version_url, video_id, 'mp4', 'm3u8_native',
+ m3u8_id='hls-%s' % version,
+ note='Downloading %s m3u8 information' % version,
+ errnote='Failed to download %s m3u8 information' % version,
+ fatal=False))
+ else:
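+ # Progressive URLs encode resolution, codecs and bitrates in the file name, e.g. (illustrative) ..._1280x720_h264_2000_aac_128.mp4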
+ m = re.search(r'''(?xi)
+ _(?P<width>[0-9]+)x(?P<height>[0-9]+)
+ _(?P<vcodec>[a-z0-9]+)
+ _(?P<vbr>[0-9]+)
+ _(?P<acodec>[a-z0-9]+)
+ _(?P<abr>[0-9]+)
+ \.(?P<ext>[a-z0-9]+)''', version_url)
+ if not m:
continue
- if '.ism' in version_url:
- continue
- elif '.mpd' in version_url:
- formats.extend(self._extract_mpd_formats(
- version_url, video_id, mpd_id='dash-%s' % version,
- note='Downloading %s MPD information' % version,
- errnote='Failed to download %s MPD information' % version,
- fatal=False))
- elif '.m3u8' in version_url:
- formats.extend(self._extract_m3u8_formats(
- version_url, video_id, 'mp4', 'm3u8_native',
- m3u8_id='hls-%s' % version,
- note='Downloading %s m3u8 information' % version,
- errnote='Failed to download %s m3u8 information' % version,
- fatal=False))
- else:
- m = re.search(r'''(?xi)
- _(?P<width>[0-9]+)x(?P<height>[0-9]+)
- _(?P<vcodec>[a-z0-9]+)
- _(?P<vbr>[0-9]+)
- _(?P<acodec>[a-z0-9]+)
- _(?P<abr>[0-9]+)
- \.(?P<ext>[a-z0-9]+)''', version_url)
- if not m:
- continue
-
- formats.append({
- 'url': version_url,
- 'format_id': 'http-%s-%s' % (version, video_version['quality']),
- 'vcodec': m.group('vcodec'),
- 'acodec': m.group('acodec'),
- 'vbr': int(m.group('vbr')),
- 'abr': int(m.group('abr')),
- 'ext': m.group('ext'),
- 'width': int(m.group('width')),
- 'height': int(m.group('height')),
- })
- else:
- timestamp = int_or_none(self._search_regex(
- r'/Date\((\d+)\)/',
- video_info['releaseDate'], 'release date', fatal=False),
- scale=1000)
- artists = video_info.get('mainArtists')
- if artists:
- artist = uploader = artists[0]['artistName']
-
- featured_artists = video_info.get('featuredArtists')
- if featured_artists:
- featured_artist = featured_artists[0]['artistName']
-
- smil_parsed = False
- for video_version in video_info['videoVersions']:
- version = self._VERSIONS.get(video_version['version'])
- if version == 'youtube':
- continue
- else:
- source_type = self._SOURCE_TYPES.get(video_version['sourceType'])
- renditions = compat_etree_fromstring(video_version['data'])
- if source_type == 'http':
- for rend in renditions.findall('rendition'):
- attr = rend.attrib
- formats.append({
- 'url': attr['url'],
- 'format_id': 'http-%s-%s' % (version, attr['name']),
- 'height': int_or_none(attr.get('frameheight')),
- 'width': int_or_none(attr.get('frameWidth')),
- 'tbr': int_or_none(attr.get('totalBitrate')),
- 'vbr': int_or_none(attr.get('videoBitrate')),
- 'abr': int_or_none(attr.get('audioBitrate')),
- 'vcodec': attr.get('videoCodec'),
- 'acodec': attr.get('audioCodec'),
- })
- elif source_type == 'hls':
- formats.extend(self._extract_m3u8_formats(
- renditions.find('rendition').attrib['url'], video_id,
- 'mp4', 'm3u8_native', m3u8_id='hls-%s' % version,
- note='Downloading %s m3u8 information' % version,
- errnote='Failed to download %s m3u8 information' % version,
- fatal=False))
- elif source_type == 'smil' and version == 'level3' and not smil_parsed:
- formats.extend(self._extract_smil_formats(
- renditions.find('rendition').attrib['url'], video_id, False))
- smil_parsed = True
+ formats.append({
+ 'url': version_url,
+ 'format_id': 'http-%s-%s' % (version, video_version['quality']),
+ 'vcodec': m.group('vcodec'),
+ 'acodec': m.group('acodec'),
+ 'vbr': int(m.group('vbr')),
+ 'abr': int(m.group('abr')),
+ 'ext': m.group('ext'),
+ 'width': int(m.group('width')),
+ 'height': int(m.group('height')),
+ })
self._sort_formats(formats)
track = video_info['title']
else:
age_limit = None
- duration = video_info.get('duration')
-
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': video_info.get('imageUrl') or video_info.get('thumbnailUrl'),
- 'timestamp': timestamp,
+ 'timestamp': parse_iso8601(video_info.get('releaseDate')),
'uploader': uploader,
- 'duration': duration,
- 'view_count': view_count,
+ 'duration': int_or_none(video_info.get('duration')),
+ 'view_count': int_or_none(video_info.get('views', {}).get('total')),
'age_limit': age_limit,
'track': track,
'artist': uploader,
if video_id:
return self.url_result('vevo:%s' % video_id, VevoIE.ie_key())
- playlists = self._extract_json(webpage, playlist_id, '%ss' % playlist_kind)
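+ # Playlists/videos sit under the store's 'default' branch, keyed by '<kind>s' ('videos' or 'playlists')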
+ playlists = self._extract_json(webpage, playlist_id)['default']['%ss' % playlist_kind]
playlist = (list(playlists.values())[0]
if playlist_kind == 'playlist' else playlists[playlist_id])