- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- json_url = 'http://www.vevo.com/data/video/%s' % video_id
- base_url = 'http://smil.lvl3.vevo.com'
- videos_url = '%s/Video/V2/VFILE/%s/%sr.smil' % (base_url, video_id, video_id.lower())
- info_json = self._download_webpage(json_url, video_id, u'Downloading json info')
- links_webpage = self._download_webpage(videos_url, video_id, u'Downloading videos urls')
-
- self.report_extraction(video_id)
- video_info = json.loads(info_json)
- m_urls = list(re.finditer(r'<video src="(?P<ext>.*?):/?(?P<url>.*?)"', links_webpage))
- if m_urls is None or len(m_urls) == 0:
- raise ExtractorError(u'Unable to extract video url')
- # They are sorted from worst to best quality
- m_url = m_urls[-1]
- video_url = base_url + '/' + m_url.group('url')
- ext = m_url.group('ext')
-
- return {'url': video_url,
- 'ext': ext,
- 'id': video_id,
- 'title': video_info['title'],
- 'thumbnail': video_info['img'],
- 'upload_date': video_info['launchDate'].replace('/',''),
- 'uploader': video_info['Artists'][0]['title'],
- }
+ video_id = self._match_id(url)
+
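+        # The legacy player endpoint looks the video up by its isrc parameter,
+        # which here is simply the extracted video id.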
+ json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
+ response = self._download_json(
+ json_url, video_id, 'Downloading video info', 'Unable to download info')
+ video_info = response.get('video') or {}
+ video_versions = video_info.get('videoVersions')
+ uploader = None
+ timestamp = None
+ view_count = None
+ formats = []
+
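+        # If the response carries no 'video' object, the legacy endpoint cannot
+        # serve this clip: either hand the extraction off to YouTube or retry
+        # through the versioned API below.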
+ if not video_info:
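+            # A statusCode of 909 appears to mean the clip is only available
+            # through the newer API; any other code is treated as an error,
+            # with a YouTube fallback for geoblocked videos.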
+ if response.get('statusCode') != 909:
+ ytid = response.get('errorInfo', {}).get('ytid')
+ if ytid:
+ self.report_warning(
+ 'Video is geoblocked, trying with the YouTube video %s' % ytid)
+ return self.url_result(ytid, 'Youtube', ytid)
+
+ if 'statusMessage' in response:
+ raise ExtractorError('%s said: %s' % (
+ self.IE_NAME, response['statusMessage']), expected=True)
+ raise ExtractorError('Unable to extract videos')
+
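+            # Newer API path: initialize it first, then fetch the video
+            # metadata and the list of stream variants in separate calls.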
+ self._initialize_api(video_id)
+ video_info = self._call_api(
+ 'video/%s' % video_id, video_id, 'Downloading api video info',
+ 'Failed to download video info')
+
+ video_versions = self._call_api(
+ 'video/%s/streams' % video_id, video_id,
+ 'Downloading video versions info',
+ 'Failed to download video versions info')
+
+ timestamp = parse_iso8601(video_info.get('releaseDate'))
+ artists = video_info.get('artists')
+ if artists:
+ uploader = artists[0]['name']
+ view_count = int_or_none(video_info.get('views', {}).get('total'))
+
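+            # Pick formats by URL type: Smooth Streaming (.ism) is skipped,
+            # DASH (.mpd) and HLS (.m3u8) manifests are expanded into
+            # individual formats, and progressive HTTP URLs are parsed for the
+            # metadata encoded in their file names below.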
+ for video_version in video_versions:
+ version = self._VERSIONS.get(video_version['version'])
+ version_url = video_version.get('url')
+ if not version_url:
+ continue
+
+ if '.ism' in version_url:
+ continue
+ elif '.mpd' in version_url:
+ formats.extend(self._extract_mpd_formats(
+ version_url, video_id, mpd_id='dash-%s' % version,
+ note='Downloading %s MPD information' % version,
+ errnote='Failed to download %s MPD information' % version,
+ fatal=False))
+ elif '.m3u8' in version_url:
+ formats.extend(self._extract_m3u8_formats(
+ version_url, video_id, 'mp4', 'm3u8_native',
+ m3u8_id='hls-%s' % version,
+ note='Downloading %s m3u8 information' % version,
+ errnote='Failed to download %s m3u8 information' % version,
+ fatal=False))
+ else:
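+                    # Progressive URLs encode their properties in the file name
+                    # as _<width>x<height>_<vcodec>_<vbr>_<acodec>_<abr>.<ext>,
+                    # e.g. (hypothetically) ..._1280x720_h264_2000_aac_128.mp4.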
+ m = re.search(r'''(?xi)
+ _(?P<width>[0-9]+)x(?P<height>[0-9]+)
+ _(?P<vcodec>[a-z0-9]+)
+ _(?P<vbr>[0-9]+)
+ _(?P<acodec>[a-z0-9]+)
+ _(?P<abr>[0-9]+)
+ \.(?P<ext>[a-z0-9]+)''', version_url)
+ if not m:
+ continue
+
+ formats.append({
+ 'url': version_url,
+ 'format_id': 'http-%s-%s' % (version, video_version['quality']),
+ 'vcodec': m.group('vcodec'),
+ 'acodec': m.group('acodec'),
+ 'vbr': int(m.group('vbr')),
+ 'abr': int(m.group('abr')),
+ 'ext': m.group('ext'),
+ 'width': int(m.group('width')),
+ 'height': int(m.group('height')),
+ })
+ else:
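+            # Legacy response: releaseDate is a /Date(milliseconds)/ string and
+            # each videoVersion embeds its renditions as an XML blob in 'data'.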
+ timestamp = int_or_none(self._search_regex(
+ r'/Date\((\d+)\)/',
+ video_info['releaseDate'], 'release date', fatal=False),
+ scale=1000)
+ artists = video_info.get('mainArtists')
+ if artists:
+ uploader = artists[0]['artistName']
+
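+            # The level3 SMIL manifest is parsed at most once; smil_parsed
+            # guards against parsing it again for later versions.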
+ smil_parsed = False
+ for video_version in video_info['videoVersions']:
+ version = self._VERSIONS.get(video_version['version'])
+ if version == 'youtube':
+ continue
+ else:
+ source_type = self._SOURCE_TYPES.get(video_version['sourceType'])
+ renditions = compat_etree_fromstring(video_version['data'])
+ if source_type == 'http':
+ for rend in renditions.findall('rendition'):
+ attr = rend.attrib
+ formats.append({
+ 'url': attr['url'],
+ 'format_id': 'http-%s-%s' % (version, attr['name']),
+ 'height': int_or_none(attr.get('frameheight')),
+ 'width': int_or_none(attr.get('frameWidth')),
+ 'tbr': int_or_none(attr.get('totalBitrate')),
+ 'vbr': int_or_none(attr.get('videoBitrate')),
+ 'abr': int_or_none(attr.get('audioBitrate')),
+ 'vcodec': attr.get('videoCodec'),
+ 'acodec': attr.get('audioCodec'),
+ })
+ elif source_type == 'hls':
+ formats.extend(self._extract_m3u8_formats(
+ renditions.find('rendition').attrib['url'], video_id,
+ 'mp4', 'm3u8_native', m3u8_id='hls-%s' % version,
+ note='Downloading %s m3u8 information' % version,
+ errnote='Failed to download %s m3u8 information' % version,
+ fatal=False))
+ elif source_type == 'smil' and version == 'level3' and not smil_parsed:
+ formats.extend(self._extract_smil_formats(
+ renditions.find('rendition').attrib['url'], video_id, False))
+ smil_parsed = True
+ self._sort_formats(formats)
+
+ title = video_info['title']
+
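+        # Map the explicit-content flag onto an age limit, leaving it unset
+        # when the flag is missing.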
+ is_explicit = video_info.get('isExplicit')
+ if is_explicit is True:
+ age_limit = 18
+ elif is_explicit is False:
+ age_limit = 0
+ else:
+ age_limit = None
+
+ duration = video_info.get('duration')
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': video_info.get('imageUrl') or video_info.get('thumbnailUrl'),
+ 'timestamp': timestamp,
+ 'uploader': uploader,
+ 'duration': duration,
+ 'view_count': view_count,
+ 'age_limit': age_limit,
+ }