from __future__ import unicode_literals
import re
-import json
from .common import InfoExtractor
from ..utils import (
-    compat_urlparse,
-    get_element_by_attribute,
+    mimetype2ext,
+    qualities,
)
class ImdbIE(InfoExtractor):
    IE_NAME = 'imdb'
    IE_DESC = 'Internet Movie Database trailers'
-    _VALID_URL = r'http://(?:www|m)\.imdb\.com/video/imdb/vi(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www|m)\.imdb\.com/(?:video/[^/]+/|title/tt\d+.*?#lb-)vi(?P<id>\d+)'
-    _TEST = {
+    _TESTS = [{
        'url': 'http://www.imdb.com/video/imdb/vi2524815897',
-        'md5': '9f34fa777ade3a6e57a054fdbcb3a068',
        'info_dict': {
            'id': '2524815897',
            'ext': 'mp4',
            'title': 'Ice Age: Continental Drift Trailer (No. 2) - IMDb',
            'description': 'md5:9061c2219254e5d14e03c25c98e96a81',
        }
-    }
+    }, {
+        'url': 'http://www.imdb.com/video/_/vi2524815897',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.imdb.com/title/tt1667889/?ref_=ext_shr_eml_vi#lb-vi2524815897',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.imdb.com/title/tt1667889/#lb-vi2524815897',
+        'only_matching': True,
+    }]
    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
        webpage = self._download_webpage('http://www.imdb.com/video/imdb/vi%s' % video_id, video_id)
-        descr = get_element_by_attribute('itemprop', 'description', webpage)
-        available_formats = re.findall(
-            r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage,
-            flags=re.MULTILINE)
+        descr = self._html_search_regex(
+            r'(?s)<span itemprop="description">(.*?)</span>',
+            webpage, 'description', fatal=False)
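+        # the stream URLs are read from the dedicated player page fetched
+        # below, not from the overview page downloaded above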
+        player_url = 'http://www.imdb.com/video/imdb/vi%s/imdb/single' % video_id
+        player_page = self._download_webpage(
+            player_url, video_id, 'Downloading player page')
+        # the player page only contains the info for the default format; we
+        # have to fetch a separate page for each of the other formats
+        extra_formats = re.findall(r'href="(?P<url>%s.*?)".*?>(?P<name>.*?)<' % re.escape(player_url), player_page)
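+        # each match is a (per-format player URL, link text) pair; the link
+        # text is only used in the download progress note below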
+        format_pages = [
+            self._download_webpage(
+                f_url, video_id, 'Downloading info for %s format' % f_name)
+            for f_url, f_name in extra_formats]
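+        # also process the default player page itself so its format is kept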
+        format_pages.append(player_page)
+
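+        # qualities() ranks format labels by their position in this tuple
+        # (later entries are preferred); labels not listed here rank lowest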
+        quality = qualities(('SD', '480p', '720p', '1080p'))
        formats = []
-        for f_id, f_path in available_formats:
-            f_path = f_path.strip()
-            format_page = self._download_webpage(
-                compat_urlparse.urljoin(url, f_path),
-                'Downloading info for %s format' % f_id)
+        for format_page in format_pages:
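+            # each player page embeds its config as JSON inside a <script>
+            # tag of class "imdb-player-data"; pages with missing or broken
+            # data are skipped below instead of aborting the whole extraction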
            json_data = self._search_regex(
                r'<script[^>]+class="imdb-player-data"[^>]*?>(.*?)</script>',
                format_page, 'json data', flags=re.DOTALL)
-            info = json.loads(json_data)
-            format_info = info['videoPlayerObject']['video']
+            info = self._parse_json(json_data, video_id, fatal=False)
+            if not info:
+                continue
+            format_info = info.get('videoPlayerObject', {}).get('video', {})
+            if not format_info:
+                continue
+            video_info_list = format_info.get('videoInfoList')
+            if not video_info_list or not isinstance(video_info_list, list):
+                continue
+            video_info = video_info_list[0]
+            if not video_info or not isinstance(video_info, dict):
+                continue
+            video_url = video_info.get('videoUrl')
+            if not video_url:
+                continue
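+            # 'ffname' appears to be IMDb's own label for the format
+            # (e.g. '480p'); it doubles as the format_id and the quality key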
+            format_id = format_info.get('ffname')
            formats.append({
-                'format_id': f_id,
-                'url': format_info['url'],
+                'format_id': format_id,
+                'url': video_url,
+                'ext': mimetype2ext(video_info.get('videoMimeType')),
+                'quality': quality(format_id),
            })
+        self._sort_formats(formats)
        return {
            'id': video_id,
class ImdbListIE(InfoExtractor):
    IE_NAME = 'imdb:list'
    IE_DESC = 'Internet Movie Database lists'
-    _VALID_URL = r'http://www\.imdb\.com/list/(?P<id>[\da-zA-Z_-]{11})'
-
+    _VALID_URL = r'https?://www\.imdb\.com/list/(?P<id>[\da-zA-Z_-]{11})'
+    _TEST = {
+        'url': 'http://www.imdb.com/list/JFs9NWw6XI0',
+        'info_dict': {
+            'id': 'JFs9NWw6XI0',
+            'title': 'March 23, 2012 Releases',
+        },
+        'playlist_count': 7,
+    }
+
    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        list_id = mobj.group('id')
-
-        # RSS XML is sometimes malformed
-        rss = self._download_webpage('http://rss.imdb.com/list/%s' % list_id, list_id, 'Downloading list RSS')
-        list_title = self._html_search_regex(r'<title>(.*?)</title>', rss, 'list title')
-
-        # Export is independent of actual author_id, but returns 404 if no author_id is provided.
-        # However, passing dummy author_id seems to be enough.
-        csv = self._download_webpage('http://www.imdb.com/list/export?list_id=%s&author_id=ur00000000' % list_id,
-                                     list_id, 'Downloading list CSV')
-
-        entries = []
-        for item in csv.split('\n')[1:]:
-            cols = item.split(',')
-            if len(cols) < 2:
-                continue
-            item_id = cols[1][1:-1]
-            if item_id.startswith('vi'):
-                entries.append(self.url_result('http://www.imdb.com/video/imdb/%s' % item_id, 'Imdb'))
-
+        list_id = self._match_id(url)
+        webpage = self._download_webpage(url, list_id)
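+        # instead of the RSS/CSV exports used before, collect the trailer
+        # links that the list page itself marks with data-type="playlist"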
+        entries = [
+            self.url_result('http://www.imdb.com' + m, 'Imdb')
+            for m in re.findall(r'href="(/video/imdb/vi[^"]+)"\s+data-type="playlist"', webpage)]
+
+        list_title = self._html_search_regex(
+            r'<h1 class="header">(.*?)</h1>', webpage, 'list title')
+
        return self.playlist_result(entries, list_id, list_title)