diff --git a/youtube_dl/extractor/ivi.py b/youtube_dl/extractor/ivi.py
index 75b543b7cf8ed443bb98f3cd5c492e1c629c28a3..b5a740a01e1d360ca667869edef88f85e3559842 100644
--- a/youtube_dl/extractor/ivi.py
+++ b/youtube_dl/extractor/ivi.py
@@ -1,20 +1,26 @@
-# encoding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 
-import re
 import json
+import re
+import sys
 
 from .common import InfoExtractor
 from ..utils import (
-    compat_urllib_request,
     ExtractorError,
+    int_or_none,
+    qualities,
 )
 
 
 class IviIE(InfoExtractor):
     IE_DESC = 'ivi.ru'
     IE_NAME = 'ivi'
-    _VALID_URL = r'https?://(?:www\.)?ivi\.ru/(?:watch/(?:[^/]+/)?|video/player\?.*?videoId=)(?P<videoid>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?ivi\.(?:ru|tv)/(?:watch/(?:[^/]+/)?|video/player\?.*?videoId=)(?P<id>\d+)'
+    _GEO_BYPASS = False
+    _GEO_COUNTRIES = ['RU']
+    _LIGHT_KEY = b'\xf1\x02\x32\xb7\xbc\x5c\x7a\xe8\xf7\x96\xc1\x33\x2b\x27\xa1\x8c'
+    _LIGHT_URL = 'https://api.ivi.ru/light/'
 
     _TESTS = [
         # Single movie
@@ -27,98 +33,183 @@ class IviIE(InfoExtractor):
                 'title': 'Иван Васильевич меняет профессию',
                 'description': 'md5:b924063ea1677c8fe343d8a72ac2195f',
                 'duration': 5498,
-                'thumbnail': 'http://thumbs.ivi.ru/f20.vcp.digitalaccess.ru/contents/d/1/c3c885163a082c29bceeb7b5a267a6.jpg',
+                'thumbnail': r're:^https?://.*\.jpg$',
             },
             'skip': 'Only works from Russia',
         },
-        # Serial's serie
+        # Serial's series
         {
             'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549',
             'md5': '221f56b35e3ed815fde2df71032f4b3e',
             'info_dict': {
                 'id': '9549',
                 'ext': 'mp4',
-                'title': 'Двое из ларца - Серия 1',
+                'title': 'Двое из ларца - Дело Гольдберга (1 часть)',
+                'series': 'Двое из ларца',
+                'season': 'Сезон 1',
+                'season_number': 1,
+                'episode': 'Дело Гольдберга (1 часть)',
+                'episode_number': 1,
                 'duration': 2655,
-                'thumbnail': 'http://thumbs.ivi.ru/f15.vcp.digitalaccess.ru/contents/8/4/0068dc0677041f3336b7c2baad8fc0.jpg',
+                'thumbnail': r're:^https?://.*\.jpg$',
             },
             'skip': 'Only works from Russia',
-         }
+        },
+        {
+            # with MP4-HD720 format
+            'url': 'http://www.ivi.ru/watch/146500',
+            'md5': 'd63d35cdbfa1ea61a5eafec7cc523e1e',
+            'info_dict': {
+                'id': '146500',
+                'ext': 'mp4',
+                'title': 'Кукла',
+                'description': 'md5:ffca9372399976a2d260a407cc74cce6',
+                'duration': 5599,
+                'thumbnail': r're:^https?://.*\.jpg$',
+            },
+            'skip': 'Only works from Russia',
+        },
+        {
+            'url': 'https://www.ivi.tv/watch/33560/',
+            'only_matching': True,
+        },
     ]
 
     # Sorted by quality
-    _known_formats = ['MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi', 'MP4-SHQ']
+    _KNOWN_FORMATS = (
+        'MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi',
+        'MP4-SHQ', 'MP4-HD720', 'MP4-HD1080')
 
-    # Sorted by size
-    _known_thumbnails = ['Thumb-120x90', 'Thumb-160', 'Thumb-640x480']
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
 
-    def _extract_description(self, html):
-        m = re.search(r'<meta name="description" content="(?P<description>[^"]+)"/>', html)
-        return m.group('description') if m is not None else None
+        data = json.dumps({
+            'method': 'da.content.get',
+            'params': [
+                video_id, {
+                    'site': 's%d',
+                    'referrer': 'http://www.ivi.ru/watch/%s' % video_id,
+                    'contentid': video_id
+                }
+            ]
+        })
 
-    def _extract_comment_count(self, html):
-        m = re.search('(?s)<a href="#" id="view-comments" class="action-button dim gradient">\s*Комментарии:\s*(?P<commentcount>\d+)\s*</a>', html)
-        return int(m.group('commentcount')) if m is not None else 0
+        bundled = hasattr(sys, 'frozen')
 
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
+        for site in (353, 183):
+            content_data = (data % site).encode()
+            if site == 353:
+                if bundled:
+                    continue
+                try:
+                    from Cryptodome.Cipher import Blowfish
+                    from Cryptodome.Hash import CMAC
+                    pycryptodomex_found = True
+                except ImportError:
+                    pycryptodomex_found = False
+                    continue
 
-        api_url = 'http://api.digitalaccess.ru/api/json/'
+                timestamp = (self._download_json(
+                    self._LIGHT_URL, video_id,
+                    'Downloading timestamp JSON', data=json.dumps({
+                        'method': 'da.timestamp.get',
+                        'params': []
+                    }).encode(), fatal=False) or {}).get('result')
+                if not timestamp:
+                    continue
 
-        data = {'method': 'da.content.get',
-                'params': [video_id, {'site': 's183',
-                                      'referrer': 'http://www.ivi.ru/watch/%s' % video_id,
-                                      'contentid': video_id
-                                      }
-                           ]
+                query = {
+                    'ts': timestamp,
+                    'sign': CMAC.new(self._LIGHT_KEY, timestamp.encode() + content_data, Blowfish).hexdigest(),
                 }
+            else:
+                query = {}
 
-        request = compat_urllib_request.Request(api_url, json.dumps(data))
-
-        video_json_page = self._download_webpage(request, video_id, 'Downloading video JSON')
-        video_json = json.loads(video_json_page)
+            video_json = self._download_json(
+                self._LIGHT_URL, video_id,
+                'Downloading video JSON', data=content_data, query=query)
 
-        if 'error' in video_json:
-            error = video_json['error']
-            if error['origin'] == 'NoRedisValidData':
-                raise ExtractorError('Video %s does not exist' % video_id, expected=True)
-            raise ExtractorError('Unable to download video %s: %s' % (video_id, error['message']), expected=True)
+            error = video_json.get('error')
+            if error:
+                origin = error.get('origin')
+                message = error.get('message') or error.get('user_message')
+                extractor_msg = 'Unable to download video %s'
+                if origin == 'NotAllowedForLocation':
+                    self.raise_geo_restricted(message, self._GEO_COUNTRIES)
+                elif origin == 'NoRedisValidData':
+                    extractor_msg = 'Video %s does not exist'
+                elif site == 353:
+                    continue
+                elif bundled:
+                    raise ExtractorError(
+                        'This feature does not work from bundled exe. Run youtube-dl from sources.',
+                        expected=True)
+                elif not pycryptodomex_found:
+                    raise ExtractorError(
+                        'pycryptodomex not found. Please install it.',
+                        expected=True)
+                elif message:
+                    extractor_msg += ': ' + message
+                raise ExtractorError(extractor_msg % video_id, expected=True)
+            else:
+                break
 
         result = video_json['result']
+        title = result['title']
 
-        formats = [{
-            'url': x['url'],
-            'format_id': x['content_format'],
-            'preference': self._known_formats.index(x['content_format']),
-        } for x in result['files'] if x['content_format'] in self._known_formats]
+        quality = qualities(self._KNOWN_FORMATS)
 
+        formats = []
+        for f in result.get('files', []):
+            f_url = f.get('url')
+            content_format = f.get('content_format')
+            if not f_url or '-MDRM-' in content_format or '-FPS-' in content_format:
+                continue
+            formats.append({
+                'url': f_url,
+                'format_id': content_format,
+                'quality': quality(content_format),
+                'filesize': int_or_none(f.get('size_in_bytes')),
+            })
         self._sort_formats(formats)
 
-        if not formats:
-            raise ExtractorError('No media links available for %s' % video_id)
+        compilation = result.get('compilation')
+        episode = title if compilation else None
 
-        duration = result['duration']
-        compilation = result['compilation']
-        title = result['title']
+        title = '%s - %s' % (compilation, title) if compilation is not None else title
+
+        thumbnails = [{
+            'url': preview['url'],
+            'id': preview.get('content_format'),
+        } for preview in result.get('preview', []) if preview.get('url')]
+
+        webpage = self._download_webpage(url, video_id)
 
-        title = '%s - %s' % (compilation, title) if compilation is not None else title  
+        season = self._search_regex(
+            r'<li[^>]+class="season active"[^>]*><a[^>]+>([^<]+)',
+            webpage, 'season', default=None)
+        season_number = int_or_none(self._search_regex(
+            r'<li[^>]+class="season active"[^>]*><a[^>]+data-season(?:-index)?="(\d+)"',
+            webpage, 'season number', default=None))
 
-        previews = result['preview']
-        previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
-        thumbnail = previews[-1]['url'] if len(previews) > 0 else None
+        episode_number = int_or_none(self._search_regex(
+            r'[^>]+itemprop="episode"[^>]*>\s*<meta[^>]+itemprop="episodeNumber"[^>]+content="(\d+)',
+            webpage, 'episode number', default=None))
 
-        video_page = self._download_webpage(url, video_id, 'Downloading video page')
-        description = self._extract_description(video_page)
-        comment_count = self._extract_comment_count(video_page)
+        description = self._og_search_description(webpage, default=None) or self._html_search_meta(
+            'description', webpage, 'description', default=None)
 
         return {
             'id': video_id,
             'title': title,
-            'thumbnail': thumbnail,
+            'series': compilation,
+            'season': season,
+            'season_number': season_number,
+            'episode': episode,
+            'episode_number': episode_number,
+            'thumbnails': thumbnails,
             'description': description,
-            'duration': duration,
-            'comment_count': comment_count,
+            'duration': int_or_none(result.get('duration')),
             'formats': formats,
         }
 
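For reference, the site 353 branch added above signs each da.content.get payload with a Blowfish-based CMAC before querying the light API. The following is a minimal standalone sketch of that signing step, assuming pycryptodomex is installed; the timestamp value is a placeholder for what da.timestamp.get would normally return:

import json

from Cryptodome.Cipher import Blowfish
from Cryptodome.Hash import CMAC

LIGHT_KEY = b'\xf1\x02\x32\xb7\xbc\x5c\x7a\xe8\xf7\x96\xc1\x33\x2b\x27\xa1\x8c'

video_id = '9549'
content_data = json.dumps({
    'method': 'da.content.get',
    'params': [
        video_id, {
            'site': 's353',
            'referrer': 'http://www.ivi.ru/watch/%s' % video_id,
            'contentid': video_id,
        }
    ]
}).encode()

timestamp = '1577836800'  # placeholder; the real value comes from da.timestamp.get
# CMAC over timestamp + payload, with Blowfish as the underlying block cipher;
# the hex digest is sent as the 'sign' query parameter next to 'ts'.
sign = CMAC.new(LIGHT_KEY, timestamp.encode() + content_data, Blowfish).hexdigest()
query = {'ts': timestamp, 'sign': sign}
print(query)

With that query the same payload is posted to https://api.ivi.ru/light/; if the signed request fails, the extractor falls back to the unsigned site 183 request, as the loop above shows.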
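The format loop above also swaps the old _known_formats.index() preference for the qualities() helper from youtube_dl.utils. A small illustration of how it orders the known ivi format names (later entries rank higher, unknown names map to -1):

from youtube_dl.utils import qualities

KNOWN_FORMATS = (
    'MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi',
    'MP4-SHQ', 'MP4-HD720', 'MP4-HD1080')

quality = qualities(KNOWN_FORMATS)  # returns a callable: name -> position in the tuple
print(quality('MP4-lo'))       # 3
print(quality('MP4-HD1080'))   # 8
print(quality('some-new-id'))  # -1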
@@ -144,25 +235,30 @@ class IviCompilationIE(InfoExtractor):
     }]
 
     def _extract_entries(self, html, compilation_id):
-        return [self.url_result('http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), 'Ivi')
-                for serie in re.findall(r'<strong><a href="/watch/%s/(\d+)">(?:[^<]+)</a></strong>' % compilation_id, html)]
+        return [
+            self.url_result(
+                'http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), IviIE.ie_key())
+            for serie in re.findall(
+                r'<a\b[^>]+\bhref=["\']/watch/%s/(\d+)["\']' % compilation_id, html)]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         compilation_id = mobj.group('compilationid')
         season_id = mobj.group('seasonid')
 
-        if season_id is not None: # Season link
-            season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
+        if season_id is not None:  # Season link
+            season_page = self._download_webpage(
+                url, compilation_id, 'Downloading season %s web page' % season_id)
             playlist_id = '%s/season%s' % (compilation_id, season_id)
             playlist_title = self._html_search_meta('title', season_page, 'title')
             entries = self._extract_entries(season_page, compilation_id)
-        else: # Compilation link            
+        else:  # Compilation link
             compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
             playlist_id = compilation_id
             playlist_title = self._html_search_meta('title', compilation_page, 'title')
-            seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
-            if len(seasons) == 0: # No seasons in this compilation
+            seasons = re.findall(
+                r'<a href="/watch/%s/season(\d+)' % compilation_id, compilation_page)
+            if not seasons:  # No seasons in this compilation
                 entries = self._extract_entries(compilation_page, compilation_id)
             else:
                 entries = []
@@ -172,4 +268,4 @@ class IviCompilationIE(InfoExtractor):
                         compilation_id, 'Downloading season %s web page' % season_id)
                     entries.extend(self._extract_entries(season_page, compilation_id))
 
-        return self.playlist_result(entries, playlist_id, playlist_title)
\ No newline at end of file
+        return self.playlist_result(entries, playlist_id, playlist_title)
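As a quick check of the loosened _extract_entries pattern above, which now matches any <a> tag whose href points at /watch/<compilation_id>/<episode_id> instead of requiring the old <strong> markup, here is a sketch against a made-up HTML snippet (the class attribute, link text and second episode id are illustrative only):

import re

compilation_id = 'dvoe_iz_lartsa'
html = (
    '<a class="episode" href="/watch/dvoe_iz_lartsa/9549">Episode 1</a>\n'
    "<a href='/watch/dvoe_iz_lartsa/9550'>Episode 2</a>"
)
print(re.findall(
    r'<a\b[^>]+\bhref=["\']/watch/%s/(\d+)["\']' % compilation_id, html))
# ['9549', '9550']

The ["\'] character class is there because the href value may be single- or double-quoted, and keying the pattern on compilation_id keeps links to other compilations on the same page from being picked up.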