diff --git a/youtube_dl/extractor/packtpub.py b/youtube_dl/extractor/packtpub.py
index bb668c99953254a0cb5e36389d9b97b4e21c67d8..11ad3b3b8367b1b3655d7297cd08386de45323aa 100644
--- a/youtube_dl/extractor/packtpub.py
+++ b/youtube_dl/extractor/packtpub.py
@@ -1,32 +1,33 @@
 from __future__ import unicode_literals
 
+import json
 import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_str,
+    # compat_str,
     compat_HTTPError,
 )
 from ..utils import (
     clean_html,
     ExtractorError,
-    remove_end,
+    # remove_end,
+    str_or_none,
     strip_or_none,
     unified_timestamp,
-    urljoin,
-    urlencode_postdata,
+    # urljoin,
 )
 
 
 class PacktPubBaseIE(InfoExtractor):
-    _PACKT_BASE = 'https://www.packtpub.com'
-    _MAPT_REST = '%s/mapt-rest' % _PACKT_BASE
+    # _PACKT_BASE = 'https://www.packtpub.com'
+    _STATIC_PRODUCTS_BASE = 'https://static.packt-cdn.com/products/'
 
 
 class PacktPubIE(PacktPubBaseIE):
-    _VALID_URL = r'https?://(?:www\.)?packtpub\.com/mapt/video/[^/]+/(?P<course_id>\d+)/(?P<chapter_id>\d+)/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<course_id>\d+)/(?P<chapter_id>[^/]+)/(?P<id>[^/]+)(?:/(?P<display_id>[^/?&#]+))?'
 
-    _TEST = {
+    _TESTS = [{
         'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215/20528/20530/Project+Intro',
         'md5': '1e74bd6cfd45d7d07666f4684ef58f70',
         'info_dict': {
@@ -37,95 +38,84 @@ class PacktPubIE(PacktPubBaseIE):
             'timestamp': 1490918400,
             'upload_date': '20170331',
         },
-    }
+    }, {
+        'url': 'https://subscription.packtpub.com/video/web_development/9781787122215/20528/20530/project-intro',
+        'only_matching': True,
+    }, {
+        'url': 'https://subscription.packtpub.com/video/programming/9781838988906/p1/video1_1/business-card-project',
+        'only_matching': True,
+    }]
     _NETRC_MACHINE = 'packtpub'
     _TOKEN = None
 
     def _real_initialize(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
-        webpage = self._download_webpage(self._PACKT_BASE, None)
-        login_form = self._form_hidden_inputs(
-            'packt-user-login-form', webpage)
-        login_form.update({
-            'email': username,
-            'password': password,
-        })
-        self._download_webpage(
-            self._PACKT_BASE, None, 'Logging in as %s' % username,
-            data=urlencode_postdata(login_form))
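+        # exchange the credentials for a bearer token via the services.packtpub.com
+        # auth-v1 endpoint; the token is returned under data.access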
         try:
             self._TOKEN = self._download_json(
-                '%s/users/tokens/sessions' % self._MAPT_REST, None,
-                'Downloading Authorization Token')['data']['token']
+                'https://services.packtpub.com/auth-v1/users/tokens', None,
+                'Downloading Authorization Token', data=json.dumps({
+                    'username': username,
+                    'password': password,
+                }).encode())['data']['access']
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code in (401, 404):
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 401, 404):
                 message = self._parse_json(e.cause.read().decode(), None)['message']
                 raise ExtractorError(message, expected=True)
             raise
 
-    def _handle_error(self, response):
-        if response.get('status') != 'success':
-            raise ExtractorError(
-                '%s said: %s' % (self.IE_NAME, response['message']),
-                expected=True)
-
-    def _download_json(self, *args, **kwargs):
-        response = super(PacktPubIE, self)._download_json(*args, **kwargs)
-        self._handle_error(response)
-        return response
-
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        course_id, chapter_id, video_id = mobj.group(
-            'course_id', 'chapter_id', 'id')
+        course_id, chapter_id, video_id, display_id = re.match(self._VALID_URL, url).groups()
 
         headers = {}
         if self._TOKEN:
-            headers['Authorization'] = self._TOKEN
-        video = self._download_json(
-            '%s/users/me/products/%s/chapters/%s/sections/%s'
-            % (self._MAPT_REST, course_id, chapter_id, video_id), video_id,
-            'Downloading JSON video', headers=headers)['data']
-
-        content = video.get('content')
-        if not content:
-            self.raise_login_required('This video is locked')
-
-        video_url = content['file']
+            headers['Authorization'] = 'Bearer ' + self._TOKEN
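+        # the products-v1 endpoint returns the direct stream URL in the 'data' field;
+        # an HTTP 400 response is treated as a locked video that requires login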
+        try:
+            video_url = self._download_json(
+                'https://services.packtpub.com/products-v1/products/%s/%s/%s' % (course_id, chapter_id, video_id), video_id,
+                'Downloading JSON video', headers=headers)['data']
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
+                self.raise_login_required('This video is locked')
+            raise
 
-        metadata = self._download_json(
-            '%s/products/%s/chapters/%s/sections/%s/metadata'
-            % (self._MAPT_REST, course_id, chapter_id, video_id),
-            video_id)['data']
+        # TODO: find a better way to avoid duplicating course requests
+        # metadata = self._download_json(
+        #     '%s/products/%s/chapters/%s/sections/%s/metadata'
+        #     % (self._MAPT_REST, course_id, chapter_id, video_id),
+        #     video_id)['data']
 
-        title = metadata['pageTitle']
-        course_title = metadata.get('title')
-        if course_title:
-            title = remove_end(title, ' - %s' % course_title)
-        timestamp = unified_timestamp(metadata.get('publicationDate'))
-        thumbnail = urljoin(self._PACKT_BASE, metadata.get('filepath'))
+        # title = metadata['pageTitle']
+        # course_title = metadata.get('title')
+        # if course_title:
+        #     title = remove_end(title, ' - %s' % course_title)
+        # timestamp = unified_timestamp(metadata.get('publicationDate'))
+        # thumbnail = urljoin(self._PACKT_BASE, metadata.get('filepath'))
 
         return {
             'id': video_id,
             'url': video_url,
-            'title': title,
-            'thumbnail': thumbnail,
-            'timestamp': timestamp,
+            'title': display_id or video_id,  # title,
+            # 'thumbnail': thumbnail,
+            # 'timestamp': timestamp,
         }
 
 
 class PacktPubCourseIE(PacktPubBaseIE):
-    _VALID_URL = r'(?P<url>https?://(?:www\.)?packtpub\.com/mapt/video/[^/]+/(?P<id>\d+))'
-    _TEST = {
+    _VALID_URL = r'(?P<url>https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<id>\d+))'
+    _TESTS = [{
         'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215',
         'info_dict': {
             'id': '9781787122215',
             'title': 'Learn Nodejs by building 12 projects [Video]',
+            'description': 'md5:489da8d953f416e51927b60a1c7db0aa',
         },
         'playlist_count': 90,
-    }
+    }, {
+        'url': 'https://subscription.packtpub.com/video/web_development/9781787122215',
+        'only_matching': True,
+    }]
 
     @classmethod
     def suitable(cls, url):
@@ -137,35 +127,38 @@ class PacktPubCourseIE(PacktPubBaseIE):
         url, course_id = mobj.group('url', 'id')
 
         course = self._download_json(
-            '%s/products/%s/metadata' % (self._MAPT_REST, course_id),
-            course_id)['data']
+            self._STATIC_PRODUCTS_BASE + '%s/toc' % course_id, course_id)
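+        # the summary document is fetched best-effort (fatal=False); it only supplies
+        # the playlist title/description and per-entry thumbnail/timestamp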
+        metadata = self._download_json(
+            self._STATIC_PRODUCTS_BASE + '%s/summary' % course_id,
+            course_id, fatal=False) or {}
 
         entries = []
-        for chapter_num, chapter in enumerate(course['tableOfContents'], 1):
-            if chapter.get('type') != 'chapter':
-                continue
-            children = chapter.get('children')
-            if not isinstance(children, list):
+        for chapter_num, chapter in enumerate(course['chapters'], 1):
+            chapter_id = str_or_none(chapter.get('id'))
+            sections = chapter.get('sections')
+            if not chapter_id or not isinstance(sections, list):
                 continue
             chapter_info = {
                 'chapter': chapter.get('title'),
                 'chapter_number': chapter_num,
-                'chapter_id': chapter.get('id'),
+                'chapter_id': chapter_id,
             }
-            for section in children:
-                if section.get('type') != 'section':
-                    continue
-                section_url = section.get('seoUrl')
-                if not isinstance(section_url, compat_str):
+            for section in sections:
+                section_id = str_or_none(section.get('id'))
+                if not section_id or section.get('contentType') != 'video':
                     continue
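+                # build a url_transparent entry so PacktPubIE resolves the actual stream URL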
                 entry = {
                     '_type': 'url_transparent',
-                    'url': urljoin(url + '/', section_url),
+                    'url': '/'.join([url, chapter_id, section_id]),
                     'title': strip_or_none(section.get('title')),
                     'description': clean_html(section.get('summary')),
+                    'thumbnail': metadata.get('coverImage'),
+                    'timestamp': unified_timestamp(metadata.get('publicationDate')),
                     'ie_key': PacktPubIE.ie_key(),
                 }
                 entry.update(chapter_info)
                 entries.append(entry)
 
-        return self.playlist_result(entries, course_id, course.get('title'))
+        return self.playlist_result(
+            entries, course_id, metadata.get('title'),
+            clean_html(metadata.get('about')))