New upstream version 2019.09.01

diff --git a/youtube_dl/extractor/safari.py b/youtube_dl/extractor/safari.py
index cc6698f882a5859883372b32dc71578ebe37da8a..bd9ee1647d47d47bfc8d8341139c2bf953ecf158 100644
@@ -1,24 +1,27 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import json
 import re
 
 from .common import InfoExtractor
 
+from ..compat import (
+    compat_parse_qs,
+    compat_str,
+    compat_urlparse,
+)
 from ..utils import (
     ExtractorError,
-    sanitized_Request,
-    std_headers,
-    urlencode_postdata,
     update_url_query,
 )
 
 
 class SafariBaseIE(InfoExtractor):
-    _LOGIN_URL = 'https://www.safaribooksonline.com/accounts/login/'
+    _LOGIN_URL = 'https://learning.oreilly.com/accounts/login/'
     _NETRC_MACHINE = 'safari'
 
-    _API_BASE = 'https://www.safaribooksonline.com/api/v1'
+    _API_BASE = 'https://learning.oreilly.com/api/v1'
     _API_FORMAT = 'json'
 
     LOGGED_IN = False
@@ -27,54 +30,70 @@ class SafariBaseIE(InfoExtractor):
         self._login()
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
-        headers = std_headers.copy()
-        if 'Referer' not in headers:
-            headers['Referer'] = self._LOGIN_URL
-
-        login_page = self._download_webpage(
-            self._LOGIN_URL, None, 'Downloading login form', headers=headers)
+        _, urlh = self._download_webpage_handle(
+            'https://learning.oreilly.com/accounts/login-check/', None,
+            'Downloading login page')
 
-        def is_logged(webpage):
-            return any(re.search(p, webpage) for p in (
-                r'href=["\']/accounts/logout/', r'>Sign Out<'))
+        def is_logged(urlh):
+            return 'learning.oreilly.com/home/' in compat_str(urlh.geturl())
 
-        if is_logged(login_page):
+        if is_logged(urlh):
             self.LOGGED_IN = True
             return
 
-        csrf = self._html_search_regex(
-            r"name='csrfmiddlewaretoken'\s+value='([^']+)'",
-            login_page, 'csrf token')
+        redirect_url = compat_str(urlh.geturl())
+        parsed_url = compat_urlparse.urlparse(redirect_url)
+        qs = compat_parse_qs(parsed_url.query)
+        next_uri = compat_urlparse.urljoin(
+            'https://api.oreilly.com', qs['next'][0])
+
+        auth, urlh = self._download_json_handle(
+            'https://www.oreilly.com/member/auth/login/', None, 'Logging in',
+            data=json.dumps({
+                'email': username,
+                'password': password,
+                'redirect_uri': next_uri,
+            }).encode(), headers={
+                'Content-Type': 'application/json',
+                'Referer': redirect_url,
+            }, expected_status=400)
+
+        credentials = auth.get('credentials')
+        if (not auth.get('logged_in') and not auth.get('redirect_uri')
+                and credentials):
+            raise ExtractorError(
+                'Unable to login: %s' % credentials, expected=True)
 
-        login_form = {
-            'csrfmiddlewaretoken': csrf,
-            'email': username,
-            'password1': password,
-            'login': 'Sign In',
-            'next': '',
-        }
+        # oreilly serves two identical copies of each of the following cookies
+        # in the Set-Cookie header and expects the first one to actually be set
+        for cookie in ('groot_sessionid', 'orm-jwt', 'orm-rt'):
+            self._apply_first_set_cookie_header(urlh, cookie)
 
-        request = sanitized_Request(
-            self._LOGIN_URL, urlencode_postdata(login_form), headers=headers)
-        login_page = self._download_webpage(
-            request, None, 'Logging in')
+        _, urlh = self._download_webpage_handle(
+            auth.get('redirect_uri') or next_uri, None, 'Completing login',)
 
-        if not is_logged(login_page):
-            raise ExtractorError(
-                'Login failed; make sure your credentials are correct and try again.',
-                expected=True)
+        if is_logged(urlh):
+            self.LOGGED_IN = True
+            return
 
-        self.LOGGED_IN = True
+        raise ExtractorError('Unable to log in')
 
 
 class SafariIE(SafariBaseIE):
     IE_NAME = 'safari'
     IE_DESC = 'safaribooksonline.com online video'
-    _VALID_URL = r'https?://(?:www\.)?safaribooksonline\.com/library/view/[^/]+/(?P<course_id>[^/]+)/(?P<part>[^/?#&]+)\.html'
+    _VALID_URL = r'''(?x)
+                        https?://
+                            (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/
+                            (?:
+                                library/view/[^/]+/(?P<course_id>[^/]+)/(?P<part>[^/?\#&]+)\.html|
+                                videos/[^/]+/[^/]+/(?P<reference_id>[^-]+-[^/?\#&]+)
+                            )
+                    '''
 
     _TESTS = [{
         'url': 'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/part00.html',
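
For context on the hunk above: the rewritten _login drops the old CSRF form post in favour of O'Reilly's JSON auth endpoint at www.oreilly.com/member/auth/login/. Below is a minimal standalone sketch of the same flow, with requests standing in for youtube-dl's download helpers; the duplicate Set-Cookie workaround handled by _apply_first_set_cookie_header is omitted here, so treat it as an illustration rather than a drop-in replacement.

    import json
    from urllib.parse import urlparse, parse_qs, urljoin

    import requests  # stand-in for youtube-dl's downloader machinery

    def oreilly_login(email, password):
        s = requests.Session()
        # Step 1: a logged-in session is redirected from login-check to /home/.
        r = s.get('https://learning.oreilly.com/accounts/login-check/')
        if 'learning.oreilly.com/home/' in r.url:
            return s  # already logged in
        # Step 2: the login redirect carries a ?next= parameter pointing at api.oreilly.com.
        next_uri = urljoin('https://api.oreilly.com',
                           parse_qs(urlparse(r.url).query)['next'][0])
        # Step 3: POST the credentials as JSON; on failure the response body's
        # 'credentials' field carries the error message.
        auth = s.post('https://www.oreilly.com/member/auth/login/',
                      data=json.dumps({'email': email, 'password': password,
                                       'redirect_uri': next_uri}),
                      headers={'Content-Type': 'application/json',
                               'Referer': r.url}).json()
        if not auth.get('logged_in') and not auth.get('redirect_uri') and auth.get('credentials'):
            raise RuntimeError('Unable to login: %s' % auth['credentials'])
        # Step 4: follow the returned redirect_uri to finish establishing the session.
        r = s.get(auth.get('redirect_uri') or next_uri)
        if 'learning.oreilly.com/home/' not in r.url:
            raise RuntimeError('Unable to log in')
        return s
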
@@ -94,22 +113,47 @@ class SafariIE(SafariBaseIE):
     }, {
         'url': 'https://www.safaribooksonline.com/library/view/learning-path-red/9780134664057/RHCE_Introduction.html',
         'only_matching': True,
+    }, {
+        'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314/9780134217314-PYMC_13_00',
+        'only_matching': True,
+    }, {
+        'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838/9780133392838-00_SeriesIntro',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/00_SeriesIntro.html',
+        'only_matching': True,
     }]
 
+    _PARTNER_ID = '1926081'
+    _UICONF_ID = '29375172'
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = '%s/%s' % (mobj.group('course_id'), mobj.group('part'))
-
-        webpage = self._download_webpage(url, video_id)
-        reference_id = self._search_regex(
-            r'data-reference-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
-            webpage, 'kaltura reference id', group='id')
-        partner_id = self._search_regex(
-            r'data-partner-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
-            webpage, 'kaltura widget id', group='id')
-        ui_id = self._search_regex(
-            r'data-ui-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
-            webpage, 'kaltura uiconf id', group='id')
+
+        reference_id = mobj.group('reference_id')
+        if reference_id:
+            video_id = reference_id
+            partner_id = self._PARTNER_ID
+            ui_id = self._UICONF_ID
+        else:
+            video_id = '%s-%s' % (mobj.group('course_id'), mobj.group('part'))
+
+            webpage, urlh = self._download_webpage_handle(url, video_id)
+
+            mobj = re.match(self._VALID_URL, urlh.geturl())
+            reference_id = mobj.group('reference_id')
+            if not reference_id:
+                reference_id = self._search_regex(
+                    r'data-reference-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
+                    webpage, 'kaltura reference id', group='id')
+            partner_id = self._search_regex(
+                r'data-partner-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
+                webpage, 'kaltura widget id', default=self._PARTNER_ID,
+                group='id')
+            ui_id = self._search_regex(
+                r'data-ui-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
+                webpage, 'kaltura uiconf id', default=self._UICONF_ID,
+                group='id')
 
         query = {
             'wid': '_%s' % partner_id,
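
The reference_id/course_id branch above mirrors the two URL shapes the new SafariIE._VALID_URL accepts: /videos/ URLs already embed a Kaltura reference id, while /library/view/ URLs only yield a course id and part name and rely on the page (or its redirect) for the reference id. A quick illustration of how the named groups resolve, using the pattern from the hunk above and URLs from the test list:

    import re

    # pattern copied from SafariIE._VALID_URL above
    _VALID_URL = r'''(?x)
                        https?://
                            (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/
                            (?:
                                library/view/[^/]+/(?P<course_id>[^/]+)/(?P<part>[^/?\#&]+)\.html|
                                videos/[^/]+/[^/]+/(?P<reference_id>[^-]+-[^/?\#&]+)
                            )
                    '''

    m = re.match(_VALID_URL, 'https://learning.oreilly.com/videos/'
                 'hadoop-fundamentals-livelessons/9780133392838/9780133392838-00_SeriesIntro')
    assert m.group('reference_id') == '9780133392838-00_SeriesIntro'

    m = re.match(_VALID_URL, 'https://www.oreilly.com/library/view/'
                 'hadoop-fundamentals-livelessons/9780133392838/00_SeriesIntro.html')
    assert m.group('reference_id') is None
    assert (m.group('course_id'), m.group('part')) == ('9780133392838', '00_SeriesIntro')
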
@@ -134,7 +178,7 @@ class SafariIE(SafariBaseIE):
 
 class SafariApiIE(SafariBaseIE):
     IE_NAME = 'safari:api'
-    _VALID_URL = r'https?://(?:www\.)?safaribooksonline\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html'
+    _VALID_URL = r'https?://(?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html'
 
     _TESTS = [{
         'url': 'https://www.safaribooksonline.com/api/v1/book/9780133392838/chapter/part00.html',
@@ -159,10 +203,15 @@ class SafariCourseIE(SafariBaseIE):
     _VALID_URL = r'''(?x)
                     https?://
                         (?:
-                            (?:www\.)?safaribooksonline\.com/(?:library/view/[^/]+|api/v1/book)|
+                            (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/
+                            (?:
+                                library/view/[^/]+|
+                                api/v1/book|
+                                videos/[^/]+
+                            )|
                             techbus\.safaribooksonline\.com
                         )
-                        /(?P<id>[^/]+)/?(?:[#?]|$)
+                        /(?P<id>[^/]+)
                     '''
 
     _TESTS = [{
@@ -179,8 +228,22 @@ class SafariCourseIE(SafariBaseIE):
     }, {
         'url': 'http://techbus.safaribooksonline.com/9780134426365',
         'only_matching': True,
+    }, {
+        'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314',
+        'only_matching': True,
+    }, {
+        'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/',
+        'only_matching': True,
     }]
 
+    @classmethod
+    def suitable(cls, url):
+        return (False if SafariIE.suitable(url) or SafariApiIE.suitable(url)
+                else super(SafariCourseIE, cls).suitable(url))
+
     def _real_extract(self, url):
         course_id = self._match_id(url)
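
The suitable() override added above is needed because dropping the trailing `/?(?:[#?]|$)` anchor turns the course pattern into a prefix match, so it would otherwise also claim single-video and API chapter URLs; deferring to SafariIE and SafariApiIE keeps those with the more specific extractors. A small sanity check against the test URLs from this diff (assumes it is run from a youtube-dl checkout):

    from youtube_dl.extractor.safari import SafariIE, SafariCourseIE

    video_url = ('https://learning.oreilly.com/videos/'
                 'hadoop-fundamentals-livelessons/9780133392838/9780133392838-00_SeriesIntro')
    course_url = ('https://learning.oreilly.com/videos/'
                  'hadoop-fundamentals-livelessons/9780133392838')

    # The course regex alone would match the single-video URL as a prefix,
    # but the override hands it back to SafariIE.
    assert SafariIE.suitable(video_url)
    assert not SafariCourseIE.suitable(video_url)

    # Plain course URLs are not claimed by SafariIE, so SafariCourseIE takes them.
    assert not SafariIE.suitable(course_url)
    assert SafariCourseIE.suitable(course_url)
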