Update upstream source from tag 'upstream/2020.03.24'
diff --git a/youtube_dl/extractor/googleplus.py b/youtube_dl/extractor/googleplus.py
index 07d994b448040fb80912593b9cdae4ac66e63bbb..6b927bb4477da8ff3f1f5635a6bf5d3a5a590984 100644
--- a/youtube_dl/extractor/googleplus.py
+++ b/youtube_dl/extractor/googleplus.py
@@ -1,92 +1,73 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import datetime
 import re
+import codecs
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-)
+from ..utils import unified_strdate
 
 
 class GooglePlusIE(InfoExtractor):
     IE_DESC = 'Google Plus'
-    _VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
+    _VALID_URL = r'https?://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
     IE_NAME = 'plus.google'
     _TEST = {
         'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
         'info_dict': {
             'id': 'ZButuJc6CtH',
             'ext': 'flv',
+            'title': '嘆きの天使 降臨',
             'upload_date': '20120613',
             'uploader': '井上ヨシマサ',
-            'title': '嘆きの天使 降臨',
         }
     }
 
     def _real_extract(self, url):
-        # Extract id from URL
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         # Step 1, Retrieve post webpage to extract further information
         webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
 
-        self.report_extraction(video_id)
-
-        # Extract update date
-        upload_date = self._html_search_regex(
+        title = self._og_search_description(webpage).splitlines()[0]
+        upload_date = unified_strdate(self._html_search_regex(
             r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                     ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
-            webpage, 'upload date', fatal=False, flags=re.VERBOSE)
-        if upload_date:
-            # Convert timestring to a format suitable for filename
-            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
-            upload_date = upload_date.strftime('%Y%m%d')
-
-        # Extract uploader
-        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
-            webpage, 'uploader', fatal=False)
-
-        # Extract title
-        # Get the first line for title
-        video_title = self._og_search_description(webpage).splitlines()[0]
+            webpage, 'upload date', fatal=False, flags=re.VERBOSE))
+        uploader = self._html_search_regex(
+            r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)
 
         # Step 2, Simulate clicking the image box to launch video
         DOMAIN = 'https://plus.google.com/'
-        video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
+        video_page = self._search_regex(
+            r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
             webpage, 'video page URL')
         if not video_page.startswith(DOMAIN):
             video_page = DOMAIN + video_page
 
         webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
 
-        # Extract video links all sizes
-        pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
-        mobj = re.findall(pattern, webpage)
-        if len(mobj) == 0:
-            raise ExtractorError('Unable to extract video links')
-
-        # Sort in resolution
-        links = sorted(mobj)
+        def unicode_escape(s):
+            decoder = codecs.getdecoder('unicode_escape')
+            return re.sub(
+                r'\\u[0-9a-fA-F]{4,}',
+                lambda m: decoder(m.group(0))[0],
+                s)
 
-        # Choose the lowest of the sort, i.e. highest resolution
-        video_url = links[-1]
-        # Only get the url. The resolution part in the tuple has no use anymore
-        video_url = video_url[-1]
-        # Treat escaped \u0026 style hex
-        try:
-            video_url = video_url.decode("unicode_escape")
-        except AttributeError: # Python 3
-            video_url = bytes(video_url, 'ascii').decode('unicode-escape')
+        # Extract video links all sizes
+        formats = [{
+            'url': unicode_escape(video_url),
+            'ext': 'flv',
+            'width': int(width),
+            'height': int(height),
+        } for width, height, video_url in re.findall(
+            r'\d+,(\d+),(\d+),"(https?://[^.]+\.googleusercontent\.com.*?)"', webpage)]
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
-            'url': video_url,
+            'title': title,
             'uploader': uploader,
             'upload_date': upload_date,
-            'title': video_title,
-            'ext': 'flv',
+            'formats': formats,
         }
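
A side note on the new unicode_escape helper introduced above: the Google+ player page embeds its googleusercontent.com video URLs with literal \u003d / \u0026 escape sequences, and the rewritten extractor decodes each such sequence in place rather than round-tripping the whole URL through bytes as the old try/except block did. Below is a minimal standalone sketch of that helper; the sample URL is made up for illustration only.

import codecs
import re


def unicode_escape(s):
    # Decode only \uXXXX escape sequences; everything else is left untouched.
    decoder = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4,}',
        lambda m: decoder(m.group(0))[0],
        s)


# Hypothetical escaped URL, shaped like what the scraped player page contains.
escaped = r'https://abc.googleusercontent.com/videoplayback?id\u003dxyz\u0026itag\u003d22'
print(unicode_escape(escaped))
# https://abc.googleusercontent.com/videoplayback?id=xyz&itag=22

The scraped upload date is likewise normalized by youtube_dl's unified_strdate, which turns the page's '2012-06-13' into the '20120613' form asserted in the test.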