]> Raphaël G. Git Repositories - youtubedl/blobdiff - youtube_dl/extractor/googleplus.py
debian/control: Mark compliance with policy 4.1.1. No further changes needed.
[youtubedl] / youtube_dl / extractor / googleplus.py
index 2570746b2047a1d1ae0a60b48970b1414f168e40..6b927bb4477da8ff3f1f5635a6bf5d3a5a590984 100644 (file)
@@ -1,98 +1,73 @@
 # coding: utf-8
+from __future__ import unicode_literals
 
-import datetime
 import re
+import codecs
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-)
+from ..utils import unified_strdate
 
 
class GooglePlusIE(InfoExtractor):
    """Extractor for video posts on Google Plus (plus.google.com).

    Downloads the post page, locates the embedded photo/video page, and
    collects the direct googleusercontent.com video URLs in all available
    resolutions.
    """
    IE_DESC = 'Google Plus'
    _VALID_URL = r'https?://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
    IE_NAME = 'plus.google'
    _TEST = {
        'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
        'info_dict': {
            'id': 'ZButuJc6CtH',
            'ext': 'flv',
            'title': '嘆きの天使 降臨',
            'upload_date': '20120613',
            'uploader': '井上ヨシマサ',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Step 1: retrieve the post webpage to extract further information.
        webpage = self._download_webpage(
            url, video_id, 'Downloading entry webpage')

        # The og:description holds the post text; its first line is used as
        # the title.  Guard against a missing/empty description so we fall
        # back to the video id instead of crashing with an IndexError.
        description = self._og_search_description(webpage, default='') or ''
        title = description.splitlines()[0] if description.strip() else video_id

        # The date is hidden in a display:none element; (?x) inside the
        # pattern already enables verbose mode, so no flags kwarg is needed.
        upload_date = unified_strdate(self._html_search_regex(
            r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                    ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
            webpage, 'upload date', fatal=False))
        uploader = self._html_search_regex(
            r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)

        # Step 2: simulate clicking the image box to launch the video page.
        DOMAIN = 'https://plus.google.com/'
        video_page = self._search_regex(
            r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
            webpage, 'video page URL')
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page

        webpage = self._download_webpage(
            video_page, video_id, 'Downloading video page')

        # The page embeds URLs with JS-style \uXXXX escapes; decode only
        # those sequences and leave the rest of the string untouched.
        # Build the decoder once instead of on every substitution.
        decode = codecs.getdecoder('unicode_escape')

        def unicode_escape(s):
            return re.sub(
                r'\\u[0-9a-fA-F]{4,}',
                lambda m: decode(m.group(0))[0],
                s)

        # Extract the video links of all sizes; each match is
        # (width, height, escaped URL).
        formats = [{
            'url': unicode_escape(video_url),
            'ext': 'flv',
            'width': int(width),
            'height': int(height),
        } for width, height, video_url in re.findall(
            r'\d+,(\d+),(\d+),"(https?://[^.]+\.googleusercontent\.com.*?)"', webpage)]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'uploader': uploader,
            'upload_date': upload_date,
            'formats': formats,
        }