# coding: utf-8
from __future__ import unicode_literals
-import datetime
import re
+import codecs
from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-)
+from ..utils import unified_strdate
class GooglePlusIE(InfoExtractor):
        'info_dict': {
            'id': 'ZButuJc6CtH',
            'ext': 'flv',
+            'title': '嘆きの天使 降臨',
            'upload_date': '20120613',
            'uploader': '井上ヨシマサ',
-            'title': '嘆きの天使 降臨',
        }
    }
    def _real_extract(self, url):
-        # Extract id from URL
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
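+        # _match_id matches the URL against _VALID_URL and returns its 'id'
+        # group, replacing the manual re.match above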
+        video_id = self._match_id(url)
        # Step 1, Retrieve post webpage to extract further information
        webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
-        self.report_extraction(video_id)
-
-        # Extract update date
-        upload_date = self._html_search_regex(
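+        # Title: first line of the og:description meta. unified_strdate turns
+        # the on-page YYYY-MM-DD string into the YYYYMMDD form youtube-dl expects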
+        title = self._og_search_description(webpage).splitlines()[0]
+        upload_date = unified_strdate(self._html_search_regex(
            r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                    ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
-            webpage, 'upload date', fatal=False, flags=re.VERBOSE)
-        if upload_date:
-            # Convert timestring to a format suitable for filename
-            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
-            upload_date = upload_date.strftime('%Y%m%d')
-
-        # Extract uploader
-        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
-            webpage, 'uploader', fatal=False)
-
-        # Extract title
-        # Get the first line for title
-        video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
-            webpage, 'title', default='NA')
+            webpage, 'upload date', fatal=False, flags=re.VERBOSE))
+        uploader = self._html_search_regex(
+            r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)
        # Step 2, Simulate clicking the image box to launch video
        DOMAIN = 'https://plus.google.com/'
-        video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
+        video_page = self._search_regex(
+            r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
            webpage, 'video page URL')
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page
        webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
-        # Extract video links all sizes
-        pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
-        mobj = re.findall(pattern, webpage)
-        if len(mobj) == 0:
-            raise ExtractorError('Unable to extract video links')
-
-        # Sort in resolution
-        links = sorted(mobj)
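+        # URLs in the embedded metadata contain literal \u0026-style escape
+        # sequences; decode them back into real characters before use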
+        def unicode_escape(s):
+            decoder = codecs.getdecoder('unicode_escape')
+            return re.sub(
+                r'\\u[0-9a-fA-F]{4,}',
+                lambda m: decoder(m.group(0))[0],
+                s)
-        # Choose the lowest of the sort, i.e. highest resolution
-        video_url = links[-1]
-        # Only get the url. The resolution part in the tuple has no use anymore
-        video_url = video_url[-1]
-        # Treat escaped \u0026 style hex
-        try:
-            video_url = video_url.decode("unicode_escape")
-        except AttributeError: # Python 3
-            video_url = bytes(video_url, 'ascii').decode('unicode-escape')
+        # Extract video links all sizes
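+        # Each metadata entry carries a leading number (ignored), then width,
+        # height and the URL; build one format dict per match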
+        formats = [{
+            'url': unicode_escape(video_url),
+            'ext': 'flv',
+            'width': int(width),
+            'height': int(height),
+        } for width, height, video_url in re.findall(
+            r'\d+,(\d+),(\d+),"(https?://[^.]+\.googleusercontent\.com.*?)"', webpage)]
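+        # Order the formats by quality so the best resolution is picked by default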
+        self._sort_formats(formats)
        return {
            'id': video_id,
-            'url': video_url,
+            'title': title,
            'uploader': uploader,
            'upload_date': upload_date,
-            'title': video_title,
-            'ext': 'flv',
+            'formats': formats,
        }