+# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
+import binascii
import re
+import json
from .common import InfoExtractor
-from ..utils import qualities
+from ..utils import (
+ ExtractorError,
+ qualities,
+)
+from ..compat import compat_ord
class TeamcocoIE(InfoExtractor):
'ext': 'mp4',
'title': 'Conan Becomes A Mary Kay Beauty Consultant',
'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.',
+ 'duration': 504,
'age_limit': 0,
}
}, {
'ext': 'mp4',
'description': 'Louis C.K. got starstruck by George W. Bush, so what? Part one.',
'title': 'Louis C.K. Interview Pt. 1 11/3/11',
+ 'duration': 288,
'age_limit': 0,
}
+ }, {
+ 'url': 'http://teamcoco.com/video/timothy-olyphant-drinking-whiskey',
+ 'info_dict': {
+ 'id': '88748',
+ 'ext': 'mp4',
+ 'title': 'Timothy Olyphant Raises A Toast To “Justified”',
+ 'description': 'md5:15501f23f020e793aeca761205e42c24',
+ },
+ 'params': {
+ 'skip_download': True, # m3u8 downloads
+ }
}
]
_VIDEO_ID_REGEXES = (
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
- webpage = self._download_webpage(url, display_id)
+ webpage, urlh = self._download_webpage_handle(url, display_id)
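+ # expired videos end up (after redirects) at a URL containing 'src=expired'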
+ if 'src=expired' in urlh.geturl():
+ raise ExtractorError('This video is expired.', expected=True)
video_id = mobj.group('video_id')
if not video_id:
video_id = self._html_search_regex(
self._VIDEO_ID_REGEXES, webpage, 'video id')
- embed_url = 'http://teamcoco.com/embed/v/%s' % video_id
- embed = self._download_webpage(
- embed_url, video_id, 'Downloading embed page')
+ data = None
+
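+ # The page inlines an obfuscated script that assembles the playlist; capture
+ # everything that runs before the setTimeout(...){playlist call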
+ preload_codes = self._html_search_regex(
+ r'(function.+)setTimeout\(function\(\)\{playlist',
+ webpage, 'preload codes')
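+ # Quoted tokens inside the preload code are shuffled base64 fragments of the
+ # metadata JSON; 'init' always matches the pattern but is not payload data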
+ base64_fragments = re.findall(r'"([a-zA-Z0-9+/=]+)"', preload_codes)
+ base64_fragments.remove('init')
- encoded_data = self._search_regex(
- r'"preload"\s*:\s*"([^"]+)"', embed, 'encoded data')
- data = self._parse_json(
- base64.b64decode(encoded_data.encode('ascii')).decode('utf-8'), video_id)
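+ # Try every rotation of the fragment list; the correct ordering is the one
+ # that base64-decodes to a JSON object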
+ def _check_sequence(cur_fragments):
+ if not cur_fragments:
+ return
+ for i in range(len(cur_fragments)):
+ cur_sequence = (''.join(cur_fragments[i:] + cur_fragments[:i])).encode('ascii')
+ try:
+ raw_data = base64.b64decode(cur_sequence)
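+ # a valid payload is a JSON object, so the first decoded byte must be '{'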
+ if compat_ord(raw_data[0]) == compat_ord('{'):
+ return json.loads(raw_data.decode('utf-8'))
+ except (TypeError, binascii.Error, UnicodeDecodeError, ValueError):
+ continue
+
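+ # Some fragments are decoys: drop every possible contiguous slice [i:j]
+ # and test whether the remaining fragments form a decodable sequence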
+ def _check_data():
+ for i in range(len(base64_fragments) + 1):
+ for j in range(i, len(base64_fragments) + 1):
+ data = _check_sequence(base64_fragments[:i] + base64_fragments[j:])
+ if data:
+ return data
+
+ self.to_screen('Trying to compute a possible data sequence. This may take some time.')
+ data = _check_data()
+
+ if not data:
+ raise ExtractorError(
+ 'Preload information could not be extracted', expected=True)
formats = []
get_quality = qualities(['500k', '480p', '1000k', '720p', '1080p'])
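+ # data['files'] mixes HLS (m3u8) entries and progressive MP4 downloads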
for filed in data['files']:
- m_format = re.search(r'(\d+(k|p))\.mp4', filed['url'])
- if m_format is not None:
- format_id = m_format.group(1)
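+ # HLS entries point at an m3u8 playlist; delegate to the m3u8 helper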
+ if filed['type'] == 'hls':
+ formats.extend(self._extract_m3u8_formats(
+ filed['url'], video_id, ext='mp4'))
else:
- format_id = filed['bitrate']
- tbr = (
- int(filed['bitrate'])
- if filed['bitrate'].isdigit()
- else None)
-
- formats.append({
- 'url': filed['url'],
- 'ext': 'mp4',
- 'tbr': tbr,
- 'format_id': format_id,
- 'quality': get_quality(format_id),
- })
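+ # progressive URLs embed the quality in the filename, e.g. 500k.mp4 or 720p.mp4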
+ m_format = re.search(r'(\d+(k|p))\.mp4', filed['url'])
+ if m_format is not None:
+ format_id = m_format.group(1)
+ else:
+ format_id = filed['bitrate']
+ tbr = (
+ int(filed['bitrate'])
+ if filed['bitrate'].isdigit()
+ else None)
+
+ formats.append({
+ 'url': filed['url'],
+ 'ext': 'mp4',
+ 'tbr': tbr,
+ 'format_id': format_id,
+ 'quality': get_quality(format_id),
+ })
self._sort_formats(formats)
'title': data['title'],
'thumbnail': data.get('thumb', {}).get('href'),
'description': data.get('teaser'),
+ 'duration': data.get('duration'),
'age_limit': self._family_friendly_search(webpage),
}