]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/turner.py
d/p/remove-autoupdate-mechanism.patch: Update patch with metadata at the top.
[youtubedl] / youtube_dl / extractor / turner.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .adobepass import AdobePassIE
7 from ..compat import compat_str
8 from ..utils import (
9 xpath_text,
10 int_or_none,
11 determine_ext,
12 parse_duration,
13 xpath_attr,
14 update_url_query,
15 ExtractorError,
16 strip_or_none,
17 )
18
19
class TurnerBaseIE(AdobePassIE):
    """Shared base for extractors of Turner sites served via the CVP platform.

    Provides two helpers: tokenization of Akamai "secure" media URLs and
    parsing of a CVP XML metadata document into an info dict.
    """

    # secure path pattern -> previously fetched SPE token, so the tokenizer
    # service is queried at most once per path per run (class-level cache,
    # shared across extractor instances).
    _AKAMAI_SPE_TOKEN_CACHE = {}

    def _extract_timestamp(self, video_data):
        """Return the creation timestamp from dateCreated/@uts as an int,
        or None when the attribute is missing or not numeric."""
        return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))

    def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data):
        """Append an Akamai SPE auth token to *video_url*.

        tokenizer_src -- URL of the token service
        video_url     -- media URL that requires tokenization
        content_id    -- content identifier passed to the service
        ap_data       -- Adobe Pass settings dict; when 'auth_required' is
                         truthy, an MVPD access token is obtained first using
                         ap_data['url'] and ap_data['site_name']

        Returns video_url with '?hdnea=<token>' appended, or the unchanged
        URL when the service returns no token.  Raises ExtractorError when
        the service reports an error message.
        """
        # The "secure path" is everything after the host, with a trailing
        # wildcard, as expected by the tokenizer service.
        secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
        token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)
        if not token:
            query = {
                'path': secure_path,
                'videoId': content_id,
            }
            if ap_data.get('auth_required'):
                query['accessToken'] = self._extract_mvpd_auth(
                    ap_data['url'], content_id, ap_data['site_name'], ap_data['site_name'])
            auth = self._download_xml(
                tokenizer_src, content_id, query=query)
            error_msg = xpath_text(auth, 'error/msg')
            if error_msg:
                raise ExtractorError(error_msg, expected=True)
            token = xpath_text(auth, 'token')
            if not token:
                # Best effort: no token granted, return the URL untouched
                # (and do not cache the failure).
                return video_url
            self._AKAMAI_SPE_TOKEN_CACHE[secure_path] = token
        return video_url + '?hdnea=' + token

    def _extract_cvp_info(self, data_src, video_id, path_data=None, ap_data=None):
        """Download a CVP XML document from *data_src* and build an info dict.

        path_data -- optional mapping of media source roots, keyed by
                     'secure', 'default' or a file extension; each value is a
                     dict with 'media_src' (and 'tokenizer_src' for 'secure')
        ap_data   -- optional Adobe Pass settings forwarded to
                     _add_akamai_spe_token()

        Returns a standard info dict with formats, subtitles, thumbnails and
        assorted metadata.  Raises ExtractorError when the document carries
        no headline.
        """
        # None sentinels instead of mutable default arguments; an empty dict
        # keeps the original "no path/auth data" behavior.
        path_data = path_data or {}
        ap_data = ap_data or {}
        video_data = self._download_xml(data_src, video_id)
        video_id = video_data.attrib['id']
        title = xpath_text(video_data, 'headline', fatal=True)
        content_id = xpath_text(video_data, 'contentId') or video_id
        # rtmp_src = xpath_text(video_data, 'akamai/src')
        # if rtmp_src:
        #     splited_rtmp_src = rtmp_src.split(',')
        #     if len(splited_rtmp_src) == 2:
        #         rtmp_src = splited_rtmp_src[1]
        # aifp = xpath_text(video_data, 'akamai/aifp', default='')

        urls = []
        formats = []
        # Matches "WIDTHxHEIGHT" optionally followed by "_BITRATE", as it
        # appears in format ids / media URLs.
        rex = re.compile(
            r'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')
        # Possible formats locations: files/file, files/groupFiles/files
        # and maybe others
        for video_file in video_data.findall('.//file'):
            # A <file> element may be present but empty; guard against
            # None.text instead of crashing.
            video_url = (video_file.text or '').strip()
            if not video_url:
                continue
            ext = determine_ext(video_url)
            if video_url.startswith('/mp4:protected/'):
                continue
                # TODO Correct extraction for these files
                # protected_path_data = path_data.get('protected')
                # if not protected_path_data or not rtmp_src:
                #     continue
                # protected_path = self._search_regex(
                #     r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
                # auth = self._download_webpage(
                #     protected_path_data['tokenizer_src'], query={
                #         'path': protected_path,
                #         'videoId': content_id,
                #         'aifp': aifp,
                #     })
                # token = xpath_text(auth, 'token')
                # if not token:
                #     continue
                # video_url = rtmp_src + video_url + '?' + token
            elif video_url.startswith('/secure/'):
                secure_path_data = path_data.get('secure')
                if not secure_path_data:
                    continue
                video_url = self._add_akamai_spe_token(
                    secure_path_data['tokenizer_src'],
                    secure_path_data['media_src'] + video_url,
                    content_id, ap_data)
            elif not re.match(r'https?://', video_url):
                # Relative URL: resolve against the media source root for
                # this extension (or the 'default' one).
                base_path_data = path_data.get(ext, path_data.get('default', {}))
                media_src = base_path_data.get('media_src')
                if not media_src:
                    continue
                video_url = media_src + video_url
            if video_url in urls:
                continue
            urls.append(video_url)
            format_id = video_file.get('bitrate')
            if ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    video_url, video_id, fatal=False))
            elif ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    video_url, video_id, 'mp4',
                    m3u8_id=format_id or 'hls', fatal=False)
                if '/secure/' in video_url and '?hdnea=' in video_url:
                    # Tokenized secure HLS streams are flagged non-seekable
                    # (presumably honored by the HLS downloader — internal
                    # '_seekable' key).
                    for f in m3u8_formats:
                        f['_seekable'] = False
                formats.extend(m3u8_formats)
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    update_url_query(video_url, {'hdcore': '3.7.0'}),
                    video_id, f4m_id=format_id or 'hds', fatal=False))
            else:
                f = {
                    'format_id': format_id,
                    'url': video_url,
                    'ext': ext,
                }
                # format_id is None when the bitrate attribute is absent;
                # the original concatenation raised TypeError in that case.
                mobj = rex.search((format_id or '') + video_url)
                if mobj:
                    f.update({
                        'width': int(mobj.group('width')),
                        'height': int(mobj.group('height')),
                        'tbr': int_or_none(mobj.group('bitrate')),
                    })
                elif isinstance(format_id, compat_str):
                    if format_id.isdigit():
                        f['tbr'] = int(format_id)
                    else:
                        mobj = re.match(r'ios_(audio|[0-9]+)$', format_id)
                        if mobj:
                            if mobj.group(1) == 'audio':
                                f.update({
                                    'vcodec': 'none',
                                    'ext': 'm4a',
                                })
                            else:
                                f['tbr'] = int(mobj.group(1))
                formats.append(f)
        self._sort_formats(formats)

        subtitles = {}
        for source in video_data.findall('closedCaptions/source'):
            for track in source.findall('track'):
                track_url = track.get('url')
                # '/big' URLs are skipped (placeholder/invalid entries).
                if not isinstance(track_url, compat_str) or track_url.endswith('/big'):
                    continue
                lang = track.get('lang') or track.get('label') or 'en'
                subtitles.setdefault(lang, []).append({
                    'url': track_url,
                    'ext': {
                        'scc': 'scc',
                        'webvtt': 'vtt',
                        'smptett': 'tt',
                    }.get(source.get('format'))
                })

        thumbnails = [{
            'id': image.get('cut'),
            'url': image.text,
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in video_data.findall('images/image')]

        is_live = xpath_text(video_data, 'isLive') == 'true'

        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'thumbnail': xpath_text(video_data, 'poster'),
            'description': strip_or_none(xpath_text(video_data, 'description')),
            'duration': parse_duration(xpath_text(video_data, 'length') or xpath_text(video_data, 'trt')),
            'timestamp': self._extract_timestamp(video_data),
            'upload_date': xpath_attr(video_data, 'metas', 'version'),
            'series': xpath_text(video_data, 'showTitle'),
            'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
            'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
            'is_live': is_live,
        }