]>
Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/turner.py
from __future__ import unicode_literals

import re

from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
    ExtractorError,
    determine_ext,
    float_or_none,
    int_or_none,
    parse_duration,
    strip_or_none,
    update_url_query,
    xpath_attr,
    xpath_text,
)
class TurnerBaseIE(AdobePassIE):
    # Shared helpers for Turner-platform extractors; concrete site extractors
    # subclass this elsewhere (not visible in this chunk).
    # Maps an Akamai "secure path" string to its fetched SPE token.  Declared
    # at class level so the cache is shared by every instance in a session.
    _AKAMAI_SPE_TOKEN_CACHE = {}
24 def _extract_timestamp(self
, video_data
):
25 return int_or_none(xpath_attr(video_data
, 'dateCreated', 'uts'))
27 def _add_akamai_spe_token(self
, tokenizer_src
, video_url
, content_id
, ap_data
, custom_tokenizer_query
=None):
28 secure_path
= self
._search
_regex
(r
'https?://[^/]+(.+/)', video_url
, 'secure path') + '*'
29 token
= self
._AKAMAI
_SPE
_TOKEN
_CACHE
.get(secure_path
)
34 if custom_tokenizer_query
:
35 query
.update(custom_tokenizer_query
)
37 query
['videoId'] = content_id
38 if ap_data
.get('auth_required'):
39 query
['accessToken'] = self
._extract
_mvpd
_auth
(ap_data
['url'], content_id
, ap_data
['site_name'], ap_data
['site_name'])
40 auth
= self
._download
_xml
(
41 tokenizer_src
, content_id
, query
=query
)
42 error_msg
= xpath_text(auth
, 'error/msg')
44 raise ExtractorError(error_msg
, expected
=True)
45 token
= xpath_text(auth
, 'token')
48 self
._AKAMAI
_SPE
_TOKEN
_CACHE
[secure_path
] = token
49 return video_url
+ '?hdnea=' + token
51 def _extract_cvp_info(self
, data_src
, video_id
, path_data
={}, ap_data
={}):
52 video_data
= self
._download
_xml
(data_src
, video_id
)
53 video_id
= video_data
.attrib
['id']
54 title
= xpath_text(video_data
, 'headline', fatal
=True)
55 content_id
= xpath_text(video_data
, 'contentId') or video_id
56 # rtmp_src = xpath_text(video_data, 'akamai/src')
58 # splited_rtmp_src = rtmp_src.split(',')
59 # if len(splited_rtmp_src) == 2:
60 # rtmp_src = splited_rtmp_src[1]
61 # aifp = xpath_text(video_data, 'akamai/aifp', default='')
66 r
'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')
67 # Possible formats locations: files/file, files/groupFiles/files
69 for video_file
in video_data
.findall('.//file'):
70 video_url
= video_file
.text
.strip()
73 ext
= determine_ext(video_url
)
74 if video_url
.startswith('/mp4:protected/'):
76 # TODO Correct extraction for these files
77 # protected_path_data = path_data.get('protected')
78 # if not protected_path_data or not rtmp_src:
80 # protected_path = self._search_regex(
81 # r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
82 # auth = self._download_webpage(
83 # protected_path_data['tokenizer_src'], query={
84 # 'path': protected_path,
85 # 'videoId': content_id,
88 # token = xpath_text(auth, 'token')
91 # video_url = rtmp_src + video_url + '?' + token
92 elif video_url
.startswith('/secure/'):
93 secure_path_data
= path_data
.get('secure')
94 if not secure_path_data
:
96 video_url
= self
._add
_akamai
_spe
_token
(
97 secure_path_data
['tokenizer_src'],
98 secure_path_data
['media_src'] + video_url
,
100 elif not re
.match('https?://', video_url
):
101 base_path_data
= path_data
.get(ext
, path_data
.get('default', {}))
102 media_src
= base_path_data
.get('media_src')
105 video_url
= media_src
+ video_url
106 if video_url
in urls
:
108 urls
.append(video_url
)
109 format_id
= video_file
.get('bitrate')
111 formats
.extend(self
._extract
_smil
_formats
(
112 video_url
, video_id
, fatal
=False))
114 m3u8_formats
= self
._extract
_m
3u8_formats
(
115 video_url
, video_id
, 'mp4',
116 m3u8_id
=format_id
or 'hls', fatal
=False)
117 if '/secure/' in video_url
and '?hdnea=' in video_url
:
118 for f
in m3u8_formats
:
119 f
['_seekable'] = False
120 formats
.extend(m3u8_formats
)
122 formats
.extend(self
._extract
_f
4m
_formats
(
123 update_url_query(video_url
, {'hdcore': '3.7.0'}),
124 video_id
, f4m_id
=format_id
or 'hds', fatal
=False))
127 'format_id': format_id
,
131 mobj
= rex
.search(format_id
+ video_url
)
134 'width': int(mobj
.group('width')),
135 'height': int(mobj
.group('height')),
136 'tbr': int_or_none(mobj
.group('bitrate')),
138 elif isinstance(format_id
, compat_str
):
139 if format_id
.isdigit():
140 f
['tbr'] = int(format_id
)
142 mobj
= re
.match(r
'ios_(audio|[0-9]+)$', format_id
)
144 if mobj
.group(1) == 'audio':
150 f
['tbr'] = int(mobj
.group(1))
152 self
._sort
_formats
(formats
)
155 for source
in video_data
.findall('closedCaptions/source'):
156 for track
in source
.findall('track'):
157 track_url
= track
.get('url')
158 if not isinstance(track_url
, compat_str
) or track_url
.endswith('/big'):
160 lang
= track
.get('lang') or track
.get('label') or 'en'
161 subtitles
.setdefault(lang
, []).append({
167 }.get(source
.get('format'))
171 'id': image
.get('cut'),
173 'width': int_or_none(image
.get('width')),
174 'height': int_or_none(image
.get('height')),
175 } for image
in video_data
.findall('images/image')]
177 is_live
= xpath_text(video_data
, 'isLive') == 'true'
181 'title': self
._live
_title
(title
) if is_live
else title
,
183 'subtitles': subtitles
,
184 'thumbnails': thumbnails
,
185 'thumbnail': xpath_text(video_data
, 'poster'),
186 'description': strip_or_none(xpath_text(video_data
, 'description')),
187 'duration': parse_duration(xpath_text(video_data
, 'length') or xpath_text(video_data
, 'trt')),
188 'timestamp': self
._extract
_timestamp
(video_data
),
189 'upload_date': xpath_attr(video_data
, 'metas', 'version'),
190 'series': xpath_text(video_data
, 'showTitle'),
191 'season_number': int_or_none(xpath_text(video_data
, 'seasonNumber')),
192 'episode_number': int_or_none(xpath_text(video_data
, 'episodeNumber')),
196 def _extract_ngtv_info(self
, media_id
, tokenizer_query
, ap_data
=None):
197 streams_data
= self
._download
_json
(
198 'http://medium.ngtv.io/media/%s/tv' % media_id
,
199 media_id
)['media']['tv']
203 for supported_type
in ('unprotected', 'bulkaes'):
204 stream_data
= streams_data
.get(supported_type
, {})
205 m3u8_url
= stream_data
.get('secureUrl') or stream_data
.get('url')
208 if stream_data
.get('playlistProtection') == 'spe':
209 m3u8_url
= self
._add
_akamai
_spe
_token
(
210 'http://token.ngtv.io/token/token_spe',
211 m3u8_url
, media_id
, ap_data
or {}, tokenizer_query
)
212 formats
.extend(self
._extract
_m
3u8_formats
(
213 m3u8_url
, media_id
, 'mp4', m3u8_id
='hls', fatal
=False))
215 duration
= float_or_none(stream_data
.get('totalRuntime'))
218 for chapter
in stream_data
.get('contentSegments', []):
219 start_time
= float_or_none(chapter
.get('start'))
220 chapter_duration
= float_or_none(chapter
.get('duration'))
221 if start_time
is None or chapter_duration
is None:
224 'start_time': start_time
,
225 'end_time': start_time
+ chapter_duration
,
227 self
._sort
_formats
(formats
)
231 'chapters': chapters
,
232 'duration': duration
,