from __future__ import unicode_literals

import binascii
import hashlib
import hmac
import re
import time

from .once import OnceIE
from .adobepass import AdobePassIE
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    determine_ext,
    ExtractorError,
    find_xpath_attr,
    float_or_none,
    int_or_none,
    mimetype2ext,
    sanitized_Request,
    unsmuggle_url,
    update_url_query,
    xpath_with_ns,
)
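
# SMIL 2.1 namespace used in thePlatform's SMIL responses; _x() below rewrites a
# prefixed XPath such as './/smil:ref' into the Clark-notation form
# './/{http://www.w3.org/2005/SMIL21/Language}ref' that ElementTree expects.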
default_ns = 'http://www.w3.org/2005/SMIL21/Language'
_x = lambda p: xpath_with_ns(p, {'smil': default_ns})


class ThePlatformBaseIE(OnceIE):
    _TP_TLD = 'com'
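
    # Fetch the SMIL document for a release URL, surface any geo-restriction or
    # availability error embedded in it, and return (formats, subtitles).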
    def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
        meta = self._download_xml(
            smil_url, video_id, note=note, query={'format': 'SMIL'},
            headers=self.geo_verification_headers())
        error_element = find_xpath_attr(meta, _x('.//smil:ref'), 'src')
        if error_element is not None:
            exception = find_xpath_attr(
                error_element, _x('.//smil:param'), 'name', 'exception')
            if exception is not None:
                if exception.get('value') == 'GeoLocationBlocked':
                    self.raise_geo_restricted(error_element.attrib['abstract'])
                elif error_element.attrib['src'].startswith(
                        'http://link.theplatform.%s/s/errorFiles/Unavailable.'
                        % self._TP_TLD):
                    raise ExtractorError(
                        error_element.attrib['abstract'], expected=True)

        smil_formats = self._parse_smil_formats(
            meta, smil_url, video_id, namespace=default_ns,
            # the parameters are from syfy.com, other sites may use others,
            # they also work for nbc.com
            f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
            transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))

        formats = []
        for _format in smil_formats:
            if OnceIE.suitable(_format['url']):
                formats.extend(self._extract_once_formats(_format['url']))
            else:
                media_url = _format['url']
                if determine_ext(media_url) == 'm3u8':
                    # carry the 'hdnea2' auth cookie (if any) over to the HLS
                    # manifest URL as an 'hdnea3' query parameter
                    hdnea2 = self._get_cookies(media_url).get('hdnea2')
                    if hdnea2:
                        _format['url'] = update_url_query(media_url, {'hdnea3': hdnea2.value})

                formats.append(_format)

        subtitles = self._parse_smil_subtitles(meta, default_ns)

        return formats, subtitles
 
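    # The 'preview' endpoint returns the media metadata (title, description,
    # captions, chapters, ...) as JSON, e.g.
    # http://link.theplatform.com/s/<provider id>/<media id>?format=preview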
    def _download_theplatform_metadata(self, path, video_id):
        info_url = 'http://link.theplatform.%s/s/%s?format=preview' % (self._TP_TLD, path)
        return self._download_json(info_url, video_id)
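
    # Map the preview JSON onto youtube-dl fields; durations, chapter offsets and
    # pubDate are given in milliseconds, hence the scale of 1000 below.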
    def _parse_theplatform_metadata(self, info):
        subtitles = {}
        captions = info.get('captions')
        if isinstance(captions, list):
            for caption in captions:
                lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
                subtitles.setdefault(lang, []).append({
                    'ext': mimetype2ext(mime),
                    'url': src,
                })

        duration = info.get('duration')
        tp_chapters = info.get('chapters', [])
        chapters = []
        if tp_chapters:
            def _add_chapter(start_time, end_time):
                start_time = float_or_none(start_time, 1000)
                end_time = float_or_none(end_time, 1000)
                if start_time is None or end_time is None:
                    return
                chapters.append({
                    'start_time': start_time,
                    'end_time': end_time,
                })

            for chapter in tp_chapters[:-1]:
                _add_chapter(chapter.get('startTime'), chapter.get('endTime'))
            # the last chapter may lack an endTime; fall back to the total duration
            _add_chapter(tp_chapters[-1].get('startTime'), tp_chapters[-1].get('endTime') or duration)

        return {
            'title': info['title'],
            'subtitles': subtitles,
            'description': info['description'],
            'thumbnail': info['defaultThumbnailUrl'],
            'duration': float_or_none(duration, 1000),
            'timestamp': int_or_none(info.get('pubDate'), 1000) or None,
            'uploader': info.get('billingCode'),
            'chapters': chapters,
        }

    def _extract_theplatform_metadata(self, path, video_id):
        info = self._download_theplatform_metadata(path, video_id)
        return self._parse_theplatform_metadata(info)


class ThePlatformIE(ThePlatformBaseIE, AdobePassIE):
    _VALID_URL = r'''(?x)
        (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
           (?:(?:(?:[^/]+/)+select/)?(?P<media>media/(?:guid/\d+/)?)?|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
         |theplatform:)(?P<id>[^/\?&]+)'''
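    # Matches link/player URLs such as
    #   http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT
    #   https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD
    # as well as bare 'theplatform:<media id>' references.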
    _TESTS = [{
        # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/
        'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
        'info_dict': {
            'id': 'e9I_cZgTgIPd',
            'title': 'Blackberry\'s big, bold Z30',
            'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
            'timestamp': 1383239700,
            'upload_date': '20131031',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            'skip_download': True,
        },
        'skip': '404 Not Found',
    }, {
        # from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/
        'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT',
        'info_dict': {
            'id': '22d_qsQ6MIRT',
            'description': 'md5:ac330c9258c04f9d7512cf26b9595409',
            'title': 'Tesla Model S: A second step towards a cleaner motoring future',
            'timestamp': 1426176191,
            'upload_date': '20150312',
            'uploader': 'CBSI-NEW',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD',
        'info_dict': {
            'id': 'yMBg9E8KFxZD',
            'description': 'md5:644ad9188d655b742f942bf2e06b002d',
            'title': 'HIGHLIGHTS: USA bag first ever series Cup win',
        },
    }, {
        'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
        'only_matching': True,
    }, {
        'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
        'md5': 'fb96bb3d85118930a5b055783a3bd992',
        'info_dict': {
            'id': 'tdy_or_siri_150701',
            'title': 'iPhone Siri’s sassy response to a math question has people talking',
            'description': 'md5:a565d1deadd5086f3331d57298ec6333',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1435752600,
            'upload_date': '20150701',
            'uploader': 'NBCU-NEWS',
        },
    }, {
        # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1
        # geo-restricted (US), HLS encrypted with AES-128
        'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781',
        'only_matching': True,
    }]

    @classmethod
    def _extract_urls(cls, webpage):
        m = re.search(
            r'''(?x)
                    <meta\s+
                        property=(["'])(?:og:video(?::(?:secure_)?url)?|twitter:player)\1\s+
                        content=(["'])(?P<url>https?://player\.theplatform\.com/p/.+?)\2
            ''', webpage)
        if m:
            return [m.group('url')]

        # Are whitespaces ignored in URLs?
        # https://github.com/rg3/youtube-dl/issues/12044
        matches = re.findall(
            r'(?s)<(?:iframe|script)[^>]+src=(["\'])((?:https?:)?//player\.theplatform\.com/p/.+?)\1', webpage)
        if matches:
            # strip whitespace from the matched embed URL (see the issue above)
            return [re.sub(r'\s', '', list(zip(*matches))[1][0])]
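
    # Signed release URLs carry '&sig=<flags><expiry><checksum><hex(secret)>':
    # flags is '10' (include the query string) or '00', expiry is a hex Unix
    # timestamp, and checksum is an HMAC-SHA1 keyed with sig_key over the
    # hex-decoded bytes of flags + expiry + hex(relative path).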
    @staticmethod
    def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False):
        flags = '10' if include_qs else '00'
        expiration_date = '%x' % (int(time.time()) + life)

        def str_to_hex(str):
            return binascii.b2a_hex(str.encode('ascii')).decode('ascii')

        def hex_to_bytes(hex):
            return binascii.a2b_hex(hex.encode('ascii'))

        relative_path = re.match(r'https?://link\.theplatform\.com/s/([^?]+)', url).group(1)
        clear_text = hex_to_bytes(flags + expiration_date + str_to_hex(relative_path))
        checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest()
        sig = flags + expiration_date + checksum + str_to_hex(sig_secret)
        return '%s&sig=%s' % (url, sig)

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})

        mobj = re.match(self._VALID_URL, url)
        provider_id = mobj.group('provider_id')
        video_id = mobj.group('id')

        if not provider_id:
            provider_id = 'dJ5BDC'

        path = provider_id + '/'
        if mobj.group('media'):
            path += mobj.group('media')
        path += video_id
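
        # player URLs carrying a ?guid=... parameter are resolved through the
        # provider's feed (handled by ThePlatformFeedIE); the default feed id is
        # scraped from the scripts referenced by the embed page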
        qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        if 'guid' in qs_dict:
            webpage = self._download_webpage(url, video_id)
            scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
            feed_id = None
            # The feed id usually lives in the last script. There is no clear
            # pattern in the script filenames, so try them one by one in reverse.
            for script in reversed(scripts):
                feed_script = self._download_webpage(
                    self._proto_relative_url(script, 'http:'),
                    video_id, 'Downloading feed script')
                feed_id = self._search_regex(
                    r'defaultFeedId\s*:\s*"([^"]+)"', feed_script,
                    'default feed id', default=None)
                if feed_id is not None:
                    break
            if feed_id is None:
                raise ExtractorError('Unable to find feed id')
            return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
                provider_id, feed_id, qs_dict['guid'][0]))

        if smuggled_data.get('force_smil_url', False):
            smil_url = url
        # Explicitly specified SMIL (see https://github.com/rg3/youtube-dl/issues/7385)
        elif '/guid/' in url:
            headers = {}
            source_url = smuggled_data.get('source_url')
            if source_url:
                headers['Referer'] = source_url
            request = sanitized_Request(url, headers=headers)
            webpage = self._download_webpage(request, video_id)
            smil_url = self._search_regex(
                r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml',
                webpage, 'smil url', group='url')
            path = self._search_regex(
                r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path')
            smil_url += '?' if '?' not in smil_url else '&' + 'formats=m3u,mpeg4'
        elif mobj.group('config'):
            config_url = url + '&form=json'
            config_url = config_url.replace('swf/', 'config/')
            config_url = config_url.replace('onsite/', 'onsite/config/')
            config = self._download_json(config_url, video_id, 'Downloading config')
            if 'releaseUrl' in config:
                release_url = config['releaseUrl']
            else:
                release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
            smil_url = release_url + '&formats=MPEG4&manifest=f4m'
        else:
            smil_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
 
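        # embedding extractors may smuggle a signing key/secret pair; if present,
        # sign the SMIL URL before requesting it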
        sig = smuggled_data.get('sig')
        if sig:
            smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])

        formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)
        self._sort_formats(formats)

        ret = self._extract_theplatform_metadata(path, video_id)
        combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': combined_subtitles,
        })

        return ret


class ThePlatformFeedIE(ThePlatformBaseIE):
    _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&%s'
    _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*(?P<filter>by(?:Gui|I)d=(?P<id>[^&]+))'
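    # <filter> captures the literal byGuid=.../byId=... parameter, which is passed
    # through to the feed query unchanged; <id> doubles as the video id.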
    _TESTS = [{
        # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
        'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
        'md5': '6e32495b5073ab414471b615c5ded394',
        'info_dict': {
            'id': 'n_hardball_5biden_140207',
            'title': 'The Biden factor: will Joe run in 2016?',
            'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20140208',
            'timestamp': 1391824260,
            'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
            'uploader': 'NBCU-NEWS',
        },
    }, {
        'url': 'http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews?byGuid=nn_netcast_180306.Copy.01',
        'only_matching': True,
    }]

    def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query={}, account_id=None):
        real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query)
        entry = self._download_json(real_url, video_id)['entries'][0]
        main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % (provider_id, account_id, entry['guid']) if account_id else entry.get('plmedia$publicUrl')

        formats = []
        subtitles = {}
        first_video_id = None
        duration = None
        asset_types = []
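        # request one SMIL document per distinct assetType across the feed's
        # media$content items and merge the resulting formats and subtitles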
        for item in entry['media$content']:
            smil_url = item['plfile$url']
            cur_video_id = ThePlatformIE._match_id(smil_url)
            if first_video_id is None:
                first_video_id = cur_video_id
                duration = float_or_none(item.get('plfile$duration'))
            file_asset_types = item.get('plfile$assetTypes') or compat_parse_qs(compat_urllib_parse_urlparse(smil_url).query)['assetTypes']
            for asset_type in file_asset_types:
                if asset_type in asset_types:
                    continue
                asset_types.append(asset_type)
                query = {
                    'mbr': 'true',
                    'formats': item['plfile$format'],
                    'assetTypes': asset_type,
                }
                if asset_type in asset_types_query:
                    query.update(asset_types_query[asset_type])
                cur_formats, cur_subtitles = self._extract_theplatform_smil(update_url_query(
                    main_smil_url or smil_url, query), video_id, 'Downloading SMIL data for %s' % asset_type)
                formats.extend(cur_formats)
                subtitles = self._merge_subtitles(subtitles, cur_subtitles)

        self._sort_formats(formats)

        thumbnails = [{
            'url': thumbnail['plfile$url'],
            'width': int_or_none(thumbnail.get('plfile$width')),
            'height': int_or_none(thumbnail.get('plfile$height')),
        } for thumbnail in entry.get('media$thumbnails', [])]

        timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
        categories = [item['media$name'] for item in entry.get('media$categories', [])]

        ret = self._extract_theplatform_metadata('%s/%s' % (provider_id, first_video_id), video_id)
        subtitles = self._merge_subtitles(subtitles, ret['subtitles'])
        ret.update({
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'duration': duration,
            'timestamp': timestamp,
            'categories': categories,
        })
        if custom_fields:
            ret.update(custom_fields(entry))

        return ret

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        provider_id = mobj.group('provider_id')
        feed_id = mobj.group('feed_id')
        filter_query = mobj.group('filter')

        return self._extract_feed_info(provider_id, feed_id, filter_query, video_id)