youtube_dl/extractor/sandia.py
# coding: utf-8
from __future__ import unicode_literals

import itertools
import json
import re

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    int_or_none,
    js_to_json,
    mimetype2ext,
    sanitized_Request,
    unified_strdate,
)


class SandiaIE(InfoExtractor):
    IE_DESC = 'Sandia National Laboratories'
    _VALID_URL = r'https?://digitalops\.sandia\.gov/Mediasite/Play/(?P<id>[0-9a-f]+)'
    _TEST = {
        'url': 'http://digitalops.sandia.gov/Mediasite/Play/24aace4429fc450fb5b38cdbf424a66e1d',
        'md5': '9422edc9b9a60151727e4b6d8bef393d',
        'info_dict': {
            'id': '24aace4429fc450fb5b38cdbf424a66e1d',
            'ext': 'mp4',
            'title': 'Xyce Software Training - Section 1',
            'description': 're:(?s)SAND Number: SAND 2013-7800.{200,}',
            'upload_date': '20120904',
            'duration': 7794,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

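        # Request the player page with a MediasitePlayerCaps cookie advertising
        # plugin support; presumably this makes Mediasite serve a page that
        # links the presentation JS scraped below.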
        req = sanitized_Request(url)
        req.add_header('Cookie', 'MediasitePlayerCaps=ClientPlugins=4')
        webpage = self._download_webpage(req, video_id)

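        # The page embeds a presentation-specific script that assigns the
        # playback manifest to Mediasite.PlaybackManifest.* properties.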
        js_path = self._search_regex(
            r'<script type="text/javascript" src="(/Mediasite/FileServer/Presentation/[^"]+)"',
            webpage, 'JS code URL')
        js_url = compat_urlparse.urljoin(url, js_path)

        js_code = self._download_webpage(
            js_url, video_id, note='Downloading player')

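        # extract_str pulls the right-hand side of a
        # 'Mediasite.PlaybackManifest.<key> = <value>;' assignment out of the
        # player JS; extract_data additionally parses that value as JSON
        # (relaxed via js_to_json).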
        def extract_str(key, **args):
            return self._search_regex(
                r'Mediasite\.PlaybackManifest\.%s\s*=\s*(.+);\s*?\n' % re.escape(key),
                js_code, key, **args)

        def extract_data(key, **args):
            data_json = extract_str(key, **args)
            if data_json is None:
                return data_json
            return self._parse_json(
                data_json, video_id, transform_source=js_to_json)

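        # Enumerate VideoUrls[0], VideoUrls[1], ... until an index is missing;
        # fragmented MP4 variants are handed to the f4m downloader.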
        formats = []
        for i in itertools.count():
            fd = extract_data('VideoUrls[%d]' % i, default=None)
            if fd is None:
                break
            formats.append({
                'format_id': '%s' % i,
                'format_note': fd['MimeType'].partition('/')[2],
                'ext': mimetype2ext(fd['MimeType']),
                'url': fd['Location'],
                'protocol': 'f4m' if fd['MimeType'] == 'video/x-mp4-fragmented' else None,
            })
        self._sort_formats(formats)

        slide_baseurl = compat_urlparse.urljoin(
            url, extract_data('SlideBaseUrl'))
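        # SlideImageFileNameTemplate uses a .NET-style numeric placeholder such
        # as '{0:D4}'; rewrite it into a printf-style '%04d' template.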
        slide_template = slide_baseurl + re.sub(
            r'\{0:D?([0-9+])\}', r'%0\1d', extract_data('SlideImageFileNameTemplate'))
        slides = []
        last_slide_time = 0
        for i in itertools.count(1):
            sd = extract_str('Slides[%d]' % i, default=None)
            if sd is None:
                break
            timestamp = int_or_none(self._search_regex(
                r'^Mediasite\.PlaybackManifest\.CreateSlide\("[^"]*"\s*,\s*([0-9]+),',
                sd, 'slide %s timestamp' % i, fatal=False))
            slides.append({
                'url': slide_template % i,
                'duration': timestamp - last_slide_time,
            })
            last_slide_time = timestamp
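        # Expose the slide sequence as a synthetic 'slideshow' format whose URL
        # field carries the slide list as JSON; heavily deprioritized because
        # no downloader handles it yet.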
        formats.append({
            'format_id': 'slides',
            'protocol': 'slideshow',
            'url': json.dumps(slides),
            'preference': -10000,  # Downloader not yet written
        })
        self._sort_formats(formats)

        title = extract_data('Title')
        description = extract_data('Description', fatal=False)
        duration = int_or_none(extract_data(
            'Duration', fatal=False), scale=1000)
        upload_date = unified_strdate(extract_data('AirDate', fatal=False))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
            'upload_date': upload_date,
            'duration': duration,
        }