# youtube_dl/extractor/revision3.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    int_or_none,
    parse_iso8601,
    qualities,
    unescapeHTML,
)
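

# Embed player URLs on revision3.com, animalist.com and seekernetwork.com,
# plus the internal 'revision3:<id>' URLs emitted by Revision3IE below.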
class Revision3EmbedIE(InfoExtractor):
    IE_NAME = 'revision3:embed'
    _VALID_URL = r'(?:revision3:(?:(?P<playlist_type>[^:]+):)?|https?://(?:(?:(?:www|embed)\.)?(?:revision3|animalist)|(?:(?:api|embed)\.)?seekernetwork)\.com/player/embed\?videoId=)(?P<playlist_id>\d+)'
    _TEST = {
        'url': 'http://api.seekernetwork.com/player/embed?videoId=67558',
        'md5': '83bcd157cab89ad7318dd7b8c9cf1306',
        'info_dict': {
            'id': '67558',
            'ext': 'mp4',
            'title': 'The Pros & Cons Of Zoos',
            'description': 'Zoos are often depicted as a terrible place for animals to live, but is there any truth to this?',
            'uploader_id': 'dnews',
        },
    }
    _API_KEY = 'ba9c741bce1b9d8e3defcc22193f3651b8867e62'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('playlist_id')
        playlist_type = mobj.group('playlist_type') or 'video_id'
        video_data = self._download_json(
            'http://revision3.com/api/getPlaylist.json', playlist_id, query={
                'api_key': self._API_KEY,
                'codecs': 'h264,vp8,theora',
                playlist_type: playlist_id,
            })['items'][0]
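
        # Build the format list: HLS manifests are expanded into their
        # individual formats, everything else is added as a direct URL
        # with its bitrate.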
        formats = []
        for vcodec, media in video_data['media'].items():
            for quality_id, quality in media.items():
                if quality_id == 'hls':
                    formats.extend(self._extract_m3u8_formats(
                        quality['url'], playlist_id, 'mp4',
                        'm3u8_native', m3u8_id='hls', fatal=False))
                else:
                    formats.append({
                        'url': quality['url'],
                        'format_id': '%s-%s' % (vcodec, quality_id),
                        'tbr': int_or_none(quality.get('bitrate')),
                    })
        self._sort_formats(formats)

        return {
            'id': playlist_id,
            'title': unescapeHTML(video_data['title']),
            'description': unescapeHTML(video_data.get('summary')),
            'uploader': video_data.get('show', {}).get('name'),
            'uploader_id': video_data.get('show', {}).get('slug'),
            'duration': int_or_none(video_data.get('duration')),
            'formats': formats,
        }
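

# Full pages on revision3.com and animalist.com: episode and embed pages
# resolve to a single video, while any other page type is expanded into a
# paginated playlist of episodes.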
class Revision3IE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:revision3|animalist)\.com)/(?P<id>[^/]+(?:/[^/?#]+)?)'
    _TESTS = [{
        'url': 'http://www.revision3.com/technobuffalo/5-google-predictions-for-2016',
        'md5': 'd94a72d85d0a829766de4deb8daaf7df',
        'info_dict': {
            'display_id': 'technobuffalo/5-google-predictions-for-2016',
            'title': '5 Google Predictions for 2016',
            'description': 'Google had a great 2015, but it\'s already time to look ahead. Here are our five predictions for 2016.',
            'upload_date': '20151228',
            'timestamp': 1451325600,
            'uploader': 'TechnoBuffalo',
            'uploader_id': 'technobuffalo',
        },
    }, {
        'url': 'http://revision3.com/variant',
        'only_matching': True,
    }, {
        'url': 'http://revision3.com/vr',
        'only_matching': True,
    }]
    _PAGE_DATA_TEMPLATE = 'http://www.%s/apiProxy/ddn/%s?domain=%s'

    def _real_extract(self, url):
        domain, display_id = re.match(self._VALID_URL, url).groups()
        site = domain.split('.')[0]
        page_info = self._download_json(
            self._PAGE_DATA_TEMPLATE % (domain, display_id, domain), display_id)
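
        # The apiProxy response carries the page type, which decides between
        # single-video extraction and building a playlist of episodes.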
        page_data = page_info['data']
        page_type = page_data['type']
        if page_type in ('episode', 'embed'):
            show_data = page_data['show']['data']
            page_id = compat_str(page_data['id'])
            video_id = compat_str(page_data['video']['data']['id'])

            preference = qualities(['mini', 'small', 'medium', 'large'])
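            # Thumbnail variants are keyed by size name; qualities() ranks
            # them so that larger images get a higher preference.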
            thumbnails = [{
                'url': image_url,
                'id': image_id,
                'preference': preference(image_id),
            } for image_id, image_url in page_data.get('images', {}).items()]

            info = {
                'id': page_id,
                'display_id': display_id,
                'title': unescapeHTML(page_data['name']),
                'description': unescapeHTML(page_data.get('summary')),
                'timestamp': parse_iso8601(page_data.get('publishTime'), ' '),
                'author': page_data.get('author'),
                'uploader': show_data.get('name'),
                'uploader_id': show_data.get('slug'),
                'thumbnails': thumbnails,
                'extractor_key': site,
            }
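
            # Embed pages point at an external player URL; regular episodes
            # are handed off to Revision3EmbedIE via the revision3: pseudo-URL.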
            if page_type == 'embed':
                info.update({
                    '_type': 'url_transparent',
                    'url': page_data['video']['data']['embed'],
                })
            else:
                info.update({
                    '_type': 'url_transparent',
                    'url': 'revision3:%s' % video_id,
                })
            return info
        else:
            list_data = page_info[page_type]['data']
            episodes_data = page_info['episodes']['data']
            num_episodes = page_info['meta']['totalEpisodes']
            processed_episodes = 0
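            # The listing is paginated: keep requesting
            # _PAGE_DATA_TEMPLATE with '<display_id>/<page_num>' until every
            # episode reported by meta.totalEpisodes has been collected.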
            entries = []
            page_num = 1
            while True:
                entries.extend([{
                    '_type': 'url',
                    'url': 'http://%s%s' % (domain, episode['path']),
                    'id': compat_str(episode['id']),
                    'ie_key': 'Revision3',
                    'extractor_key': site,
                } for episode in episodes_data])
                processed_episodes += len(episodes_data)
                if processed_episodes == num_episodes:
                    break
                page_num += 1
                episodes_data = self._download_json(self._PAGE_DATA_TEMPLATE % (
                    domain, display_id + '/' + compat_str(page_num), domain),
                    display_id)['episodes']['data']

            return self.playlist_result(
                entries, compat_str(list_data['id']),
                list_data.get('name'), list_data.get('summary'))