from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    float_or_none,
    int_or_none,
)


class ViddlerIE(InfoExtractor):
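    # Covers /v/, /embed/ and /player/ pages; access-protected videos append a
    # numeric "secret" query parameter, captured as the second group below.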
    _VALID_URL = r'https?://(?:www\.)?viddler\.com/(?:v|embed|player)/(?P<id>[a-z0-9]+)(?:.+?\bsecret=(\d+))?'
    _TESTS = [{
        'url': 'http://www.viddler.com/v/43903784',
        'md5': '9eee21161d2c7f5b39690c3e325fab2f',
        'info_dict': {
            'id': '43903784',
            'ext': 'mov',
            'title': 'Video Made Easy',
            'description': 'md5:6a697ebd844ff3093bd2e82c37b409cd',
            'uploader': 'viddler',
            'timestamp': 1335371429,
            'upload_date': '20120425',
            'duration': 100.89,
            'thumbnail': r're:^https?://.*\.jpg$',
            'view_count': int,
            'comment_count': int,
            'categories': ['video content', 'high quality video', 'video made easy', 'how to produce video with limited resources', 'viddler'],
        }
    }, {
        'url': 'http://www.viddler.com/v/4d03aad9/',
        'md5': 'f12c5a7fa839c47a79363bfdf69404fb',
        'info_dict': {
            'id': '4d03aad9',
            'ext': 'ts',
            'title': 'WALL-TO-GORTAT',
            'upload_date': '20150126',
            'uploader': 'deadspin',
            'timestamp': 1422285291,
            'view_count': int,
            'comment_count': int,
        }
    }, {
        'url': 'http://www.viddler.com/player/221ebbbd/0/',
        'md5': '740511f61d3d1bb71dc14a0fe01a1c10',
        'info_dict': {
            'id': '221ebbbd',
            'ext': 'mov',
            'title': 'LETeens-Grammar-snack-third-conditional',
            'description': ' ',
            'upload_date': '20140929',
            'uploader': 'BCLETeens',
            'timestamp': 1411997190,
            'view_count': int,
            'comment_count': int,
        }
    }, {
        # secret protected
        'url': 'http://www.viddler.com/v/890c0985?secret=34051570',
        'info_dict': {
            'id': '890c0985',
            'ext': 'mp4',
            'title': 'Complete Property Training - Traineeships',
            'description': ' ',
            'upload_date': '20130606',
            'uploader': 'TiffanyBowtell',
            'timestamp': 1370496993,
            'view_count': int,
            'comment_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id, secret = re.match(self._VALID_URL, url).groups()

        query = {
            'video_id': video_id,
            'key': 'v0vhrt7bg2xq1vyxhkct',
        }
        if secret:
            query['secret'] = secret

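        # Playback details are fetched from Viddler's JSON API with the page
        # URL as Referer; the relevant metadata sits under the 'video' key.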
        data = self._download_json(
            'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json',
            video_id, headers={'Referer': url}, query=query)['video']

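        # Each ready file can yield up to three formats: the direct download,
        # a CDN mirror and an HTML5 source, ranked via source_preference.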
        formats = []
        for filed in data['files']:
            if filed.get('status', 'ready') != 'ready':
                continue
            format_id = filed.get('profile_id') or filed['profile_name']
            f = {
                'format_id': format_id,
                'format_note': filed['profile_name'],
                'url': self._proto_relative_url(filed['url']),
                'width': int_or_none(filed.get('width')),
                'height': int_or_none(filed.get('height')),
                'filesize': int_or_none(filed.get('size')),
                'ext': filed.get('ext'),
                'source_preference': -1,
            }
            formats.append(f)

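            # CDN-hosted copy of the same file, preferred over the direct URL.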
            if filed.get('cdn_url'):
                f = f.copy()
                f['url'] = self._proto_relative_url(filed['cdn_url'], 'http:')
                f['format_id'] = format_id + '-cdn'
                f['source_preference'] = 1
                formats.append(f)

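            # HTML5 source ranks between the direct URL and the CDN copy.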
            if filed.get('html5_video_source'):
                f = f.copy()
                f['url'] = self._proto_relative_url(filed['html5_video_source'])
                f['format_id'] = format_id + '-html5'
                f['source_preference'] = 0
                formats.append(f)
        self._sort_formats(formats)

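        # Tags double as categories; keep only entries that carry text.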
        categories = [
            t.get('text') for t in data.get('tags', []) if 'text' in t]

        return {
            'id': video_id,
            'title': data['title'],
            'formats': formats,
            'description': data.get('description'),
            'timestamp': int_or_none(data.get('upload_time')),
            'thumbnail': self._proto_relative_url(data.get('thumbnail_url')),
            'uploader': data.get('author'),
            'duration': float_or_none(data.get('length')),
            'view_count': int_or_none(data.get('view_count')),
            'comment_count': int_or_none(data.get('comment_count')),
            'categories': categories,
        }