]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/vier.py
Initiate new release.
[youtubedl] / youtube_dl / extractor / vier.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5 import itertools
6
7 from .common import InfoExtractor
8 from ..utils import (
9 urlencode_postdata,
10 int_or_none,
11 unified_strdate,
12 )
13
14
class VierIE(InfoExtractor):
    """Extractor for individual videos on vier.be and vijf.be.

    Supports both regular video pages (slug plus optional numeric id) and
    bare ``/video/v3/embed/<id>`` pages.  Some videos are gated behind an
    account; when no session is available, extraction falls back to the
    embed page, which yields formats but no metadata.
    """
    IE_NAME = 'vier'
    IE_DESC = 'vier.be and vijf.be'
    # Either a display page ('.../videos/<display_id>[/<id>]') or an embed
    # URL that carries only the numeric id ('.../video/v3/embed/<embed_id>').
    _VALID_URL = r'https?://(?:www\.)?(?P<site>vier|vijf)\.be/(?:[^/]+/videos/(?P<display_id>[^/]+)(?:/(?P<id>\d+))?|video/v3/embed/(?P<embed_id>\d+))'
    _NETRC_MACHINE = 'vier'
    _TESTS = [{
        'url': 'http://www.vier.be/planb/videos/het-wordt-warm-de-moestuin/16129',
        'md5': 'e4ae2054a6b040ef1e289e20d111b46e',
        'info_dict': {
            'id': '16129',
            'display_id': 'het-wordt-warm-de-moestuin',
            'ext': 'mp4',
            'title': 'Het wordt warm in De Moestuin',
            'description': 'De vele uren werk eisen hun tol. Wim droomt van assistentie...',
            'upload_date': '20121025',
            'series': 'Plan B',
            'tags': ['De Moestuin', 'Moestuin', 'meisjes', 'Tomaat', 'Wim', 'Droom'],
        },
    }, {
        'url': 'http://www.vijf.be/temptationisland/videos/zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas/2561614',
        'info_dict': {
            'id': '2561614',
            'display_id': 'zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas',
            'ext': 'mp4',
            'title': 'md5:84f45fe48b8c1fa296a7f6d208d080a7',
            'description': 'md5:0356d4981e58b8cbee19355cbd51a8fe',
            'upload_date': '20170228',
            'series': 'Temptation Island',
            'tags': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.vier.be/janigaat/videos/jani-gaat-naar-tokio-aflevering-4/2674839',
        'info_dict': {
            'id': '2674839',
            'display_id': 'jani-gaat-naar-tokio-aflevering-4',
            'ext': 'mp4',
            'title': 'Jani gaat naar Tokio - Aflevering 4',
            'description': 'md5:aa8d611541db6ae9e863125704511f88',
            'upload_date': '20170501',
            'series': 'Jani gaat',
            'episode_number': 4,
            'tags': ['Jani Gaat', 'Volledige Aflevering'],
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Requires account credentials',
    }, {
        # Requires account credentials but bypassed extraction via v3/embed page
        # without metadata
        'url': 'http://www.vier.be/janigaat/videos/jani-gaat-naar-tokio-aflevering-4/2674839',
        'info_dict': {
            'id': '2674839',
            'display_id': 'jani-gaat-naar-tokio-aflevering-4',
            'ext': 'mp4',
            'title': 'jani-gaat-naar-tokio-aflevering-4',
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Log in to extract metadata'],
    }, {
        # Without video id in URL
        'url': 'http://www.vier.be/planb/videos/dit-najaar-plan-b',
        'only_matching': True,
    }, {
        'url': 'http://www.vier.be/video/v3/embed/16129',
        'only_matching': True,
    }]

    def _real_initialize(self):
        # Tracks whether a login already succeeded so _real_extract attempts
        # it at most once per extractor instance.
        self._logged_in = False

    def _login(self, site):
        """Best-effort login to www.<site>.be with configured credentials.

        Returns silently when no username/password is configured.  A failed
        login only emits a warning; on success sets ``self._logged_in``.
        """
        username, password = self._get_login_info()
        if username is None or password is None:
            return

        login_page = self._download_webpage(
            'http://www.%s.be/user/login' % site,
            None, note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata({
                'form_id': 'user_login',
                'name': username,
                'pass': password,
            }),
            headers={'Content-Type': 'application/x-www-form-urlencoded'})

        # The site reports login failures inside a "messages error" div;
        # absence of that div is taken to mean the login succeeded.
        login_error = self._html_search_regex(
            r'(?s)<div class="messages error">\s*<div>\s*<h2.+?</h2>(.+?)<',
            login_page, 'login error', default=None)
        if login_error:
            self.report_warning('Unable to log in: %s' % login_error)
        else:
            self._logged_in = True

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # For embed URLs, the embed id doubles as both display and video id.
        embed_id = mobj.group('embed_id')
        display_id = mobj.group('display_id') or embed_id
        video_id = mobj.group('id') or embed_id
        site = mobj.group('site')

        # Attempt a login once per instance (no-op without credentials).
        if not self._logged_in:
            self._login(site)

        webpage = self._download_webpage(url, display_id)

        # A login form on the page means the video is account-gated; fall
        # back to the embed page, which serves formats but no metadata.
        if r'id="user-login"' in webpage:
            self.report_warning(
                'Log in to extract metadata', video_id=display_id)
            webpage = self._download_webpage(
                'http://www.%s.be/video/v3/embed/%s' % (site, video_id),
                display_id)

        # Prefer ids/settings embedded in the page over the URL-derived ones.
        video_id = self._search_regex(
            [r'data-nid="(\d+)"', r'"nid"\s*:\s*"(\d+)"'],
            webpage, 'video id', default=video_id or display_id)
        application = self._search_regex(
            [r'data-application="([^"]+)"', r'"application"\s*:\s*"([^"]+)"'],
            webpage, 'application', default=site + '_vod')
        filename = self._search_regex(
            [r'data-filename="([^"]+)"', r'"filename"\s*:\s*"([^"]+)"'],
            webpage, 'filename')

        # Formats are served by a Wowza streaming server; DASH is skipped.
        playlist_url = 'http://vod.streamcloud.be/%s/_definst_/mp4:%s.mp4/playlist.m3u8' % (application, filename)
        formats = self._extract_wowza_formats(
            playlist_url, display_id, skip_protocols=['dash'])
        self._sort_formats(formats)

        # Metadata is best-effort: every field below tolerates absence
        # (embed pages carry none of it).
        title = self._og_search_title(webpage, default=display_id)
        description = self._html_search_regex(
            r'(?s)<div\b[^>]+\bclass=(["\'])[^>]*?\bfield-type-text-with-summary\b[^>]*?\1[^>]*>.*?<p>(?P<value>.+?)</p>',
            webpage, 'description', default=None, group='value')
        thumbnail = self._og_search_thumbnail(webpage, default=None)
        upload_date = unified_strdate(self._html_search_regex(
            r'(?s)<div\b[^>]+\bclass=(["\'])[^>]*?\bfield-name-post-date\b[^>]*?\1[^>]*>.*?(?P<value>\d{2}/\d{2}/\d{4})',
            webpage, 'upload date', default=None, group='value'))

        series = self._search_regex(
            r'data-program=(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
            'series', default=None, group='value')
        # Episode number is parsed from titles like 'Aflevering 4' (Dutch).
        episode_number = int_or_none(self._search_regex(
            r'(?i)aflevering (\d+)', title, 'episode number', default=None))
        tags = re.findall(r'<a\b[^>]+\bhref=["\']/tags/[^>]+>([^<]+)<', webpage)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'series': series,
            'episode_number': episode_number,
            'tags': tags,
            'formats': formats,
        }
176
177
class VierVideosIE(InfoExtractor):
    """Playlist extractor for a program's video listing on vier.be/vijf.be.

    Without an explicit ``page`` query parameter every listing page is
    crawled; with one, only that single page is extracted.
    """
    IE_NAME = 'vier:videos'
    _VALID_URL = r'https?://(?:www\.)?(?P<site>vier|vijf)\.be/(?P<program>[^/]+)/videos(?:\?.*\bpage=(?P<page>\d+)|$)'
    _TESTS = [{
        'url': 'http://www.vier.be/demoestuin/videos',
        'info_dict': {
            'id': 'demoestuin',
        },
        'playlist_mincount': 153,
    }, {
        'url': 'http://www.vijf.be/temptationisland/videos',
        'info_dict': {
            'id': 'temptationisland',
        },
        'playlist_mincount': 159,
    }, {
        'url': 'http://www.vier.be/demoestuin/videos?page=6',
        'info_dict': {
            'id': 'demoestuin-page6',
        },
        'playlist_mincount': 20,
    }, {
        'url': 'http://www.vier.be/demoestuin/videos?page=7',
        'info_dict': {
            'id': 'demoestuin-page7',
        },
        'playlist_mincount': 13,
    }]

    def _real_extract(self, url):
        match = re.match(self._VALID_URL, url)
        site = match.group('site')
        program = match.group('program')

        page = match.group('page')
        if page is not None:
            page = int(page)
            first_page = page
            playlist_id = '%s-page%d' % (program, page)
        else:
            first_page = 0
            playlist_id = program

        entries = []
        for page_num in itertools.count(first_page):
            webpage = self._download_webpage(
                'http://www.%s.be/%s/videos?page=%d' % (site, program, page_num),
                program,
                'Downloading page %d' % (page_num + 1))
            for video_path in re.findall(
                    r'<h[23]><a href="(/[^/]+/videos/[^/]+(?:/\d+)?)">', webpage):
                entries.append(self.url_result(
                    'http://www.' + site + '.be' + video_path, 'Vier'))
            # Stop after one page when an explicit page was requested, or
            # when no 'Meer' (more) link remains.  NOTE(review): the int
            # truthiness check means ?page=0 keeps paginating — preserved
            # from the original behavior.
            if page or '>Meer<' not in webpage:
                break

        return self.playlist_result(entries, playlist_id)