# youtube_dl/extractor/vier.py
from __future__ import unicode_literals

import itertools
import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    unified_strdate,
    urlencode_postdata,
)
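
# Two extractors are defined below: VierIE resolves a single video from the
# vier.be / vijf.be programme pages (or from a bare /video/v3/embed/ page),
# while VierVideosIE turns a programme's /videos overview into a playlist.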


class VierIE(InfoExtractor):
    IE_DESC = 'vier.be and vijf.be'
    _VALID_URL = r'https?://(?:www\.)?(?P<site>vier|vijf)\.be/(?:[^/]+/videos/(?P<display_id>[^/]+)(?:/(?P<id>\d+))?|video/v3/embed/(?P<embed_id>\d+))'
    _NETRC_MACHINE = 'vier'
    _TESTS = [{
        'url': 'http://www.vier.be/planb/videos/het-wordt-warm-de-moestuin/16129',
        'md5': 'e4ae2054a6b040ef1e289e20d111b46e',
        'info_dict': {
            'id': '16129',
            'display_id': 'het-wordt-warm-de-moestuin',
            'ext': 'mp4',
            'title': 'Het wordt warm in De Moestuin',
            'description': 'De vele uren werk eisen hun tol. Wim droomt van assistentie...',
            'upload_date': '20121025',
            'tags': ['De Moestuin', 'Moestuin', 'meisjes', 'Tomaat', 'Wim', 'Droom'],
        },
    }, {
        'url': 'http://www.vijf.be/temptationisland/videos/zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas/2561614',
        'info_dict': {
            'id': '2561614',
            'display_id': 'zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas',
            'ext': 'mp4',
            'title': 'md5:84f45fe48b8c1fa296a7f6d208d080a7',
            'description': 'md5:0356d4981e58b8cbee19355cbd51a8fe',
            'upload_date': '20170228',
            'series': 'Temptation Island',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.vier.be/janigaat/videos/jani-gaat-naar-tokio-aflevering-4/2674839',
        'info_dict': {
            'id': '2674839',
            'display_id': 'jani-gaat-naar-tokio-aflevering-4',
            'ext': 'mp4',
            'title': 'Jani gaat naar Tokio - Aflevering 4',
            'description': 'md5:aa8d611541db6ae9e863125704511f88',
            'upload_date': '20170501',
            'series': 'Jani gaat',
            'episode_number': 4,
            'tags': ['Jani Gaat', 'Volledige Aflevering'],
        },
        'params': {
            'skip_download': True,
        },
        'skip': 'Requires account credentials',
    }, {
        # Requires account credentials, but extraction is bypassed via the
        # v3/embed page, which carries no metadata
        'url': 'http://www.vier.be/janigaat/videos/jani-gaat-naar-tokio-aflevering-4/2674839',
        'info_dict': {
            'id': '2674839',
            'display_id': 'jani-gaat-naar-tokio-aflevering-4',
            'ext': 'mp4',
            'title': 'jani-gaat-naar-tokio-aflevering-4',
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Log in to extract metadata'],
    }, {
        # Without video id in URL
        'url': 'http://www.vier.be/planb/videos/dit-najaar-plan-b',
        'only_matching': True,
    }, {
        'url': 'http://www.vier.be/video/v3/embed/16129',
        'only_matching': True,
    }]
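
    # Login state is tracked per extractor instance: _real_extract() only
    # attempts a login while _logged_in is still False, and _login() flips it
    # to True only after a successful POST.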
    def _real_initialize(self):
        self._logged_in = False
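
    # The login endpoint looks like a standard Drupal 'user_login' form at
    # /user/login; the 'name' and 'pass' field names below follow that
    # convention.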
    def _login(self, site):
        username, password = self._get_login_info()
        if username is None or password is None:
            return

        login_page = self._download_webpage(
            'http://www.%s.be/user/login' % site,
            None, note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata({
                'form_id': 'user_login',
                'name': username,
                'pass': password,
            }),
            headers={'Content-Type': 'application/x-www-form-urlencoded'})

        login_error = self._html_search_regex(
            r'(?s)<div class="messages error">\s*<div>\s*<h2.+?</h2>(.+?)<',
            login_page, 'login error', default=None)
        if login_error:
            self.report_warning('Unable to log in: %s' % login_error)
        else:
            self._logged_in = True
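
    # _VALID_URL matches both the full /<programme>/videos/<slug>/<id> pages
    # and the bare /video/v3/embed/<id> pages; in the latter case the embed id
    # doubles as both display id and video id.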
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        embed_id = mobj.group('embed_id')
        display_id = mobj.group('display_id') or embed_id
        video_id = mobj.group('id') or embed_id
        site = mobj.group('site')

        if not self._logged_in:
            self._login(site)

        webpage = self._download_webpage(url, display_id)
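
        # If the page still shows the login widget, this video needs an
        # account we do not have; fall back to the bare v3 embed page, which
        # exposes the stream parameters but hardly any metadata.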
        if r'id="user-login"' in webpage:
            self.report_warning(
                'Log in to extract metadata', video_id=display_id)
            webpage = self._download_webpage(
                'http://www.%s.be/video/v3/embed/%s' % (site, video_id),
                display_id)
        video_id = self._search_regex(
            [r'data-nid="(\d+)"', r'"nid"\s*:\s*"(\d+)"'],
            webpage, 'video id', default=video_id or display_id)
        application = self._search_regex(
            [r'data-application="([^"]+)"', r'"application"\s*:\s*"([^"]+)"'],
            webpage, 'application', default=site + '_vod')
        filename = self._search_regex(
            [r'data-filename="([^"]+)"', r'"filename"\s*:\s*"([^"]+)"'],
            webpage, 'filename')
        playlist_url = 'http://vod.streamcloud.be/%s/_definst_/mp4:%s.mp4/playlist.m3u8' % (application, filename)
        formats = self._extract_wowza_formats(
            playlist_url, display_id, skip_protocols=['dash'])
        self._sort_formats(formats)
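
        # Every metadata lookup below carries a default, so extraction still
        # succeeds on the stripped-down embed page.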
        title = self._og_search_title(webpage, default=display_id)
        description = self._html_search_regex(
            r'(?s)<div\b[^>]+\bclass=(["\'])[^>]*?\bfield-type-text-with-summary\b[^>]*?\1[^>]*>.*?<p>(?P<value>.+?)</p>',
            webpage, 'description', default=None, group='value')
        thumbnail = self._og_search_thumbnail(webpage, default=None)
        upload_date = unified_strdate(self._html_search_regex(
            r'(?s)<div\b[^>]+\bclass=(["\'])[^>]*?\bfield-name-post-date\b[^>]*?\1[^>]*>.*?(?P<value>\d{2}/\d{2}/\d{4})',
            webpage, 'upload date', default=None, group='value'))
        series = self._search_regex(
            r'data-program=(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
            'series', default=None, group='value')
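        # 'aflevering' is Dutch for 'episode'; the number is parsed out of the
        # title rather than the markup.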
        episode_number = int_or_none(self._search_regex(
            r'(?i)aflevering (\d+)', title, 'episode number', default=None))
        tags = re.findall(r'<a\b[^>]+\bhref=["\']/tags/[^>]+>([^<]+)<', webpage)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'series': series,
            'episode_number': episode_number,
            'tags': tags,
            'formats': formats,
        }


class VierVideosIE(InfoExtractor):
    IE_NAME = 'vier:videos'
    _VALID_URL = r'https?://(?:www\.)?(?P<site>vier|vijf)\.be/(?P<program>[^/]+)/videos(?:\?.*\bpage=(?P<page>\d+)|$)'
    _TESTS = [{
        'url': 'http://www.vier.be/demoestuin/videos',
        'info_dict': {
            'id': 'demoestuin',
        },
        'playlist_mincount': 153,
    }, {
        'url': 'http://www.vijf.be/temptationisland/videos',
        'info_dict': {
            'id': 'temptationisland',
        },
        'playlist_mincount': 159,
    }, {
        'url': 'http://www.vier.be/demoestuin/videos?page=6',
        'info_dict': {
            'id': 'demoestuin-page6',
        },
        'playlist_mincount': 20,
    }, {
        'url': 'http://www.vier.be/demoestuin/videos?page=7',
        'info_dict': {
            'id': 'demoestuin-page7',
        },
        'playlist_mincount': 13,
    }]
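
    # With an explicit ?page=N in the URL only that single page is extracted;
    # otherwise all pages are crawled until the '>Meer<' ("more") link
    # disappears from the markup.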
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        program = mobj.group('program')
        site = mobj.group('site')

        page_id = mobj.group('page')
        if page_id:
            page_id = int(page_id)
            start_page = page_id
            playlist_id = '%s-page%d' % (program, page_id)
        else:
            start_page = 0
            playlist_id = program

        entries = []
        for current_page_id in itertools.count(start_page):
            current_page = self._download_webpage(
                'http://www.%s.be/%s/videos?page=%d' % (site, program, current_page_id),
                program,
                'Downloading page %d' % (current_page_id + 1))
            page_entries = [
                self.url_result('http://www.' + site + '.be' + video_url, 'Vier')
                for video_url in re.findall(
                    r'<h[23]><a href="(/[^/]+/videos/[^/]+(?:/\d+)?)">', current_page)]
            entries.extend(page_entries)
            if page_id or '>Meer<' not in current_page:
                break

        return self.playlist_result(entries, playlist_id)