import re

from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
+from ..compat import compat_str
from ..utils import (
    determine_ext,
    dict_get,
    int_or_none,
-    orderedSet,
+    str_or_none,
    strip_or_none,
    try_get,
-    urljoin,
-    compat_str,
)
                self._adjust_title(info_dict)
                return info_dict

-        svt_id = self._search_regex(
-            r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
-            webpage, 'video id')
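+        # Prefer the video id exposed under statistics.dataLake.content in the embedded data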
+        svt_id = try_get(
+            data, lambda x: x['statistics']['dataLake']['content']['id'],
+            compat_str)
+
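+        # Fall back to scraping the video id out of the page markup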
+        if not svt_id:
+            svt_id = self._search_regex(
+                (r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
+                 r'["\']videoSvtId["\']\s*:\s*["\']([\da-zA-Z-]+)',
+                 r'"content"\s*:\s*{.*?"id"\s*:\s*"([\da-zA-Z-]+)"',
+                 r'["\']svtId["\']\s*:\s*["\']([\da-zA-Z-]+)'),
+                webpage, 'video id')

        return self._extract_by_video_id(svt_id, webpage)

class SVTSeriesIE(SVTPlayBaseIE):
-    _VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)'
+    _VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)(?:.+?\btab=(?P<season_slug>[^&#]+))?'
    _TESTS = [{
        'url': 'https://www.svtplay.se/rederiet',
        'info_dict': {
-            'id': 'rederiet',
+            'id': '14445680',
            'title': 'Rederiet',
-            'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
+            'description': 'md5:d9fdfff17f5d8f73468176ecd2836039',
        },
        'playlist_mincount': 318,
    }, {
-        'url': 'https://www.svtplay.se/rederiet?tab=sasong2',
+        'url': 'https://www.svtplay.se/rederiet?tab=season-2-14445680',
        'info_dict': {
-            'id': 'rederiet-sasong2',
+            'id': 'season-2-14445680',
            'title': 'Rederiet - Säsong 2',
-            'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
+            'description': 'md5:d9fdfff17f5d8f73468176ecd2836039',
        },
-        'playlist_count': 12,
+        'playlist_mincount': 12,
    }]

    @classmethod
    def suitable(cls, url):
        return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTSeriesIE, cls).suitable(url)

    def _real_extract(self, url):
-        series_id = self._match_id(url)
-
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
-        season_slug = qs.get('tab', [None])[0]
-
-        if season_slug:
-            series_id += '-%s' % season_slug
-
-        webpage = self._download_webpage(
-            url, series_id, 'Downloading series page')
-
-        root = self._parse_json(
-            self._search_regex(
-                self._SVTPLAY_RE, webpage, 'content', group='json'),
-            series_id)
+        series_slug, season_id = re.match(self._VALID_URL, url).groups()
+
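+        # Fetch the series, its seasons and their episode ids from SVT's contento GraphQL API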
+        series = self._download_json(
+            'https://api.svt.se/contento/graphql', series_slug,
+            'Downloading series page', query={
+                'query': '''{
+  listablesBySlug(slugs: ["%s"]) {
+    associatedContent(include: [productionPeriod, season]) {
+      items {
+        item {
+          ... on Episode {
+            videoSvtId
+          }
+        }
+      }
+      id
+      name
+    }
+    id
+    longDescription
+    name
+    shortDescription
+  }
+}''' % series_slug,
+            })['data']['listablesBySlug'][0]
        season_name = None

        entries = []
-        for season in root['relatedVideoContent']['relatedVideosAccordion']:
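+        # Walk the seasons and queue each episode by its videoSvtId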
+        for season in series['associatedContent']:
            if not isinstance(season, dict):
                continue
-            if season_slug:
-                if season.get('slug') != season_slug:
+            if season_id:
+                if season.get('id') != season_id:
                    continue
                season_name = season.get('name')
-            videos = season.get('videos')
-            if not isinstance(videos, list):
+            items = season.get('items')
+            if not isinstance(items, list):
                continue
-            for video in videos:
-                content_url = video.get('contentUrl')
-                if not content_url or not isinstance(content_url, compat_str):
+            for item in items:
+                video = item.get('item') or {}
+                content_id = video.get('videoSvtId')
+                if not content_id or not isinstance(content_id, compat_str):
                    continue
-                entries.append(
-                    self.url_result(
-                        urljoin(url, content_url),
-                        ie=SVTPlayIE.ie_key(),
-                        video_title=video.get('title')
-                    ))
-
-        metadata = root.get('metaData')
-        if not isinstance(metadata, dict):
-            metadata = {}
+                entries.append(self.url_result(
+                    'svt:' + content_id, SVTPlayIE.ie_key(), content_id))

-        title = metadata.get('title')
-        season_name = season_name or season_slug
+        title = series.get('name')
+        season_name = season_name or season_id
        if title and season_name:
            title = '%s - %s' % (title, season_name)
-        elif season_slug:
-            title = season_slug
+        elif season_id:
+            title = season_id

        return self.playlist_result(
-            entries, series_id, title, metadata.get('description'))
+            entries, season_id or series.get('id'), title,
+            dict_get(series, ('longDescription', 'shortDescription')))

class SVTPageIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?svt\.se/(?:[^/]+/)*(?P<id>[^/?&#]+)'
+    _VALID_URL = r'https?://(?:www\.)?svt\.se/(?P<path>(?:[^/]+/)*(?P<id>[^/?&#]+))'
    _TESTS = [{
-        'url': 'https://www.svt.se/sport/oseedat/guide-sommartraningen-du-kan-gora-var-och-nar-du-vill',
+        'url': 'https://www.svt.se/sport/ishockey/bakom-masken-lehners-kamp-mot-mental-ohalsa',
        'info_dict': {
-            'id': 'guide-sommartraningen-du-kan-gora-var-och-nar-du-vill',
-            'title': 'GUIDE: Sommarträning du kan göra var och när du vill',
+            'id': '25298267',
+            'title': 'Bakom masken – Lehners kamp mot mental ohälsa',
        },
-        'playlist_count': 7,
+        'playlist_count': 4,
    }, {
-        'url': 'https://www.svt.se/nyheter/inrikes/ebba-busch-thor-kd-har-delvis-ratt-om-no-go-zoner',
+        'url': 'https://www.svt.se/nyheter/utrikes/svenska-andrea-ar-en-mil-fran-branderna-i-kalifornien',
        'info_dict': {
-            'id': 'ebba-busch-thor-kd-har-delvis-ratt-om-no-go-zoner',
-            'title': 'Ebba Busch Thor har bara delvis rätt om ”no-go-zoner”',
+            'id': '24243746',
+            'title': 'Svenska Andrea redo att fly sitt hem i Kalifornien',
        },
-        'playlist_count': 1,
+        'playlist_count': 2,
    }, {
        # only programTitle
        'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun',
        'info_dict': {
-            'id': '2900353',
+            'id': '8439V2K',
            'ext': 'mp4',
            'title': 'Stjärnorna skojar till det - under SVT-intervjun',
            'duration': 27,
    @classmethod
    def suitable(cls, url):
        return False if SVTIE.suitable(url) else super(SVTPageIE, cls).suitable(url)

    def _real_extract(self, url):
-        playlist_id = self._match_id(url)
+        path, display_id = re.match(self._VALID_URL, url).groups()

-        webpage = self._download_webpage(url, playlist_id)
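+        # The article metadata, including its referenced videos, comes from SVT's nss-api page endpoint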
+        article = self._download_json(
+            'https://api.svt.se/nss-api/page/' + path, display_id,
+            query={'q': 'articles'})['articles']['content'][0]

-        entries = [
-            self.url_result(
-                'svt:%s' % video_id, ie=SVTPlayIE.ie_key(), video_id=video_id)
-            for video_id in orderedSet(re.findall(
-                r'data-video-id=["\'](\d+)', webpage))]
+        entries = []

-        title = strip_or_none(self._og_search_title(webpage, default=None))
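+        # Collect every video clip or episode referenced by the article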
+        def _process_content(content):
+            if content.get('_type') in ('VIDEOCLIP', 'VIDEOEPISODE'):
+                video_id = compat_str(content['image']['svtId'])
+                entries.append(self.url_result(
+                    'svt:' + video_id, SVTPlayIE.ie_key(), video_id))
-        return self.playlist_result(entries, playlist_id, title)
+
+        for media in article.get('media', []):
+            _process_content(media)
+
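+        # Videos can be referenced both in the top-level media list and inside structuredBody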
+        for obj in article.get('structuredBody', []):
+            _process_content(obj.get('content') or {})
+
+        return self.playlist_result(
+            entries, str_or_none(article.get('id')),
+            strip_or_none(article.get('title')))