+version 2020.05.08
+
+Core
+* [downloader/http] Request last data block of exact remaining size
+* [downloader/http] Finish downloading once received data length matches
+ expected
+* [extractor/common] Use compat_cookiejar_Cookie for _set_cookie to always
+ ensure cookie name and value are bytestrings on python 2 (#23256, #24776)
++ [compat] Introduce compat_cookiejar_Cookie
+* [utils] Improve cookie files support
+ + Add support for UTF-8 in cookie files
+ * Skip malformed cookie file entries instead of crashing (invalid entry
+ length, invalid expires at)
+
+Extractors
+* [youtube] Improve signature cipher extraction (#25187, #25188)
+* [iprima] Improve extraction (#25138)
+* [uol] Fix extraction (#22007)
++ [orf] Add support for more radio stations (#24938, #24968)
+* [dailymotion] Fix typo
+- [puhutv] Remove no longer available HTTP formats (#25124)
+
+
+version 2020.05.03
+
+Core
++ [extractor/common] Extract multiple JSON-LD entries
+* [options] Clarify doc on --exec command (#19087, #24883)
+* [extractor/common] Skip malformed ISM manifest XMLs while extracting
+ ISM formats (#24667)
+
+Extractors
+* [crunchyroll] Fix and improve extraction (#25096, #25060)
+* [youtube] Improve player id extraction
+* [youtube] Use redirected video id if any (#25063)
+* [yahoo] Fix GYAO Player extraction and relax URL regular expression
+ (#24178, #24778)
+* [tvplay] Fix Viafree extraction (#15189, #24473, #24789)
+* [tenplay] Relax URL regular expression (#25001)
++ [prosiebensat1] Extract series metadata
+* [prosiebensat1] Improve extraction and remove 7tv.de support (#24948)
+* [youtube] Fix DRM videos detection (#24736)
+* [thisoldhouse] Fix video id extraction (#24548, #24549)
++ [soundcloud] Extract AAC format (#19173, #24708)
+* [youtube] Skip broken multifeed videos (#24711)
+* [nova:embed] Fix extraction (#24700)
+* [motherless] Fix extraction (#24699)
+* [twitch:clips] Extend URL regular expression (#24290, #24642)
+* [tv4] Fix ISM formats extraction (#24667)
+* [tele5] Fix extraction (#24553)
++ [mofosex] Add support for generic embeds (#24633)
++ [youporn] Add support for generic embeds
++ [spankwire] Add support for generic embeds (#24633)
+* [spankwire] Fix extraction (#18924, #20648)
+
+
version 2020.03.24
Core
either the path to the binary or its
containing directory.
--exec CMD Execute a command on the file after
- downloading, similar to find's -exec
- syntax. Example: --exec 'adb push {}
- /sdcard/Music/ && rm {}'
+ downloading and post-processing, similar to
+ find's -exec syntax. Example: --exec 'adb
+ push {} /sdcard/Music/ && rm {}'
--convert-subs FORMAT Convert the subtitles to other format
(currently supported: srt|ass|vtt|lrc)
- **MNetTV**
- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
- **Mofosex**
+ - **MofosexEmbed**
- **Mojvideo**
- **Morningstar**: morningstar.com
- **Motherless**
- **Ooyala**
- **OoyalaExternal**
- **OraTV**
+ - **orf:burgenland**: Radio Burgenland
- **orf:fm4**: radio FM4
- **orf:fm4:story**: fm4.orf.at stories
- **orf:iptv**: iptv.ORF.at
+ - **orf:kaernten**: Radio Kärnten
+ - **orf:noe**: Radio Niederösterreich
+ - **orf:oberoesterreich**: Radio Oberösterreich
- **orf:oe1**: Radio Österreich 1
+ - **orf:oe3**: Radio Österreich 3
+ - **orf:salzburg**: Radio Salzburg
+ - **orf:steiermark**: Radio Steiermark
+ - **orf:tirol**: Radio Tirol
- **orf:tvthek**: ORF TVthek
+ - **orf:vorarlberg**: Radio Vorarlberg
+ - **orf:wien**: Radio Wien
- **OsnatelTV**
- **OutsideTV**
- **PacktPub**
assert_cookie_has_value('HTTPONLY_COOKIE')
assert_cookie_has_value('JS_ACCESSIBLE_COOKIE')
+ def test_malformed_cookies(self):
+ cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/malformed_cookies.txt')
+ cookiejar.load(ignore_discard=True, ignore_expires=True)
+ # Cookies should be empty since all malformed cookie file entries
+ # will be ignored
+ self.assertFalse(cookiejar._cookies)
+
if __name__ == '__main__':
unittest.main()
]
+class TestPlayerInfo(unittest.TestCase):
+ def test_youtube_extract_player_info(self):
+ PLAYER_URLS = (
+ ('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/en_US/base.js', '64dddad9'),
+ # obsolete
+ ('https://www.youtube.com/yts/jsbin/player_ias-vfle4-e03/en_US/base.js', 'vfle4-e03'),
+ ('https://www.youtube.com/yts/jsbin/player_ias-vfl49f_g4/en_US/base.js', 'vfl49f_g4'),
+ ('https://www.youtube.com/yts/jsbin/player_ias-vflCPQUIL/en_US/base.js', 'vflCPQUIL'),
+ ('https://www.youtube.com/yts/jsbin/player-vflzQZbt7/en_US/base.js', 'vflzQZbt7'),
+ ('https://www.youtube.com/yts/jsbin/player-en_US-vflaxXRn1/base.js', 'vflaxXRn1'),
+ ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', 'vflXGBaUN'),
+ ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', 'vflKjOTVq'),
+ ('http://s.ytimg.com/yt/swfbin/watch_as3-vflrEm9Nq.swf', 'vflrEm9Nq'),
+ ('https://s.ytimg.com/yts/swfbin/player-vflenCdZL/watch_as3.swf', 'vflenCdZL'),
+ )
+ for player_url, expected_player_id in PLAYER_URLS:
+ expected_player_type = player_url.split('.')[-1]
+ player_type, player_id = YoutubeIE._extract_player_info(player_url)
+ self.assertEqual(player_type, expected_player_type)
+ self.assertEqual(player_id, expected_player_id)
+
+
class TestSignature(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
--- /dev/null
+# Netscape HTTP Cookie File
+# http://curl.haxx.se/rfc/cookie_spec.html
+# This is a generated file! Do not edit.
+
+# Cookie file entry with invalid number of fields - 6 instead of 7
+www.foobar.foobar FALSE / FALSE 0 COOKIE
+
+# Cookie file entry with invalid expires at
+www.foobar.foobar FALSE / FALSE 1.7976931348623157e+308 COOKIE VALUE
.RE
.TP
.B \-\-exec \f[I]CMD\f[]
-Execute a command on the file after downloading, similar to find\[aq]s
-\-exec syntax.
+Execute a command on the file after downloading and post\-processing,
+similar to find\[aq]s \-exec syntax.
Example: \-\-exec \[aq]adb push {} /sdcard/Music/ && rm {}\[aq]
.RS
.RE
complete --command youtube-dl --long-option prefer-avconv --description 'Prefer avconv over ffmpeg for running the postprocessors'
complete --command youtube-dl --long-option prefer-ffmpeg --description 'Prefer ffmpeg over avconv for running the postprocessors (default)'
complete --command youtube-dl --long-option ffmpeg-location --description 'Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.'
-complete --command youtube-dl --long-option exec --description 'Execute a command on the file after downloading, similar to find'"'"'s -exec syntax. Example: --exec '"'"'adb push {} /sdcard/Music/ && rm {}'"'"''
+complete --command youtube-dl --long-option exec --description 'Execute a command on the file after downloading and post-processing, similar to find'"'"'s -exec syntax. Example: --exec '"'"'adb push {} /sdcard/Music/ && rm {}'"'"''
complete --command youtube-dl --long-option convert-subs --description 'Convert the subtitles to other format (currently supported: srt|ass|vtt|lrc)'
except ImportError: # Python 2
import cookielib as compat_cookiejar
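+# On Python 2, cookielib expects cookie names and values to be
+# bytestrings; this shim encodes unicode input transparently so that
+# _set_cookie callers need not care (#23256, #24776).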
+if sys.version_info[0] == 2:
+ class compat_cookiejar_Cookie(compat_cookiejar.Cookie):
+ def __init__(self, version, name, value, *args, **kwargs):
+ if isinstance(name, compat_str):
+ name = name.encode()
+ if isinstance(value, compat_str):
+ value = value.encode()
+ compat_cookiejar.Cookie.__init__(self, version, name, value, *args, **kwargs)
+else:
+ compat_cookiejar_Cookie = compat_cookiejar.Cookie
+
try:
import http.cookies as compat_cookies
except ImportError: # Python 2
'compat_basestring',
'compat_chr',
'compat_cookiejar',
+ 'compat_cookiejar_Cookie',
'compat_cookies',
'compat_ctypes_WINFUNCTYPE',
'compat_etree_Element',
while True:
try:
# Download and write
- data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
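+ # Request the last data block with the exact remaining size when the
+ # expected length is known, rather than a full-size block.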
+ data_block = ctx.data.read(block_size if data_len is None else min(block_size, data_len - byte_counter))
# socket.timeout is a subclass of socket.error but may not have
# errno set
except socket.timeout as e:
'elapsed': now - ctx.start_time,
})
- if is_test and byte_counter == data_len:
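+ # Finish the download once the received length matches the expected
+ # one; previously this early exit applied only to test downloads.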
+ if data_len is not None and byte_counter == data_len:
break
if not is_test and ctx.chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:
import math
from ..compat import (
- compat_cookiejar,
+ compat_cookiejar_Cookie,
compat_cookies,
compat_etree_Element,
compat_etree_fromstring,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
- json_ld = self._search_regex(
- JSON_LD_RE, html, 'JSON-LD', group='json_ld', **kwargs)
+ json_ld_list = list(re.finditer(JSON_LD_RE, html))
default = kwargs.get('default', NO_DEFAULT)
- if not json_ld:
- return default if default is not NO_DEFAULT else {}
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
- return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
+ json_ld = []
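+ # Collect every JSON-LD script on the page and flatten top-level
+ # arrays so that all entries are considered.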
+ for mobj in json_ld_list:
+ json_ld_item = self._parse_json(
+ mobj.group('json_ld'), video_id, fatal=fatal)
+ if not json_ld_item:
+ continue
+ if isinstance(json_ld_item, dict):
+ json_ld.append(json_ld_item)
+ elif isinstance(json_ld_item, (list, tuple)):
+ json_ld.extend(json_ld_item)
+ if json_ld:
+ json_ld = self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
+ if json_ld:
+ return json_ld
+ if default is not NO_DEFAULT:
+ return default
+ elif fatal:
+ raise RegexNotFoundError('Unable to extract JSON-LD')
+ else:
+ self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
+ return {}
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
extract_interaction_statistic(e)
for e in json_ld:
- if isinstance(e.get('@context'), compat_str) and re.match(r'^https?://schema.org/?$', e.get('@context')):
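+ # Any declared @context marks the entry as JSON-LD; the previous
+ # check for the literal schema.org URL was too strict.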
+ if '@context' in e:
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
- return info
+ continue
if item_type in ('TVEpisode', 'Episode'):
episode_name = unescapeHTML(e.get('name'))
info.update({
})
elif item_type == 'VideoObject':
extract_video_object(e)
- continue
+ if expected_type is None:
+ continue
+ else:
+ break
video = e.get('video')
if isinstance(video, dict) and video.get('@type') == 'VideoObject':
extract_video_object(video)
- break
+ if expected_type is None:
+ continue
+ else:
+ break
return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
if res is False:
return []
ism_doc, urlh = res
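+ # A malformed ISM manifest XML is parsed to None; skip it instead of
+ # crashing (#24667).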
+ if ism_doc is None:
+ return []
return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
path='/', secure=False, discard=False, rest={}, **kwargs):
- cookie = compat_cookiejar.Cookie(
+ cookie = compat_cookiejar_Cookie(
0, name, value, port, port is not None, domain, True,
domain.startswith('.'), path, True, secure, expire_time,
discard, None, None, rest)
compat_b64decode,
compat_etree_Element,
compat_etree_fromstring,
+ compat_str,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
intlist_to_bytes,
int_or_none,
lowercase_escape,
+ merge_dicts,
remove_end,
sanitized_Request,
- unified_strdate,
urlencode_postdata,
xpath_text,
)
# rtmp
'skip_download': True,
},
+ 'skip': 'Video gone',
}, {
'url': 'http://www.crunchyroll.com/media-589804/culture-japan-1',
'info_dict': {
'info_dict': {
'id': '702409',
'ext': 'mp4',
- 'title': 'Re:ZERO -Starting Life in Another World- Episode 5 – The Morning of Our Promise Is Still Distant',
- 'description': 'md5:97664de1ab24bbf77a9c01918cb7dca9',
+ 'title': compat_str,
+ 'description': compat_str,
'thumbnail': r're:^https?://.*\.jpg$',
- 'uploader': 'TV TOKYO',
- 'upload_date': '20160508',
+ 'uploader': 'Re:Zero Partners',
+ 'timestamp': 1462098900,
+ 'upload_date': '20160501',
},
'params': {
# m3u8 download
'info_dict': {
'id': '727589',
'ext': 'mp4',
- 'title': "KONOSUBA -God's blessing on this wonderful world! 2 Episode 1 – Give Me Deliverance From This Judicial Injustice!",
- 'description': 'md5:cbcf05e528124b0f3a0a419fc805ea7d',
+ 'title': compat_str,
+ 'description': compat_str,
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'Kadokawa Pictures Inc.',
- 'upload_date': '20170118',
- 'series': "KONOSUBA -God's blessing on this wonderful world!",
+ 'timestamp': 1484130900,
+ 'upload_date': '20170111',
+ 'series': compat_str,
'season': "KONOSUBA -God's blessing on this wonderful world! 2",
'season_number': 2,
'episode': 'Give Me Deliverance From This Judicial Injustice!',
'info_dict': {
'id': '535080',
'ext': 'mp4',
- 'title': '11eyes Episode 1 – Red Night ~ Piros éjszaka',
- 'description': 'Kakeru and Yuka are thrown into an alternate nightmarish world they call "Red Night".',
+ 'title': compat_str,
+ 'description': compat_str,
'uploader': 'Marvelous AQL Inc.',
- 'upload_date': '20091021',
+ 'timestamp': 1255512600,
+ 'upload_date': '20091014',
},
'params': {
# Just test metadata extraction
# just test metadata extraction
'skip_download': True,
},
+ 'skip': 'Video gone',
}, {
# A video with a vastly different season name compared to the series name
'url': 'http://www.crunchyroll.com/nyarko-san-another-crawling-chaos/episode-1-test-590532',
'info_dict': {
'id': '590532',
'ext': 'mp4',
- 'title': 'Haiyoru! Nyaruani (ONA) Episode 1 – Test',
- 'description': 'Mahiro and Nyaruko talk about official certification.',
+ 'title': compat_str,
+ 'description': compat_str,
'uploader': 'TV TOKYO',
+ 'timestamp': 1330956000,
'upload_date': '20120305',
'series': 'Nyarko-san: Another Crawling Chaos',
'season': 'Haiyoru! Nyaruani (ONA)',
webpage, 'language', default=None, group='lang')
video_title = self._html_search_regex(
- r'(?s)<h1[^>]*>((?:(?!<h1).)*?<span[^>]+itemprop=["\']title["\'][^>]*>(?:(?!<h1).)+?)</h1>',
- webpage, 'video_title')
+ (r'(?s)<h1[^>]*>((?:(?!<h1).)*?<(?:span[^>]+itemprop=["\']title["\']|meta[^>]+itemprop=["\']position["\'])[^>]*>(?:(?!<h1).)+?)</h1>',
+ r'<title>(.+?),\s+-\s+.+? Crunchyroll'),
+ webpage, 'video_title', default=None)
+ if not video_title:
+ video_title = re.sub(r'^Watch\s+', '', self._og_search_description(webpage))
video_title = re.sub(r' {2,}', ' ', video_title)
video_description = (self._parse_json(self._html_search_regex(
r'<script[^>]*>\s*.+?\[media_id=%s\].+?({.+?"description"\s*:.+?})\);' % video_id,
webpage, 'description', default='{}'), video_id) or media_metadata).get('description')
if video_description:
video_description = lowercase_escape(video_description.replace(r'\r\n', '\n'))
- video_upload_date = self._html_search_regex(
- [r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'],
- webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
- if video_upload_date:
- video_upload_date = unified_strdate(video_upload_date)
video_uploader = self._html_search_regex(
# try looking for both an uploader that's a link and one that's not
[r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', r'<div>\s*Publisher:\s*<span>\s*(.+?)\s*</span>\s*</div>'],
- webpage, 'video_uploader', fatal=False)
+ webpage, 'video_uploader', default=False)
formats = []
for stream in media.get('streams', []):
r'(?s)<h\d[^>]+id=["\']showmedia_about_episode_num[^>]+>.+?</h\d>\s*<h4>\s*Season (\d+)',
webpage, 'season number', default=None))
- return {
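+ # The brittle upload-date scraping is dropped; JSON-LD supplies the
+ # timestamp, and merge_dicts keeps the scraped values while filling
+ # the gaps from JSON-LD.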
+ info = self._search_json_ld(webpage, video_id, default={})
+
+ return merge_dicts({
'id': video_id,
'title': video_title,
'description': video_description,
'duration': duration,
'thumbnail': thumbnail,
'uploader': video_uploader,
- 'upload_date': video_upload_date,
'series': series,
'season': season,
'season_number': season_number,
'episode_number': episode_number,
'subtitles': subtitles,
'formats': formats,
- }
+ }, info)
class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
@staticmethod
def _get_cookie_value(cookies, name):
- cookie = cookies.get('name')
+ cookie = cookies.get(name)
if cookie:
return cookie.value
from .mlb import MLBIE
from .mnet import MnetIE
from .moevideo import MoeVideoIE
-from .mofosex import MofosexIE
+from .mofosex import (
+ MofosexIE,
+ MofosexEmbedIE,
+)
from .mojvideo import MojvideoIE
from .morningstar import MorningstarIE
from .motherless import (
ORFFM4IE,
ORFFM4StoryIE,
ORFOE1IE,
+ ORFOE3IE,
+ ORFNOEIE,
+ ORFWIEIE,
+ ORFBGLIE,
+ ORFOOEIE,
+ ORFSTMIE,
+ ORFKTNIE,
+ ORFSBGIE,
+ ORFTIRIE,
+ ORFVBGIE,
ORFIPTVIE,
)
from .outsidetv import OutsideTVIE
from .drtuber import DrTuberIE
from .redtube import RedTubeIE
from .tube8 import Tube8IE
+from .mofosex import MofosexEmbedIE
+from .spankwire import SpankwireIE
+from .youporn import YouPornIE
from .vimeo import VimeoIE
from .dailymotion import DailymotionIE
from .dailymail import DailyMailIE
if tube8_urls:
return self.playlist_from_matches(tube8_urls, video_id, video_title, ie=Tube8IE.ie_key())
+ # Look for embedded Mofosex player
+ mofosex_urls = MofosexEmbedIE._extract_urls(webpage)
+ if mofosex_urls:
+ return self.playlist_from_matches(mofosex_urls, video_id, video_title, ie=MofosexEmbedIE.ie_key())
+
+ # Look for embedded Spankwire player
+ spankwire_urls = SpankwireIE._extract_urls(webpage)
+ if spankwire_urls:
+ return self.playlist_from_matches(spankwire_urls, video_id, video_title, ie=SpankwireIE.ie_key())
+
+ # Look for embedded YouPorn player
+ youporn_urls = YouPornIE._extract_urls(webpage)
+ if youporn_urls:
+ return self.playlist_from_matches(youporn_urls, video_id, video_title, ie=YouPornIE.ie_key())
+
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
_GEO_BYPASS = False
_TESTS = [{
- 'url': 'http://play.iprima.cz/gondici-s-r-o-33',
+ 'url': 'https://prima.iprima.cz/particka/92-epizoda',
'info_dict': {
- 'id': 'p136534',
+ 'id': 'p51388',
'ext': 'mp4',
- 'title': 'Gondíci s. r. o. (34)',
- 'description': 'md5:16577c629d006aa91f59ca8d8e7f99bd',
+ 'title': 'Partička (92)',
+ 'description': 'md5:859d53beae4609e6dd7796413f1b6cac',
+ },
+ 'params': {
+ 'skip_download': True, # m3u8 download
+ },
+ }, {
+ 'url': 'https://cnn.iprima.cz/videa/70-epizoda',
+ 'info_dict': {
+ 'id': 'p681554',
+ 'ext': 'mp4',
+ 'title': 'HLAVNÍ ZPRÁVY 3.5.2020',
},
'params': {
'skip_download': True, # m3u8 download
webpage = self._download_webpage(url, video_id)
+ title = self._og_search_title(
+ webpage, default=None) or self._search_regex(
+ r'<h1>([^<]+)', webpage, 'title')
+
video_id = self._search_regex(
(r'<iframe[^>]+\bsrc=["\'](?:https?:)?//(?:api\.play-backend\.iprima\.cz/prehravac/embedded|prima\.iprima\.cz/[^/]+/[^/]+)\?.*?\bid=(p\d+)',
- r'data-product="([^"]+)">'),
+ r'data-product="([^"]+)">',
+ r'id=["\']player-(p\d+)"',
+ r'playerId\s*:\s*["\']player-(p\d+)'),
webpage, 'real id')
playerpage = self._download_webpage(
return {
'id': video_id,
- 'title': self._og_search_title(webpage),
- 'thumbnail': self._og_search_thumbnail(webpage),
+ 'title': title,
+ 'thumbnail': self._og_search_thumbnail(webpage, default=None),
'formats': formats,
- 'description': self._og_search_description(webpage),
+ 'description': self._og_search_description(webpage, default=None),
}
from __future__ import unicode_literals
+import re
+
+from .common import InfoExtractor
from ..utils import (
int_or_none,
str_to_int,
})
return info
+
+
+class MofosexEmbedIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?mofosex\.com/embed/?\?.*?\bvideoid=(?P<id>\d+)'
+ _TESTS = [{
+ 'url': 'https://www.mofosex.com/embed/?videoid=318131&referrer=KM',
+ 'only_matching': True,
+ }]
+
+ @staticmethod
+ def _extract_urls(webpage):
+ return re.findall(
+ r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?mofosex\.com/embed/?\?.*?\bvideoid=\d+)',
+ webpage)
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ return self.url_result(
+ 'http://www.mofosex.com/videos/{0}/{0}.html'.format(video_id),
+ ie=MofosexIE.ie_key(), video_id=video_id)
'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
'upload_date': '20100913',
'uploader_id': 'famouslyfuckedup',
- 'thumbnail': r're:http://.*\.jpg',
+ 'thumbnail': r're:https?://.*\.jpg',
'age_limit': 18,
}
}, {
'game', 'hairy'],
'upload_date': '20140622',
'uploader_id': 'Sulivana7x',
- 'thumbnail': r're:http://.*\.jpg',
+ 'thumbnail': r're:https?://.*\.jpg',
'age_limit': 18,
},
'skip': '404',
'categories': ['superheroine heroine superher'],
'upload_date': '20140827',
'uploader_id': 'shade0230',
- 'thumbnail': r're:http://.*\.jpg',
+ 'thumbnail': r're:https?://.*\.jpg',
'age_limit': 18,
}
}, {
raise ExtractorError('Video %s is for friends only' % video_id, expected=True)
title = self._html_search_regex(
- r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
+ (r'(?s)<div[^>]+\bclass=["\']media-meta-title[^>]+>(.+?)</div>',
+ r'id="view-upload-title">\s+([^<]+)<'), webpage, 'title')
video_url = (self._html_search_regex(
(r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
age_limit = self._rta_search(webpage)
view_count = str_to_int(self._html_search_regex(
- r'<strong>Views</strong>\s+([^<]+)<',
+ (r'>(\d+)\s+Views<', r'<strong>Views</strong>\s+([^<]+)<'),
webpage, 'view count', fatal=False))
like_count = str_to_int(self._html_search_regex(
- r'<strong>Favorited</strong>\s+([^<]+)<',
+ (r'>(\d+)\s+Favorites<', r'<strong>Favorited</strong>\s+([^<]+)<'),
webpage, 'like count', fatal=False))
upload_date = self._html_search_regex(
- r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
+ (r'class=["\']count[^>]+>(\d+\s+[a-zA-Z]{3}\s+\d{4})<',
+ r'<strong>Uploaded</strong>\s+([^<]+)<'), webpage, 'upload date')
if 'Ago' in upload_date:
days = int(re.search(r'([0-9]+)', upload_date).group(1))
upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')
from .common import InfoExtractor
from ..utils import (
clean_html,
+ determine_ext,
int_or_none,
js_to_json,
qualities,
webpage = self._download_webpage(url, video_id)
- bitrates = self._parse_json(
+ duration = None
+ formats = []
+
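+ # Newer embed pages initialise the player via Player.init(...); the
+ # legacy src/bitrates object is kept below as a fallback.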
+ player = self._parse_json(
self._search_regex(
- r'(?s)(?:src|bitrates)\s*=\s*({.+?})\s*;', webpage, 'formats'),
- video_id, transform_source=js_to_json)
+ r'Player\.init\s*\([^,]+,\s*({.+?})\s*,\s*{.+?}\s*\)\s*;',
+ webpage, 'player', default='{}'), video_id, fatal=False)
+ if player:
+ for format_id, format_list in player['tracks'].items():
+ if not isinstance(format_list, list):
+ format_list = [format_list]
+ for format_dict in format_list:
+ if not isinstance(format_dict, dict):
+ continue
+ format_url = url_or_none(format_dict.get('src'))
+ format_type = format_dict.get('type')
+ ext = determine_ext(format_url)
+ if (format_type == 'application/x-mpegURL'
+ or format_id == 'HLS' or ext == 'm3u8'):
+ formats.extend(self._extract_m3u8_formats(
+ format_url, video_id, 'mp4',
+ entry_protocol='m3u8_native', m3u8_id='hls',
+ fatal=False))
+ elif (format_type == 'application/dash+xml'
+ or format_id == 'DASH' or ext == 'mpd'):
+ formats.extend(self._extract_mpd_formats(
+ format_url, video_id, mpd_id='dash', fatal=False))
+ else:
+ formats.append({
+ 'url': format_url,
+ })
+ duration = int_or_none(player.get('duration'))
+ else:
+ # Old code path, no longer used as of 08.04.2020
+ bitrates = self._parse_json(
+ self._search_regex(
+ r'(?s)(?:src|bitrates)\s*=\s*({.+?})\s*;', webpage, 'formats'),
+ video_id, transform_source=js_to_json)
- QUALITIES = ('lq', 'mq', 'hq', 'hd')
- quality_key = qualities(QUALITIES)
+ QUALITIES = ('lq', 'mq', 'hq', 'hd')
+ quality_key = qualities(QUALITIES)
+
+ for format_id, format_list in bitrates.items():
+ if not isinstance(format_list, list):
+ format_list = [format_list]
+ for format_url in format_list:
+ format_url = url_or_none(format_url)
+ if not format_url:
+ continue
+ if format_id == 'hls':
+ formats.extend(self._extract_m3u8_formats(
+ format_url, video_id, ext='mp4',
+ entry_protocol='m3u8_native', m3u8_id='hls',
+ fatal=False))
+ continue
+ f = {
+ 'url': format_url,
+ }
+ f_id = format_id
+ for quality in QUALITIES:
+ if '%s.mp4' % quality in format_url:
+ f_id += '-%s' % quality
+ f.update({
+ 'quality': quality_key(quality),
+ 'format_note': quality.upper(),
+ })
+ break
+ f['format_id'] = f_id
+ formats.append(f)
- formats = []
- for format_id, format_list in bitrates.items():
- if not isinstance(format_list, list):
- format_list = [format_list]
- for format_url in format_list:
- format_url = url_or_none(format_url)
- if not format_url:
- continue
- if format_id == 'hls':
- formats.extend(self._extract_m3u8_formats(
- format_url, video_id, ext='mp4',
- entry_protocol='m3u8_native', m3u8_id='hls',
- fatal=False))
- continue
- f = {
- 'url': format_url,
- }
- f_id = format_id
- for quality in QUALITIES:
- if '%s.mp4' % quality in format_url:
- f_id += '-%s' % quality
- f.update({
- 'quality': quality_key(quality),
- 'format_note': quality.upper(),
- })
- break
- f['format_id'] = f_id
- formats.append(f)
self._sort_formats(formats)
title = self._og_search_title(
r'poster\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
'thumbnail', fatal=False, group='value')
duration = int_or_none(self._search_regex(
- r'videoDuration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
+ r'videoDuration\s*:\s*(\d+)', webpage, 'duration',
+ default=duration))
return {
'id': video_id,
class ORFRadioIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- station = mobj.group('station')
show_date = mobj.group('date')
show_id = mobj.group('show')
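+ # The broadcast API and the loopstream CDN identify stations by
+ # different codes (e.g. Radio Wien is 'wie' vs 'oe2w'), hence the
+ # separate _API_STATION and _LOOP_STATION class attributes.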
data = self._download_json(
'http://audioapi.orf.at/%s/api/json/current/broadcast/%s/%s'
- % (station, show_id, show_date), show_id)
+ % (self._API_STATION, show_id, show_date), show_id)
entries = []
for info in data['streams']:
duration = end - start if end and start else None
entries.append({
'id': loop_stream_id.replace('.mp3', ''),
- 'url': 'http://loopstream01.apa.at/?channel=%s&id=%s' % (station, loop_stream_id),
+ 'url': 'http://loopstream01.apa.at/?channel=%s&id=%s' % (self._LOOP_STATION, loop_stream_id),
'title': title,
'description': clean_html(data.get('subtitle')),
'duration': duration,
IE_NAME = 'orf:fm4'
IE_DESC = 'radio FM4'
_VALID_URL = r'https?://(?P<station>fm4)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>4\w+)'
+ _API_STATION = 'fm4'
+ _LOOP_STATION = 'fm4'
_TEST = {
'url': 'http://fm4.orf.at/player/20170107/4CC',
}
+class ORFNOEIE(ORFRadioIE):
+ IE_NAME = 'orf:noe'
+ IE_DESC = 'Radio Niederösterreich'
+ _VALID_URL = r'https?://(?P<station>noe)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+ _API_STATION = 'noe'
+ _LOOP_STATION = 'oe2n'
+
+ _TEST = {
+ 'url': 'https://noe.orf.at/player/20200423/NGM',
+ 'only_matching': True,
+ }
+
+
+class ORFWIEIE(ORFRadioIE):
+ IE_NAME = 'orf:wien'
+ IE_DESC = 'Radio Wien'
+ _VALID_URL = r'https?://(?P<station>wien)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+ _API_STATION = 'wie'
+ _LOOP_STATION = 'oe2w'
+
+ _TEST = {
+ 'url': 'https://wien.orf.at/player/20200423/WGUM',
+ 'only_matching': True,
+ }
+
+
+class ORFBGLIE(ORFRadioIE):
+ IE_NAME = 'orf:burgenland'
+ IE_DESC = 'Radio Burgenland'
+ _VALID_URL = r'https?://(?P<station>burgenland)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+ _API_STATION = 'bgl'
+ _LOOP_STATION = 'oe2b'
+
+ _TEST = {
+ 'url': 'https://burgenland.orf.at/player/20200423/BGM',
+ 'only_matching': True,
+ }
+
+
+class ORFOOEIE(ORFRadioIE):
+ IE_NAME = 'orf:oberoesterreich'
+ IE_DESC = 'Radio Oberösterreich'
+ _VALID_URL = r'https?://(?P<station>ooe)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+ _API_STATION = 'ooe'
+ _LOOP_STATION = 'oe2o'
+
+ _TEST = {
+ 'url': 'https://ooe.orf.at/player/20200423/OGMO',
+ 'only_matching': True,
+ }
+
+
+class ORFSTMIE(ORFRadioIE):
+ IE_NAME = 'orf:steiermark'
+ IE_DESC = 'Radio Steiermark'
+ _VALID_URL = r'https?://(?P<station>steiermark)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+ _API_STATION = 'stm'
+ _LOOP_STATION = 'oe2st'
+
+ _TEST = {
+ 'url': 'https://steiermark.orf.at/player/20200423/STGMS',
+ 'only_matching': True,
+ }
+
+
+class ORFKTNIE(ORFRadioIE):
+ IE_NAME = 'orf:kaernten'
+ IE_DESC = 'Radio Kärnten'
+ _VALID_URL = r'https?://(?P<station>kaernten)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+ _API_STATION = 'ktn'
+ _LOOP_STATION = 'oe2k'
+
+ _TEST = {
+ 'url': 'https://kaernten.orf.at/player/20200423/KGUMO',
+ 'only_matching': True,
+ }
+
+
+class ORFSBGIE(ORFRadioIE):
+ IE_NAME = 'orf:salzburg'
+ IE_DESC = 'Radio Salzburg'
+ _VALID_URL = r'https?://(?P<station>salzburg)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+ _API_STATION = 'sbg'
+ _LOOP_STATION = 'oe2s'
+
+ _TEST = {
+ 'url': 'https://salzburg.orf.at/player/20200423/SGUM',
+ 'only_matching': True,
+ }
+
+
+class ORFTIRIE(ORFRadioIE):
+ IE_NAME = 'orf:tirol'
+ IE_DESC = 'Radio Tirol'
+ _VALID_URL = r'https?://(?P<station>tirol)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+ _API_STATION = 'tir'
+ _LOOP_STATION = 'oe2t'
+
+ _TEST = {
+ 'url': 'https://tirol.orf.at/player/20200423/TGUMO',
+ 'only_matching': True,
+ }
+
+
+class ORFVBGIE(ORFRadioIE):
+ IE_NAME = 'orf:vorarlberg'
+ IE_DESC = 'Radio Vorarlberg'
+ _VALID_URL = r'https?://(?P<station>vorarlberg)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+ _API_STATION = 'vbg'
+ _LOOP_STATION = 'oe2v'
+
+ _TEST = {
+ 'url': 'https://vorarlberg.orf.at/player/20200423/VGUM',
+ 'only_matching': True,
+ }
+
+
+class ORFOE3IE(ORFRadioIE):
+ IE_NAME = 'orf:oe3'
+ IE_DESC = 'Radio Österreich 3'
+ _VALID_URL = r'https?://(?P<station>oe3)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+ _API_STATION = 'oe3'
+ _LOOP_STATION = 'oe3'
+
+ _TEST = {
+ 'url': 'https://oe3.orf.at/player/20200424/3WEK',
+ 'only_matching': True,
+ }
+
+
class ORFOE1IE(ORFRadioIE):
IE_NAME = 'orf:oe1'
IE_DESC = 'Radio Österreich 1'
_VALID_URL = r'https?://(?P<station>oe1)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+ _API_STATION = 'oe1'
+ _LOOP_STATION = 'oe1'
_TEST = {
'url': 'http://oe1.orf.at/player/20170108/456544',
determine_ext,
float_or_none,
int_or_none,
+ merge_dicts,
unified_strdate,
)
(?:
(?:beta\.)?
(?:
- prosieben(?:maxx)?|sixx|sat1(?:gold)?|kabeleins(?:doku)?|the-voice-of-germany|7tv|advopedia
+ prosieben(?:maxx)?|sixx|sat1(?:gold)?|kabeleins(?:doku)?|the-voice-of-germany|advopedia
)\.(?:de|at|ch)|
ran\.de|fem\.com|advopedia\.de|galileo\.tv/video
)
'info_dict': {
'id': '2104602',
'ext': 'mp4',
- 'title': 'Episode 18 - Staffel 2',
+ 'title': 'CIRCUS HALLIGALLI - Episode 18 - Staffel 2',
'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
'upload_date': '20131231',
'duration': 5845.04,
+ 'series': 'CIRCUS HALLIGALLI',
+ 'season_number': 2,
+ 'episode': 'Episode 18 - Staffel 2',
+ 'episode_number': 18,
},
},
{
'info_dict': {
'id': '2572814',
'ext': 'mp4',
- 'title': 'Andreas Kümmert: Rocket Man',
+ 'title': 'The Voice of Germany - Andreas Kümmert: Rocket Man',
'description': 'md5:6ddb02b0781c6adf778afea606652e38',
+ 'timestamp': 1382041620,
'upload_date': '20131017',
'duration': 469.88,
},
},
},
{
- 'url': 'http://www.fem.com/wellness/videos/wellness-video-clip-kurztripps-zum-valentinstag.html',
+ 'url': 'http://www.fem.com/videos/beauty-lifestyle/kurztrips-zum-valentinstag',
'info_dict': {
'id': '2156342',
'ext': 'mp4',
'playlist_count': 2,
'skip': 'This video is unavailable',
},
- {
- 'url': 'http://www.7tv.de/circus-halligalli/615-best-of-circus-halligalli-ganze-folge',
- 'info_dict': {
- 'id': '4187506',
- 'ext': 'mp4',
- 'title': 'Best of Circus HalliGalli',
- 'description': 'md5:8849752efd90b9772c9db6fdf87fb9e9',
- 'upload_date': '20151229',
- },
- 'params': {
- 'skip_download': True,
- },
- },
{
# title in <h2 class="subtitle">
'url': 'http://www.prosieben.de/stars/oscar-award/videos/jetzt-erst-enthuellt-das-geheimnis-von-emma-stones-oscar-robe-clip',
r'<div[^>]+id="veeseoDescription"[^>]*>(.+?)</div>',
]
_UPLOAD_DATE_REGEXES = [
- r'<meta property="og:published_time" content="(.+?)">',
r'<span>\s*(\d{2}\.\d{2}\.\d{4} \d{2}:\d{2}) \|\s*<span itemprop="duration"',
r'<footer>\s*(\d{2}\.\d{2}\.\d{4}) \d{2}:\d{2} Uhr',
r'<span style="padding-left: 4px;line-height:20px; color:#404040">(\d{2}\.\d{2}\.\d{4})</span>',
if description is None:
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
- upload_date = unified_strdate(self._html_search_regex(
- self._UPLOAD_DATE_REGEXES, webpage, 'upload date', default=None))
+ upload_date = unified_strdate(
+ self._html_search_meta('og:published_time', webpage,
+ 'upload date', default=None)
+ or self._html_search_regex(self._UPLOAD_DATE_REGEXES,
+ webpage, 'upload date', default=None))
+
+ json_ld = self._search_json_ld(webpage, clip_id, default={})
- info.update({
+ return merge_dicts(info, {
'id': clip_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
- })
- return info
+ }, json_ld)
def _extract_playlist(self, url, webpage):
playlist_id = self._html_search_regex(
urls = []
formats = []
- def add_http_from_hls(m3u8_f):
- http_url = m3u8_f['url'].replace('/hls/', '/mp4/').replace('/chunklist.m3u8', '.mp4')
- if http_url != m3u8_f['url']:
- f = m3u8_f.copy()
- f.update({
- 'format_id': f['format_id'].replace('hls', 'http'),
- 'protocol': 'http',
- 'url': http_url,
- })
- formats.append(f)
-
for video in videos['data']['videos']:
media_url = url_or_none(video.get('url'))
if not media_url or media_url in urls:
playlist = video.get('is_playlist')
if (video.get('stream_type') == 'hls' and playlist is True) or 'playlist.m3u8' in media_url:
- m3u8_formats = self._extract_m3u8_formats(
+ formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', entry_protocol='m3u8_native',
- m3u8_id='hls', fatal=False)
- for m3u8_f in m3u8_formats:
- formats.append(m3u8_f)
- add_http_from_hls(m3u8_f)
+ m3u8_id='hls', fatal=False))
continue
quality = int_or_none(video.get('quality'))
format_id += '-%sp' % quality
f['format_id'] = format_id
formats.append(f)
- if is_hls:
- add_http_from_hls(f)
self._sort_formats(formats)
creator = try_get(
'comment_count': int,
'repost_count': int,
},
- }
+ },
+ {
+ # with AAC HQ format available via OAuth token
+ 'url': 'https://soundcloud.com/wandw/the-chainsmokers-ft-daya-dont-let-me-down-ww-remix-1',
+ 'only_matching': True,
+ },
]
_API_V2_BASE = 'https://api-v2.soundcloud.com/'
format_id_list = []
if protocol:
format_id_list.append(protocol)
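+ # The AAC HQ transcoding carries no bitrate in the API response;
+ # the 256 kbps value is hardcoded here and normalised to an int below.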
+ ext = f.get('ext')
+ if ext == 'aac':
+ f['abr'] = '256'
for k in ('ext', 'abr'):
v = f.get(k)
if v:
abr = f.get('abr')
if abr:
f['abr'] = int(abr)
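+ # AAC streams delivered over HLS are handed to the generic m3u8
+ # (ffmpeg) protocol rather than the native HLS downloader.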
+ if protocol == 'hls':
+ protocol = 'm3u8' if ext == 'aac' else 'm3u8_native'
+ else:
+ protocol = 'http'
f.update({
'format_id': '_'.join(format_id_list),
- 'protocol': 'm3u8_native' if protocol == 'hls' else 'http',
+ 'protocol': protocol,
'preference': -10 if preview else None,
})
formats.append(f)
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse_unquote,
- compat_urllib_parse_urlparse,
-)
from ..utils import (
- sanitized_Request,
+ float_or_none,
+ int_or_none,
+ merge_dicts,
+ str_or_none,
str_to_int,
- unified_strdate,
+ url_or_none,
)
-from ..aes import aes_decrypt_text
class SpankwireIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<id>[0-9]+)/?)'
+ _VALID_URL = r'''(?x)
+ https?://
+ (?:www\.)?spankwire\.com/
+ (?:
+ [^/]+/video|
+ EmbedPlayer\.aspx/?\?.*?\bArticleId=
+ )
+ (?P<id>\d+)
+ '''
_TESTS = [{
# download URL pattern: */<height>P_<tbr>K_<video_id>.mp4
'url': 'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/',
- 'md5': '8bbfde12b101204b39e4b9fe7eb67095',
+ 'md5': '5aa0e4feef20aad82cbcae3aed7ab7cd',
'info_dict': {
'id': '103545',
'ext': 'mp4',
'title': 'Buckcherry`s X Rated Music Video Crazy Bitch',
'description': 'Crazy Bitch X rated music video.',
+ 'duration': 222,
'uploader': 'oreusz',
'uploader_id': '124697',
- 'upload_date': '20070507',
+ 'timestamp': 1178587885,
+ 'upload_date': '20070508',
+ 'average_rating': float,
+ 'view_count': int,
+ 'comment_count': int,
'age_limit': 18,
- }
+ 'categories': list,
+ 'tags': list,
+ },
}, {
# download URL pattern: */mp4_<format_id>_<video_id>.mp4
'url': 'http://www.spankwire.com/Titcums-Compiloation-I/video1921551/',
'upload_date': '20150822',
'age_limit': 18,
},
+ 'params': {
+ 'proxy': '127.0.0.1:8118'
+ },
+ 'skip': 'removed',
+ }, {
+ 'url': 'https://www.spankwire.com/EmbedPlayer.aspx/?ArticleId=156156&autostart=true',
+ 'only_matching': True,
}]
+ @staticmethod
+ def _extract_urls(webpage):
+ return re.findall(
+ r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?spankwire\.com/EmbedPlayer\.aspx/?\?.*?\bArticleId=\d+)',
+ webpage)
+
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- req = sanitized_Request('http://www.' + mobj.group('url'))
- req.add_header('Cookie', 'age_verified=1')
- webpage = self._download_webpage(req, video_id)
-
- title = self._html_search_regex(
- r'<h1>([^<]+)', webpage, 'title')
- description = self._html_search_regex(
- r'(?s)<div\s+id="descriptionContent">(.+?)</div>',
- webpage, 'description', fatal=False)
- thumbnail = self._html_search_regex(
- r'playerData\.screenShot\s*=\s*["\']([^"\']+)["\']',
- webpage, 'thumbnail', fatal=False)
-
- uploader = self._html_search_regex(
- r'by:\s*<a [^>]*>(.+?)</a>',
- webpage, 'uploader', fatal=False)
- uploader_id = self._html_search_regex(
- r'by:\s*<a href="/(?:user/viewProfile|Profile\.aspx)\?.*?UserId=(\d+).*?"',
- webpage, 'uploader id', fatal=False)
- upload_date = unified_strdate(self._html_search_regex(
- r'</a> on (.+?) at \d+:\d+',
- webpage, 'upload date', fatal=False))
-
- view_count = str_to_int(self._html_search_regex(
- r'<div id="viewsCounter"><span>([\d,\.]+)</span> views</div>',
- webpage, 'view count', fatal=False))
- comment_count = str_to_int(self._html_search_regex(
- r'<span\s+id="spCommentCount"[^>]*>([\d,\.]+)</span>',
- webpage, 'comment count', fatal=False))
-
- videos = re.findall(
- r'playerData\.cdnPath([0-9]{3,})\s*=\s*(?:encodeURIComponent\()?["\']([^"\']+)["\']', webpage)
- heights = [int(video[0]) for video in videos]
- video_urls = list(map(compat_urllib_parse_unquote, [video[1] for video in videos]))
- if webpage.find(r'flashvars\.encrypted = "true"') != -1:
- password = self._search_regex(
- r'flashvars\.video_title = "([^"]+)',
- webpage, 'password').replace('+', ' ')
- video_urls = list(map(
- lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'),
- video_urls))
+ video_id = self._match_id(url)
+
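+ # Metadata now comes from the JSON API; the HTML page is fetched
+ # later, non-fatally, only for JSON-LD, uploader and view count.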
+ video = self._download_json(
+ 'https://www.spankwire.com/api/video/%s.json' % video_id, video_id)
+
+ title = video['title']
formats = []
- for height, video_url in zip(heights, video_urls):
- path = compat_urllib_parse_urlparse(video_url).path
- m = re.search(r'/(?P<height>\d+)[pP]_(?P<tbr>\d+)[kK]', path)
- if m:
- tbr = int(m.group('tbr'))
- height = int(m.group('height'))
- else:
- tbr = None
- formats.append({
- 'url': video_url,
- 'format_id': '%dp' % height,
- 'height': height,
- 'tbr': tbr,
+ videos = video.get('videos')
+ if isinstance(videos, dict):
+ for format_id, format_url in videos.items():
+ video_url = url_or_none(format_url)
+ if not video_url:
+ continue
+ height = int_or_none(self._search_regex(
+ r'(\d+)[pP]', format_id, 'height', default=None))
+ m = re.search(
+ r'/(?P<height>\d+)[pP]_(?P<tbr>\d+)[kK]', video_url)
+ if m:
+ tbr = int(m.group('tbr'))
+ height = height or int(m.group('height'))
+ else:
+ tbr = None
+ formats.append({
+ 'url': video_url,
+ 'format_id': '%dp' % height if height else format_id,
+ 'height': height,
+ 'tbr': tbr,
+ })
+ m3u8_url = url_or_none(video.get('HLS'))
+ if m3u8_url:
+ formats.extend(self._extract_m3u8_formats(
+ m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
+ m3u8_id='hls', fatal=False))
+ self._sort_formats(formats, ('height', 'tbr', 'width', 'format_id'))
+
+ view_count = str_to_int(video.get('viewed'))
+
+ thumbnails = []
+ for preference, t in enumerate(('', '2x'), start=0):
+ thumbnail_url = url_or_none(video.get('poster%s' % t))
+ if not thumbnail_url:
+ continue
+ thumbnails.append({
+ 'url': thumbnail_url,
+ 'preference': preference,
})
- self._sort_formats(formats)
- age_limit = self._rta_search(webpage)
+ def extract_names(key):
+ entries_list = video.get(key)
+ if not isinstance(entries_list, list):
+ return
+ entries = []
+ for entry in entries_list:
+ name = str_or_none(entry.get('name'))
+ if name:
+ entries.append(name)
+ return entries
+
+ categories = extract_names('categories')
+ tags = extract_names('tags')
- return {
+ uploader = None
+ info = {}
+
+ webpage = self._download_webpage(
+ 'https://www.spankwire.com/_/video%s/' % video_id, video_id,
+ fatal=False)
+ if webpage:
+ info = self._search_json_ld(webpage, video_id, default={})
+ thumbnail_url = None
+ if 'thumbnail' in info:
+ thumbnail_url = url_or_none(info['thumbnail'])
+ del info['thumbnail']
+ if not thumbnail_url:
+ thumbnail_url = self._og_search_thumbnail(webpage)
+ if thumbnail_url:
+ thumbnails.append({
+ 'url': thumbnail_url,
+ 'preference': 10,
+ })
+ uploader = self._html_search_regex(
+ r'(?s)by\s*<a[^>]+\bclass=["\']uploaded__by[^>]*>(.+?)</a>',
+ webpage, 'uploader', fatal=False)
+ if not view_count:
+ view_count = str_to_int(self._search_regex(
+ r'data-views=["\']([\d,.]+)', webpage, 'view count',
+ fatal=False))
+
+ return merge_dicts({
'id': video_id,
'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
+ 'description': video.get('description'),
+ 'duration': int_or_none(video.get('duration')),
+ 'thumbnails': thumbnails,
'uploader': uploader,
- 'uploader_id': uploader_id,
- 'upload_date': upload_date,
+ 'uploader_id': str_or_none(video.get('userId')),
+ 'timestamp': int_or_none(video.get('time_approved_on')),
+ 'average_rating': float_or_none(video.get('rating')),
'view_count': view_count,
- 'comment_count': comment_count,
+ 'comment_count': int_or_none(video.get('comments')),
+ 'age_limit': 18,
+ 'categories': categories,
+ 'tags': tags,
'formats': formats,
- 'age_limit': age_limit,
- }
+ }, info)
# coding: utf-8
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
+from .jwplatform import JWPlatformIE
from .nexx import NexxIE
-from ..compat import compat_urlparse
+from ..compat import (
+ compat_str,
+ compat_urlparse,
+)
+from ..utils import (
+ NO_DEFAULT,
+ try_get,
+)
class Tele5IE(InfoExtractor):
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
video_id = (qs.get('vid') or qs.get('ve_id') or [None])[0]
- if not video_id:
+ NEXX_ID_RE = r'\d{6,}'
+ JWPLATFORM_ID_RE = r'[a-zA-Z0-9]{8}'
+
+ def nexx_result(nexx_id):
+ return self.url_result(
+ 'https://api.nexx.cloud/v3/759/videos/byid/%s' % nexx_id,
+ ie=NexxIE.ie_key(), video_id=nexx_id)
+
+ nexx_id = jwplatform_id = None
+
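+ # Resolution order: a purely numeric id is a Nexx id and an
+ # 8-character alphanumeric one a JW Platform id; ids scraped from the
+ # page are tried the same way, and as a last resort the JW Platform
+ # media endpoint is queried for a nexx_id to route through Nexx.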
+ if video_id:
+ if re.match(NEXX_ID_RE, video_id):
+ return nexx_result(video_id)
+ elif re.match(JWPLATFORM_ID_RE, video_id):
+ jwplatform_id = video_id
+
+ if not nexx_id:
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
- video_id = self._html_search_regex(
- (r'id\s*=\s*["\']video-player["\'][^>]+data-id\s*=\s*["\'](\d+)',
- r'\s+id\s*=\s*["\']player_(\d{6,})',
- r'\bdata-id\s*=\s*["\'](\d{6,})'), webpage, 'video id')
+
+ def extract_id(pattern, name, default=NO_DEFAULT):
+ return self._html_search_regex(
+ (r'id\s*=\s*["\']video-player["\'][^>]+data-id\s*=\s*["\'](%s)' % pattern,
+ r'\s+id\s*=\s*["\']player_(%s)' % pattern,
+ r'\bdata-id\s*=\s*["\'](%s)' % pattern), webpage, name,
+ default=default)
+
+ nexx_id = extract_id(NEXX_ID_RE, 'nexx id', default=None)
+ if nexx_id:
+ return nexx_result(nexx_id)
+
+ if not jwplatform_id:
+ jwplatform_id = extract_id(JWPLATFORM_ID_RE, 'jwplatform id')
+
+ media = self._download_json(
+ 'https://cdn.jwplayer.com/v2/media/' + jwplatform_id,
+ display_id)
+ nexx_id = try_get(
+ media, lambda x: x['playlist'][0]['nexx_id'], compat_str)
+
+ if nexx_id:
+ return nexx_result(nexx_id)
return self.url_result(
- 'https://api.nexx.cloud/v3/759/videos/byid/%s' % video_id,
- ie=NexxIE.ie_key(), video_id=video_id)
+ 'jwplatform:%s' % jwplatform_id, ie=JWPlatformIE.ie_key(),
+ video_id=jwplatform_id)
class TenPlayIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?10play\.com\.au/[^/]+/episodes/[^/]+/[^/]+/(?P<id>tpv\d{6}[a-z]{5})'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?10play\.com\.au/(?:[^/]+/)+(?P<id>tpv\d{6}[a-z]{5})'
+ _TESTS = [{
'url': 'https://10play.com.au/masterchef/episodes/season-1/masterchef-s1-ep-1/tpv190718kwzga',
'info_dict': {
'id': '6060533435001',
'format': 'bestvideo',
'skip_download': True,
}
- }
+ }, {
+ 'url': 'https://10play.com.au/how-to-stay-married/web-extras/season-1/terrys-talks-ep-1-embracing-change/tpv190915ylupc',
+ 'only_matching': True,
+ }]
BRIGHTCOVE_URL_TEMPLATE = 'https://players.brightcove.net/2199827728001/cN6vRtRQt_default/index.html?videoId=%s'
def _real_extract(self, url):
}, {
'url': 'https://www.thisoldhouse.com/21113884/s41-e13-paradise-lost',
'only_matching': True,
+ }, {
+ # iframe www.thisoldhouse.com
+ 'url': 'https://www.thisoldhouse.com/21083431/seaside-transformation-the-westerly-project',
+ 'only_matching': True,
}]
_ZYPE_TMPL = 'https://player.zype.com/embed/%s.html?api_key=hsOk_yMSPYNrT22e9pu8hihLXjaZf0JW5jsOWv4ZqyHJFvkJn6rtToHl09tbbsbe'
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
- r'<iframe[^>]+src=[\'"](?:https?:)?//thisoldhouse\.chorus\.build/videos/zype/([0-9a-f]{24})',
+ r'<iframe[^>]+src=[\'"](?:https?:)?//(?:www\.)?thisoldhouse\.(?:chorus\.build|com)/videos/zype/([0-9a-f]{24})',
webpage, 'video id')
return self.url_result(self._ZYPE_TMPL % video_id, 'Zype', video_id)
manifest_url.replace('.m3u8', '.f4m'),
video_id, f4m_id='hds', fatal=False))
formats.extend(self._extract_ism_formats(
- re.sub(r'\.ism/.+?\.m3u8', r'.ism/Manifest', manifest_url),
+ re.sub(r'\.ism/.*?\.m3u8', r'.ism/Manifest', manifest_url),
video_id, ism_id='mss', fatal=False))
if not formats and info.get('is_geo_restricted'):
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
- compat_str,
compat_urlparse,
)
from ..utils import (
int_or_none,
parse_iso8601,
qualities,
- smuggle_url,
try_get,
- unsmuggle_url,
update_url_query,
url_or_none,
)
]
def _real_extract(self, url):
- url, smuggled_data = unsmuggle_url(url, {})
- self._initialize_geo_bypass({
- 'countries': smuggled_data.get('geo_countries'),
- })
-
video_id = self._match_id(url)
geo_country = self._search_regex(
r'https?://[^/]+\.([a-z]{2})', url,
'ext': ext,
}
if video_url.startswith('rtmp'):
- if smuggled_data.get('skip_rtmp'):
- continue
m = re.search(
r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', video_url)
if not m:
_VALID_URL = r'''(?x)
https?://
(?:www\.)?
- viafree\.
- (?:
- (?:dk|no)/programmer|
- se/program
- )
- /(?:[^/]+/)+(?P<id>[^/?#&]+)
+ viafree\.(?P<country>dk|no|se)
+ /(?P<id>program(?:mer)?/(?:[^/]+/)+[^/?#&]+)
'''
_TESTS = [{
- 'url': 'http://www.viafree.se/program/livsstil/husraddarna/sasong-2/avsnitt-2',
+ 'url': 'http://www.viafree.no/programmer/underholdning/det-beste-vorspielet/sesong-2/episode-1',
'info_dict': {
- 'id': '395375',
+ 'id': '757786',
'ext': 'mp4',
- 'title': 'Husräddarna S02E02',
- 'description': 'md5:4db5c933e37db629b5a2f75dfb34829e',
- 'series': 'Husräddarna',
- 'season': 'Säsong 2',
+ 'title': 'Det beste vorspielet - Sesong 2 - Episode 1',
+ 'description': 'md5:b632cb848331404ccacd8cd03e83b4c3',
+ 'series': 'Det beste vorspielet',
'season_number': 2,
- 'duration': 2576,
- 'timestamp': 1400596321,
- 'upload_date': '20140520',
+ 'duration': 1116,
+ 'timestamp': 1471200600,
+ 'upload_date': '20160814',
},
'params': {
'skip_download': True,
},
- 'add_ie': [TVPlayIE.ie_key()],
}, {
# with relatedClips
'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-1',
- 'info_dict': {
- 'id': '758770',
- 'ext': 'mp4',
- 'title': 'Sommaren med YouTube-stjärnorna S01E01',
- 'description': 'md5:2bc69dce2c4bb48391e858539bbb0e3f',
- 'series': 'Sommaren med YouTube-stjärnorna',
- 'season': 'Säsong 1',
- 'season_number': 1,
- 'duration': 1326,
- 'timestamp': 1470905572,
- 'upload_date': '20160811',
- },
- 'params': {
- 'skip_download': True,
- },
- 'add_ie': [TVPlayIE.ie_key()],
+ 'only_matching': True,
}, {
# Different og:image URL schema
'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-2',
'only_matching': True,
}, {
- 'url': 'http://www.viafree.no/programmer/underholdning/det-beste-vorspielet/sesong-2/episode-1',
+ 'url': 'http://www.viafree.se/program/livsstil/husraddarna/sasong-2/avsnitt-2',
'only_matching': True,
}, {
'url': 'http://www.viafree.dk/programmer/reality/paradise-hotel/saeson-7/episode-5',
'only_matching': True,
}]
+ _GEO_BYPASS = False
@classmethod
def suitable(cls, url):
return False if TVPlayIE.suitable(url) else super(ViafreeIE, cls).suitable(url)
def _real_extract(self, url):
- video_id = self._match_id(url)
+ country, path = re.match(self._VALID_URL, url).groups()
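+ # The viafree-content API resolves the site path to programme
+ # metadata and a stream link; geo-restricted requests fail with
+ # HTTP 403, which is mapped to raise_geo_restricted below.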
+ content = self._download_json(
+ 'https://viafree-content.mtg-api.com/viafree-content/v1/%s/path/%s' % (country, path), path)
+ program = content['_embedded']['viafreeBlocks'][0]['_embedded']['program']
+ guid = program['guid']
+ meta = content['meta']
+ title = meta['title']
- webpage = self._download_webpage(url, video_id)
+ try:
+ stream_href = self._download_json(
+ program['_links']['streamLink']['href'], guid,
+ headers=self.geo_verification_headers())['embedded']['prioritizedStreams'][0]['links']['stream']['href']
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+ self.raise_geo_restricted(countries=[country])
+ raise
+
+ formats = self._extract_m3u8_formats(stream_href, guid, 'mp4')
+ self._sort_formats(formats)
+ episode = program.get('episode') or {}
- data = self._parse_json(
- self._search_regex(
- r'(?s)window\.App\s*=\s*({.+?})\s*;\s*</script',
- webpage, 'data', default='{}'),
- video_id, transform_source=lambda x: re.sub(
- r'(?s)function\s+[a-zA-Z_][\da-zA-Z_]*\s*\([^)]*\)\s*{[^}]*}\s*',
- 'null', x), fatal=False)
-
- video_id = None
-
- if data:
- video_id = try_get(
- data, lambda x: x['context']['dispatcher']['stores'][
- 'ContentPageProgramStore']['currentVideo']['id'],
- compat_str)
-
- # Fallback #1 (extract from og:image URL schema)
- if not video_id:
- thumbnail = self._og_search_thumbnail(webpage, default=None)
- if thumbnail:
- video_id = self._search_regex(
- # Patterns seen:
- # http://cdn.playapi.mtgx.tv/imagecache/600x315/cloud/content-images/inbox/765166/a2e95e5f1d735bab9f309fa345cc3f25.jpg
- # http://cdn.playapi.mtgx.tv/imagecache/600x315/cloud/content-images/seasons/15204/758770/4a5ba509ca8bc043e1ebd1a76131cdf2.jpg
- r'https?://[^/]+/imagecache/(?:[^/]+/)+(\d{6,})/',
- thumbnail, 'video id', default=None)
-
- # Fallback #2. Extract from raw JSON string.
- # May extract wrong video id if relatedClips is present.
- if not video_id:
- video_id = self._search_regex(
- r'currentVideo["\']\s*:\s*.+?["\']id["\']\s*:\s*["\'](\d{6,})',
- webpage, 'video id')
-
- return self.url_result(
- smuggle_url(
- 'mtg:%s' % video_id,
- {
- 'geo_countries': [
- compat_urlparse.urlparse(url).netloc.rsplit('.', 1)[-1]],
- # rtmp host mtgfs.fplive.net for viafree is unresolvable
- 'skip_rtmp': True,
- }),
- ie=TVPlayIE.ie_key(), video_id=video_id)
+ return {
+ 'id': guid,
+ 'title': title,
+ 'thumbnail': meta.get('image'),
+ 'description': meta.get('description'),
+ 'series': episode.get('seriesTitle'),
+ 'episode_number': int_or_none(episode.get('episodeNumber')),
+ 'season_number': int_or_none(episode.get('seasonNumber')),
+ 'duration': int_or_none(try_get(program, lambda x: x['video']['duration']['milliseconds']), 1000),
+ 'timestamp': parse_iso8601(try_get(program, lambda x: x['availability']['start'])),
+ 'formats': formats,
+ }
class TVPlayHomeIE(InfoExtractor):
class TwitchClipsIE(TwitchBaseIE):
IE_NAME = 'twitch:clips'
- _VALID_URL = r'https?://(?:clips\.twitch\.tv/(?:embed\?.*?\bclip=|(?:[^/]+/)*)|(?:www\.)?twitch\.tv/[^/]+/clip/)(?P<id>[^/?#&]+)'
+ _VALID_URL = r'''(?x)
+ https?://
+ (?:
+ clips\.twitch\.tv/(?:embed\?.*?\bclip=|(?:[^/]+/)*)|
+ (?:(?:www|go|m)\.)?twitch\.tv/[^/]+/clip/
+ )
+ (?P<id>[^/?#&]+)
+ '''
_TESTS = [{
'url': 'https://clips.twitch.tv/FaintLightGullWholeWheat',
}, {
'url': 'https://clips.twitch.tv/embed?clip=InquisitiveBreakableYogurtJebaited',
'only_matching': True,
+ }, {
+ 'url': 'https://m.twitch.tv/rossbroadcast/clip/ConfidentBraveHumanChefFrank',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://go.twitch.tv/rossbroadcast/clip/ConfidentBraveHumanChefFrank',
+ 'only_matching': True,
}]
def _real_extract(self, url):
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..compat import (
+ compat_str,
+ compat_urllib_parse_urlencode,
+)
from ..utils import (
clean_html,
int_or_none,
parse_duration,
+ parse_iso8601,
+ qualities,
update_url_query,
- str_or_none,
)
_VALID_URL = r'https?://(?:.+?\.)?uol\.com\.br/.*?(?:(?:mediaId|v)=|view/(?:[a-z0-9]+/)?|video(?:=|/(?:\d{4}/\d{2}/\d{2}/)?))(?P<id>\d+|[\w-]+-[A-Z0-9]+)'
_TESTS = [{
'url': 'http://player.mais.uol.com.br/player_video_v3.swf?mediaId=15951931',
- 'md5': '25291da27dc45e0afb5718a8603d3816',
+ 'md5': '4f1e26683979715ff64e4e29099cf020',
'info_dict': {
'id': '15951931',
'ext': 'mp4',
'title': 'Miss simpatia é encontrada morta',
'description': 'md5:3f8c11a0c0556d66daf7e5b45ef823b2',
+ 'timestamp': 1470421860,
+ 'upload_date': '20160805',
}
}, {
'url': 'http://tvuol.uol.com.br/video/incendio-destroi-uma-das-maiores-casas-noturnas-de-londres-04024E9A3268D4C95326',
- 'md5': 'e41a2fb7b7398a3a46b6af37b15c00c9',
+ 'md5': '2850a0e8dfa0a7307e04a96c5bdc5bc2',
'info_dict': {
'id': '15954259',
'ext': 'mp4',
'title': 'Incêndio destrói uma das maiores casas noturnas de Londres',
'description': 'Em Londres, um incêndio destruiu uma das maiores boates da cidade. Não há informações sobre vítimas.',
+ 'timestamp': 1470674520,
+ 'upload_date': '20160808',
}
}, {
'url': 'http://mais.uol.com.br/static/uolplayer/index.html?mediaId=15951931',
'only_matching': True,
}]
- _FORMATS = {
- '2': {
- 'width': 640,
- 'height': 360,
- },
- '5': {
- 'width': 1280,
- 'height': 720,
- },
- '6': {
- 'width': 426,
- 'height': 240,
- },
- '7': {
- 'width': 1920,
- 'height': 1080,
- },
- '8': {
- 'width': 192,
- 'height': 144,
- },
- '9': {
- 'width': 568,
- 'height': 320,
- },
- '11': {
- 'width': 640,
- 'height': 360,
- }
- }
-
def _real_extract(self, url):
video_id = self._match_id(url)
- media_id = None
-
- if video_id.isdigit():
- media_id = video_id
-
- if not media_id:
- embed_page = self._download_webpage(
- 'https://jsuol.com.br/c/tv/uol/embed/?params=[embed,%s]' % video_id,
- video_id, 'Downloading embed page', fatal=False)
- if embed_page:
- media_id = self._search_regex(
- (r'uol\.com\.br/(\d+)', r'mediaId=(\d+)'),
- embed_page, 'media id', default=None)
-
- if not media_id:
- webpage = self._download_webpage(url, video_id)
- media_id = self._search_regex(r'mediaId=(\d+)', webpage, 'media id')
video_data = self._download_json(
- 'http://mais.uol.com.br/apiuol/v3/player/getMedia/%s.json' % media_id,
- media_id)['item']
+ # https://api.mais.uol.com.br/apiuol/v4/player/data/[MEDIA_ID]
+ 'https://api.mais.uol.com.br/apiuol/v3/media/detail/' + video_id,
+ video_id)['item']
+ media_id = compat_str(video_data['mediaId'])
title = video_data['title']
+ ver = video_data.get('revision', 2)
- query = {
- 'ver': video_data.get('numRevision', 2),
- 'r': 'http://mais.uol.com.br',
- }
- for k in ('token', 'sign'):
- v = video_data.get(k)
- if v:
- query[k] = v
-
+ uol_formats = self._download_json(
+ 'https://croupier.mais.uol.com.br/v3/formats/%s/jsonp' % media_id,
+ media_id)
+ quality = qualities(['mobile', 'WEBM', '360p', '720p', '1080p'])
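+ # Each entry of the croupier response carries its own token/sign
+ # pair, so a signed query is rebuilt per format below.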
formats = []
- for f in video_data.get('formats', []):
+ for format_id, f in uol_formats.items():
+ if not isinstance(f, dict):
+ continue
f_url = f.get('url') or f.get('secureUrl')
if not f_url:
continue
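+            # each format carries its own token/sign pair, which has to be
+            # sent back as query parameters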
+ query = {
+ 'ver': ver,
+ 'r': 'http://mais.uol.com.br',
+ }
+ for k in ('token', 'sign'):
+ v = f.get(k)
+ if v:
+ query[k] = v
f_url = update_url_query(f_url, query)
- format_id = str_or_none(f.get('id'))
- if format_id == '10':
- formats.extend(self._extract_m3u8_formats(
- f_url, video_id, 'mp4', 'm3u8_native',
- m3u8_id='hls', fatal=False))
+ if format_id == 'HLS':
+ m3u8_formats = self._extract_m3u8_formats(
+ f_url, media_id, 'mp4', 'm3u8_native',
+ m3u8_id='hls', fatal=False)
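+                # the signed query must also reach every media segment
+                # request, which is what extra_param_to_segment_url is for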
+ encoded_query = compat_urllib_parse_urlencode(query)
+ for m3u8_f in m3u8_formats:
+ m3u8_f['extra_param_to_segment_url'] = encoded_query
+ m3u8_f['url'] = update_url_query(m3u8_f['url'], query)
+ formats.extend(m3u8_formats)
continue
- fmt = {
+ formats.append({
'format_id': format_id,
'url': f_url,
- 'source_preference': 1,
- }
- fmt.update(self._FORMATS.get(format_id, {}))
- formats.append(fmt)
- self._sort_formats(formats, ('height', 'width', 'source_preference', 'tbr', 'ext'))
+ 'quality': quality(format_id),
+ 'preference': -1,
+ })
+ self._sort_formats(formats)
tags = []
        for tag in video_data.get('tags', []):
            tag_description = tag.get('description')
            if not tag_description:
                continue
            tags.append(tag_description)
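+        # the API exposes a fixed set of pre-sized thumbnails
+        # (thumbSmall, thumbMedium, ..., thumbXlarge)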
+ thumbnails = []
+ for q in ('Small', 'Medium', 'Wmedium', 'Large', 'Wlarge', 'Xlarge'):
+ q_url = video_data.get('thumb' + q)
+ if not q_url:
+ continue
+ thumbnails.append({
+ 'id': q,
+ 'url': q_url,
+ })
+
return {
'id': media_id,
'title': title,
- 'description': clean_html(video_data.get('desMedia')),
- 'thumbnail': video_data.get('thumbnail'),
- 'duration': int_or_none(video_data.get('durationSeconds')) or parse_duration(video_data.get('duration')),
+ 'description': clean_html(video_data.get('description')),
+ 'thumbnails': thumbnails,
+ 'duration': parse_duration(video_data.get('duration')),
'tags': tags,
'formats': formats,
+ 'timestamp': parse_iso8601(video_data.get('publishDate'), ' '),
+ 'view_count': int_or_none(video_data.get('viewsQtty')),
}
)
from ..utils import (
clean_html,
+ ExtractorError,
int_or_none,
mimetype2ext,
parse_iso8601,
'url': 'https://gyao.yahoo.co.jp/episode/%E3%81%8D%E3%81%AE%E3%81%86%E4%BD%95%E9%A3%9F%E3%81%B9%E3%81%9F%EF%BC%9F%20%E7%AC%AC2%E8%A9%B1%202019%2F4%2F12%E6%94%BE%E9%80%81%E5%88%86/5cb02352-b725-409e-9f8d-88f947a9f682',
'only_matching': True,
}]
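+    # the playback API enforces the JP restriction server-side (see the
+    # 'not in japan' handling below), so the IP-based geo bypass is disabled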
+ _GEO_BYPASS = False
def _real_extract(self, url):
video_id = self._match_id(url).replace('/', ':')
- video = self._download_json(
- 'https://gyao.yahoo.co.jp/dam/v1/videos/' + video_id,
- video_id, query={
- 'fields': 'longDescription,title,videoId',
- }, headers={
- 'X-User-Agent': 'Unknown Pc GYAO!/2.0.0 Web',
- })
+ headers = self.geo_verification_headers()
+ headers['Accept'] = 'application/json'
+ resp = self._download_json(
+ 'https://gyao.yahoo.co.jp/apis/playback/graphql', video_id, query={
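+                # presumably a static public app id used by the GYAO web player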
+ 'appId': 'dj00aiZpPUNJeDh2cU1RazU3UCZzPWNvbnN1bWVyc2VjcmV0Jng9NTk-',
+ 'query': '''{
+ content(parameter: {contentId: "%s", logicaAgent: PC_WEB}) {
+ video {
+ delivery {
+ id
+ }
+ title
+ }
+ }
+}''' % video_id,
+ }, headers=headers)
+ content = resp['data']['content']
+ if not content:
+ msg = resp['errors'][0]['message']
+ if msg == 'not in japan':
+ self.raise_geo_restricted(countries=['JP'])
+ raise ExtractorError(msg)
+ video = content['video']
return {
'_type': 'url_transparent',
'id': video_id,
'title': video['title'],
'url': smuggle_url(
- 'http://players.brightcove.net/4235717419001/SyG5P0gjb_default/index.html?videoId=' + video['videoId'],
+ 'http://players.brightcove.net/4235717419001/SyG5P0gjb_default/index.html?videoId=' + video['delivery']['id'],
{'geo_countries': ['JP']}),
- 'description': video.get('longDescription'),
'ie_key': BrightcoveNewIE.ie_key(),
}
class YahooGyaOIE(InfoExtractor):
IE_NAME = 'yahoo:gyao'
- _VALID_URL = r'https?://(?:gyao\.yahoo\.co\.jp/(?:p|title/[^/]+)|streaming\.yahoo\.co\.jp/p/y)/(?P<id>\d+/v\d+|[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
+ _VALID_URL = r'https?://(?:gyao\.yahoo\.co\.jp/(?:p|title(?:/[^/]+)?)|streaming\.yahoo\.co\.jp/p/y)/(?P<id>\d+/v\d+|[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
_TESTS = [{
'url': 'https://gyao.yahoo.co.jp/p/00449/v03102/',
'info_dict': {
}, {
'url': 'https://gyao.yahoo.co.jp/title/%E3%81%97%E3%82%83%E3%81%B9%E3%81%8F%E3%82%8A007/5b025a49-b2e5-4dc7-945c-09c6634afacf',
'only_matching': True,
+ }, {
+ 'url': 'https://gyao.yahoo.co.jp/title/5b025a49-b2e5-4dc7-945c-09c6634afacf',
+ 'only_matching': True,
}]
def _real_extract(self, url):
from .common import InfoExtractor
from ..utils import (
int_or_none,
- sanitized_Request,
str_to_int,
unescapeHTML,
unified_strdate,
class YouPornIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?youporn\.com/watch/(?P<id>\d+)/(?P<display_id>[^/?#&]+)'
+ _VALID_URL = r'https?://(?:www\.)?youporn\.com/(?:watch|embed)/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?'
_TESTS = [{
'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
'md5': '3744d24c50438cf5b6f6d59feb5055c2',
'params': {
'skip_download': True,
},
+ }, {
+ 'url': 'https://www.youporn.com/embed/505835/sex-ed-is-it-safe-to-masturbate-daily/',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.youporn.com/watch/505835',
+ 'only_matching': True,
}]
+ @staticmethod
+ def _extract_urls(webpage):
+ return re.findall(
+ r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?youporn\.com/embed/\d+)',
+ webpage)
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- display_id = mobj.group('display_id')
+ display_id = mobj.group('display_id') or video_id
- request = sanitized_Request(url)
- request.add_header('Cookie', 'age_verified=1')
- webpage = self._download_webpage(request, display_id)
+ webpage = self._download_webpage(
+ 'http://www.youporn.com/watch/%s' % video_id, display_id,
+ headers={'Cookie': 'age_verified=1'})
title = self._html_search_regex(
r'(?s)<div[^>]+class=["\']watchVideoTitle[^>]+>(.+?)</div>',
(?(1).+)? # if we found the ID, everything can follow
$""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
+ _PLAYER_INFO_RE = (
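+        # e.g. /s/player/64dddad9/player_ias.vflset/en_US/base.js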
+ r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
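+        # e.g. legacy html5player-vflXGBaUt.js style player URLs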
+ r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
+ )
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
+ {
+ # invalid -> valid video id redirection
+ 'url': 'DJztXj2GPfl',
+ 'info_dict': {
+ 'id': 'DJztXj2GPfk',
+ 'ext': 'mp4',
+ 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
+ 'description': 'md5:bf577a41da97918e94fa9798d9228825',
+ 'upload_date': '20090125',
+ 'uploader': 'Prochorowka',
+ 'uploader_id': 'Prochorowka',
+ 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
+ 'artist': 'Panjabi MC',
+ 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
+ 'album': 'Beware of the Boys (Mundian To Bach Ke)',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }
]
def __init__(self, *args, **kwargs):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
- def _extract_signature_function(self, video_id, player_url, example_sig):
- id_m = re.match(
- r'.*?[-.](?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2,3}_[A-Z]{2})?/base)?\.(?P<ext>[a-z]+)$',
- player_url)
- if not id_m:
+ @classmethod
+ def _extract_player_info(cls, player_url):
+ for player_re in cls._PLAYER_INFO_RE:
+ id_m = re.search(player_re, player_url)
+ if id_m:
+ break
+ else:
raise ExtractorError('Cannot identify player %r' % player_url)
- player_type = id_m.group('ext')
- player_id = id_m.group('id')
+ return id_m.group('ext'), id_m.group('id')
+
+ def _extract_signature_function(self, video_id, player_url, example_sig):
+ player_type, player_id = self._extract_player_info(player_url)
# Read from filesystem cache
func_id = '%s_%s_%s' % (
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
- video_webpage = self._download_webpage(url, video_id)
+ video_webpage, urlh = self._download_webpage_handle(url, video_id)
+
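+        # an invalid video id may have been redirected to a valid one;
+        # prefer the video id from the final (post-redirect) URL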
+ qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query)
+ video_id = qs.get('v', [None])[0] or video_id
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
# fields may contain comma as well (see
# https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
+
+ def feed_entry(name):
+ return try_get(feed_data, lambda x: x[name][0], compat_str)
+
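+            # skip broken multifeed entries that come without a video id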
+ feed_id = feed_entry('id')
+ if not feed_id:
+ continue
+ feed_title = feed_entry('title')
+ title = video_title
+ if feed_title:
+ title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
- 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
+ 'title': title,
})
- feed_ids.append(feed_data['id'][0])
+ feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
}
for fmt in streaming_formats:
- if fmt.get('drm_families'):
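+            # newer player responses use the camelCase drmFamilies key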
+ if fmt.get('drmFamilies') or fmt.get('drm_families'):
continue
url = url_or_none(fmt.get('url'))
if not url:
- cipher = fmt.get('cipher')
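+                # newer player responses carry the cipher data in signatureCipher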
+ cipher = fmt.get('cipher') or fmt.get('signatureCipher')
if not cipher:
continue
url_data = compat_parse_qs(cipher)
if self._downloader.params.get('verbose'):
if player_url is None:
- player_version = 'unknown'
player_desc = 'unknown'
else:
- if player_url.endswith('swf'):
- player_version = self._search_regex(
- r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
- 'flash player', fatal=False)
- player_desc = 'flash player %s' % player_version
- else:
- player_version = self._search_regex(
- [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
- r'(?:www|player(?:_ias)?)[-.]([^/]+)(?:/[a-z]{2,3}_[A-Z]{2})?/base\.js'],
- player_url,
- 'html5 player', fatal=False)
- player_desc = 'html5 player %s' % player_version
-
+ player_type, player_version = self._extract_player_info(player_url)
+ player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version)
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
postproc.add_option(
'--exec',
metavar='CMD', dest='exec_cmd',
- help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
+ help='Execute a command on the file after downloading and post-processing, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
postproc.add_option(
'--convert-subs', '--convert-subtitles',
metavar='FORMAT', dest='convertsubtitles', default=None,
import binascii
import calendar
import codecs
+import collections
import contextlib
import ctypes
import datetime
import subprocess
import sys
import tempfile
+import time
import traceback
import xml.etree.ElementTree
import zlib
1. https://curl.haxx.se/docs/http-cookies.html
"""
_HTTPONLY_PREFIX = '#HttpOnly_'
+ _ENTRY_LEN = 7
+ _HEADER = '''# Netscape HTTP Cookie File
+# This file is generated by youtube-dl. Do not edit.
+
+'''
+ _CookieFileEntry = collections.namedtuple(
+ 'CookieFileEntry',
+ ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+ """
+ Save cookies to a file.
+
+ Most of the code is taken from CPython 3.8 and slightly adapted
+ to support cookie files with UTF-8 in both python 2 and 3.
+ """
+ if filename is None:
+ if self.filename is not None:
+ filename = self.filename
+ else:
+ raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
+
# Store session cookies with `expires` set to 0 instead of an empty
# string
for cookie in self:
if cookie.expires is None:
cookie.expires = 0
- compat_cookiejar.MozillaCookieJar.save(self, filename, ignore_discard, ignore_expires)
+
+ with io.open(filename, 'w', encoding='utf-8') as f:
+ f.write(self._HEADER)
+ now = time.time()
+ for cookie in self:
+ if not ignore_discard and cookie.discard:
+ continue
+ if not ignore_expires and cookie.is_expired(now):
+ continue
+ if cookie.secure:
+ secure = 'TRUE'
+ else:
+ secure = 'FALSE'
+ if cookie.domain.startswith('.'):
+ initial_dot = 'TRUE'
+ else:
+ initial_dot = 'FALSE'
+ if cookie.expires is not None:
+ expires = compat_str(cookie.expires)
+ else:
+ expires = ''
+ if cookie.value is None:
+ # cookies.txt regards 'Set-Cookie: foo' as a cookie
+ # with no name, whereas http.cookiejar regards it as a
+ # cookie with no value.
+ name = ''
+ value = cookie.name
+ else:
+ name = cookie.name
+ value = cookie.value
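+                # write the seven tab-separated Netscape fields in the order
+                # defined by _CookieFileEntry above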
+ f.write(
+ '\t'.join([cookie.domain, initial_dot, cookie.path,
+ secure, expires, name, value]) + '\n')
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file."""
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
+ def prepare_line(line):
+ if line.startswith(self._HTTPONLY_PREFIX):
+ line = line[len(self._HTTPONLY_PREFIX):]
+ # comments and empty lines are fine
+ if line.startswith('#') or not line.strip():
+ return line
+ cookie_list = line.split('\t')
+ if len(cookie_list) != self._ENTRY_LEN:
+ raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
+ cookie = self._CookieFileEntry(*cookie_list)
+ if cookie.expires_at and not cookie.expires_at.isdigit():
+ raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
+ return line
+
cf = io.StringIO()
- with open(filename) as f:
+ with io.open(filename, encoding='utf-8') as f:
for line in f:
- if line.startswith(self._HTTPONLY_PREFIX):
- line = line[len(self._HTTPONLY_PREFIX):]
- cf.write(compat_str(line))
+ try:
+ cf.write(prepare_line(line))
+ except compat_cookiejar.LoadError as e:
+ write_string(
+ 'WARNING: skipping cookie file entry due to %s: %r\n'
+ % (e, line), sys.stderr)
+ continue
cf.seek(0)
self._really_load(cf, filename, ignore_discard, ignore_expires)
# Session cookies are denoted by either `expires` field set to
from __future__ import unicode_literals
-__version__ = '2020.03.24'
+__version__ = '2020.05.08'