+# encoding: utf-8
+
import os
import re

from .common import InfoExtractor
from .utils import (
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,
+    compat_urlparse,
    ExtractorError,
+    smuggle_url,
+    unescapeHTML,
)
+from .brightcove import BrightcoveIE
class GenericIE(InfoExtractor):
-    """Generic last-resort information extractor."""
+ IE_DESC = u'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = u'generic'
+ _TESTS = [
+ {
+ u'url': u'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
+ u'file': u'13601338388002.mp4',
+ u'md5': u'6e15c93721d7ec9e9ca3fdbf07982cfd',
+ u'info_dict': {
+ u"uploader": u"www.hodiho.fr",
+ u"title": u"R\u00e9gis plante sa Jeep"
+ }
+ },
+ # embedded vimeo video
+ {
+ u'add_ie': ['Vimeo'],
+ u'url': u'http://skillsmatter.com/podcast/home/move-semanticsperfect-forwarding-and-rvalue-references',
+ u'file': u'22444065.mp4',
+ u'md5': u'2903896e23df39722c33f015af0666e2',
+ u'info_dict': {
+ u'title': u'ACCU 2011: Move Semantics,Perfect Forwarding, and Rvalue references- Scott Meyers- 13/04/2011',
+ u"uploader_id": u"skillsmatter",
+ u"uploader": u"Skills Matter",
+ }
+ },
+ # bandcamp page with custom domain
+ {
+ u'add_ie': ['Bandcamp'],
+ u'url': u'http://bronyrock.com/track/the-pony-mash',
+ u'file': u'3235767654.mp3',
+ u'info_dict': {
+ u'title': u'The Pony Mash',
+ u'uploader': u'M_Pallante',
+ },
+ u'skip': u'There is a limit of 200 free downloads / month for the test song',
+ },
+ # embedded brightcove video
+ # it also tests brightcove videos that need to set the 'Referer' in the
+ # http requests
+ {
+ u'add_ie': ['Brightcove'],
+ u'url': u'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
+ u'info_dict': {
+ u'id': u'2765128793001',
+ u'ext': u'mp4',
+ u'title': u'Le cours de bourse : l’analyse technique',
+ u'description': u'md5:7e9ad046e968cb2d1114004aba466fd9',
+ u'uploader': u'BFM BUSINESS',
+ },
+ u'params': {
+ u'skip_download': True,
+ },
+ },
+ ]
    def report_download_webpage(self, video_id):
        """Report webpage download."""
        if not self._downloader.params.get('test', False):
            self._downloader.report_warning(u'Falling back on generic information extractor.')
        super(GenericIE, self).report_download_webpage(video_id)

def _real_extract(self, url):
- new_url = self._test_redirect(url)
- if new_url: return [self.url_result(new_url)]
+ parsed_url = compat_urlparse.urlparse(url)
+ if not parsed_url.scheme:
+ self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
+ return self.url_result('http://' + url)
+
+ try:
+ new_url = self._test_redirect(url)
+ if new_url:
+ return [self.url_result(new_url)]
+ except compat_urllib_error.HTTPError:
+ # This may be a stupid server that doesn't like HEAD, our UA, or so
+ pass
video_id = url.split('/')[-1]
        try:
            webpage = self._download_webpage(url, video_id)
except ValueError:
# since this is the last-resort InfoExtractor, if
# this error is thrown, it'll be thrown here
- raise ExtractorError(u'Invalid URL: %s' % url)
+ raise ExtractorError(u'Failed to download URL: %s' % url)
self.report_extraction(video_id)
+
+ # it's tempting to parse this further, but you would
+ # have to take into account all the variations like
+ # Video Title - Site Name
+ # Site Name | Video Title
+ # Video Title - Tagline | Site Name
+ # and so on and so forth; it's just not practical
+ video_title = self._html_search_regex(r'<title>(.*)</title>',
+ webpage, u'video title', default=u'video', flags=re.DOTALL)
+
+ # Look for BrightCove:
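+        # _extract_brightcove_url returns the player URL for an embedded Brightcove video, or None if there is none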
+ bc_url = BrightcoveIE._extract_brightcove_url(webpage)
+ if bc_url is not None:
+ self.to_screen(u'Brightcove video detected.')
+ return self.url_result(bc_url, 'Brightcove')
+
+ # Look for embedded Vimeo player
+ mobj = re.search(
+ r'<iframe[^>]+?src="(https?://player.vimeo.com/video/.+?)"', webpage)
+ if mobj:
+ player_url = unescapeHTML(mobj.group(1))
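+            # smuggle the embedding page into the URL as a Referer hint for the Vimeo extractor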
+ surl = smuggle_url(player_url, {'Referer': url})
+ return self.url_result(surl, 'Vimeo')
+
+ # Look for embedded YouTube player
+ matches = re.findall(
+ r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?youtube.com/embed/.+?)\1', webpage)
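+        # with two capturing groups, re.findall returns (quote, url) tuples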
+ if matches:
+ urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Youtube')
+ for tuppl in matches]
+ return self.playlist_result(
+ urlrs, playlist_id=video_id, playlist_title=video_title)
+
+ # Look for embedded Dailymotion player
+ matches = re.findall(
+ r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion.com/embed/video/.+?)\1', webpage)
+ if matches:
+ urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Dailymotion')
+ for tuppl in matches]
+ return self.playlist_result(
+ urlrs, playlist_id=video_id, playlist_title=video_title)
+
+ # Look for Bandcamp pages with custom domain
+ mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
+ if mobj is not None:
+ burl = unescapeHTML(mobj.group(1))
+ # Don't set the extractor because it can be a track url or an album
+ return self.url_result(burl)
+
# Start with something easy: JW Player in SWFObject
mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
if mobj is None:
mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
if mobj is None:
# Broaden the search a little bit: JWPlayer JS loader
- mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
+ mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http[^\'"]*)', webpage)
if mobj is None:
# Try to find twitter cards info
mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
        if mobj is None:
            # We look for Open Graph info; only use og:video when og:video:type says it is a video
            m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
            if m_video_type is not None:
                mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
+ # HTML5 video
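+            # match a src attribute either on the <video> tag itself or on a nested <source> tag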
+ mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)
+ if mobj is None:
+ raise ExtractorError(u'Unsupported URL: %s' % url)
# It's possible that one of the regexes
# matched, but returned an empty group:
if mobj.group(1) is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
+ raise ExtractorError(u'Did not find a valid video URL at %s' % url)
- video_url = compat_urllib_parse.unquote(mobj.group(1))
- video_id = os.path.basename(video_url)
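+        # the matched URL may be relative, so resolve it against the page URL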
+ video_url = mobj.group(1)
+ video_url = compat_urlparse.urljoin(url, video_url)
+ video_id = compat_urllib_parse.unquote(os.path.basename(video_url))
# here's a fun little line of code for you:
- video_extension = os.path.splitext(video_id)[1][1:]
video_id = os.path.splitext(video_id)[0]
- # it's tempting to parse this further, but you would
- # have to take into account all the variations like
- # Video Title - Site Name
- # Site Name | Video Title
- # Video Title - Tagline | Site Name
- # and so on and so forth; it's just not practical
- video_title = self._html_search_regex(r'<title>(.*)</title>',
- webpage, u'video title')
-
# video uploader is domain name
video_uploader = self._search_regex(r'(?:https?://)?([^/]*)/.*',
url, u'video uploader')
- return [{
+ return {
'id': video_id,
'url': video_url,
'uploader': video_uploader,
'upload_date': None,
'title': video_title,
- 'ext': video_extension,
- }]
+ }