# youtube_dl/extractor/generic.py
import os
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,
    ExtractorError,
)

 
class GenericIE(InfoExtractor):
    """Generic last-resort information extractor."""

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        if not self._downloader.params.get('test', False):
            self._downloader.report_warning(u'Falling back on generic information extractor.')
        super(GenericIE, self).report_download_webpage(video_id)

    def report_following_redirect(self, new_url):
        """Report information extraction."""
        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)

    def _test_redirect(self, url):
        """Check if it is a redirect, like url shorteners, in case return the new url."""
        class HeadRequest(compat_urllib_request.Request):
            def get_method(self):
                return "HEAD"

        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
            """
            Subclass the HTTPRedirectHandler to make it use our
            HeadRequest also on the redirected URL
            """
            def redirect_request(self, req, fp, code, msg, headers, newurl):
                if code in (301, 302, 303, 307):
                    newurl = newurl.replace(' ', '%20')
                    newheaders = dict((k,v) for k,v in req.headers.items()
                                      if k.lower() not in ("content-length", "content-type"))
                    return HeadRequest(newurl,
                                       headers=newheaders,
                                       origin_req_host=req.get_origin_req_host(),
                                       unverifiable=True)
                else:
                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)

        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
            """
            Fallback to GET if HEAD is not allowed (405 HTTP error)
            """
            def http_error_405(self, req, fp, code, msg, headers):
                fp.read()
                fp.close()

                newheaders = dict((k,v) for k,v in req.headers.items()
                                  if k.lower() not in ("content-length", "content-type"))
                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
                                                 headers=newheaders,
                                                 origin_req_host=req.get_origin_req_host(),
                                                 unverifiable=True))

        opener = compat_urllib_request.OpenerDirector()
        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
                        HTTPMethodFallback, HEADRedirectHandler,
                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
            opener.add_handler(handler())
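        # This opener sends HEAD requests, keeps following redirects with HEAD
        # (HEADRedirectHandler) and retries with GET when a server rejects
        # HEAD with a 405 error (HTTPMethodFallback).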
        response = opener.open(HeadRequest(url))
        if response is None:
            raise ExtractorError(u'Invalid URL protocol')
        new_url = response.geturl()

        if url == new_url:
            return False

        self.report_following_redirect(new_url)
        return new_url

    def _real_extract(self, url):
        new_url = self._test_redirect(url)
        if new_url: return [self.url_result(new_url)]

        video_id = url.split('/')[-1]
        try:
            webpage = self._download_webpage(url, video_id)
        except ValueError as err:
            # since this is the last-resort InfoExtractor, if
            # this error is thrown, it'll be thrown here
            raise ExtractorError(u'Invalid URL: %s' % url)

        self.report_extraction(video_id)
        # Start with something easy: JW Player in SWFObject
        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
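        # e.g. this picks the media URL out of markup like (hypothetical example):
        #     flashvars: "autostart=1&file=http://cdn.example.com/clip.mp4"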
        if mobj is None:
            # Broaden the search a little bit
            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Broaden the search a little bit: JWPlayer JS loader
            mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
        if mobj is None:
            # Try to find twitter cards info
            mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
        if mobj is None:
            # We look for Open Graph info:
            # We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
            m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
            # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
            if m_video_type is not None:
                mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # It's possible that one of the regexes
        # matched, but returned an empty group:
        if mobj.group(1) is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_url = compat_urllib_parse.unquote(mobj.group(1))
        video_id = os.path.basename(video_url)

        # here's a fun little line of code for you:
        video_extension = os.path.splitext(video_id)[1][1:]
        video_id = os.path.splitext(video_id)[0]

        # it's tempting to parse this further, but you would
        # have to take into account all the variations like
        #   Video Title - Site Name
        #   Site Name | Video Title
        #   Video Title - Tagline | Site Name
        # and so on and so forth; it's just not practical
        video_title = self._html_search_regex(r'<title>(.*)</title>',
            webpage, u'video title')

        # video uploader is domain name
        video_uploader = self._search_regex(r'(?:https?://)?([^/]*)/.*',
            url, u'video uploader')

        return [{
            'id':       video_id,
            'url':      video_url,
            'uploader': video_uploader,
            'upload_date':  None,
            'title':    video_title,
            'ext':      video_extension,
        }]
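
# --- Illustrative sketch, not part of the extractor above --------------------
# The HeadRequest / HEADRedirectHandler / HTTPMethodFallback trio implements
# "resolve redirects with HEAD, fall back to GET on 405".  A minimal sketch of
# the same idea, assuming plain Python 3 stdlib instead of youtube-dl's compat
# wrappers; the function name and example URL are hypothetical.
import urllib.error
import urllib.request


def resolve_final_url(url):
    """Return the URL a shortener ultimately redirects to.

    Tries a HEAD request first to avoid downloading the body; if the server
    rejects HEAD with 405, retries with a plain GET.  Note that the default
    redirect handler switches to GET on redirect hops, which is why the
    extractor above subclasses HTTPRedirectHandler to keep using HEAD.
    """
    head_req = urllib.request.Request(url, method='HEAD')
    try:
        response = urllib.request.urlopen(head_req)
    except urllib.error.HTTPError as err:
        if err.code != 405:
            raise
        response = urllib.request.urlopen(url)  # GET fallback
    return response.geturl()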