X-Git-Url: https://git.rapsys.eu/youtubedl/blobdiff_plain/df5478464cde9ccd55331acb878209984dbd568b..73c97fc463f5d7c35527bfaf9a865b359dd75a17:/youtube_dl/extractor/common.py
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 64d63e1..fb2d50a 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -3,6 +3,7 @@ import os
import re
import socket
import sys
+import netrc
from ..utils import (
compat_http_client,
@@ -13,6 +14,9 @@ from ..utils import (
clean_html,
compiled_regex_type,
ExtractorError,
+ RegexNotFoundError,
+ sanitize_filename,
+ unescapeHTML,
)
class InfoExtractor(object):
@@ -33,9 +37,13 @@ class InfoExtractor(object):
title: Video title, unescaped.
ext: Video filename extension.
+    Instead of url and ext, formats can also be specified.
+
The following fields are optional:
format: The video format, defaults to ext (used for --get-format)
+ thumbnails: A list of dictionaries (with the entries "resolution" and
+ "url") for the varying thumbnails
thumbnail: Full URL to a video thumbnail image.
description: One-line video description.
uploader: Full name of the video uploader.
@@ -43,11 +51,31 @@ class InfoExtractor(object):
uploader_id: Nickname or id of the video uploader.
location: Physical location of the video.
player_url: SWF Player URL (used for rtmpdump).
- subtitles: The subtitle file contents.
+ subtitles: The subtitle file contents as a dictionary in the format
+ {language: subtitles}.
+ view_count: How many users have watched the video on the platform.
urlhandle: [internal] The urlHandle to be used to download the file,
like returned by urllib.request.urlopen
-
- The fields should all be Unicode strings.
+ age_limit: Age restriction for the video, as an integer (years)
+    formats: A list of dictionaries for each format available; it must
+ be ordered from worst to best quality. Potential fields:
+ * url Mandatory. The URL of the video file
+ * ext Will be calculated from url if missing
+ * format A human-readable description of the format
+ ("mp4 container with h264/opus").
+                 Calculated from the format_id, width, height,
+                 and format_note fields if missing.
+ * format_id A short description of the format
+ ("mp4_h264_opus" or "19")
+ * format_note Additional info about the format
+ ("3D" or "DASH video")
+ * width Width of the video, if known
+ * height Height of the video, if known
+    webpage_url: The URL of the video webpage; if given to youtube-dl it
+                 should allow getting the same result again. (It will be set
+                 by YoutubeDL if it's missing.)
+
+ Unless mentioned otherwise, the fields should be Unicode strings.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
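
Note: for orientation, here is a minimal sketch of the kind of dictionary _real_extract() would return under the fields documented above. All concrete values are invented for illustration; only the shape follows the docstring (formats ordered from worst to best, url mandatory per format, ext derivable from url).

    # Hypothetical return value of a _real_extract() call, shaped after the
    # docstring above; every concrete value here is illustrative only.
    info = {
        'id': '12345',
        'title': u'Example video',
        'thumbnail': 'http://example.com/thumb.jpg',
        'age_limit': 0,
        'view_count': 4242,
        'formats': [
            {   # worst quality first ...
                'url': 'http://example.com/video-360p.mp4',
                'ext': 'mp4',
                'format_id': '18',
                'width': 640,
                'height': 360,
            },
            {   # ... best quality last
                'url': 'http://example.com/video-720p.mp4',
                'ext': 'mp4',
                'format_id': '22',
                'format_note': 'HD',
                'width': 1280,
                'height': 720,
            },
        ],
    }
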
@@ -72,7 +100,13 @@ class InfoExtractor(object):
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
- return re.match(cls._VALID_URL, url) is not None
+
+ # This does not use has/getattr intentionally - we want to know whether
+ # we have cached the regexp for *this* class, whereas getattr would also
+ # match the superclass
+ if '_VALID_URL_RE' not in cls.__dict__:
+ cls._VALID_URL_RE = re.compile(cls._VALID_URL)
+ return cls._VALID_URL_RE.match(url) is not None
@classmethod
def working(cls):
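
Note: the __dict__ lookup in suitable() deserves a word. hasattr()/getattr() would find a _VALID_URL_RE cached on a parent class and silently reuse the wrong pattern in subclasses; checking the class's own __dict__ forces a per-class compile. A small self-contained sketch with hypothetical extractor classes:

    import re

    class BaseIE(object):
        _VALID_URL = r'https?://example\.com/.+'

    class SpecialIE(BaseIE):
        _VALID_URL = r'https?://example\.com/special/.+'

    # Once the parent has cached its compiled pattern ...
    BaseIE._VALID_URL_RE = re.compile(BaseIE._VALID_URL)

    # ... hasattr() on the subclass is already True, so it would never compile
    # its own, narrower _VALID_URL.  The __dict__ test stays False until the
    # subclass caches a pattern of its own.
    print(hasattr(SpecialIE, '_VALID_URL_RE'))    # True  (inherited attribute)
    print('_VALID_URL_RE' in SpecialIE.__dict__)  # False (nothing cached here)
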
@@ -102,6 +136,11 @@ class InfoExtractor(object):
"""Real extraction process. Redefine in subclasses."""
pass
+ @classmethod
+ def ie_key(cls):
+ """A string for getting the InfoExtractor with get_info_extractor"""
+ return cls.__name__[:-2]
+
@property
def IE_NAME(self):
return type(self).__name__[:-2]
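
Note: both ie_key() and IE_NAME rely on the convention that extractor class names end in "IE"; cls.__name__[:-2] just drops that suffix. A tiny sketch with a hypothetical class:

    from youtube_dl.extractor.common import InfoExtractor

    # Hypothetical extractor; ie_key() yields the name that
    # get_info_extractor() expects.
    class ExampleIE(InfoExtractor):
        _VALID_URL = r'https?://example\.com/.+'

    assert ExampleIE.ie_key() == 'Example'
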
@@ -117,18 +156,28 @@ class InfoExtractor(object):
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is None:
errnote = u'Unable to download webpage'
- raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
+ raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2], cause=err)
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
""" Returns a tuple (page content as string, URL handle) """
+
+ # Strip hashes from the URL (#1038)
+ if isinstance(url_or_request, (compat_str, str)):
+ url_or_request = url_or_request.partition('#')[0]
+
urlh = self._request_webpage(url_or_request, video_id, note, errnote)
content_type = urlh.headers.get('Content-Type', '')
+ webpage_bytes = urlh.read()
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
- encoding = 'utf-8'
- webpage_bytes = urlh.read()
+            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
+ webpage_bytes[:1024])
+ if m:
+ encoding = m.group(1).decode('ascii')
+ else:
+ encoding = 'utf-8'
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
@@ -137,6 +186,17 @@ class InfoExtractor(object):
self.to_screen(u'Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
+ if self._downloader.params.get('write_pages', False):
+ try:
+ url = url_or_request.get_full_url()
+ except AttributeError:
+ url = url_or_request
+ raw_filename = ('%s_%s.dump' % (video_id, url))
+ filename = sanitize_filename(raw_filename, restricted=True)
+ self.to_screen(u'Saving request to ' + filename)
+ with open(filename, 'wb') as outf:
+ outf.write(webpage_bytes)
+
content = webpage_bytes.decode(encoding, 'replace')
return (content, urlh)
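
Note: the encoding selection added to _download_webpage_handle can be read in isolation: a charset in the Content-Type header wins, then a meta charset declaration in the first 1024 bytes, then UTF-8. A standalone sketch of that fallback chain (guess_encoding is a hypothetical helper, not part of this patch):

    import re

    def guess_encoding(content_type, webpage_bytes):
        # Same fallback chain as above: charset from the Content-Type header,
        # else from a <meta ... charset=...> tag in the first 1024 bytes,
        # else UTF-8.
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            return m.group(1)
        m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                      webpage_bytes[:1024])
        if m:
            return m.group(1).decode('ascii')
        return 'utf-8'

    print(guess_encoding('text/html; charset=ISO-8859-2', b''))           # ISO-8859-2
    print(guess_encoding('text/html', b'<meta charset="windows-1251">'))  # windows-1251
    print(guess_encoding('application/json', b'{}'))                      # utf-8
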
@@ -160,12 +220,11 @@ class InfoExtractor(object):
"""Report attempt to confirm age."""
self.to_screen(u'Confirming age')
+ def report_login(self):
+ """Report attempt to log in."""
+ self.to_screen(u'Logging in')
+
#Methods for following #608
- #They set the correct value of the '_type' key
- def video_result(self, video_info):
- """Returns a video"""
- video_info['_type'] = 'video'
- return video_info
def url_result(self, url, ie=None):
"""Returns a url that points to a page that should be processed"""
#TODO: ie should be the class used for getting the info
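
Note: url_result() is what extractors return when a page merely points at a video hosted elsewhere; YoutubeDL re-dispatches the returned dict because its '_type' is 'url'. A hedged sketch of typical use, with a hypothetical extractor and regex:

    from youtube_dl.extractor.common import InfoExtractor

    class ExampleEmbedIE(InfoExtractor):
        # Hypothetical site whose pages only embed videos from a supported host.
        _VALID_URL = r'https?://example\.com/clip/\d+'

        def _real_extract(self, url):
            webpage = self._download_webpage(url, 'clip')
            target = self._search_regex(
                r'data-video-url="([^"]+)"', webpage, u'embedded video URL')
            # Hand the real URL back to YoutubeDL for re-extraction.
            return self.url_result(target, ie='Youtube')
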
@@ -188,7 +247,7 @@ class InfoExtractor(object):
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
- ExtractorError, depending on fatal, specifying the field name.
+ RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
@@ -208,7 +267,7 @@ class InfoExtractor(object):
elif default is not None:
return default
elif fatal:
-            raise ExtractorError(u'Unable to extract %s' % _name)
+            raise RegexNotFoundError(u'Unable to extract %s' % _name)
else:
self._downloader.report_warning(u'unable to extract %s; '
u'please report this issue on http://yt-dl.org/bug' % _name)
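
Note: the three failure modes of _search_regex() are chosen per call site; a fragment as it might appear inside a _real_extract() (all patterns and field names are hypothetical, and in this version a default only takes effect when it is not None):

    # Mandatory field: a miss now raises RegexNotFoundError (a subclass of
    # ExtractorError), so callers that caught ExtractorError keep working.
    title = self._html_search_regex(
        r'<h1 class="title">(.+?)</h1>', webpage, u'title')

    # Optional field: warn and return None instead of aborting the extraction.
    uploader = self._search_regex(
        r'"uploader"\s*:\s*"([^"]+)"', webpage, u'uploader', fatal=False)

    # Optional field with a fallback value, returned silently when the
    # pattern does not match (the default must be non-None in this version).
    view_count = self._search_regex(
        r'([0-9,]+)\s+views', webpage, u'view count', default=u'0')
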
@@ -224,6 +283,72 @@ class InfoExtractor(object):
else:
return res
+ def _get_login_info(self):
+ """
+        Get the login info as (username, password)
+ It will look in the netrc file using the _NETRC_MACHINE value
+ If there's no info available, return (None, None)
+ """
+ if self._downloader is None:
+ return (None, None)
+
+ username = None
+ password = None
+ downloader_params = self._downloader.params
+
+ # Attempt to use provided username and password or .netrc data
+ if downloader_params.get('username', None) is not None:
+ username = downloader_params['username']
+ password = downloader_params['password']
+ elif downloader_params.get('usenetrc', False):
+ try:
+ info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+ if info is not None:
+ username = info[0]
+ password = info[2]
+ else:
+ raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+ except (IOError, netrc.NetrcParseError) as err:
+ self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
+
+ return (username, password)
+
+ # Helper functions for extracting OpenGraph info
+ @staticmethod
+ def _og_regex(prop):
+        return r'<meta.+?property=[\'"]og:%s[\'"].+?content=(?:"(.+?)"|\'(.+?)\')' % re.escape(prop)
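
Note: the netrc branch of _get_login_info() above expects a standard ~/.netrc entry keyed by the extractor's _NETRC_MACHINE. A sketch of such an entry and of what the stdlib netrc module returns for it (machine name and credentials are invented):

    import netrc

    # A matching ~/.netrc line for an extractor with _NETRC_MACHINE = 'youtube':
    #
    #   machine youtube login myaccount@example.com password hunter2
    #
    # authenticators() yields a (login, account, password) triple, which is
    # why the code above reads info[0] and info[2].
    try:
        info = netrc.netrc().authenticators('youtube')
    except (IOError, netrc.NetrcParseError):
        info = None
    if info is not None:
        username, password = info[0], info[2]
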