diff --git a/youtube-dl b/youtube-dl
index 47c6465..3a37fae 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -1,10 +1,29 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-# Author: Ricardo Garcia Gonzalez
-# Author: Danny Colligan
-# Author: Benjamin Johnson
-# License: Public domain code
+
+__author__  = (
+	'Ricardo Garcia Gonzalez',
+	'Danny Colligan',
+	'Benjamin Johnson',
+	'Vasyl\' Vavrychuk',
+	'Witold Baryluk',
+	'Paweł Paprota',
+	'Gergely Imreh',
+	'Rogério Brito',
+	'Philipp Hagemeister',
+	'Sören Schulze',
+	)
+
+__license__ = 'Public Domain'
+__version__ = '2011.10.19'
+
+UPDATE_URL = 'https://raw.github.com/rg3/youtube-dl/master/youtube-dl'
+
+import cookielib
+import datetime
+import gzip
 import htmlentitydefs
+import HTMLParser
 import httplib
 import locale
 import math
@@ -19,6 +38,20 @@ import sys
 import time
 import urllib
 import urllib2
+import warnings
+import zlib
+
+if os.name == 'nt':
+	import ctypes
+
+try:
+	import email.utils
+except ImportError: # Python 2.4
+	import email.Utils
+try:
+	import cStringIO as StringIO
+except ImportError:
+	import StringIO
 
 # parse_qs was moved from the cgi module to the urlparse module recently.
 try:
@@ -26,15 +59,139 @@ try:
 except ImportError:
 	from cgi import parse_qs
 
+try:
+	import lxml.etree
+except ImportError:
+	pass # Handled below
+
+try:
+	import xml.etree.ElementTree
+except ImportError: # Python<2.5: Not officially supported, but let it slip
+	warnings.warn('xml.etree.ElementTree support is missing. Consider upgrading to Python >= 2.5 if you get related errors.')
+
 std_headers = {
-	'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6',
+	'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1',
 	'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
-	'Accept': 'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
+	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+	'Accept-Encoding': 'gzip, deflate',
 	'Accept-Language': 'en-us,en;q=0.5',
 }
 
 simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode('ascii')
 
+try:
+	import json
+except ImportError: # Python <2.6, use trivialjson (https://github.com/phihag/trivialjson):
+	import re
+	class json(object):
+		@staticmethod
+		def loads(s):
+			s = s.decode('UTF-8')
+			def raiseError(msg, i):
+				raise ValueError(msg + ' at position ' + str(i) + ' of ' + repr(s) + ': ' + repr(s[i:]))
+			def skipSpace(i, expectMore=True):
+				while i < len(s) and s[i] in ' \t\r\n':
+					i += 1
+				if expectMore:
+					if i >= len(s):
+						raiseError('Premature end', i)
+				return i
+			def decodeEscape(match):
+				esc = match.group(1)
+				_STATIC = {
+					'"': '"',
+					'\\': '\\',
+					'/': '/',
+					'b': unichr(0x8),
+					'f': unichr(0xc),
+					'n': '\n',
+					'r': '\r',
+					't': '\t',
+				}
+				if esc in _STATIC:
+					return _STATIC[esc]
+				if esc[0] == 'u':
+					if len(esc) == 1+4:
+						return unichr(int(esc[1:5], 16))
+					if len(esc) == 5+6 and esc[5:7] == '\\u':
+						hi = int(esc[1:5], 16)
+						low = int(esc[7:11], 16)
+						return unichr((hi - 0xd800) * 0x400 + low - 0xdc00 + 0x10000)
+				raise ValueError('Unknown escape ' + str(esc))
+			def parseString(i):
+				i += 1
+				e = i
+				while True:
+					e = s.index('"', e)
+					bslashes = 0
+					while s[e-bslashes-1] == '\\':
+						bslashes += 1
+					if bslashes % 2 == 1:
+						e += 1
+						continue
+					break
+				rexp = re.compile(r'\\(u[dD][89aAbB][0-9a-fA-F]{2}\\u[0-9a-fA-F]{4}|u[0-9a-fA-F]{4}|.|$)')
+				stri = rexp.sub(decodeEscape, s[i:e])
+				return (e+1,stri)
+			def parseObj(i):
+				i += 1
+				res = {}
+				i = skipSpace(i)
+				if s[i] == '}': # Empty dictionary
+					return (i+1,res)
+				while True:
+					if s[i] != '"':
+						raiseError('Expected a string object key', i)
+					i,key = parseString(i)
+					i = skipSpace(i)
+					if i >= len(s) or s[i] != ':':
+						raiseError('Expected a colon', i)
+					i,val = parse(i+1)
+					res[key] = val
+					i = skipSpace(i)
+					if s[i] == '}':
+						return (i+1, res)
+					if s[i] != ',':
+						raiseError('Expected comma or closing curly brace', i)
+					i = skipSpace(i+1)
+			def parseArray(i):
+				res = []
+				i = skipSpace(i+1)
+				if s[i] == ']': # Empty array
+					return (i+1,res)
+				while True:
+					i,val = parse(i)
+					res.append(val)
+					i = skipSpace(i) # Raise exception if premature end
+					if s[i] == ']':
+						return (i+1, res)
+					if s[i] != ',':
+						raiseError('Expected a comma or closing bracket', i)
+					i = skipSpace(i+1)
+			def parseDiscrete(i):
+				for k,v in {'true': True, 'false': False, 'null': None}.items():
+					if s.startswith(k, i):
+						return (i+len(k), v)
+				raiseError('Not a boolean (or null)', i)
+			def parseNumber(i):
+				mobj = re.match('^(-?(0|[1-9][0-9]*)(\.[0-9]*)?([eE][+-]?[0-9]+)?)', s[i:])
+				if mobj is None:
+					raiseError('Not a number', i)
+				nums = mobj.group(1)
+				if '.' in nums or 'e' in nums or 'E' in nums:
+					return (i+len(nums), float(nums))
+				return (i+len(nums), int(nums))
+			CHARMAP = {'{': parseObj, '[': parseArray, '"': parseString, 't': parseDiscrete, 'f': parseDiscrete, 'n': parseDiscrete}
+			def parse(i):
+				i = skipSpace(i)
+				i,res = CHARMAP.get(s[i], parseNumber)(i)
+				i = skipSpace(i, False)
+				return (i,res)
+			i,res = parse(0)
+			if i < len(s):
+				raise ValueError('Extra data at end of input (index ' + str(i) + ' of ' + repr(s) + ': ' + repr(s[i:]) + ')')
+			return res
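A minimal usage sketch (Python 2; the JSON literal is hypothetical): whether json ends up being the standard library module or the fallback class above, loads() takes a UTF-8 byte string and returns unicode strings:

	data = json.loads('{"title": "caf\\u00e9", "views": [1, 2.5], "live": false}')
	# -> {u'title': u'caf\xe9', u'views': [1, 2.5], u'live': False}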
+
 def preferredencoding():
 	"""Get preferred encoding.
 
@@ -51,9 +208,10 @@ def preferredencoding():
 			yield pref
 	return yield_preferredencoding().next()
 
+
 def htmlentity_transform(matchobj):
 	"""Transforms an HTML entity to a Unicode character.
-	
+
 	This function receives a match object and is intended to be used with
 	the re.sub() function.
 	"""
@@ -77,11 +235,13 @@ def htmlentity_transform(matchobj):
 	# Unknown entity in name, return its literal representation
 	return (u'&%s;' % entity)
 
+
 def sanitize_title(utitle):
 	"""Sanitizes a video title so it could be used as part of a filename."""
 	utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
 	return utitle.replace(unicode(os.sep), u'%')
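For instance (hypothetical input, assuming os.sep == '/'):

	# sanitize_title(u'Foo &amp; bar/baz')  ->  u'Foo & bar%baz'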
 
+
 def sanitize_open(filename, open_mode):
 	"""Try to open the given filename, and slightly tweak it if this fails.
 
@@ -93,26 +253,41 @@ def sanitize_open(filename, open_mode):
 	It returns the tuple (stream, definitive_file_name).
 	"""
 	try:
+		if filename == u'-':
+			if sys.platform == 'win32':
+				import msvcrt
+				msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+			return (sys.stdout, filename)
 		stream = open(filename, open_mode)
 		return (stream, filename)
 	except (IOError, OSError), err:
 		# In case of error, try to remove win32 forbidden chars
-		filename = re.sub(ur'[<>:"\|\?\*]', u'#', filename)
+		filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename)
 
 		# An exception here should be caught in the caller
 		stream = open(filename, open_mode)
 		return (stream, filename)
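Two behaviors worth noting (hypothetical filenames):

	# sanitize_open(u'-', 'wb')          -> (sys.stdout, u'-'), for piping to stdout
	# sanitize_open(u'AUX:1.mp4', 'wb')  -> retried as u'AUX#1.mp4' if the first open() fails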
 
 
+def timeconvert(timestr):
+	"""Convert RFC 2822 defined time string into system timestamp"""
+	timestamp = None
+	timetuple = email.utils.parsedate_tz(timestr)
+	if timetuple is not None:
+		timestamp = email.utils.mktime_tz(timetuple)
+	return timestamp
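For example (a hypothetical Last-modified value):

	timeconvert('Wed, 19 Oct 2011 12:00:00 +0000')  # -> 1319025600
	timeconvert('not a date')                       # -> None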
+
+
 class DownloadError(Exception):
 	"""Download Error exception.
-	
+
 	This exception may be thrown by FileDownloader objects if they are not
 	configured to continue on errors. They will contain the appropriate
 	error message.
 	"""
 	pass
 
+
 class SameFileError(Exception):
 	"""Same File exception.
 
@@ -121,6 +296,7 @@ class SameFileError(Exception):
 	"""
 	pass
 
+
 class PostProcessingError(Exception):
 	"""Post Processing exception.
 
@@ -129,7 +305,8 @@ class PostProcessingError(Exception):
 	"""
 	pass
 
-class UnavailableFormatError(Exception):
+
+class UnavailableVideoError(Exception):
 	"""Unavailable Format exception.
 
 	This exception will be thrown when a video is requested
@@ -137,6 +314,7 @@ class UnavailableFormatError(Exception):
 	"""
 	pass
 
+
 class ContentTooShortError(Exception):
 	"""Content Too Short exception.
 
@@ -152,6 +330,66 @@ class ContentTooShortError(Exception):
 		self.downloaded = downloaded
 		self.expected = expected
 
+
+class YoutubeDLHandler(urllib2.HTTPHandler):
+	"""Handler for HTTP requests and responses.
+
+	This class, when installed with an OpenerDirector, automatically adds
+	the standard headers to every HTTP request and handles gzipped and
+	deflated responses from web servers. To avoid compression for a
+	particular request, the request only needs to include the HTTP header
+	"Youtubedl-No-Compression", which will be removed before making the
+	real request.
+
+	Part of this code was copied from:
+
+	http://techknack.net/python-urllib2-handlers/
+
+	Andrew Rowls, the author of that code, agreed to release it to the
+	public domain.
+	"""
+
+	@staticmethod
+	def deflate(data):
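+		# Some servers send a raw deflate stream, others a zlib-wrapped one;
+		# try the raw variant first and fall back to the wrapped one.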
+		try:
+			return zlib.decompress(data, -zlib.MAX_WBITS)
+		except zlib.error:
+			return zlib.decompress(data)
+
+	@staticmethod
+	def addinfourl_wrapper(stream, headers, url, code):
+		if hasattr(urllib2.addinfourl, 'getcode'):
+			return urllib2.addinfourl(stream, headers, url, code)
+		ret = urllib2.addinfourl(stream, headers, url)
+		ret.code = code
+		return ret
+
+	def http_request(self, req):
+		for h in std_headers:
+			if h in req.headers:
+				del req.headers[h]
+			req.add_header(h, std_headers[h])
+		if 'Youtubedl-no-compression' in req.headers:
+			if 'Accept-encoding' in req.headers:
+				del req.headers['Accept-encoding']
+			del req.headers['Youtubedl-no-compression']
+		return req
+
+	def http_response(self, req, resp):
+		old_resp = resp
+		# gzip
+		if resp.headers.get('Content-encoding', '') == 'gzip':
+			gz = gzip.GzipFile(fileobj=StringIO.StringIO(resp.read()), mode='r')
+			resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
+			resp.msg = old_resp.msg
+		# deflate
+		if resp.headers.get('Content-encoding', '') == 'deflate':
+			gz = StringIO.StringIO(self.deflate(resp.read()))
+			resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
+			resp.msg = old_resp.msg
+		return resp
+
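A minimal sketch of how such a handler is typically installed (Python 2; the cookie jar is an assumption, not part of this hunk):

	import cookielib, urllib2
	jar = cookielib.CookieJar()
	opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar), YoutubeDLHandler())
	urllib2.install_opener(opener)
	# urllib2.urlopen() now sends std_headers and transparently
	# decompresses gzip/deflate responses.
	page = urllib2.urlopen('http://www.example.com/').read()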
+
 class FileDownloader(object):
 	"""File Downloader class.
 
@@ -179,43 +417,53 @@ class FileDownloader(object):
 
 	Available options:
 
-	username:	Username for authentication purposes.
-	password:	Password for authentication purposes.
-	usenetrc:	Use netrc for authentication instead.
-	quiet:		Do not print messages to stdout.
-	forceurl:	Force printing final URL.
-	forcetitle:	Force printing title.
-	simulate:	Do not download the video files.
-	format:		Video format code.
-	outtmpl:	Template for output names.
-	ignoreerrors:	Do not stop on download errors.
-	ratelimit:	Download speed limit, in bytes/sec.
-	nooverwrites:	Prevent overwriting files.
-	continuedl:	Try to continue downloads if possible.
+	username:         Username for authentication purposes.
+	password:         Password for authentication purposes.
+	usenetrc:         Use netrc for authentication instead.
+	quiet:            Do not print messages to stdout.
+	forceurl:         Force printing final URL.
+	forcetitle:       Force printing title.
+	forcethumbnail:   Force printing thumbnail URL.
+	forcedescription: Force printing description.
+	forcefilename:    Force printing final filename.
+	simulate:         Do not download the video files.
+	format:           Video format code.
+	format_limit:     Highest quality format to try.
+	outtmpl:          Template for output names.
+	ignoreerrors:     Do not stop on download errors.
+	ratelimit:        Download speed limit, in bytes/sec.
+	nooverwrites:     Prevent overwriting files.
+	retries:          Number of times to retry for HTTP error 5xx
+	continuedl:       Try to continue downloads if possible.
+	noprogress:       Do not print the progress bar.
+	playliststart:    Playlist item to start at.
+	playlistend:      Playlist item to end at.
+	matchtitle:       Download only matching titles.
+	rejecttitle:      Reject downloads for matching titles.
+	logtostderr:      Log messages to stderr instead of stdout.
+	consoletitle:     Display progress in console window's titlebar.
+	nopart:           Do not use temporary .part files.
+	updatetime:       Use the Last-modified header to set output file timestamps.
+	writedescription: Write the video description to a .description file
+	writeinfojson:    Write the video description to a .info.json file
 	"""
 
 	params = None
 	_ies = []
 	_pps = []
 	_download_retcode = None
+	_num_downloads = None
+	_screen_file = None
 
 	def __init__(self, params):
 		"""Create a FileDownloader object with the given options."""
 		self._ies = []
 		self._pps = []
 		self._download_retcode = 0
+		self._num_downloads = 0
+		self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
 		self.params = params
-	
-	@staticmethod
-	def pmkdir(filename):
-		"""Create directory components in filename. Similar to Unix "mkdir -p"."""
-		components = filename.split(os.sep)
-		aggregate = [os.sep.join(components[0:x]) for x in xrange(1, len(components))]
-		aggregate = ['%s%s' % (x, os.sep) for x in aggregate] # Finish names with separator
-		for dir in aggregate:
-			if not os.path.exists(dir):
-				os.mkdir(dir)
-	
+
 	@staticmethod
 	def format_bytes(bytes):
 		if bytes is None:
@@ -227,7 +475,7 @@ class FileDownloader(object):
 		else:
 			exponent = long(math.log(bytes, 1024.0))
 		suffix = 'bkMGTPEZY'[exponent]
-		converted = float(bytes) / float(1024**exponent)
+		converted = float(bytes) / float(1024 ** exponent)
 		return '%.2f%s' % (converted, suffix)
 
 	@staticmethod
@@ -280,36 +528,42 @@ class FileDownloader(object):
 		multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
 		return long(round(number * multiplier))
 
-	@staticmethod
-	def verify_url(url):
-		"""Verify a URL is valid and data could be downloaded. Return real data URL."""
-		request = urllib2.Request(url, None, std_headers)
-		data = urllib2.urlopen(request)
-		data.read(1)
-		url = data.geturl()
-		data.close()
-		return url
-
 	def add_info_extractor(self, ie):
 		"""Add an InfoExtractor object to the end of the list."""
 		self._ies.append(ie)
 		ie.set_downloader(self)
-	
+
 	def add_post_processor(self, pp):
 		"""Add a PostProcessor object to the end of the chain."""
 		self._pps.append(pp)
 		pp.set_downloader(self)
-	
-	def to_stdout(self, message, skip_eol=False):
+
+	def to_screen(self, message, skip_eol=False, ignore_encoding_errors=False):
 		"""Print message to stdout if not in quiet mode."""
-		if not self.params.get('quiet', False):
-			print (u'%s%s' % (message, [u'\n', u''][skip_eol])).encode(preferredencoding()),
-			sys.stdout.flush()
-	
+		try:
+			if not self.params.get('quiet', False):
+				terminator = [u'\n', u''][skip_eol]
+				print >>self._screen_file, (u'%s%s' % (message, terminator)).encode(preferredencoding()),
+			self._screen_file.flush()
+		except (UnicodeEncodeError), err:
+			if not ignore_encoding_errors:
+				raise
+
 	def to_stderr(self, message):
 		"""Print message to stderr."""
 		print >>sys.stderr, message.encode(preferredencoding())
-	
+
+	def to_cons_title(self, message):
+		"""Set console/terminal window title to message."""
+		if not self.params.get('consoletitle', False):
+			return
+		if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
+			# c_wchar_p() might not be necessary if `message` is
+			# already of type unicode()
+			ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
+		elif 'TERM' in os.environ:
+			sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))
+
 	def fixed_template(self):
 		"""Checks if the output template is fixed."""
 		return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None)
@@ -340,83 +594,204 @@ class FileDownloader(object):
 		if speed > rate_limit:
 			time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
 
+	def temp_name(self, filename):
+		"""Returns a temporary filename for the given filename."""
+		if self.params.get('nopart', False) or filename == u'-' or \
+				(os.path.exists(filename) and not os.path.isfile(filename)):
+			return filename
+		return filename + u'.part'
+
+	def undo_temp_name(self, filename):
+		if filename.endswith(u'.part'):
+			return filename[:-len(u'.part')]
+		return filename
+
+	def try_rename(self, old_filename, new_filename):
+		try:
+			if old_filename == new_filename:
+				return
+			os.rename(old_filename, new_filename)
+		except (IOError, OSError), err:
+			self.trouble(u'ERROR: unable to rename file')
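Taken together, these helpers implement the .part workflow (hypothetical names):

	# tmp = fd.temp_name(u'video.flv')   -> u'video.flv.part' (unless nopart or stdout)
	# ... the data is written to tmp ...
	# fd.try_rename(tmp, u'video.flv')   # finalize; failures are reported, not raised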
+
+	def try_utime(self, filename, last_modified_hdr):
+		"""Try to set the last-modified time of the given file."""
+		if last_modified_hdr is None:
+			return
+		if not os.path.isfile(filename):
+			return
+		timestr = last_modified_hdr
+		if timestr is None:
+			return
+		filetime = timeconvert(timestr)
+		if filetime is None:
+			return filetime
+		try:
+			os.utime(filename, (time.time(), filetime))
+		except:
+			pass
+		return filetime
+
+	def report_writedescription(self, descfn):
+		""" Report that the description file is being written """
+		self.to_screen(u'[info] Writing video description to: %s' % descfn, ignore_encoding_errors=True)
+
+	def report_writeinfojson(self, infofn):
+		""" Report that the metadata file has been written """
+		self.to_screen(u'[info] Video description metadata as JSON to: %s' % infofn, ignore_encoding_errors=True)
+
 	def report_destination(self, filename):
 		"""Report destination filename."""
-		self.to_stdout(u'[download] Destination: %s' % filename)
-	
+		self.to_screen(u'[download] Destination: %s' % filename, ignore_encoding_errors=True)
+
 	def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
 		"""Report download progress."""
-		self.to_stdout(u'\r[download] %s of %s at %s ETA %s' %
+		if self.params.get('noprogress', False):
+			return
+		self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
 				(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
+		self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
+				(percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
 
 	def report_resuming_byte(self, resume_len):
-		"""Report attemtp to resume at given byte."""
-		self.to_stdout(u'[download] Resuming download at byte %s' % resume_len)
-	
+		"""Report attempt to resume at given byte."""
+		self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
+
+	def report_retry(self, count, retries):
+		"""Report retry in case of HTTP error 5xx"""
+		self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
+
 	def report_file_already_downloaded(self, file_name):
 		"""Report file has already been fully downloaded."""
-		self.to_stdout(u'[download] %s has already been downloaded' % file_name)
-	
+		try:
+			self.to_screen(u'[download] %s has already been downloaded' % file_name)
+		except (UnicodeEncodeError), err:
+			self.to_screen(u'[download] The file has already been downloaded')
+
 	def report_unable_to_resume(self):
 		"""Report it was impossible to resume download."""
-		self.to_stdout(u'[download] Unable to resume')
-	
+		self.to_screen(u'[download] Unable to resume')
+
 	def report_finish(self):
 		"""Report download finished."""
-		self.to_stdout(u'')
+		if self.params.get('noprogress', False):
+			self.to_screen(u'[download] Download completed')
+		else:
+			self.to_screen(u'')
+
+	def increment_downloads(self):
+		"""Increment the ordinal that assigns a number to each file."""
+		self._num_downloads += 1
+
+	def prepare_filename(self, info_dict):
+		"""Generate the output filename."""
+		try:
+			template_dict = dict(info_dict)
+			template_dict['epoch'] = unicode(long(time.time()))
+			template_dict['autonumber'] = unicode('%05d' % self._num_downloads)
+			filename = self.params['outtmpl'] % template_dict
+			return filename
+		except (ValueError, KeyError), err:
+			self.trouble(u'ERROR: invalid system charset or erroneous output template')
+			return None
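A sketch of the template expansion (hypothetical values):

	# With params['outtmpl'] == u'%(autonumber)s-%(id)s.%(ext)s',
	# _num_downloads == 3 and info_dict == {'id': u'abc', 'ext': u'mp4'},
	# prepare_filename() returns u'00003-abc.mp4'; an 'epoch' key
	# (seconds since the epoch) is also available to templates.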
 
 	def process_info(self, info_dict):
 		"""Process a single dictionary returned by an InfoExtractor."""
+		filename = self.prepare_filename(info_dict)
+		
+		# Forced printings
+		if self.params.get('forcetitle', False):
+			print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
+		if self.params.get('forceurl', False):
+			print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
+		if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
+			print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
+		if self.params.get('forcedescription', False) and 'description' in info_dict:
+			print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
+		if self.params.get('forcefilename', False) and filename is not None:
+			print filename.encode(preferredencoding(), 'xmlcharrefreplace')
+		if self.params.get('forceformat', False):
+			print info_dict['format'].encode(preferredencoding(), 'xmlcharrefreplace')
+
 		# Do nothing else if in simulate mode
 		if self.params.get('simulate', False):
-			# Verify URL if it's an HTTP one
-			if info_dict['url'].startswith('http'):
-				try:
-					info_dict['url'] = self.verify_url(info_dict['url'].encode('utf-8')).decode('utf-8')
-				except (OSError, IOError, urllib2.URLError, httplib.HTTPException, socket.error), err:
-					raise UnavailableFormatError
+			return
 
-			# Forced printings
-			if self.params.get('forcetitle', False):
-				print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
-			if self.params.get('forceurl', False):
-				print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
+		if filename is None:
+			return
 
+		matchtitle = self.params.get('matchtitle', False)
+		rejecttitle = self.params.get('rejecttitle', False)
+		title = info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
+		if matchtitle and not re.search(matchtitle, title, re.IGNORECASE):
+			self.to_screen(u'[download] "%s" title did not match pattern "%s"' % (title, matchtitle))
+			return
+		if rejecttitle and re.search(rejecttitle, title, re.IGNORECASE):
+			self.to_screen(u'[download] "%s" title matched reject pattern "%s"' % (title, rejecttitle))
 			return
 			
-		try:
-			template_dict = dict(info_dict)
-			template_dict['epoch'] = unicode(long(time.time()))
-			filename = self.params['outtmpl'] % template_dict
-		except (ValueError, KeyError), err:
-			self.trouble('ERROR: invalid output template or system charset: %s' % str(err))
 		if self.params.get('nooverwrites', False) and os.path.exists(filename):
-			self.to_stderr(u'WARNING: file exists: %s; skipping' % filename)
+			self.to_stderr(u'WARNING: file exists and will be skipped')
 			return
 
 		try:
-			self.pmkdir(filename)
+			dn = os.path.dirname(filename)
+			if dn != '' and not os.path.exists(dn):
+				os.makedirs(dn)
 		except (OSError, IOError), err:
-			self.trouble('ERROR: unable to create directories: %s' % str(err))
+			self.trouble(u'ERROR: unable to create directory ' + unicode(err))
 			return
 
-		try:
-			success = self._do_download(filename, info_dict['url'].encode('utf-8'))
-		except (OSError, IOError), err:
-			raise UnavailableFormatError
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self.trouble('ERROR: unable to download video data: %s' % str(err))
-			return
-		except (ContentTooShortError, ), err:
-			self.trouble('ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
-			return
+		if self.params.get('writedescription', False):
+			try:
+				descfn = filename + '.description'
+				self.report_writedescription(descfn)
+				descfile = open(descfn, 'wb')
+				try:
+					descfile.write(info_dict['description'].encode('utf-8'))
+				finally:
+					descfile.close()
+			except (OSError, IOError):
+				self.trouble(u'ERROR: Cannot write description file ' + descfn)
+				return
+
+		if self.params.get('writeinfojson', False):
+			infofn = filename + '.info.json'
+			self.report_writeinfojson(infofn)
+			try:
+				json.dump
+			except (NameError, AttributeError):
+				self.trouble(u'ERROR: No JSON encoder found. Update to Python 2.6+, setup a json module, or leave out --write-info-json.')
+				return
+			try:
+				infof = open(infofn, 'wb')
+				try:
+					json_info_dict = dict((k, v) for k, v in info_dict.iteritems() if k not in ('urlhandle',))
+					json.dump(json_info_dict, infof)
+				finally:
+					infof.close()
+			except (OSError, IOError):
+				self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
+				return
 
-		if success:
+		if not self.params.get('skip_download', False):
 			try:
-				self.post_process(filename, info_dict)
-			except (PostProcessingError), err:
-				self.trouble('ERROR: postprocessing: %s' % str(err))
+				success = self._do_download(filename, info_dict)
+			except (OSError, IOError), err:
+				raise UnavailableVideoError
+			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+				self.trouble(u'ERROR: unable to download video data: %s' % str(err))
+				return
+			except (ContentTooShortError, ), err:
+				self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
 				return
+	
+			if success:
+				try:
+					self.post_process(filename, info_dict)
+				except (PostProcessingError), err:
+					self.trouble(u'ERROR: postprocessing: %s' % str(err))
+					return
 
 	def download(self, url_list):
 		"""Download a given list of URLs."""
@@ -440,7 +815,7 @@ class FileDownloader(object):
 				break
 
 			if not suitable_found:
-				self.trouble('ERROR: no suitable InfoExtractor: %s' % url)
+				self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
 
 		return self._download_retcode
 
@@ -452,9 +827,10 @@ class FileDownloader(object):
 			info = pp.run(info)
 			if info is None:
 				break
-	
-	def _download_with_rtmpdump(self, filename, url):
+
+	def _download_with_rtmpdump(self, filename, url, player_url):
 		self.report_destination(filename)
+		tmpfilename = self.temp_name(filename)
 
 		# Check for rtmpdump first
 		try:
@@ -466,63 +842,120 @@ class FileDownloader(object):
 		# Download using rtmpdump. rtmpdump returns exit code 2 when
 		# the connection was interrumpted and resuming appears to be
 		# possible. This is part of rtmpdump's normal usage, AFAIK.
-		basic_args = ['rtmpdump', '-q', '-r', url, '-o', filename]
+		basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename]
 		retval = subprocess.call(basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)])
 		while retval == 2 or retval == 1:
-			self.to_stdout(u'\r[rtmpdump] %s bytes' % os.path.getsize(filename), skip_eol=True)
-			time.sleep(2.0) # This seems to be needed
+			prevsize = os.path.getsize(tmpfilename)
+			self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
+			time.sleep(5.0) # This seems to be needed
 			retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
+			cursize = os.path.getsize(tmpfilename)
+			if prevsize == cursize and retval == 1:
+				break
+			# Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
+			if prevsize == cursize and retval == 2 and cursize > 1024:
+				self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
+				retval = 0
+				break
 		if retval == 0:
-			self.to_stdout(u'\r[rtmpdump] %s bytes' % os.path.getsize(filename))
+			self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(tmpfilename))
+			self.try_rename(tmpfilename, filename)
 			return True
 		else:
-			self.trouble('ERROR: rtmpdump exited with code %d' % retval)
+			self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
 			return False
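For reference, the command line assembled above is roughly as follows (-W is only passed when a player URL was found; the resume flags in brackets are added with --continue and on retries):

	# rtmpdump -q [-W <player_url>] -r <url> -o <tmpfilename> [-e -k 1]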
 
-	def _do_download(self, filename, url):
+	def _do_download(self, filename, info_dict):
+		url = info_dict['url']
+		player_url = info_dict.get('player_url', None)
+
+		# Check file already present
+		if self.params.get('continuedl', False) and os.path.isfile(filename) and not self.params.get('nopart', False):
+			self.report_file_already_downloaded(filename)
+			return True
+
 		# Attempt to download using rtmpdump
 		if url.startswith('rtmp'):
-			return self._download_with_rtmpdump(filename, url)
+			return self._download_with_rtmpdump(filename, url, player_url)
 
+		tmpfilename = self.temp_name(filename)
 		stream = None
-		open_mode = 'wb'
-		basic_request = urllib2.Request(url, None, std_headers)
-		request = urllib2.Request(url, None, std_headers)
+
+		# Do not include the Accept-Encoding header
+		headers = {'Youtubedl-no-compression': 'True'}
+		basic_request = urllib2.Request(url, None, headers)
+		request = urllib2.Request(url, None, headers)
 
 		# Establish possible resume length
-		if os.path.isfile(filename):
-			resume_len = os.path.getsize(filename)
+		if os.path.isfile(tmpfilename):
+			resume_len = os.path.getsize(tmpfilename)
 		else:
 			resume_len = 0
 
-		# Request parameters in case of being able to resume
-		if self.params.get('continuedl', False) and resume_len != 0:
-			self.report_resuming_byte(resume_len)
-			request.add_header('Range','bytes=%d-' % resume_len)
-			open_mode = 'ab'
-
-		# Establish connection
-		try:
-			data = urllib2.urlopen(request)
-		except (urllib2.HTTPError, ), err:
-			if err.code != 416: #  416 is 'Requested range not satisfiable'
-				raise
-			# Unable to resume
-			data = urllib2.urlopen(basic_request)
-			content_length = data.info()['Content-Length']
-
-			if content_length is not None and long(content_length) == resume_len:
-				# Because the file had already been fully downloaded
-				self.report_file_already_downloaded(filename)
-				return True
+		open_mode = 'wb'
+		if resume_len != 0:
+			if self.params.get('continuedl', False):
+				self.report_resuming_byte(resume_len)
+				request.add_header('Range','bytes=%d-' % resume_len)
+				open_mode = 'ab'
 			else:
-				# Because the server didn't let us
-				self.report_unable_to_resume()
-				open_mode = 'wb'
+				resume_len = 0
+
+		count = 0
+		retries = self.params.get('retries', 0)
+		while count <= retries:
+			# Establish connection
+			try:
+				if count == 0 and 'urlhandle' in info_dict:
+					# Reuse the handle the extractor already opened instead of
+					# discarding it with a fresh urlopen() below
+					data = info_dict['urlhandle']
+					break
+				data = urllib2.urlopen(request)
+				break
+			except (urllib2.HTTPError, ), err:
+				if (err.code < 500 or err.code >= 600) and err.code != 416:
+					# Unexpected HTTP error
+					raise
+				elif err.code == 416:
+					# Unable to resume (requested range not satisfiable)
+					try:
+						# Open the connection again without the range header
+						data = urllib2.urlopen(basic_request)
+						content_length = data.info()['Content-Length']
+					except (urllib2.HTTPError, ), err:
+						if err.code < 500 or err.code >= 600:
+							raise
+					else:
+						# Examine the reported length
+						if (content_length is not None and
+								(resume_len - 100 < long(content_length) < resume_len + 100)):
+							# The file had already been fully downloaded.
+							# Explanation to the above condition: in issue #175 it was revealed that
+							# YouTube sometimes adds or removes a few bytes from the end of the file,
+							# changing the file size slightly and causing problems for some users. So
+							# I decided to implement a suggested change and consider the file
+							# completely downloaded if the file size differs by less than 100 bytes from
+							# the one on the hard drive.
+							self.report_file_already_downloaded(filename)
+							self.try_rename(tmpfilename, filename)
+							return True
+						else:
+							# The length does not match, we start the download over
+							self.report_unable_to_resume()
+							open_mode = 'wb'
+							break
+			# Retry
+			count += 1
+			if count <= retries:
+				self.report_retry(count, retries)
+
+		if count > retries:
+			self.trouble(u'ERROR: giving up after %s retries' % retries)
+			return False
 
 		data_len = data.info().get('Content-length', None)
+		if data_len is not None:
+			data_len = long(data_len) + resume_len
 		data_len_str = self.format_bytes(data_len)
-		byte_counter = 0
+		byte_counter = 0 + resume_len
 		block_size = 1024
 		start = time.time()
 		while True:
@@ -530,36 +963,55 @@ class FileDownloader(object):
 			before = time.time()
 			data_block = data.read(block_size)
 			after = time.time()
-			data_block_len = len(data_block)
-			if data_block_len == 0:
+			if len(data_block) == 0:
 				break
-			byte_counter += data_block_len
+			byte_counter += len(data_block)
 
 			# Open file just in time
 			if stream is None:
 				try:
-					(stream, filename) = sanitize_open(filename, open_mode)
+					(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
+					assert stream is not None
+					filename = self.undo_temp_name(tmpfilename)
 					self.report_destination(filename)
 				except (OSError, IOError), err:
-					self.trouble('ERROR: unable to open for writing: %s' % str(err))
+					self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
 					return False
-			stream.write(data_block)
-			block_size = self.best_block_size(after - before, data_block_len)
+			try:
+				stream.write(data_block)
+			except (IOError, OSError), err:
+				self.trouble(u'\nERROR: unable to write data: %s' % str(err))
+				return False
+			block_size = self.best_block_size(after - before, len(data_block))
 
 			# Progress message
-			percent_str = self.calc_percent(byte_counter, data_len)
-			eta_str = self.calc_eta(start, time.time(), data_len, byte_counter)
-			speed_str = self.calc_speed(start, time.time(), byte_counter)
-			self.report_progress(percent_str, data_len_str, speed_str, eta_str)
+			speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
+			if data_len is None:
+				self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
+			else:
+				percent_str = self.calc_percent(byte_counter, data_len)
+				eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
+				self.report_progress(percent_str, data_len_str, speed_str, eta_str)
 
 			# Apply rate limit
-			self.slow_down(start, byte_counter)
+			self.slow_down(start, byte_counter - resume_len)
 
+		if stream is None:
+			self.trouble(u'\nERROR: Did not get any data blocks')
+			return False
+		stream.close()
 		self.report_finish()
-		if data_len is not None and str(byte_counter) != data_len:
+		if data_len is not None and byte_counter != data_len:
 			raise ContentTooShortError(byte_counter, long(data_len))
+		self.try_rename(tmpfilename, filename)
+
+		# Update file modification time
+		if self.params.get('updatetime', True):
+			info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
+
 		return True
 
+
 class InfoExtractor(object):
 	"""Information Extractor class.
 
@@ -578,11 +1030,20 @@ class InfoExtractor(object):
 	title:		Literal title.
 	stitle:		Simplified title.
 	ext:		Video filename extension.
+	format:		Video format.
+	player_url:	SWF Player URL (may be None).
+
+	The following fields are optional. Their primary purpose is to allow
+	youtube-dl to serve as the backend for a video search function, such
+	as the one in youtube2mp3.  They are only used when their respective
+	forced printing functions are called:
+
+	thumbnail:	Full URL to a video thumbnail image.
+	description:	One-line video description.
 
 	Subclasses of this one should re-define the _real_initialize() and
-	_real_extract() methods, as well as the suitable() static method.
-	Probably, they should also be instantiated and added to the main
-	downloader.
+	_real_extract() methods and define a _VALID_URL regexp.
+	Probably, they should also be added to the list of extractors.
 	"""
 
 	_ready = False
@@ -593,10 +1054,9 @@ class InfoExtractor(object):
 		self._ready = False
 		self.set_downloader(downloader)
 
-	@staticmethod
-	def suitable(url):
+	def suitable(self, url):
 		"""Receives a URL and returns True if suitable for this IE."""
-		return False
+		return re.match(self._VALID_URL, url) is not None
 
 	def initialize(self):
 		"""Initializes an instance (authentication, etc)."""
@@ -612,7 +1072,7 @@ class InfoExtractor(object):
 	def set_downloader(self, downloader):
 		"""Sets the downloader for this IE."""
 		self._downloader = downloader
-	
+
 	def _real_initialize(self):
 		"""Real initialization process. Redefine in subclasses."""
 		pass
@@ -621,55 +1081,82 @@ class InfoExtractor(object):
 		"""Real extraction process. Redefine in subclasses."""
 		pass
 
+
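As a sketch of that contract, a hypothetical extractor (not part of this diff) would look roughly like this:

	class ExampleIE(InfoExtractor):
		"""Hypothetical information extractor for example.com."""

		_VALID_URL = r'(?:https?://)?(?:www\.)?example\.com/video/([0-9A-Za-z_-]+)'
		IE_NAME = u'example'

		def _real_initialize(self):
			pass # no login or age confirmation needed

		def _real_extract(self, url):
			video_id = re.match(self._VALID_URL, url).group(1)
			# At this point we have a new video
			self._downloader.increment_downloads()
			try:
				self._downloader.process_info({
					'id':		video_id.decode('utf-8'),
					'url':		u'http://media.example.com/%s.mp4' % video_id,
					'uploader':	u'NA',
					'upload_date':	u'NA',
					'title':	u'Example video',
					'stitle':	u'Example_video',
					'ext':		u'mp4',
					'format':	u'NA',
					'player_url':	None,
				})
			except UnavailableVideoError:
				self._downloader.trouble(u'\nERROR: unable to download video')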
 class YoutubeIE(InfoExtractor):
 	"""Information extractor for youtube.com."""
 
-	_VALID_URL = r'^((?:http://)?(?:\w+\.)?youtube\.com/(?:(?:v/)|(?:(?:watch(?:\.php)?)?[\?#](?:.+&)?v=)))?([0-9A-Za-z_-]+)(?(1).+)?$'
-	_LANG_URL = r'http://uk.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
-	_LOGIN_URL = 'http://www.youtube.com/signup?next=/&gl=US&hl=en'
+	_VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?!view_play_list|my_playlists|artist|playlist)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?)?([0-9A-Za-z_-]+)(?(1).+)?$'
+	_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
+	_LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
 	_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
 	_NETRC_MACHINE = 'youtube'
-	_available_formats = ['37', '22', '35', '18', '5', '17', '13', None] # listed in order of priority for -b flag
+	# Listed in order of quality
+	_available_formats = ['38', '37', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
 	_video_extensions = {
 		'13': '3gp',
 		'17': 'mp4',
 		'18': 'mp4',
 		'22': 'mp4',
 		'37': 'mp4',
+		'38': 'video', # You actually don't know if this will be MOV, AVI or whatever
+		'43': 'webm',
+		'44': 'webm',
+		'45': 'webm',
 	}
-
-	@staticmethod
-	def suitable(url):
-		return (re.match(YoutubeIE._VALID_URL, url) is not None)
+	_video_dimensions = {
+		'5': '240x400',
+		'6': '???',
+		'13': '???',
+		'17': '144x176',
+		'18': '360x640',
+		'22': '720x1280',
+		'34': '360x640',
+		'35': '480x854',
+		'37': '1080x1920',
+		'38': '3072x4096',
+		'43': '360x640',
+		'44': '480x854',
+		'45': '720x1280',
+	}
+	IE_NAME = u'youtube'
 
 	def report_lang(self):
 		"""Report attempt to set language."""
-		self._downloader.to_stdout(u'[youtube] Setting language')
+		self._downloader.to_screen(u'[youtube] Setting language')
 
 	def report_login(self):
 		"""Report attempt to log in."""
-		self._downloader.to_stdout(u'[youtube] Logging in')
-	
+		self._downloader.to_screen(u'[youtube] Logging in')
+
 	def report_age_confirmation(self):
 		"""Report attempt to confirm age."""
-		self._downloader.to_stdout(u'[youtube] Confirming age')
-	
+		self._downloader.to_screen(u'[youtube] Confirming age')
+
+	def report_video_webpage_download(self, video_id):
+		"""Report attempt to download video webpage."""
+		self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)
+
 	def report_video_info_webpage_download(self, video_id):
 		"""Report attempt to download video info webpage."""
-		self._downloader.to_stdout(u'[youtube] %s: Downloading video info webpage' % video_id)
-	
+		self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)
+
 	def report_information_extraction(self, video_id):
 		"""Report attempt to extract video information."""
-		self._downloader.to_stdout(u'[youtube] %s: Extracting video information' % video_id)
-	
+		self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)
+
 	def report_unavailable_format(self, video_id, format):
 		"""Report extracted video URL."""
-		self._downloader.to_stdout(u'[youtube] %s: Format %s not available' % (video_id, format))
-	
+		self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))
+
 	def report_rtmp_download(self):
 		"""Indicate the download will use the RTMP protocol."""
-		self._downloader.to_stdout(u'[youtube] RTMP download detected')
-	
+		self._downloader.to_screen(u'[youtube] RTMP download detected')
+
+	def _print_formats(self, formats):
+		print 'Available formats:'
+		for x in formats:
+			print '%s\t:\t%s\t[%s]' % (x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???'))
+
 	def _real_initialize(self):
 		if self._downloader is None:
 			return
@@ -695,7 +1182,7 @@ class YoutubeIE(InfoExtractor):
 				return
 
 		# Set language
-		request = urllib2.Request(self._LANG_URL, None, std_headers)
+		request = urllib2.Request(self._LANG_URL)
 		try:
 			self.report_lang()
 			urllib2.urlopen(request).read()
@@ -715,7 +1202,7 @@ class YoutubeIE(InfoExtractor):
 				'username':	username,
 				'password':	password,
 				}
-		request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form), std_headers)
+		request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
 		try:
 			self.report_login()
 			login_results = urllib2.urlopen(request).read()
@@ -725,13 +1212,13 @@ class YoutubeIE(InfoExtractor):
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
 			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
 			return
-	
+
 		# Confirm age
 		age_form = {
 				'next_url':		'/',
 				'action_confirm':	'Confirm',
 				}
-		request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form), std_headers)
+		request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form))
 		try:
 			self.report_age_confirmation()
 			age_results = urllib2.urlopen(request).read()
@@ -747,72 +1234,153 @@ class YoutubeIE(InfoExtractor):
 			return
 		video_id = mobj.group(2)
 
-		# Downloader parameters
-		best_quality = False
-		format_param = None
-		quality_index = 0
-		if self._downloader is not None:
-			params = self._downloader.params
-			format_param = params.get('format', None)
-			if format_param == '0':
-				format_param = self._available_formats[quality_index]
-				best_quality = True
-
-		while True:
-			# Extension
-			video_extension = self._video_extensions.get(format_param, 'flv')
+		# Get video webpage
+		self.report_video_webpage_download(video_id)
+		request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
+		try:
+			video_webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			return
 
-			# Get video info
-			video_info_url = 'http://www.youtube.com/get_video_info?&video_id=%s&el=detailpage&ps=default&eurl=&gl=US&hl=en' % video_id
-			request = urllib2.Request(video_info_url, None, std_headers)
+		# Attempt to extract SWF player URL
+		mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
+		if mobj is not None:
+			player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
+		else:
+			player_url = None
+
+		# Get video info
+		self.report_video_info_webpage_download(video_id)
+		for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
+			video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
+					% (video_id, el_type))
+			request = urllib2.Request(video_info_url)
 			try:
-				self.report_video_info_webpage_download(video_id)
 				video_info_webpage = urllib2.urlopen(request).read()
 				video_info = parse_qs(video_info_webpage)
+				if 'token' in video_info:
+					break
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
 				self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
 				return
-			self.report_information_extraction(video_id)
-
-			# "t" param
-			if 'token' not in video_info:
-				# Attempt to see if YouTube has issued an error message
-				if 'reason' not in video_info:
-					self._downloader.trouble(u'ERROR: unable to extract "t" parameter for unknown reason')
-					stream = open('reportme-ydl-%s.dat' % time.time(), 'wb')
-					stream.write(video_info_webpage)
-					stream.close()
-				else:
-					reason = urllib.unquote_plus(video_info['reason'][0])
-					self._downloader.trouble(u'ERROR: YouTube said: %s' % reason.decode('utf-8'))
+		if 'token' not in video_info:
+			if 'reason' in video_info:
+				self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8'))
+			else:
+				self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
+			return
+
+		# Start extracting information
+		self.report_information_extraction(video_id)
+
+		# uploader
+		if 'author' not in video_info:
+			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+			return
+		video_uploader = urllib.unquote_plus(video_info['author'][0])
+
+		# title
+		if 'title' not in video_info:
+			self._downloader.trouble(u'ERROR: unable to extract video title')
+			return
+		video_title = urllib.unquote_plus(video_info['title'][0])
+		video_title = video_title.decode('utf-8')
+		video_title = sanitize_title(video_title)
+
+		# simplified title
+		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+		simple_title = simple_title.strip(ur'_')
+
+		# thumbnail image
+		if 'thumbnail_url' not in video_info:
+			self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+			video_thumbnail = ''
+		else:	# don't panic if we can't find it
+			video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])
+
+		# upload date
+		upload_date = u'NA'
+		mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
+		if mobj is not None:
+			upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
+			format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
+			for expression in format_expressions:
+				try:
+					upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
+				except:
+					pass
+
+		# description
+		try:
+			lxml.etree
+		except NameError:
+			video_description = u'No description available.'
+			if self._downloader.params.get('forcedescription', False) or self._downloader.params.get('writedescription', False):
+				mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
+				if mobj is not None:
+					video_description = mobj.group(1).decode('utf-8')
+		else:
+			html_parser = lxml.etree.HTMLParser(encoding='utf-8')
+			vwebpage_doc = lxml.etree.parse(StringIO.StringIO(video_webpage), html_parser)
+			video_description = u''.join(vwebpage_doc.xpath('id("eow-description")//text()'))
+			# TODO use another parser
+
+		# token
+		video_token = urllib.unquote_plus(video_info['token'][0])
+
+		# Decide which formats to download
+		req_format = self._downloader.params.get('format', None)
+
+		if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
+			self.report_rtmp_download()
+			video_url_list = [(None, video_info['conn'][0])]
+		elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
+			url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
+			url_data = [parse_qs(uds) for uds in url_data_strs]
+			url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
+			url_map = dict((ud['itag'][0], ud['url'][0]) for ud in url_data)
+
+			format_limit = self._downloader.params.get('format_limit', None)
+			if format_limit is not None and format_limit in self._available_formats:
+				format_list = self._available_formats[self._available_formats.index(format_limit):]
+			else:
+				format_list = self._available_formats
+			existing_formats = [x for x in format_list if x in url_map]
+			if len(existing_formats) == 0:
+				self._downloader.trouble(u'ERROR: no known formats available for video')
 				return
-			token = urllib.unquote_plus(video_info['token'][0])
-			video_real_url = 'http://www.youtube.com/get_video?video_id=%s&t=%s&eurl=&el=detailpage&ps=default&gl=US&hl=en' % (video_id, token)
-			if format_param is not None:
-				video_real_url = '%s&fmt=%s' % (video_real_url, format_param)
-
-			# Check possible RTMP download
-			if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
-				self.report_rtmp_download()
-				video_real_url = video_info['conn'][0]
-
-			# uploader
-			if 'author' not in video_info:
-				self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+			if self._downloader.params.get('listformats', None):
+				self._print_formats(existing_formats)
 				return
-			video_uploader = urllib.unquote_plus(video_info['author'][0])
+			if req_format is None or req_format == 'best':
+				video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+			elif req_format == 'worst':
+				video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality
+			elif req_format in ('-1', 'all'):
+				video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+			else:
+				# Specific formats. We pick the first in a slash-delimited sequence.
+				# For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
+				req_formats = req_format.split('/')
+				video_url_list = None
+				for rf in req_formats:
+					if rf in url_map:
+						video_url_list = [(rf, url_map[rf])]
+						break
+				if video_url_list is None:
+					self._downloader.trouble(u'ERROR: requested format not available')
+					return
+		else:
+			self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
+			return
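To summarize the selection rules with a hypothetical url_map = {'22': ..., '18': ...}:

	# req_format None or 'best' -> [('22', ...)]   (best available quality)
	# req_format 'worst'        -> [('18', ...)]   (worst available quality)
	# req_format '-1' or 'all'  -> both formats, in quality order
	# req_format '35/22/18'     -> [('22', ...)]   (first available of the list)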
 
-			# title
-			if 'title' not in video_info:
-				self._downloader.trouble(u'ERROR: unable to extract video title')
-				return
-			video_title = urllib.unquote_plus(video_info['title'][0])
-			video_title = video_title.decode('utf-8')
-			video_title = sanitize_title(video_title)
+		for format_param, video_real_url in video_url_list:
+			# At this point we have a new video
+			self._downloader.increment_downloads()
 
-			# simplified title
-			simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
-			simple_title = simple_title.strip(ur'_')
+			# Extension
+			video_extension = self._video_extensions.get(format_param, 'flv')
 
 			try:
 				# Process video information
@@ -820,27 +1388,17 @@ class YoutubeIE(InfoExtractor):
 					'id':		video_id.decode('utf-8'),
 					'url':		video_real_url.decode('utf-8'),
 					'uploader':	video_uploader.decode('utf-8'),
+					'upload_date':	upload_date,
 					'title':	video_title,
 					'stitle':	simple_title,
 					'ext':		video_extension.decode('utf-8'),
+					'format':	(format_param is None and u'NA' or format_param.decode('utf-8')),
+					'thumbnail':	video_thumbnail.decode('utf-8'),
+					'description':	video_description,
+					'player_url':	player_url,
 				})
-
-				return
-
-			except UnavailableFormatError, err:
-				if best_quality:
-					if quality_index == len(self._available_formats) - 1:
-						# I don't ever expect this to happen
-						self._downloader.trouble(u'ERROR: no known formats available for video')
-						return
-					else:
-						self.report_unavailable_format(video_id, format_param)
-						quality_index += 1
-						format_param = self._available_formats[quality_index]
-						continue
-				else: 
-					self._downloader.trouble('ERROR: format not available for video')
-					return
+			except UnavailableVideoError, err:
+				self._downloader.trouble(u'\nERROR: unable to download video')
 
 
 class MetacafeIE(InfoExtractor):
@@ -850,34 +1408,31 @@ class MetacafeIE(InfoExtractor):
 	_DISCLAIMER = 'http://www.metacafe.com/family_filter/'
 	_FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
 	_youtube_ie = None
+	IE_NAME = u'metacafe'
 
 	def __init__(self, youtube_ie, downloader=None):
 		InfoExtractor.__init__(self, downloader)
 		self._youtube_ie = youtube_ie
 
-	@staticmethod
-	def suitable(url):
-		return (re.match(MetacafeIE._VALID_URL, url) is not None)
-
 	def report_disclaimer(self):
 		"""Report disclaimer retrieval."""
-		self._downloader.to_stdout(u'[metacafe] Retrieving disclaimer')
+		self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
 
 	def report_age_confirmation(self):
 		"""Report attempt to confirm age."""
-		self._downloader.to_stdout(u'[metacafe] Confirming age')
-	
+		self._downloader.to_screen(u'[metacafe] Confirming age')
+
 	def report_download_webpage(self, video_id):
 		"""Report webpage download."""
-		self._downloader.to_stdout(u'[metacafe] %s: Downloading webpage' % video_id)
-	
+		self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)
+
 	def report_extraction(self, video_id):
 		"""Report information extraction."""
-		self._downloader.to_stdout(u'[metacafe] %s: Extracting information' % video_id)
+		self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)
 
 	def _real_initialize(self):
 		# Retrieve disclaimer
-		request = urllib2.Request(self._DISCLAIMER, None, std_headers)
+		request = urllib2.Request(self._DISCLAIMER)
 		try:
 			self.report_disclaimer()
 			disclaimer = urllib2.urlopen(request).read()
@@ -890,14 +1445,14 @@ class MetacafeIE(InfoExtractor):
 			'filters': '0',
 			'submit': "Continue - I'm over 18",
 			}
-		request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form), std_headers)
+		request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form))
 		try:
 			self.report_age_confirmation()
 			disclaimer = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
 			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
 			return
-	
+
 	def _real_extract(self, url):
 		# Extract id and simplified title from URL
 		mobj = re.match(self._VALID_URL, url)
@@ -913,8 +1468,10 @@ class MetacafeIE(InfoExtractor):
 			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
 			return
 
+		# At this point we have a new video
+		self._downloader.increment_downloads()
+
 		simple_title = mobj.group(2).decode('utf-8')
-		video_extension = 'flv'
 
 		# Retrieve video webpage to extract further information
 		request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
@@ -928,20 +1485,33 @@ class MetacafeIE(InfoExtractor):
 		# Extract URL, uploader and title from webpage
 		self.report_extraction(video_id)
 		mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract media URL')
-			return
-		mediaURL = urllib.unquote(mobj.group(1))
-
-		#mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
-		#if mobj is None:
-		#	self._downloader.trouble(u'ERROR: unable to extract gdaKey')
-		#	return
-		#gdaKey = mobj.group(1)
-		#
-		#video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
-
-		video_url = mediaURL
+		if mobj is not None:
+			mediaURL = urllib.unquote(mobj.group(1))
+			video_extension = mediaURL[-3:]
+
+			# Extract gdaKey if available
+			mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
+			if mobj is None:
+				video_url = mediaURL
+			else:
+				gdaKey = mobj.group(1)
+				video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
+		else:
+			mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
+			if mobj is None:
+				self._downloader.trouble(u'ERROR: unable to extract media URL')
+				return
+			vardict = parse_qs(mobj.group(1))
+			if 'mediaData' not in vardict:
+				self._downloader.trouble(u'ERROR: unable to extract media URL')
+				return
+			mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
+			if mobj is None:
+				self._downloader.trouble(u'ERROR: unable to extract media URL')
+				return
+			mediaURL = mobj.group(1).replace('\\/', '/')
+			video_extension = mediaURL[-3:]
+			video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
 
 		mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
 		if mobj is None:
@@ -962,81 +1532,90 @@ class MetacafeIE(InfoExtractor):
 				'id':		video_id.decode('utf-8'),
 				'url':		video_url.decode('utf-8'),
 				'uploader':	video_uploader.decode('utf-8'),
+				'upload_date':	u'NA',
 				'title':	video_title,
 				'stitle':	simple_title,
 				'ext':		video_extension.decode('utf-8'),
+				'format':	u'NA',
+				'player_url':	None,
 			})
-		except UnavailableFormatError:
-			self._downloader.trouble(u'ERROR: format not available for video')
+		except UnavailableVideoError:
+			self._downloader.trouble(u'\nERROR: unable to download video')
 
 
-class GoogleIE(InfoExtractor):
-	"""Information extractor for video.google.com."""
+class DailymotionIE(InfoExtractor):
+	"""Information Extractor for Dailymotion"""
 
-	_VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'
+	_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)'
+	IE_NAME = u'dailymotion'
 
 	def __init__(self, downloader=None):
 		InfoExtractor.__init__(self, downloader)
 
-	@staticmethod
-	def suitable(url):
-		return (re.match(GoogleIE._VALID_URL, url) is not None)
-
 	def report_download_webpage(self, video_id):
 		"""Report webpage download."""
-		self._downloader.to_stdout(u'[video.google] %s: Downloading webpage' % video_id)
+		self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)
 
 	def report_extraction(self, video_id):
 		"""Report information extraction."""
-		self._downloader.to_stdout(u'[video.google] %s: Extracting information' % video_id)
+		self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
 
 	def _real_initialize(self):
 		return
 
 	def _real_extract(self, url):
-		# Extract id from URL
+		# Extract id and simplified title from URL
 		mobj = re.match(self._VALID_URL, url)
 		if mobj is None:
-			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
 			return
 
+		# At this point we have a new video
+		self._downloader.increment_downloads()
 		video_id = mobj.group(1)
 
-		video_extension = 'mp4'
+		simple_title = mobj.group(2).decode('utf-8')
+		video_extension = 'flv'
 
 		# Retrieve video webpage to extract further information
-		request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
+		request = urllib2.Request(url)
+		request.add_header('Cookie', 'family_filter=off')
 		try:
 			self.report_download_webpage(video_id)
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
 			return
 
-		# Extract URL, uploader, and title from webpage
+		# Extract URL, uploader and title from webpage
 		self.report_extraction(video_id)
-		mobj = re.search(r"download_url:'([^']+)'", webpage)
+		mobj = re.search(r'(?i)addVariable\(\"sequence\"\s*,\s*\"([^\"]+?)\"\)', webpage)
 		if mobj is None:
-			video_extension = 'flv'
-			mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
+			self._downloader.trouble(u'ERROR: unable to extract media URL')
+			return
+		sequence = urllib.unquote(mobj.group(1))
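+		# (sketch) the decoded sequence contains entries such as
+		#   ,"sdURL":"http:\/\/...\/video.flv",
+		# where sdURL is the standard-definition stream grabbed below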
+		mobj = re.search(r',\"sdURL\"\:\"([^\"]+?)\",', sequence)
 		if mobj is None:
 			self._downloader.trouble(u'ERROR: unable to extract media URL')
 			return
-		mediaURL = urllib.unquote(mobj.group(1))
-		mediaURL = mediaURL.replace('\\x3d', '\x3d')
-		mediaURL = mediaURL.replace('\\x26', '\x26')
+		mediaURL = urllib.unquote(mobj.group(1)).replace('\\', '')
+
+		# TODO: prepend http://www.dailymotion.com/ if the extracted URL is relative
 
 		video_url = mediaURL
 
-		mobj = re.search(r'<title>(.*)</title>', webpage)
+		mobj = re.search(r'(?im)<title>Dailymotion\s*-\s*(.+)\s*-\s*[^<]+?</title>', webpage)
 		if mobj is None:
 			self._downloader.trouble(u'ERROR: unable to extract title')
 			return
 		video_title = mobj.group(1).decode('utf-8')
 		video_title = sanitize_title(video_title)
 
-		# Google Video doesn't show uploader nicknames?
-		video_uploader = 'NA'
+		mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a></span>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+			return
+		video_uploader = mobj.group(1)
 
 		try:
 			# Process video information
@@ -1044,33 +1623,140 @@ class GoogleIE(InfoExtractor):
 				'id':		video_id.decode('utf-8'),
 				'url':		video_url.decode('utf-8'),
 				'uploader':	video_uploader.decode('utf-8'),
+				'upload_date':	u'NA',
+				'title':	video_title,
+				'stitle':	simple_title,
+				'ext':		video_extension.decode('utf-8'),
+				'format':	u'NA',
+				'player_url':	None,
+			})
+		except UnavailableVideoError:
+			self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class GoogleIE(InfoExtractor):
+	"""Information extractor for video.google.com."""
+
+	_VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'
+	IE_NAME = u'video.google'
+
+	def __init__(self, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+
+	def report_download_webpage(self, video_id):
+		"""Report webpage download."""
+		self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)
+
+	def report_extraction(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)
+
+	def _real_initialize(self):
+		return
+
+	def _real_extract(self, url):
+		# Extract id from URL
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+			return
+
+		# At this point we have a new video
+		self._downloader.increment_downloads()
+		video_id = mobj.group(1)
+
+		video_extension = 'mp4'
+
+		# Retrieve video webpage to extract further information
+		request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
+		try:
+			self.report_download_webpage(video_id)
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			return
+
+		# Extract URL, uploader, and title from webpage
+		self.report_extraction(video_id)
+		mobj = re.search(r"download_url:'([^']+)'", webpage)
+		if mobj is None:
+			video_extension = 'flv'
+			mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract media URL')
+			return
+		mediaURL = urllib.unquote(mobj.group(1))
+		mediaURL = mediaURL.replace('\\x3d', '\x3d')
+		mediaURL = mediaURL.replace('\\x26', '\x26')
+
+		video_url = mediaURL
+
+		mobj = re.search(r'<title>(.*)</title>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract title')
+			return
+		video_title = mobj.group(1).decode('utf-8')
+		video_title = sanitize_title(video_title)
+		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+
+		# Extract video description
+		mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video description')
+			return
+		video_description = mobj.group(1).decode('utf-8')
+		if not video_description:
+			video_description = 'No description available.'
+
+		# Extract video thumbnail
+		if self._downloader.params.get('forcethumbnail', False):
+			request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
+			try:
+				webpage = urllib2.urlopen(request).read()
+			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+				return
+			mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
+			if mobj is None:
+				self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+				return
+			video_thumbnail = mobj.group(1)
+		else:	# we need something to pass to process_info
+			video_thumbnail = ''
+
+		try:
+			# Process video information
+			self._downloader.process_info({
+				'id':		video_id.decode('utf-8'),
+				'url':		video_url.decode('utf-8'),
+				'uploader':	u'NA',
+				'upload_date':	u'NA',
 				'title':	video_title,
-				'stitle':	video_title,
+				'stitle':	simple_title,
 				'ext':		video_extension.decode('utf-8'),
+				'format':	u'NA',
+				'player_url':	None,
 			})
-		except UnavailableFormatError:
-			self._downloader.trouble(u'ERROR: format not available for video')
+		except UnavailableVideoError:
+			self._downloader.trouble(u'\nERROR: unable to download video')
 
 
 class PhotobucketIE(InfoExtractor):
 	"""Information extractor for photobucket.com."""
 
 	_VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+	IE_NAME = u'photobucket'
 
 	def __init__(self, downloader=None):
 		InfoExtractor.__init__(self, downloader)
 
-	@staticmethod
-	def suitable(url):
-		return (re.match(PhotobucketIE._VALID_URL, url) is not None)
-
 	def report_download_webpage(self, video_id):
 		"""Report webpage download."""
-		self._downloader.to_stdout(u'[photobucket] %s: Downloading webpage' % video_id)
+		self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
 
 	def report_extraction(self, video_id):
 		"""Report information extraction."""
-		self._downloader.to_stdout(u'[photobucket] %s: Extracting information' % video_id)
+		self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
 
 	def _real_initialize(self):
 		return
@@ -1082,6 +1768,8 @@ class PhotobucketIE(InfoExtractor):
 			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
 			return
 
+		# At this point we have a new video
+		self._downloader.increment_downloads()
 		video_id = mobj.group(1)
 
 		video_extension = 'flv'
@@ -1111,6 +1799,7 @@ class PhotobucketIE(InfoExtractor):
 			return
 		video_title = mobj.group(1).decode('utf-8')
 		video_title = sanitize_title(video_title)
+		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
 
 		video_uploader = mobj.group(2).decode('utf-8')
 
@@ -1120,37 +1809,322 @@ class PhotobucketIE(InfoExtractor):
 				'id':		video_id.decode('utf-8'),
 				'url':		video_url.decode('utf-8'),
 				'uploader':	video_uploader,
+				'upload_date':	u'NA',
+				'title':	video_title,
+				'stitle':	simple_title,
+				'ext':		video_extension.decode('utf-8'),
+				'format':	u'NA',
+				'player_url':	None,
+			})
+		except UnavailableVideoError:
+			self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class YahooIE(InfoExtractor):
+	"""Information extractor for video.yahoo.com."""
+
+	# _VALID_URL matches all Yahoo! Video URLs
+	# _VPAGE_URL matches only the extractable '/watch/' URLs
+	_VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
+	_VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
+	IE_NAME = u'video.yahoo'
+
+	def __init__(self, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+
+	def report_download_webpage(self, video_id):
+		"""Report webpage download."""
+		self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
+
+	def report_extraction(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
+
+	def _real_initialize(self):
+		return
+
+	def _real_extract(self, url, new_video=True):
+		# Extract ID from URL
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+			return
+
+		# At this point we have a new video
+		self._downloader.increment_downloads()
+		video_id = mobj.group(2)
+		video_extension = 'flv'
+
+		# Rewrite valid but non-extractable URLs as
+		# extractable English language /watch/ URLs
+		if re.match(self._VPAGE_URL, url) is None:
+			request = urllib2.Request(url)
+			try:
+				webpage = urllib2.urlopen(request).read()
+			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+				return
+
+			mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
+			if mobj is None:
+				self._downloader.trouble(u'ERROR: Unable to extract id field')
+				return
+			yahoo_id = mobj.group(1)
+
+			mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
+			if mobj is None:
+				self._downloader.trouble(u'ERROR: Unable to extract vid field')
+				return
+			yahoo_vid = mobj.group(1)
+
+			url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
+			return self._real_extract(url, new_video=False)
+
+		# Retrieve video webpage to extract further information
+		request = urllib2.Request(url)
+		try:
+			self.report_download_webpage(video_id)
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			return
+
+		# Extract uploader and title from webpage
+		self.report_extraction(video_id)
+		mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video title')
+			return
+		video_title = mobj.group(1).decode('utf-8')
+		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+
+		mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video uploader')
+			return
+		video_uploader = mobj.group(1).decode('utf-8')
+
+		# Extract video thumbnail
+		mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+			return
+		video_thumbnail = mobj.group(1).decode('utf-8')
+
+		# Extract video description
+		mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video description')
+			return
+		video_description = mobj.group(1).decode('utf-8')
+		if not video_description:
+			video_description = 'No description available.'
+
+		# Extract video height and width
+		mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video height')
+			return
+		yv_video_height = mobj.group(1)
+
+		mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video width')
+			return
+		yv_video_width = mobj.group(1)
+
+		# Retrieve video playlist to extract media URL
+		# I'm not completely sure what all these options are, but we
+		# seem to need most of them, otherwise the server sends a 401.
+		yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
+		yv_bitrate = '700'  # according to Wikipedia this is hard-coded
+		request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
+				'&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
+				'&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
+		try:
+			self.report_download_webpage(video_id)
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			return
+
+		# Extract media URL from playlist XML
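+		# (sketch) the playlist contains an element like
+		#   <STREAM APP="http://..." FULLPATH="/path/video.flv?...">
+		# and concatenating APP + FULLPATH yields the downloadable URL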
+		mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: Unable to extract media URL')
+			return
+		video_url = urllib.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
+		video_url = re.sub(r'(?u)&(.+?);', htmlentity_transform, video_url)
+
+		try:
+			# Process video information
+			self._downloader.process_info({
+				'id':		video_id.decode('utf-8'),
+				'url':		video_url,
+				'uploader':	video_uploader,
+				'upload_date':	u'NA',
 				'title':	video_title,
-				'stitle':	video_title,
+				'stitle':	simple_title,
 				'ext':		video_extension.decode('utf-8'),
+				'thumbnail':	video_thumbnail.decode('utf-8'),
+				'description':	video_description,
+				'player_url':	None,
+			})
+		except UnavailableVideoError:
+			self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class VimeoIE(InfoExtractor):
+	"""Information extractor for vimeo.com."""
+
+	# _VALID_URL matches Vimeo URLs
+	_VALID_URL = r'(?:https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:groups/[^/]+/)?(?:videos?/)?([0-9]+)'
+	IE_NAME = u'vimeo'
+
+	def __init__(self, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+
+	def report_download_webpage(self, video_id):
+		"""Report webpage download."""
+		self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)
+
+	def report_extraction(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)
+
+	def _real_initialize(self):
+		return
+
+	def _real_extract(self, url, new_video=True):
+		# Extract ID from URL
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+			return
+
+		# At this point we have a new video
+		self._downloader.increment_downloads()
+		video_id = mobj.group(1)
+
+		# Retrieve video webpage to extract further information
+		request = urllib2.Request("http://vimeo.com/moogaloop/load/clip:%s" % video_id, None, std_headers)
+		try:
+			self.report_download_webpage(video_id)
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			return
+
+		# Now we begin extracting as much information as we can from what we
+		# retrieved. First we extract the information common to all extractors,
+		# and later we extract those that are Vimeo specific.
+		self.report_extraction(video_id)
+
+		# Extract title
+		mobj = re.search(r'<caption>(.*?)</caption>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video title')
+			return
+		video_title = mobj.group(1).decode('utf-8')
+		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+
+		# Extract uploader
+		mobj = re.search(r'<uploader_url>http://vimeo.com/(.*?)</uploader_url>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video uploader')
+			return
+		video_uploader = mobj.group(1).decode('utf-8')
+
+		# Extract video thumbnail
+		mobj = re.search(r'<thumbnail>(.*?)</thumbnail>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+			return
+		video_thumbnail = mobj.group(1).decode('utf-8')
+
+		# # Extract video description
+		# mobj = re.search(r'<meta property="og:description" content="(.*)" />', webpage)
+		# if mobj is None:
+		# 	self._downloader.trouble(u'ERROR: unable to extract video description')
+		# 	return
+		# video_description = mobj.group(1).decode('utf-8')
+		# if not video_description: video_description = 'No description available.'
+		video_description = 'No description available.'
+
+		# Vimeo specific: extract request signature
+		mobj = re.search(r'<request_signature>(.*?)</request_signature>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract request signature')
+			return
+		sig = mobj.group(1).decode('utf-8')
+
+		# Vimeo specific: extract video quality information
+		mobj = re.search(r'<isHD>(\d+)</isHD>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video quality information')
+			return
+		quality = mobj.group(1).decode('utf-8')
+
+		if int(quality) == 1:
+			quality = 'hd'
+		else:
+			quality = 'sd'
+
+		# Vimeo specific: Extract request signature expiration
+		mobj = re.search(r'<request_signature_expires>(.*?)</request_signature_expires>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract request signature expiration')
+			return
+		sig_exp = mobj.group(1).decode('utf-8')
+
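+		# (sketch) moogaloop serves the clip when given the id, signature,
+		# signature expiry and quality in this exact path layout: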
+		video_url = "http://vimeo.com/moogaloop/play/clip:%s/%s/%s/?q=%s" % (video_id, sig, sig_exp, quality)
+
+		try:
+			# Process video information
+			self._downloader.process_info({
+				'id':		video_id.decode('utf-8'),
+				'url':		video_url,
+				'uploader':	video_uploader,
+				'upload_date':	u'NA',
+				'title':	video_title,
+				'stitle':	simple_title,
+				'ext':		u'mp4',
+				'thumbnail':	video_thumbnail.decode('utf-8'),
+				'description':	video_description,
+				'player_url':	None,
 			})
-		except UnavailableFormatError:
-			self._downloader.trouble(u'ERROR: format not available for video')
+		except UnavailableVideoError:
+			self._downloader.trouble(u'ERROR: unable to download video')
 
 
 class GenericIE(InfoExtractor):
 	"""Generic last-resort information extractor."""
 
+	_VALID_URL = r'.*'
+	IE_NAME = u'generic'
+
 	def __init__(self, downloader=None):
 		InfoExtractor.__init__(self, downloader)
 
-	@staticmethod
-	def suitable(url):
-		return True
-
 	def report_download_webpage(self, video_id):
 		"""Report webpage download."""
-		self._downloader.to_stdout(u'WARNING: Falling back on generic information extractor.')
-		self._downloader.to_stdout(u'[generic] %s: Downloading webpage' % video_id)
+		self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
+		self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
 
 	def report_extraction(self, video_id):
 		"""Report information extraction."""
-		self._downloader.to_stdout(u'[generic] %s: Extracting information' % video_id)
+		self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)
 
 	def _real_initialize(self):
 		return
 
 	def _real_extract(self, url):
+		# At this point we have a new video
+		self._downloader.increment_downloads()
+
 		video_id = url.split('/')[-1]
 		request = urllib2.Request(url)
 		try:
@@ -1165,6 +2139,7 @@ class GenericIE(InfoExtractor):
 			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
 			return
 
+		self.report_extraction(video_id)
 		# Start with something easy: JW Player in SWFObject
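+		# e.g. (illustrative): flashvars: 'autostart=false&file=http://example.com/video.flv'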
 		mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
 		if mobj is None:
@@ -1181,11 +2156,11 @@ class GenericIE(InfoExtractor):
 			return
 
 		video_url = urllib.unquote(mobj.group(1))
-		video_id  = os.path.basename(video_url)
+		video_id = os.path.basename(video_url)
 
 		# here's a fun little line of code for you:
 		video_extension = os.path.splitext(video_id)[1][1:]
-		video_id        = os.path.splitext(video_id)[0]
+		video_id = os.path.splitext(video_id)[0]
 
 		# it's tempting to parse this further, but you would
 		# have to take into account all the variations like
@@ -1199,6 +2174,7 @@ class GenericIE(InfoExtractor):
 			return
 		video_title = mobj.group(1).decode('utf-8')
 		video_title = sanitize_title(video_title)
+		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
 
 		# video uploader is domain name
 		mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
@@ -1213,48 +2189,48 @@ class GenericIE(InfoExtractor):
 				'id':		video_id.decode('utf-8'),
 				'url':		video_url.decode('utf-8'),
 				'uploader':	video_uploader,
+				'upload_date':	u'NA',
 				'title':	video_title,
-				'stitle':	video_title,
+				'stitle':	simple_title,
 				'ext':		video_extension.decode('utf-8'),
+				'format':	u'NA',
+				'player_url':	None,
 			})
-		except UnavailableFormatError:
-			self._downloader.trouble(u'ERROR: format not available for video')
+		except UnavailableVideoError, err:
+			self._downloader.trouble(u'\nERROR: unable to download video')
 
 
 class YoutubeSearchIE(InfoExtractor):
 	"""Information Extractor for YouTube search queries."""
-	_VALID_QUERY = r'ytsearch(\d+|all)?:[\s\S]+'
+	_VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
 	_TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
 	_VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
 	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
 	_youtube_ie = None
 	_max_youtube_results = 1000
+	IE_NAME = u'youtube:search'
 
 	def __init__(self, youtube_ie, downloader=None):
 		InfoExtractor.__init__(self, downloader)
 		self._youtube_ie = youtube_ie
-	
-	@staticmethod
-	def suitable(url):
-		return (re.match(YoutubeSearchIE._VALID_QUERY, url) is not None)
 
 	def report_download_page(self, query, pagenum):
 		"""Report attempt to download playlist page with given number."""
 		query = query.decode(preferredencoding())
-		self._downloader.to_stdout(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
+		self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
 
 	def _real_initialize(self):
 		self._youtube_ie.initialize()
-	
+
 	def _real_extract(self, query):
-		mobj = re.match(self._VALID_QUERY, query)
+		mobj = re.match(self._VALID_URL, query)
 		if mobj is None:
 			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
 			return
 
 		prefix, query = query.split(':')
 		prefix = prefix[8:]
-		query  = query.encode('utf-8')
+		query = query.encode('utf-8')
 		if prefix == '':
 			self._download_n_results(query, 1)
 			return
@@ -1268,7 +2244,7 @@ class YoutubeSearchIE(InfoExtractor):
 					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
 					return
 				elif n > self._max_youtube_results:
-					self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)'  % (self._max_youtube_results, n))
+					self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
 					n = self._max_youtube_results
 				self._download_n_results(query, n)
 				return
@@ -1286,7 +2262,7 @@ class YoutubeSearchIE(InfoExtractor):
 		while True:
 			self.report_download_page(query, pagenum)
 			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
-			request = urllib2.Request(result_url, None, std_headers)
+			request = urllib2.Request(result_url)
 			try:
 				page = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
@@ -1312,120 +2288,1198 @@ class YoutubeSearchIE(InfoExtractor):
 
 			pagenum = pagenum + 1
 
-class YoutubePlaylistIE(InfoExtractor):
-	"""Information Extractor for YouTube playlists."""
 
-	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:view_play_list|my_playlists)\?.*?p=([^&]+).*'
-	_TEMPLATE_URL = 'http://www.youtube.com/view_play_list?p=%s&page=%s&gl=US&hl=en'
-	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
-	_MORE_PAGES_INDICATOR = r'/view_play_list?p=%s&page=%s'
-	_youtube_ie = None
+class GoogleSearchIE(InfoExtractor):
+	"""Information Extractor for Google Video search queries."""
+	_VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+'
+	_TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
+	_VIDEO_INDICATOR = r'videoplay\?docid=([^\&>]+)\&'
+	_MORE_PAGES_INDICATOR = r'<span>Next</span>'
+	_google_ie = None
+	_max_google_results = 1000
+	IE_NAME = u'video.google:search'
 
-	def __init__(self, youtube_ie, downloader=None):
+	def __init__(self, google_ie, downloader=None):
 		InfoExtractor.__init__(self, downloader)
-		self._youtube_ie = youtube_ie
-	
-	@staticmethod
-	def suitable(url):
-		return (re.match(YoutubePlaylistIE._VALID_URL, url) is not None)
+		self._google_ie = google_ie
 
-	def report_download_page(self, playlist_id, pagenum):
+	def report_download_page(self, query, pagenum):
 		"""Report attempt to download playlist page with given number."""
-		self._downloader.to_stdout(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
+		query = query.decode(preferredencoding())
+		self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
 
 	def _real_initialize(self):
-		self._youtube_ie.initialize()
-	
-	def _real_extract(self, url):
+		self._google_ie.initialize()
+
+	def _real_extract(self, query):
+		mobj = re.match(self._VALID_URL, query)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+			return
+
+		prefix, query = query.split(':')
+		prefix = prefix[8:]
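+		# len('gvsearch') == 8; the remainder selects the result count:
+		# '' -> 1 result, 'all' -> up to _max_google_results, 'N' -> N results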
+		query = query.encode('utf-8')
+		if prefix == '':
+			self._download_n_results(query, 1)
+			return
+		elif prefix == 'all':
+			self._download_n_results(query, self._max_google_results)
+			return
+		else:
+			try:
+				n = long(prefix)
+				if n <= 0:
+					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+					return
+				elif n > self._max_google_results:
+					self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
+					n = self._max_google_results
+				self._download_n_results(query, n)
+				return
+			except ValueError: # parsing prefix as integer fails
+				self._download_n_results(query, 1)
+				return
+
+	def _download_n_results(self, query, n):
+		"""Downloads a specified number of results for a query"""
+
+		video_ids = []
+		already_seen = set()
+		pagenum = 1
+
+		while True:
+			self.report_download_page(query, pagenum)
+			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
+			request = urllib2.Request(result_url)
+			try:
+				page = urllib2.urlopen(request).read()
+			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				return
+
+			# Extract video identifiers
+			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+				video_id = mobj.group(1)
+				if video_id not in already_seen:
+					video_ids.append(video_id)
+					already_seen.add(video_id)
+					if len(video_ids) == n:
+						# Specified n videos reached
+						for id in video_ids:
+							self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
+						return
+
+			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+				for id in video_ids:
+					self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
+				return
+
+			pagenum = pagenum + 1
+
+
+class YahooSearchIE(InfoExtractor):
+	"""Information Extractor for Yahoo! Video search queries."""
+	_VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
+	_TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
+	_VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
+	_MORE_PAGES_INDICATOR = r'\s*Next'
+	_yahoo_ie = None
+	_max_yahoo_results = 1000
+	IE_NAME = u'video.yahoo:search'
+
+	def __init__(self, yahoo_ie, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+		self._yahoo_ie = yahoo_ie
+
+	def report_download_page(self, query, pagenum):
+		"""Report attempt to download playlist page with given number."""
+		query = query.decode(preferredencoding())
+		self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
+
+	def _real_initialize(self):
+		self._yahoo_ie.initialize()
+
+	def _real_extract(self, query):
+		mobj = re.match(self._VALID_URL, query)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+			return
+
+		prefix, query = query.split(':')
+		prefix = prefix[8:]
+		query = query.encode('utf-8')
+		if prefix == '':
+			self._download_n_results(query, 1)
+			return
+		elif prefix == 'all':
+			self._download_n_results(query, self._max_yahoo_results)
+			return
+		else:
+			try:
+				n = long(prefix)
+				if n <= 0:
+					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+					return
+				elif n > self._max_yahoo_results:
+					self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
+					n = self._max_yahoo_results
+				self._download_n_results(query, n)
+				return
+			except ValueError: # parsing prefix as integer fails
+				self._download_n_results(query, 1)
+				return
+
+	def _download_n_results(self, query, n):
+		"""Downloads a specified number of results for a query"""
+
+		video_ids = []
+		already_seen = set()
+		pagenum = 1
+
+		while True:
+			self.report_download_page(query, pagenum)
+			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
+			request = urllib2.Request(result_url)
+			try:
+				page = urllib2.urlopen(request).read()
+			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				return
+
+			# Extract video identifiers
+			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+				video_id = mobj.group(1)
+				if video_id not in already_seen:
+					video_ids.append(video_id)
+					already_seen.add(video_id)
+					if len(video_ids) == n:
+						# Specified n videos reached
+						for id in video_ids:
+							self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
+						return
+
+			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+				for id in video_ids:
+					self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
+				return
+
+			pagenum = pagenum + 1
+
+
+class YoutubePlaylistIE(InfoExtractor):
+	"""Information Extractor for YouTube playlists."""
+
+	_VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
+	_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
+	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
+	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
+	_youtube_ie = None
+	IE_NAME = u'youtube:playlist'
+
+	def __init__(self, youtube_ie, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+		self._youtube_ie = youtube_ie
+
+	def report_download_page(self, playlist_id, pagenum):
+		"""Report attempt to download playlist page with given number."""
+		self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
+
+	def _real_initialize(self):
+		self._youtube_ie.initialize()
+
+	def _real_extract(self, url):
 		# Extract playlist id
 		mobj = re.match(self._VALID_URL, url)
 		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+			return
+
+		# Single video case
+		if mobj.group(3) is not None:
+			self._youtube_ie.extract(mobj.group(3))
+			return
+
+		# Download playlist pages
+		# the prefix defaults to 'p' for plain playlists; other types need extra care
+		playlist_prefix = mobj.group(1)
+		if playlist_prefix == 'a':
+			playlist_access = 'artist'
+		else:
+			playlist_prefix = 'p'
+			playlist_access = 'view_play_list'
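+		# (illustrative) the template below expands to e.g.
+		#   http://www.youtube.com/view_play_list?p=<id>&page=1&gl=US&hl=en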
+		playlist_id = mobj.group(2)
+		video_ids = []
+		pagenum = 1
+
+		while True:
+			self.report_download_page(playlist_id, pagenum)
+			request = urllib2.Request(self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum))
+			try:
+				page = urllib2.urlopen(request).read()
+			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				return
+
+			# Extract video identifiers
+			ids_in_page = []
+			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+				if mobj.group(1) not in ids_in_page:
+					ids_in_page.append(mobj.group(1))
+			video_ids.extend(ids_in_page)
+
+			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+				break
+			pagenum = pagenum + 1
+
+		playliststart = self._downloader.params.get('playliststart', 1) - 1
+		playlistend = self._downloader.params.get('playlistend', -1)
+		if playlistend == -1:
+			video_ids = video_ids[playliststart:]
+		else:
+			video_ids = video_ids[playliststart:playlistend]
+
+		for id in video_ids:
+			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+		return
+
+
+class YoutubeUserIE(InfoExtractor):
+	"""Information Extractor for YouTube users."""
+
+	_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
+	_TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
+	_GDATA_PAGE_SIZE = 50
+	_GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
+	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
+	_youtube_ie = None
+	IE_NAME = u'youtube:user'
+
+	def __init__(self, youtube_ie, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+		self._youtube_ie = youtube_ie
+
+	def report_download_page(self, username, start_index):
+		"""Report attempt to download user page."""
+		self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
+				(username, start_index, start_index + self._GDATA_PAGE_SIZE))
+
+	def _real_initialize(self):
+		self._youtube_ie.initialize()
+
+	def _real_extract(self, url):
+		# Extract username
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+			return
+
+		username = mobj.group(1)
+
+		# Download video ids using YouTube Data API. Result size per
+		# query is limited (currently to 50 videos) so we need to query
+		# page by page until a page returns no video ids - at that
+		# point we have all of them.
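+		# (illustrative) page 1 asks for max-results=50&start-index=1,
+		# page 2 for start-index=51, and so on until a short page arrives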
+
+		video_ids = []
+		pagenum = 0
+
+		while True:
+			start_index = pagenum * self._GDATA_PAGE_SIZE + 1
+			self.report_download_page(username, start_index)
+
+			request = urllib2.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
+
+			try:
+				page = urllib2.urlopen(request).read()
+			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				return
+
+			# Extract video identifiers
+			ids_in_page = []
+
+			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+				if mobj.group(1) not in ids_in_page:
+					ids_in_page.append(mobj.group(1))
+
+			video_ids.extend(ids_in_page)
+
+			# A little optimization - if current page is not
+			# "full", ie. does not contain PAGE_SIZE video ids then
+			# we can assume that this page is the last one - there
+			# are no more ids on further pages - no need to query
+			# again.
+
+			if len(ids_in_page) < self._GDATA_PAGE_SIZE:
+				break
+
+			pagenum += 1
+
+		all_ids_count = len(video_ids)
+		playliststart = self._downloader.params.get('playliststart', 1) - 1
+		playlistend = self._downloader.params.get('playlistend', -1)
+
+		if playlistend == -1:
+			video_ids = video_ids[playliststart:]
+		else:
+			video_ids = video_ids[playliststart:playlistend]
+
+		self._downloader.to_screen("[youtube] user %s: Collected %d video ids (downloading %d of them)" %
+				(username, all_ids_count, len(video_ids)))
+
+		for video_id in video_ids:
+			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id)
+
+
+class DepositFilesIE(InfoExtractor):
+	"""Information extractor for depositfiles.com"""
+
+	_VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
+	IE_NAME = u'DepositFiles'
+
+	def __init__(self, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+
+	def report_download_webpage(self, file_id):
+		"""Report webpage download."""
+		self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
+
+	def report_extraction(self, file_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)
+
+	def _real_initialize(self):
+		return
+
+	def _real_extract(self, url):
+		# At this point we have a new file
+		self._downloader.increment_downloads()
+
+		file_id = url.split('/')[-1]
+		# Rebuild url in English locale
+		url = 'http://depositfiles.com/en/files/' + file_id
+
+		# Retrieve file webpage with 'Free download' button pressed
+		free_download_indication = { 'gateway_result' : '1' }
+		request = urllib2.Request(url, urllib.urlencode(free_download_indication))
+		try:
+			self.report_download_webpage(file_id)
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err))
+			return
+
+		# Search for the real file URL
+		mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
+		if (mobj is None) or (mobj.group(1) is None):
+			# Try to figure out the reason for the error.
+			mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
+			if (mobj is not None) and (mobj.group(1) is not None):
+				restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
+				self._downloader.trouble(u'ERROR: %s' % restriction_message)
+			else:
+				self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
+			return
+
+		file_url = mobj.group(1)
+		file_extension = os.path.splitext(file_url)[1][1:]
+
+		# Search for file title
+		mobj = re.search(r'<b title="(.*?)">', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract title')
+			return
+		file_title = mobj.group(1).decode('utf-8')
+
+		try:
+			# Process file information
+			self._downloader.process_info({
+				'id':		file_id.decode('utf-8'),
+				'url':		file_url.decode('utf-8'),
+				'uploader':	u'NA',
+				'upload_date':	u'NA',
+				'title':	file_title,
+				'stitle':	file_title,
+				'ext':		file_extension.decode('utf-8'),
+				'format':	u'NA',
+				'player_url':	None,
+			})
+		except UnavailableVideoError, err:
+			self._downloader.trouble(u'ERROR: unable to download file')
+
+
+class FacebookIE(InfoExtractor):
+	"""Information Extractor for Facebook"""
+
+	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/video/video\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+	_LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
+	_NETRC_MACHINE = 'facebook'
+	_available_formats = ['highqual', 'lowqual']
+	_video_extensions = {
+		'highqual': 'mp4',
+		'lowqual': 'mp4',
+	}
+	IE_NAME = u'facebook'
+
+	def __init__(self, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+
+	def _reporter(self, message):
+		"""Add header and report message."""
+		self._downloader.to_screen(u'[facebook] %s' % message)
+
+	def report_login(self):
+		"""Report attempt to log in."""
+		self._reporter(u'Logging in')
+
+	def report_video_webpage_download(self, video_id):
+		"""Report attempt to download video webpage."""
+		self._reporter(u'%s: Downloading video webpage' % video_id)
+
+	def report_information_extraction(self, video_id):
+		"""Report attempt to extract video information."""
+		self._reporter(u'%s: Extracting video information' % video_id)
+
+	def _parse_page(self, video_webpage):
+		"""Extract video information from page"""
+		# General data
+		data = {'title': r'class="video_title datawrap">(.*?)</',
+			'description': r'<div class="datawrap">(.*?)</div>',
+			'owner': r'\("video_owner_name", "(.*?)"\)',
+			'upload_date': r'data-date="(.*?)"',
+			'thumbnail':  r'\("thumb_url", "(?P<THUMB>.*?)"\)',
+			}
+		video_info = {}
+		for piece in data.keys():
+			mobj = re.search(data[piece], video_webpage)
+			if mobj is not None:
+				video_info[piece] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
+
+		# Video urls
+		video_urls = {}
+		for fmt in self._available_formats:
+			mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
+			if mobj is not None:
+				# URL is in a Javascript segment inside an escaped Unicode format within
+				# the generally utf-8 page
+				video_urls[fmt] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
+		video_info['video_urls'] = video_urls
+
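+		# (sketch) on success video_info looks like
+		#   {'title': ..., 'owner': ..., 'video_urls': {'highqual': ..., 'lowqual': ...}}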
+		return video_info
+
+	def _real_initialize(self):
+		if self._downloader is None:
+			return
+
+		useremail = None
+		password = None
+		downloader_params = self._downloader.params
+
+		# Attempt to use provided username and password or .netrc data
+		if downloader_params.get('username', None) is not None:
+			useremail = downloader_params['username']
+			password = downloader_params['password']
+		elif downloader_params.get('usenetrc', False):
+			try:
+				info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+				if info is not None:
+					useremail = info[0]
+					password = info[2]
+				else:
+					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+			except (IOError, netrc.NetrcParseError), err:
+				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
+				return
+
+		if useremail is None:
+			return
+
+		# Log in
+		login_form = {
+			'email': useremail,
+			'pass': password,
+			'login': 'Log+In'
+			}
+		request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
+		try:
+			self.report_login()
+			login_results = urllib2.urlopen(request).read()
+			if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
+				self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
+				return
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
+			return
+
+	def _real_extract(self, url):
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+			return
+		video_id = mobj.group('ID')
+
+		# Get video webpage
+		self.report_video_webpage_download(video_id)
+		request = urllib2.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
+		try:
+			page = urllib2.urlopen(request)
+			video_webpage = page.read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			return
+
+		# Start extracting information
+		self.report_information_extraction(video_id)
+
+		# Extract information
+		video_info = self._parse_page(video_webpage)
+
+		# uploader
+		if 'owner' not in video_info:
+			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+			return
+		video_uploader = video_info['owner']
+
+		# title
+		if 'title' not in video_info:
+			self._downloader.trouble(u'ERROR: unable to extract video title')
+			return
+		video_title = video_info['title']
+		video_title = video_title.decode('utf-8')
+		video_title = sanitize_title(video_title)
+
+		# simplified title
+		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+		simple_title = simple_title.strip(ur'_')
+
+		# thumbnail image
+		if 'thumbnail' not in video_info:
+			self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+			video_thumbnail = ''
+		else:
+			video_thumbnail = video_info['thumbnail']
+
+		# upload date
+		upload_date = u'NA'
+		if 'upload_date' in video_info:
+			upload_time = video_info['upload_date']
+			timetuple = email.utils.parsedate_tz(upload_time)
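+			# (illustrative) parsedate_tz('Sat, 15 Oct 2011 12:34:56 -0000')
+			# returns a 10-tuple; its first nine fields feed time.strftime below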
+			if timetuple is not None:
+				try:
+					upload_date = time.strftime('%Y%m%d', timetuple[0:9])
+				except:
+					pass
+
+		# description
+		video_description = video_info.get('description', 'No description available.')
+
+		url_map = video_info['video_urls']
+		if len(url_map.keys()) > 0:
+			# Decide which formats to download
+			req_format = self._downloader.params.get('format', None)
+			format_limit = self._downloader.params.get('format_limit', None)
+
+			if format_limit is not None and format_limit in self._available_formats:
+				format_list = self._available_formats[self._available_formats.index(format_limit):]
+			else:
+				format_list = self._available_formats
+			existing_formats = [x for x in format_list if x in url_map]
+			if len(existing_formats) == 0:
+				self._downloader.trouble(u'ERROR: no known formats available for video')
+				return
+			if req_format is None:
+				video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+			elif req_format == 'worst':
+				video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality
+			elif req_format == '-1':
+				video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+			else:
+				# Specific format
+				if req_format not in url_map:
+					self._downloader.trouble(u'ERROR: requested format not available')
+					return
+				video_url_list = [(req_format, url_map[req_format])] # Specific format
+		else:
+			# url_map was empty: avoid falling through with video_url_list unset
+			self._downloader.trouble(u'ERROR: no downloadable video URLs found')
+			return
+
+		for format_param, video_real_url in video_url_list:
+
+			# At this point we have a new video
+			self._downloader.increment_downloads()
+
+			# Extension
+			video_extension = self._video_extensions.get(format_param, 'mp4')
+
+			try:
+				# Process video information
+				self._downloader.process_info({
+					'id':		video_id.decode('utf-8'),
+					'url':		video_real_url.decode('utf-8'),
+					'uploader':	video_uploader.decode('utf-8'),
+					'upload_date':	upload_date,
+					'title':	video_title,
+					'stitle':	simple_title,
+					'ext':		video_extension.decode('utf-8'),
+					'format':	(format_param is None and u'NA' or format_param.decode('utf-8')),
+					'thumbnail':	video_thumbnail.decode('utf-8'),
+					'description':	video_description.decode('utf-8'),
+					'player_url':	None,
+				})
+			except UnavailableVideoError, err:
+				self._downloader.trouble(u'\nERROR: unable to download video')
+
+class BlipTVIE(InfoExtractor):
+	"""Information extractor for blip.tv"""
+
+	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
+	_URL_EXT = r'^.*\.([a-z0-9]+)$'
+	IE_NAME = u'blip.tv'
+
+	def report_extraction(self, file_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+
+	def report_direct_download(self, title):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title))
+
+	def _simplify_title(self, title):
+		res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
+		res = res.strip(ur'_')
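+		# (illustrative) u'My Video! (HD)' -> u'My_Video_HD'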
+		return res
+
+	def _real_extract(self, url):
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+			return
+
+		if '?' in url:
+			cchar = '&'
+		else:
+			cchar = '?'
+		json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
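+		# (illustrative) http://blip.tv/file/123456?skin=json&version=2&no_wrap=1
+		# makes blip.tv return the post metadata as JSON instead of HTML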
+		request = urllib2.Request(json_url)
+		self.report_extraction(mobj.group(1))
+		info = None
+		try:
+			urlh = urllib2.urlopen(request)
+			if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
+				basename = url.split('/')[-1]
+				title, ext = os.path.splitext(basename)
+				ext = ext.replace('.', '')
+				self.report_direct_download(title)
+				info = {
+					'id': title,
+					'url': url,
+					'title': title,
+					'stitle': self._simplify_title(title),
+					'ext': ext,
+					'urlhandle': urlh
+				}
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
+			return
+		if info is None: # Regular URL
+			try:
+				json_code = urlh.read()
+			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+				self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % str(err))
+				return
+
+			try:
+				json_data = json.loads(json_code)
+				if 'Post' in json_data:
+					data = json_data['Post']
+				else:
+					data = json_data
+
+				upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
+				video_url = data['media']['url']
+				umobj = re.match(self._URL_EXT, video_url)
+				if umobj is None:
+					raise ValueError('Can not determine filename extension')
+				ext = umobj.group(1)
+
+				info = {
+					'id': data['item_id'],
+					'url': video_url,
+					'uploader': data['display_name'],
+					'upload_date': upload_date,
+					'title': data['title'],
+					'stitle': self._simplify_title(data['title']),
+					'ext': ext,
+					'format': data['media']['mimeType'],
+					'thumbnail': data['thumbnailUrl'],
+					'description': data['description'],
+					'player_url': data['embedUrl']
+				}
+			except (ValueError, KeyError), err:
+				self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
+				return
+
+		self._downloader.increment_downloads()
+
+		try:
+			self._downloader.process_info(info)
+		except UnavailableVideoError, err:
+			self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class MyVideoIE(InfoExtractor):
+	"""Information Extractor for myvideo.de."""
+
+	_VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
+	IE_NAME = u'myvideo'
+
+	def __init__(self, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+
+	def report_download_webpage(self, video_id):
+		"""Report webpage download."""
+		self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
+
+	def report_extraction(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
+
+	def _real_initialize(self):
+		return
+
+	def _real_extract(self,url):
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+			return
+
+		video_id = mobj.group(1)
+		simple_title = mobj.group(2).decode('utf-8')
+		# should actually not be necessary
+		simple_title = sanitize_title(simple_title)
+		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', simple_title)
+
+		# Get video webpage
+		request = urllib2.Request('http://www.myvideo.de/watch/%s' % video_id)
+		try:
+			self.report_download_webpage(video_id)
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			return
+
+		self.report_extraction(video_id)
+		mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
+				 webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract media URL')
+			return
+		video_url = mobj.group(1) + ('/%s.flv' % video_id)
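+		# (illustrative) http://is4.myvideo.de/de/movie17/0123456789abcdef/1234567.flv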
+
+		mobj = re.search('<title>([^<]+)</title>', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract title')
+			return
+
+		video_title = mobj.group(1)
+		video_title = sanitize_title(video_title)
+
+		try:
+			self._downloader.process_info({
+				'id':		video_id,
+				'url':		video_url,
+				'uploader':	u'NA',
+				'upload_date':  u'NA',
+				'title':	video_title,
+				'stitle':	simple_title,
+				'ext':		u'flv',
+				'format':	u'NA',
+				'player_url':	None,
+			})
+		except UnavailableVideoError:
+			self._downloader.trouble(u'\nERROR: Unable to download video')
+
+class ComedyCentralIE(InfoExtractor):
+	"""Information extractor for The Daily Show and Colbert Report """
+
+	_VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)?(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$'
+	IE_NAME = u'comedycentral'
+
+	def report_extraction(self, episode_id):
+		self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
+
+	def report_config_download(self, episode_id):
+		self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
+
+	def report_index_download(self, episode_id):
+		self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)
+
+	def report_player_url(self, episode_id):
+		self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)
+
+	def _simplify_title(self, title):
+		res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
+		res = res.strip(ur'_')
+		return res
+
+	def _real_extract(self, url):
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
 			return
 
-		# Download playlist pages
-		playlist_id = mobj.group(1)
-		video_ids = []
-		pagenum = 1
+		if mobj.group('shortname'):
+			if mobj.group('shortname') in ('tds', 'thedailyshow'):
+				url = 'http://www.thedailyshow.com/full-episodes/'
+			else:
+				url = 'http://www.colbertnation.com/full-episodes/'
+			mobj = re.match(self._VALID_URL, url)
+			assert mobj is not None
 
-		while True:
-			self.report_download_page(playlist_id, pagenum)
-			request = urllib2.Request(self._TEMPLATE_URL % (playlist_id, pagenum), None, std_headers)
+		dlNewest = not mobj.group('episode')
+		if dlNewest:
+			epTitle = mobj.group('showname')
+		else:
+			epTitle = mobj.group('episode')
+
+		req = urllib2.Request(url)
+		self.report_extraction(epTitle)
+		try:
+			htmlHandle = urllib2.urlopen(req)
+			html = htmlHandle.read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
+			return
+		if dlNewest:
+			url = htmlHandle.geturl()
+			mobj = re.match(self._VALID_URL, url)
+			if mobj is None:
+				self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
+				return
+			if mobj.group('episode') == '':
+				self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
+				return
+			epTitle = mobj.group('episode')
+
+		mMovieParams = re.findall(r'<param name="movie" value="(http://media\.mtvnservices\.com/([^"]*episode.*?:.*?))"/>', html)
+		if len(mMovieParams) == 0:
+			self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
+			return
+
+		playerUrl_raw = mMovieParams[0][0]
+		self.report_player_url(epTitle)
+		try:
+			urlHandle = urllib2.urlopen(playerUrl_raw)
+			playerUrl = urlHandle.geturl()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to determine player URL: ' + unicode(err))
+			return
+
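+		# The second capture group of the Flash <param> match is the
+		# mtvnservices URI; the episode's MRSS index feed is keyed by it.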
+		uri = mMovieParams[0][1]
+		indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + urllib.urlencode({'uri': uri})
+		self.report_index_download(epTitle)
+		try:
+			indexXml = urllib2.urlopen(indexUrl).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download episode index: ' + unicode(err))
+			return
+
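+		# Each <item> in the MRSS index is one segment of the episode, with
+		# its own guid (media ID) and its own mediaGen configuration.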
+		idoc = xml.etree.ElementTree.fromstring(indexXml)
+		itemEls = idoc.findall('.//item')
+		for itemEl in itemEls:
+			mediaId = itemEl.findall('./guid')[0].text
+			shortMediaId = mediaId.split(':')[-1]
+			showId = mediaId.split(':')[-2].replace('.com', '')
+			officialTitle = itemEl.findall('./title')[0].text
+			officialDate = itemEl.findall('./pubDate')[0].text
+
+			configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
+						urllib.urlencode({'uri': mediaId}))
+			configReq = urllib2.Request(configUrl)
+			self.report_config_download(epTitle)
 			try:
-				page = urllib2.urlopen(request).read()
+				configXml = urllib2.urlopen(configReq).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
 				return
 
-			# Extract video identifiers
-			ids_in_page = []
-			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-				if mobj.group(1) not in ids_in_page:
-					ids_in_page.append(mobj.group(1))
-			video_ids.extend(ids_in_page)
+			cdoc = xml.etree.ElementTree.fromstring(configXml)
+			turls = []
+			for rendition in cdoc.findall('.//rendition'):
+				finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
+				turls.append(finfo)
+
+			if len(turls) == 0:
+				self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
+				continue
+
+			# For now, just pick the highest bitrate: the feed appears to list
+			# renditions in ascending bitrate order, so take the last one
+			format, video_url = turls[-1]
+
+			self._downloader.increment_downloads()
+
+			effTitle = showId + '-' + epTitle
+			info = {
+				'id': shortMediaId,
+				'url': video_url,
+				'uploader': showId,
+				'upload_date': officialDate,
+				'title': effTitle,
+				'stitle': self._simplify_title(effTitle),
+				'ext': 'mp4',
+				'format': format,
+				'thumbnail': None,
+				'description': officialTitle,
+				'player_url': playerUrl
+			}
 
-			if (self._MORE_PAGES_INDICATOR % (playlist_id.upper(), pagenum + 1)) not in page:
-				break
-			pagenum = pagenum + 1
+			try:
+				self._downloader.process_info(info)
+			except UnavailableVideoError, err:
+				self._downloader.trouble(u'\nERROR: unable to download ' + mediaId)
+				continue
 
-		for id in video_ids:
-			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
-		return
 
-class YoutubeUserIE(InfoExtractor):
-	"""Information Extractor for YouTube users."""
+class EscapistIE(InfoExtractor):
+	"""Information extractor for The Escapist """
 
-	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/user/(.*)'
-	_TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
-	_VIDEO_INDICATOR = r'http://gdata.youtube.com/feeds/api/videos/(.*)' # XXX Fix this.
-	_youtube_ie = None
+	_VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
+	IE_NAME = u'escapist'
 
-	def __init__(self, youtube_ie, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-		self._youtube_ie = youtube_ie
-	
-	@staticmethod
-	def suitable(url):
-		return (re.match(YoutubeUserIE._VALID_URL, url) is not None)
+	def report_extraction(self, showName):
+		self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName)
 
-	def report_download_page(self, username):
-		"""Report attempt to download user page."""
-		self._downloader.to_stdout(u'[youtube] user %s: Downloading page ' % (username))
+	def report_config_download(self, showName):
+		self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName)
+
+	def _simplify_title(self, title):
+		res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
+		res = res.strip(ur'_')
+		return res
 
-	def _real_initialize(self):
-		self._youtube_ie.initialize()
-	
 	def _real_extract(self, url):
-		# Extract username
+		htmlParser = HTMLParser.HTMLParser()
+
 		mobj = re.match(self._VALID_URL, url)
 		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
 			return
+		showName = mobj.group('showname')
+		videoId = mobj.group('episode')
 
-		# Download user page
-		username = mobj.group(1)
-		video_ids = []
-		pagenum = 1
+		self.report_extraction(showName)
+		try:
+			webPage = urllib2.urlopen(url).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err))
+			return
 
-		self.report_download_page(username)
-		request = urllib2.Request(self._TEMPLATE_URL % (username), None, std_headers)
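+		# The description, thumbnail and player URL are all scraped from
+		# <meta> tags on the page (assumed present; the matches below are
+		# not None-checked)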
+		descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
+		description = htmlParser.unescape(descMatch.group(1))
+		imgMatch = re.search('<meta property="og:image" content="([^"]*)"', webPage)
+		imgUrl = htmlParser.unescape(imgMatch.group(1))
+		playerUrlMatch = re.search('<meta property="og:video" content="([^"]*)"', webPage)
+		playerUrl = htmlParser.unescape(playerUrlMatch.group(1))
+		configUrlMatch = re.search('config=(.*)$', playerUrl)
+		configUrl = urllib2.unquote(configUrlMatch.group(1))
+
+		self.report_config_download(showName)
 		try:
-			page = urllib2.urlopen(request).read()
+			configJSON = urllib2.urlopen(configUrl).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download configuration: ' + unicode(err))
 			return
 
-		# Extract video identifiers
-		ids_in_page = []
+		# Technically, it's JavaScript, not JSON
+		configJSON = configJSON.replace("'", '"')
 
-		for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-			if mobj.group(1) not in ids_in_page:
-				ids_in_page.append(mobj.group(1))
-		video_ids.extend(ids_in_page)
+		try:
+			config = json.loads(configJSON)
+		except (ValueError,), err:
+			self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + unicode(err))
+			return
+
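+		# The config's playlist holds several entries; index 1 is used here
+		# as the actual video (index 0 appears to be a preroll/intro clip)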
+		playlist = config['playlist']
+		videoUrl = playlist[1]['url']
+
+		self._downloader.increment_downloads()
+		info = {
+			'id': videoId,
+			'url': videoUrl,
+			'uploader': showName,
+			'upload_date': None,
+			'title': showName,
+			'stitle': self._simplify_title(showName),
+			'ext': 'flv',
+			'format': 'flv',
+			'thumbnail': imgUrl,
+			'description': description,
+			'player_url': playerUrl,
+		}
+
+		try:
+			self._downloader.process_info(info)
+		except UnavailableVideoError, err:
+			self._downloader.trouble(u'\nERROR: unable to download ' + videoId)
+
+
+class CollegeHumorIE(InfoExtractor):
+	"""Information extractor for collegehumor.com"""
+
+	_VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$'
+	IE_NAME = u'collegehumor'
+
+	def report_webpage(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+	def report_extraction(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+	def _simplify_title(self, title):
+		res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
+		res = res.strip(ur'_')
+		return res
+
+	def _real_extract(self, url):
+		htmlParser = HTMLParser.HTMLParser()
+
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+			return
+		video_id = mobj.group('videoid')
+
+		self.report_webpage(video_id)
+		request = urllib2.Request(url)
+		try:
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			return
+
+		m = re.search(r'id="video:(?P<internalvideoid>[0-9]+)"', webpage)
+		if m is None:
+			self._downloader.trouble(u'ERROR: Cannot extract internal video ID')
+			return
+		internal_video_id = m.group('internalvideoid')
+
+		info = {
+			'id': video_id,
+			'internal_id': internal_video_id,
+		}
+
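+		# The metadata lives in an XML document served by the "moogaloop"
+		# player endpoint, keyed by the internal video ID scraped above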
+		self.report_extraction(video_id)
+		xmlUrl = 'http://www.collegehumor.com/moogaloop/video:' + internal_video_id
+		try:
+			metaXml = urllib2.urlopen(xmlUrl).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % str(err))
+			return
+
+		mdoc = xml.etree.ElementTree.fromstring(metaXml)
+		try:
+			videoNode = mdoc.findall('./video')[0]
+			info['description'] = videoNode.findall('./description')[0].text
+			info['title'] = videoNode.findall('./caption')[0].text
+			info['stitle'] = self._simplify_title(info['title'])
+			info['url'] = videoNode.findall('./file')[0].text
+			info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
+			info['ext'] = info['url'].rpartition('.')[2]
+			info['format'] = info['ext']
+		except IndexError:
+			self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+			return
+
+		self._downloader.increment_downloads()
+
+		try:
+			self._downloader.process_info(info)
+		except UnavailableVideoError, err:
+			self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class XVideosIE(InfoExtractor):
+	"""Information extractor for xvideos.com"""
+
+	_VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
+	IE_NAME = u'xvideos'
+
+	def report_webpage(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+	def report_extraction(self, video_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+	def _simplify_title(self, title):
+		res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
+		res = res.strip(ur'_')
+		return res
+
+	def _real_extract(self, url):
+		htmlParser = HTMLParser.HTMLParser()
+
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+			return
+		video_id = mobj.group(1).decode('utf-8')
+
+		self.report_webpage(video_id)
+
+		request = urllib2.Request(r'http://www.xvideos.com/video' + video_id)
+		try:
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			return
+
+		self.report_extraction(video_id)
+
+
+		# Extract video URL
+		mobj = re.search(r'flv_url=(.+?)&', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video URL')
+			return
+		video_url = urllib2.unquote(mobj.group(1).decode('utf-8'))
+
+
+		# Extract title
+		mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video title')
+			return
+		video_title = mobj.group(1).decode('utf-8')
+
+
+		# Extract video thumbnail
+		mobj = re.search(r'http://(?:img.*?\.)xvideos\.com/videos/thumbs/[a-fA-F0-9]/[a-fA-F0-9]/[a-fA-F0-9]/([a-fA-F0-9.]+jpg)', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+			return
+		video_thumbnail = mobj.group(1).decode('utf-8')
+
+
+		self._downloader.increment_downloads()
+		info = {
+			'id': video_id,
+			'url': video_url,
+			'uploader': None,
+			'upload_date': None,
+			'title': video_title,
+			'stitle': self._simplify_title(video_title),
+			'ext': 'flv',
+			'format': 'flv',
+			'thumbnail': video_thumbnail,
+			'description': None,
+			'player_url': None,
+		}
+
+		try:
+			self._downloader.process_info(info)
+		except UnavailableVideoError, err:
+			self._downloader.trouble(u'\nERROR: unable to download ' + video_id)
 
-		for id in video_ids:
-			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
-		return
 
 class PostProcessor(object):
 	"""Post Processor class.
@@ -1452,7 +3506,7 @@ class PostProcessor(object):
 	def set_downloader(self, downloader):
 		"""Sets the downloader for this PP."""
 		self._downloader = downloader
-	
+
 	def run(self, information):
 		"""Run the PostProcessor.
 
@@ -1472,190 +3526,530 @@ class PostProcessor(object):
 		it was called from.
 		"""
 		return information # by default, do nothing
-	
-### MAIN PROGRAM ###
-if __name__ == '__main__':
-	try:
-		# Modules needed only when running the main program
-		import getpass
-		import optparse
-
-		# Function to update the program file with the latest version from bitbucket.org
-		def update_self(downloader, filename):
-			# Note: downloader only used for options
-			if not os.access (filename, os.W_OK):
-				sys.exit('ERROR: no write permissions on %s' % filename)
-
-			downloader.to_stdout('Updating to latest stable version...')
-			latest_url = 'http://bitbucket.org/rg3/youtube-dl/raw/tip/LATEST_VERSION'
-			latest_version = urllib.urlopen(latest_url).read().strip()
-			prog_url = 'http://bitbucket.org/rg3/youtube-dl/raw/%s/youtube-dl' % latest_version
-			newcontent = urllib.urlopen(prog_url).read()
-			stream = open(filename, 'w')
-			stream.write(newcontent)
-			stream.close()
-			downloader.to_stdout('Updated to version %s' % latest_version)
-
-		# General configuration
-		urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler()))
-		urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor()))
-		socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
-
-		# Parse command line
-		parser = optparse.OptionParser(
-			usage='Usage: %prog [options] url...',
-			version='2010.02.13',
-			conflict_handler='resolve',
-		)
-
-		parser.add_option('-h', '--help',
-				action='help', help='print this help text and exit')
-		parser.add_option('-v', '--version',
-				action='version', help='print program version and exit')
-		parser.add_option('-U', '--update',
-				action='store_true', dest='update_self', help='update this program to latest stable version')
-		parser.add_option('-i', '--ignore-errors',
-				action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
-		parser.add_option('-r', '--rate-limit',
-				dest='ratelimit', metavar='L', help='download rate limit (e.g. 50k or 44.6m)')
-
-		authentication = optparse.OptionGroup(parser, 'Authentication Options')
-		authentication.add_option('-u', '--username',
-				dest='username', metavar='UN', help='account username')
-		authentication.add_option('-p', '--password',
-				dest='password', metavar='PW', help='account password')
-		authentication.add_option('-n', '--netrc',
-				action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
-		parser.add_option_group(authentication)
-
-		video_format = optparse.OptionGroup(parser, 'Video Format Options')
-		video_format.add_option('-f', '--format',
-				action='store', dest='format', metavar='FMT', help='video format code')
-		video_format.add_option('-b', '--best-quality',
-				action='store_const', dest='format', help='download the best quality video possible', const='0')
-		video_format.add_option('-m', '--mobile-version',
-				action='store_const', dest='format', help='alias for -f 17', const='17')
-		video_format.add_option('-d', '--high-def',
-				action='store_const', dest='format', help='alias for -f 22', const='22')
-		parser.add_option_group(video_format)
-
-		verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
-		verbosity.add_option('-q', '--quiet',
-				action='store_true', dest='quiet', help='activates quiet mode', default=False)
-		verbosity.add_option('-s', '--simulate',
-				action='store_true', dest='simulate', help='do not download video', default=False)
-		verbosity.add_option('-g', '--get-url',
-				action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
-		verbosity.add_option('-e', '--get-title',
-				action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
-		parser.add_option_group(verbosity)
-
-		filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
-		filesystem.add_option('-t', '--title',
-				action='store_true', dest='usetitle', help='use title in file name', default=False)
-		filesystem.add_option('-l', '--literal',
-				action='store_true', dest='useliteral', help='use literal title in file name', default=False)
-		filesystem.add_option('-o', '--output',
-				dest='outtmpl', metavar='TPL', help='output filename template')
-		filesystem.add_option('-a', '--batch-file',
-				dest='batchfile', metavar='F', help='file containing URLs to download')
-		filesystem.add_option('-w', '--no-overwrites',
-				action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
-		filesystem.add_option('-c', '--continue',
-				action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
-		parser.add_option_group(filesystem)
-
-		(opts, args) = parser.parse_args()
-        
-		# Batch file verification
-		batchurls = []
-		if opts.batchfile is not None:
+
+
+class FFmpegExtractAudioPP(PostProcessor):
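+	"""Post-processor that extracts an audio-only file from a downloaded
+	video via ffmpeg (ffmpeg and ffprobe must be on the PATH)."""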
+
+	def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False):
+		PostProcessor.__init__(self, downloader)
+		if preferredcodec is None:
+			preferredcodec = 'best'
+		self._preferredcodec = preferredcodec
+		self._preferredquality = preferredquality
+		self._keepvideo = keepvideo
+
+	@staticmethod
+	def get_audio_codec(path):
+		try:
+			cmd = ['ffprobe', '-show_streams', '--', path]
+			handle = subprocess.Popen(cmd, stderr=file(os.path.devnull, 'w'), stdout=subprocess.PIPE)
+			output = handle.communicate()[0]
+			if handle.wait() != 0:
+				return None
+		except (IOError, OSError):
+			return None
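+		# ffprobe prints one key=value line per field; within a stream block
+		# codec_name precedes codec_type, so remember the last codec_name and
+		# return it once a codec_type=audio line confirms an audio stream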
+		audio_codec = None
+		for line in output.split('\n'):
+			if line.startswith('codec_name='):
+				audio_codec = line.split('=')[1].strip()
+			elif line.strip() == 'codec_type=audio' and audio_codec is not None:
+				return audio_codec
+		return None
+
+	@staticmethod
+	def run_ffmpeg(path, out_path, codec, more_opts):
+		try:
+			cmd = ['ffmpeg', '-y', '-i', path, '-vn', '-acodec', codec] + more_opts + ['--', out_path]
+			ret = subprocess.call(cmd, stdout=file(os.path.devnull, 'w'), stderr=subprocess.STDOUT)
+			return (ret == 0)
+		except (IOError, OSError):
+			return False
+
+	def run(self, information):
+		path = information['filepath']
+
+		filecodec = self.get_audio_codec(path)
+		if filecodec is None:
+			self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
+			return None
+
+		more_opts = []
+		if self._preferredcodec == 'best' or self._preferredcodec == filecodec:
+			if filecodec in ['aac', 'mp3', 'vorbis']:
+				# Lossless if possible
+				acodec = 'copy'
+				extension = filecodec
+				if filecodec == 'aac':
+					more_opts = ['-f', 'adts']
+				if filecodec == 'vorbis':
+					extension = 'ogg'
+			else:
+				# MP3 otherwise.
+				acodec = 'libmp3lame'
+				extension = 'mp3'
+				more_opts = []
+				if self._preferredquality is not None:
+					more_opts += ['-ab', self._preferredquality]
+		else:
+			# We convert the audio (lossy)
+			acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'vorbis': 'libvorbis'}[self._preferredcodec]
+			extension = self._preferredcodec
+			more_opts = []
+			if self._preferredquality is not None:
+				more_opts += ['-ab', self._preferredquality]
+			if self._preferredcodec == 'aac':
+				more_opts += ['-f', 'adts']
+			if self._preferredcodec == 'vorbis':
+				extension = 'ogg'
+
+		(prefix, ext) = os.path.splitext(path)
+		new_path = prefix + '.' + extension
+		self._downloader.to_screen(u'[ffmpeg] Destination: %s' % new_path)
+		status = self.run_ffmpeg(path, new_path, acodec, more_opts)
+
+		if not status:
+			self._downloader.to_stderr(u'WARNING: error running ffmpeg')
+			return None
+
+		# Try to update the modification time of the extracted audio file.
+		if information.get('filetime') is not None:
 			try:
-				batchurls = open(opts.batchfile, 'r').readlines()
-				batchurls = [x.strip() for x in batchurls]
-				batchurls = [x for x in batchurls if len(x) > 0]
-			except IOError:
-				sys.exit(u'ERROR: batch file could not be read')
-		all_urls = batchurls + args
-
-		# Make sure all URLs are in our preferred encoding
-		for i in range(0, len(all_urls)):
-			all_urls[i] = unicode(all_urls[i], preferredencoding())
-
-		# Conflicting, missing and erroneous options
-		if opts.usenetrc and (opts.username is not None or opts.password is not None):
-			parser.error(u'using .netrc conflicts with giving username/password')
-		if opts.password is not None and opts.username is None:
-			parser.error(u'account username missing')
-		if opts.outtmpl is not None and (opts.useliteral or opts.usetitle):
-			parser.error(u'using output template conflicts with using title or literal title')
-		if opts.usetitle and opts.useliteral:
-			parser.error(u'using title conflicts with using literal title')
-		if opts.username is not None and opts.password is None:
-			opts.password = getpass.getpass(u'Type account password and press return:')
-		if opts.ratelimit is not None:
-			numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
-			if numeric_limit is None:
-				parser.error(u'invalid rate limit specified')
-			opts.ratelimit = numeric_limit
-
-		# Information extractors
-		youtube_ie = YoutubeIE()
-		metacafe_ie = MetacafeIE(youtube_ie)
-		youtube_pl_ie = YoutubePlaylistIE(youtube_ie)
-		youtube_user_ie = YoutubeUserIE(youtube_ie)
-		youtube_search_ie = YoutubeSearchIE(youtube_ie)
-		google_ie = GoogleIE()
-		photobucket_ie = PhotobucketIE()
-		generic_ie = GenericIE()
-
-		# File downloader
-		fd = FileDownloader({
-			'usenetrc': opts.usenetrc,
-			'username': opts.username,
-			'password': opts.password,
-			'quiet': (opts.quiet or opts.geturl or opts.gettitle),
-			'forceurl': opts.geturl,
-			'forcetitle': opts.gettitle,
-			'simulate': (opts.simulate or opts.geturl or opts.gettitle),
-			'format': opts.format,
-			'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
-				or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
-				or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
-				or u'%(id)s.%(ext)s'),
-			'ignoreerrors': opts.ignoreerrors,
-			'ratelimit': opts.ratelimit,
-			'nooverwrites': opts.nooverwrites,
-			'continuedl': opts.continue_dl,
-			})
-		fd.add_info_extractor(youtube_search_ie)
-		fd.add_info_extractor(youtube_pl_ie)
-		fd.add_info_extractor(youtube_user_ie)
-		fd.add_info_extractor(metacafe_ie)
-		fd.add_info_extractor(youtube_ie)
-		fd.add_info_extractor(google_ie)
-		fd.add_info_extractor(photobucket_ie)
-
-		# This must come last since it's the
-		# fallback if none of the others work
-		fd.add_info_extractor(generic_ie)
-
-		# Update version
-		if opts.update_self:
-			update_self(fd, sys.argv[0])
-
-		# Maybe do nothing
-		if len(all_urls) < 1:
-			if not opts.update_self:
-				parser.error(u'you must provide at least one URL')
+				os.utime(new_path, (time.time(), information['filetime']))
+			except Exception:
+				self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')
+
+		if not self._keepvideo:
+			try:
+				os.remove(path)
+			except (IOError, OSError):
+				self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
+				return None
+
+		information['filepath'] = new_path
+		return information
+
+
+def updateSelf(downloader, filename):
+	''' Update the program file with the latest version from the repository '''
+	# Note: downloader only used for options
+	if not os.access(filename, os.W_OK):
+		sys.exit('ERROR: no write permissions on %s' % filename)
+
+	downloader.to_screen('Updating to latest version...')
+
+	urlh = None
+	try:
+		try:
+			urlh = urllib.urlopen(UPDATE_URL)
+			newcontent = urlh.read()
+
+			vmatch = re.search("__version__ = '([^']+)'", newcontent)
+			if vmatch is not None and vmatch.group(1) == __version__:
+				downloader.to_screen('youtube-dl is up-to-date (' + __version__ + ')')
+				return
+		finally:
+			# urlh is unbound if urlopen itself failed, so guard the close
+			if urlh is not None:
+				urlh.close()
+	except (IOError, OSError), err:
+		sys.exit('ERROR: unable to download latest version')
+
+	try:
+		outf = open(filename, 'wb')
+		try:
+			outf.write(newcontent)
+		finally:
+			outf.close()
+	except (IOError, OSError), err:
+		sys.exit('ERROR: unable to overwrite current version')
+
+	downloader.to_screen('Updated youtube-dl. Restart youtube-dl to use the new version.')
+
+def parseOpts():
+	# Deferred imports
+	import getpass
+	import optparse
+
+	def _format_option_string(option):
+		''' ('-o', '--option') -> -o, --option METAVAR'''
+
+		opts = []
+
+		if option._short_opts: opts.append(option._short_opts[0])
+		if option._long_opts: opts.append(option._long_opts[0])
+		if len(opts) > 1: opts.insert(1, ', ')
+
+		if option.takes_value(): opts.append(' %s' % option.metavar)
+
+		return "".join(opts)
+
+	def _find_term_columns():
+		columns = os.environ.get('COLUMNS', None)
+		if columns:
+			return int(columns)
+
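+		# Fall back to asking the terminal directly; `stty size` prints
+		# "rows columns", so the second field is the width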
+		try:
+			sp = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+			out,err = sp.communicate()
+			return int(out.split()[1])
+		except Exception:
+			pass
+		return None
+
+	max_width = 80
+	max_help_position = 80
+
+	# No need to wrap help messages if we're on a wide console
+	columns = _find_term_columns()
+	if columns: max_width = columns
+
+	fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
+	fmt.format_option_strings = _format_option_string
+
+	kw = {
+		'version'   : __version__,
+		'formatter' : fmt,
+		'usage' : '%prog [options] url [url...]',
+		'conflict_handler' : 'resolve',
+	}
+
+	parser = optparse.OptionParser(**kw)
+
+	# option groups
+	general        = optparse.OptionGroup(parser, 'General Options')
+	selection      = optparse.OptionGroup(parser, 'Video Selection')
+	authentication = optparse.OptionGroup(parser, 'Authentication Options')
+	video_format   = optparse.OptionGroup(parser, 'Video Format Options')
+	postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
+	filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
+	verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
+
+	general.add_option('-h', '--help',
+			action='help', help='print this help text and exit')
+	general.add_option('-v', '--version',
+			action='version', help='print program version and exit')
+	general.add_option('-U', '--update',
+			action='store_true', dest='update_self', help='update this program to latest version')
+	general.add_option('-i', '--ignore-errors',
+			action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
+	general.add_option('-r', '--rate-limit',
+			dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
+	general.add_option('-R', '--retries',
+			dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
+	general.add_option('--dump-user-agent',
+			action='store_true', dest='dump_user_agent',
+			help='display the current browser identification', default=False)
+	general.add_option('--list-extractors',
+			action='store_true', dest='list_extractors',
+			help='List all supported extractors and the URLs they would handle', default=False)
+
+	selection.add_option('--playlist-start',
+			dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
+	selection.add_option('--playlist-end',
+			dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
+	selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
+	selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
+
+	authentication.add_option('-u', '--username',
+			dest='username', metavar='USERNAME', help='account username')
+	authentication.add_option('-p', '--password',
+			dest='password', metavar='PASSWORD', help='account password')
+	authentication.add_option('-n', '--netrc',
+			action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
+
+
+	video_format.add_option('-f', '--format',
+			action='store', dest='format', metavar='FORMAT', help='video format code')
+	video_format.add_option('--all-formats',
+			action='store_const', dest='format', help='download all available video formats', const='all')
+	video_format.add_option('--max-quality',
+			action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
+	video_format.add_option('-F', '--list-formats',
+			action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
+
+
+	verbosity.add_option('-q', '--quiet',
+			action='store_true', dest='quiet', help='activates quiet mode', default=False)
+	verbosity.add_option('-s', '--simulate',
+			action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
+	verbosity.add_option('--skip-download',
+			action='store_true', dest='skip_download', help='do not download the video', default=False)
+	verbosity.add_option('-g', '--get-url',
+			action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
+	verbosity.add_option('-e', '--get-title',
+			action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
+	verbosity.add_option('--get-thumbnail',
+			action='store_true', dest='getthumbnail',
+			help='simulate, quiet but print thumbnail URL', default=False)
+	verbosity.add_option('--get-description',
+			action='store_true', dest='getdescription',
+			help='simulate, quiet but print video description', default=False)
+	verbosity.add_option('--get-filename',
+			action='store_true', dest='getfilename',
+			help='simulate, quiet but print output filename', default=False)
+	verbosity.add_option('--get-format',
+			action='store_true', dest='getformat',
+			help='simulate, quiet but print output format', default=False)
+	verbosity.add_option('--no-progress',
+			action='store_true', dest='noprogress', help='do not print progress bar', default=False)
+	verbosity.add_option('--console-title',
+			action='store_true', dest='consoletitle',
+			help='display progress in console titlebar', default=False)
+
+
+	filesystem.add_option('-t', '--title',
+			action='store_true', dest='usetitle', help='use title in file name', default=False)
+	filesystem.add_option('-l', '--literal',
+			action='store_true', dest='useliteral', help='use literal title in file name', default=False)
+	filesystem.add_option('-A', '--auto-number',
+			action='store_true', dest='autonumber',
+			help='number downloaded files starting from 00000', default=False)
+	filesystem.add_option('-o', '--output',
+			dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(stitle)s to get the title, %(uploader)s for the uploader name, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, and %% for a literal percent')
+	filesystem.add_option('-a', '--batch-file',
+			dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
+	filesystem.add_option('-w', '--no-overwrites',
+			action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
+	filesystem.add_option('-c', '--continue',
+			action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
+	filesystem.add_option('--no-continue',
+			action='store_false', dest='continue_dl',
+			help='do not resume partially downloaded files (restart from beginning)')
+	filesystem.add_option('--cookies',
+			dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
+	filesystem.add_option('--no-part',
+			action='store_true', dest='nopart', help='do not use .part files', default=False)
+	filesystem.add_option('--no-mtime',
+			action='store_false', dest='updatetime',
+			help='do not use the Last-modified header to set the file modification time', default=True)
+	filesystem.add_option('--write-description',
+			action='store_true', dest='writedescription',
+			help='write video description to a .description file', default=False)
+	filesystem.add_option('--write-info-json',
+			action='store_true', dest='writeinfojson',
+			help='write video metadata to a .info.json file', default=False)
+
+
+	postproc.add_option('--extract-audio', action='store_true', dest='extractaudio', default=False,
+			help='convert video files to audio-only files (requires ffmpeg and ffprobe)')
+	postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
+			help='"best", "aac", "vorbis" or "mp3"; best by default')
+	postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='128K',
+			help='ffmpeg audio bitrate specification, 128K by default')
+	postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
+			help='keeps the video file on disk after the post-processing; the video is erased by default')
+
+
+	parser.add_option_group(general)
+	parser.add_option_group(selection)
+	parser.add_option_group(filesystem)
+	parser.add_option_group(verbosity)
+	parser.add_option_group(video_format)
+	parser.add_option_group(authentication)
+	parser.add_option_group(postproc)
+
+	opts, args = parser.parse_args()
+
+	return parser, opts, args
+
+def gen_extractors():
+	""" Return a list of an instance of every supported extractor.
+	The order does matter; the first extractor matched is the one handling the URL.
+	"""
+	youtube_ie = YoutubeIE()
+	google_ie = GoogleIE()
+	yahoo_ie = YahooIE()
+	return [
+		YoutubePlaylistIE(youtube_ie),
+		YoutubeUserIE(youtube_ie),
+		YoutubeSearchIE(youtube_ie),
+		youtube_ie,
+		MetacafeIE(youtube_ie),
+		DailymotionIE(),
+		google_ie,
+		GoogleSearchIE(google_ie),
+		PhotobucketIE(),
+		yahoo_ie,
+		YahooSearchIE(yahoo_ie),
+		DepositFilesIE(),
+		FacebookIE(),
+		BlipTVIE(),
+		VimeoIE(),
+		MyVideoIE(),
+		ComedyCentralIE(),
+		EscapistIE(),
+		CollegeHumorIE(),
+		XVideosIE(),
+
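+		# GenericIE must come last: it is the fallback used when no other
+		# extractor claims the URL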
+		GenericIE()
+	]
+
+def main():
+	parser, opts, args = parseOpts()
+
+	# Open appropriate CookieJar
+	if opts.cookiefile is None:
+		jar = cookielib.CookieJar()
+	else:
+		try:
+			jar = cookielib.MozillaCookieJar(opts.cookiefile)
+			if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
+				jar.load()
+		except (IOError, OSError), err:
+			sys.exit(u'ERROR: unable to open cookie file')
+
+	# Dump user agent
+	if opts.dump_user_agent:
+		print std_headers['User-Agent']
+		sys.exit(0)
+
+	# Batch file verification
+	batchurls = []
+	if opts.batchfile is not None:
+		try:
+			if opts.batchfile == '-':
+				batchfd = sys.stdin
 			else:
-				sys.exit()
-		retcode = fd.download(all_urls)
-		sys.exit(retcode)
+				batchfd = open(opts.batchfile, 'r')
+			batchurls = batchfd.readlines()
+			batchurls = [x.strip() for x in batchurls]
+			batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
+		except IOError:
+			sys.exit(u'ERROR: batch file could not be read')
+	all_urls = batchurls + args
+
+	# General configuration
+	cookie_processor = urllib2.HTTPCookieProcessor(jar)
+	opener = urllib2.build_opener(urllib2.ProxyHandler(), cookie_processor, YoutubeDLHandler())
+	urllib2.install_opener(opener)
+	socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
+
+	extractors = gen_extractors()
+
+	if opts.list_extractors:
+		for ie in extractors:
+			print(ie.IE_NAME)
+			matchedUrls = filter(lambda url: ie.suitable(url), all_urls)
+			all_urls = filter(lambda url: url not in matchedUrls, all_urls)
+			for mu in matchedUrls:
+				print(u'  ' + mu)
+		sys.exit(0)
+
+	# Conflicting, missing and erroneous options
+	if opts.usenetrc and (opts.username is not None or opts.password is not None):
+		parser.error(u'using .netrc conflicts with giving username/password')
+	if opts.password is not None and opts.username is None:
+		parser.error(u'account username missing')
+	if opts.outtmpl is not None and (opts.useliteral or opts.usetitle or opts.autonumber):
+		parser.error(u'using output template conflicts with using title, literal title or auto number')
+	if opts.usetitle and opts.useliteral:
+		parser.error(u'using title conflicts with using literal title')
+	if opts.username is not None and opts.password is None:
+		opts.password = getpass.getpass(u'Type account password and press return:')
+	if opts.ratelimit is not None:
+		numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
+		if numeric_limit is None:
+			parser.error(u'invalid rate limit specified')
+		opts.ratelimit = numeric_limit
+	if opts.retries is not None:
+		try:
+			opts.retries = long(opts.retries)
+		except (TypeError, ValueError), err:
+			parser.error(u'invalid retry count specified')
+	try:
+		opts.playliststart = int(opts.playliststart)
+		if opts.playliststart <= 0:
+			raise ValueError(u'Playlist start must be positive')
+	except (TypeError, ValueError), err:
+		parser.error(u'invalid playlist start number specified')
+	try:
+		opts.playlistend = int(opts.playlistend)
+		if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
+			raise ValueError(u'Playlist end must be greater than playlist start')
+	except (TypeError, ValueError), err:
+		parser.error(u'invalid playlist end number specified')
+	if opts.extractaudio:
+		if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis']:
+			parser.error(u'invalid audio format specified')
+
+	# File downloader
+	fd = FileDownloader({
+		'usenetrc': opts.usenetrc,
+		'username': opts.username,
+		'password': opts.password,
+		'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
+		'forceurl': opts.geturl,
+		'forcetitle': opts.gettitle,
+		'forcethumbnail': opts.getthumbnail,
+		'forcedescription': opts.getdescription,
+		'forcefilename': opts.getfilename,
+		'forceformat': opts.getformat,
+		'simulate': opts.simulate,
+		'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
+		'format': opts.format,
+		'format_limit': opts.format_limit,
+		'listformats': opts.listformats,
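+		# The or-chain below picks the first template that applies: an
+		# explicit -o template wins, then format/title/autonumber
+		# combinations, then the plain '%(id)s.%(ext)s' default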
+		'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
+			or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
+			or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
+			or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
+			or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(stitle)s-%(id)s.%(ext)s')
+			or (opts.useliteral and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+			or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
+			or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
+			or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
+			or u'%(id)s.%(ext)s'),
+		'ignoreerrors': opts.ignoreerrors,
+		'ratelimit': opts.ratelimit,
+		'nooverwrites': opts.nooverwrites,
+		'retries': opts.retries,
+		'continuedl': opts.continue_dl,
+		'noprogress': opts.noprogress,
+		'playliststart': opts.playliststart,
+		'playlistend': opts.playlistend,
+		'logtostderr': opts.outtmpl == '-',
+		'consoletitle': opts.consoletitle,
+		'nopart': opts.nopart,
+		'updatetime': opts.updatetime,
+		'writedescription': opts.writedescription,
+		'writeinfojson': opts.writeinfojson,
+		'matchtitle': opts.matchtitle,
+		'rejecttitle': opts.rejecttitle,
+		})
+	for extractor in extractors:
+		fd.add_info_extractor(extractor)
+
+	# PostProcessors
+	if opts.extractaudio:
+		fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, keepvideo=opts.keepvideo))
+
+	# Update version
+	if opts.update_self:
+		updateSelf(fd, sys.argv[0])
+
+	# Maybe do nothing
+	if len(all_urls) < 1:
+		if not opts.update_self:
+			parser.error(u'you must provide at least one URL')
+		else:
+			sys.exit()
+	retcode = fd.download(all_urls)
+
+	# Dump cookie jar if requested
+	if opts.cookiefile is not None:
+		try:
+			jar.save()
+		except (IOError, OSError), err:
+			sys.exit(u'ERROR: unable to save cookie jar')
+
+	sys.exit(retcode)
 
+
+if __name__ == '__main__':
+	try:
+		main()
 	except DownloadError:
 		sys.exit(1)
 	except SameFileError:
 		sys.exit(u'ERROR: fixed output name but more than one file to download')
 	except KeyboardInterrupt:
 		sys.exit(u'\nERROR: Interrupted by user')
+
+# vim: set ts=4 sw=4 sts=4 noet ai si filetype=python: