Imported Upstream version 2010.10.03
diff --git a/youtube-dl b/youtube-dl
index 5afff4e61879526235ff5f262ce4be188c122725..51344f27be8932102674821016fb30506c155691 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -2,6 +2,7 @@
 # -*- coding: utf-8 -*-
 # Author: Ricardo Garcia Gonzalez
 # Author: Danny Colligan
+# Author: Benjamin Johnson
 # License: Public domain code
 import htmlentitydefs
 import httplib
@@ -13,15 +14,22 @@ import os.path
 import re
 import socket
 import string
+import subprocess
 import sys
 import time
 import urllib
 import urllib2
 
+# parse_qs was moved from the cgi module to the urlparse module recently.
+try:
+       from urlparse import parse_qs
+except ImportError:
+       from cgi import parse_qs
+
 std_headers = {
-       'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2',
+       'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100723 Firefox/3.6.8',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
-       'Accept': 'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
+       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-us,en;q=0.5',
 }
 
@@ -33,15 +41,70 @@ def preferredencoding():
        Returns the best encoding scheme for the system, based on
        locale.getpreferredencoding() and some further tweaks.
        """
+       def yield_preferredencoding():
+               try:
+                       pref = locale.getpreferredencoding()
+                       u'TEST'.encode(pref)
+               except:
+                       pref = 'UTF-8'
+               while True:
+                       yield pref
+       return yield_preferredencoding().next()
+
+def htmlentity_transform(matchobj):
+       """Transforms an HTML entity to a Unicode character.
+       
+       This function receives a match object and is intended to be used with
+       the re.sub() function.
+       """
+       entity = matchobj.group(1)
+
+       # Known non-numeric HTML entity
+       if entity in htmlentitydefs.name2codepoint:
+               return unichr(htmlentitydefs.name2codepoint[entity])
+
+       # Unicode character
+       mobj = re.match(ur'(?u)#(x?\d+)', entity)
+       if mobj is not None:
+               numstr = mobj.group(1)
+               if numstr.startswith(u'x'):
+                       base = 16
+                       numstr = u'0%s' % numstr
+               else:
+                       base = 10
+               return unichr(long(numstr, base))
+
+       # Unknown entity in name, return its literal representation
+       return (u'&%s;' % entity)
+
+def sanitize_title(utitle):
+       """Sanitizes a video title so it could be used as part of a filename."""
+       utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
+       return utitle.replace(unicode(os.sep), u'%')
+
+def sanitize_open(filename, open_mode):
+       """Try to open the given filename, and slightly tweak it if this fails.
+
+       Attempts to open the given filename. If this fails, it tries to change
+       the filename slightly, step by step, until it's either able to open it
+       or it fails and raises a final exception, like the standard open()
+       function.
+
+       It returns the tuple (stream, definitive_file_name).
+       """
        try:
-               pref = locale.getpreferredencoding()
-               # Mac OSX systems have this problem sometimes
-               if pref == '':
-                       return 'UTF-8'
-               return pref
-       except:
-               sys.stderr.write('WARNING: problem obtaining preferred encoding. Falling back to UTF-8.\n')
-               return 'UTF-8'
+               if filename == u'-':
+                       return (sys.stdout, filename)
+               stream = open(filename, open_mode)
+               return (stream, filename)
+       except (IOError, OSError), err:
+               # In case of error, try to remove win32 forbidden chars
+               filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename)
+
+               # An exception here should be caught in the caller
+               stream = open(filename, open_mode)
+               return (stream, filename)
+
 
 class DownloadError(Exception):
        """Download Error exception.
@@ -68,7 +131,7 @@ class PostProcessingError(Exception):
        """
        pass
 
-class UnavailableFormatError(Exception):
+class UnavailableVideoError(Exception):
        """Unavailable Format exception.
 
        This exception will be thrown when a video is requested
@@ -126,23 +189,28 @@ class FileDownloader(object):
        forcetitle:     Force printing title.
        simulate:       Do not download the video files.
        format:         Video format code.
+       format_limit:   Highest quality format to try.
        outtmpl:        Template for output names.
        ignoreerrors:   Do not stop on download errors.
        ratelimit:      Download speed limit, in bytes/sec.
        nooverwrites:   Prevent overwriting files.
+       retries:        Number of times to retry for HTTP error 5xx
        continuedl:     Try to continue downloads if possible.
+       noprogress:     Do not print the progress bar.
        """
 
        params = None
        _ies = []
        _pps = []
        _download_retcode = None
+       _num_downloads = None
 
        def __init__(self, params):
                """Create a FileDownloader object with the given options."""
                self._ies = []
                self._pps = []
                self._download_retcode = 0
+               self._num_downloads = 0
                self.params = params
        
        @staticmethod
@@ -219,16 +287,6 @@ class FileDownloader(object):
                multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
                return long(round(number * multiplier))
 
-       @staticmethod
-       def verify_url(url):
-               """Verify a URL is valid and data could be downloaded. Return real data URL."""
-               request = urllib2.Request(url, None, std_headers)
-               data = urllib2.urlopen(request)
-               data.read(1)
-               url = data.geturl()
-               data.close()
-               return url
-
        def add_info_extractor(self, ie):
                """Add an InfoExtractor object to the end of the list."""
                self._ies.append(ie)
@@ -239,11 +297,15 @@ class FileDownloader(object):
                self._pps.append(pp)
                pp.set_downloader(self)
        
-       def to_stdout(self, message, skip_eol=False):
+       def to_stdout(self, message, skip_eol=False, ignore_encoding_errors=False):
                """Print message to stdout if not in quiet mode."""
                """Print message to stdout if not in quiet mode."""
-               if not self.params.get('quiet', False):
-                       print (u'%s%s' % (message, [u'\n', u''][skip_eol])).encode(preferredencoding()),
+               try:
+                       if not self.params.get('quiet', False):
+                               print (u'%s%s' % (message, [u'\n', u''][skip_eol])).encode(preferredencoding()),
                        sys.stdout.flush()
+               except (UnicodeEncodeError), err:
+                       if not ignore_encoding_errors:
+                               raise
        
        def to_stderr(self, message):
                """Print message to stderr."""
@@ -281,20 +343,29 @@ class FileDownloader(object):
 
        def report_destination(self, filename):
                """Report destination filename."""
-               self.to_stdout(u'[download] Destination: %s' % filename)
+               self.to_stdout(u'[download] Destination: %s' % filename, ignore_encoding_errors=True)
        
        def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
                """Report download progress."""
+               if self.params.get('noprogress', False):
+                       return
                self.to_stdout(u'\r[download] %s of %s at %s ETA %s' %
                                (percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
 
        def report_resuming_byte(self, resume_len):
-               """Report attemtp to resume at given byte."""
+               """Report attempt to resume at given byte."""
                self.to_stdout(u'[download] Resuming download at byte %s' % resume_len)
        
+       def report_retry(self, count, retries):
+               """Report retry in case of HTTP error 5xx"""
+               self.to_stdout(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
+       
        def report_file_already_downloaded(self, file_name):
                """Report file has already been fully downloaded."""
-               self.to_stdout(u'[download] %s has already been downloaded' % file_name)
+               try:
+                       self.to_stdout(u'[download] %s has already been downloaded' % file_name)
+               except (UnicodeEncodeError), err:
+                       self.to_stdout(u'[download] The file has already been downloaded')
        
        def report_unable_to_resume(self):
                """Report it was impossible to resume download."""
@@ -302,57 +373,65 @@ class FileDownloader(object):
        
        def report_finish(self):
                """Report download finished."""
-               self.to_stdout(u'')
+               if self.params.get('noprogress', False):
+                       self.to_stdout(u'[download] Download completed')
+               else:
+                       self.to_stdout(u'')
+       
+       def increment_downloads(self):
+               """Increment the ordinal that assigns a number to each file."""
+               self._num_downloads += 1
 
        def process_info(self, info_dict):
                """Process a single dictionary returned by an InfoExtractor."""
                # Do nothing else if in simulate mode
                if self.params.get('simulate', False):
-                       try:
-                               info_dict['url'] = self.verify_url(info_dict['url'])
-                       except (OSError, IOError, urllib2.URLError, httplib.HTTPException, socket.error), err:
-                               raise UnavailableFormatError
-
                        # Forced printings
                        if self.params.get('forcetitle', False):
-                               print info_dict['title'].encode(preferredencoding())
+                               print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
                        if self.params.get('forceurl', False):
-                               print info_dict['url'].encode(preferredencoding())
+                               print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
+                       if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
+                               print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
+                       if self.params.get('forcedescription', False) and 'description' in info_dict:
+                               print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
 
                        return
                        
                try:
                        template_dict = dict(info_dict)
                        template_dict['epoch'] = unicode(long(time.time()))
+                       template_dict['ord'] = unicode('%05d' % self._num_downloads)
                        filename = self.params['outtmpl'] % template_dict
                except (ValueError, KeyError), err:
-                       self.trouble('ERROR: invalid output template or system charset: %s' % str(err))
-               if self.params['nooverwrites'] and os.path.exists(filename):
-                       self.to_stderr(u'WARNING: file exists: %s; skipping' % filename)
+                       self.trouble(u'ERROR: invalid system charset or erroneous output template')
+                       return
+               if self.params.get('nooverwrites', False) and os.path.exists(filename):
+                       self.to_stderr(u'WARNING: file exists and will be skipped')
                        return
 
                try:
                        self.pmkdir(filename)
                except (OSError, IOError), err:
-                       self.trouble('ERROR: unable to create directories: %s' % str(err))
+                       self.trouble(u'ERROR: unable to create directories: %s' % str(err))
                        return
 
                try:
-                       success = self._do_download(filename, info_dict['url'])
+                       success = self._do_download(filename, info_dict['url'].encode('utf-8'), info_dict.get('player_url', None))
                except (OSError, IOError), err:
-                       raise UnavailableFormatError
+                       raise UnavailableVideoError
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                       self.trouble('ERROR: unable to download video data: %s' % str(err))
+                       self.trouble(u'ERROR: unable to download video data: %s' % str(err))
                        return
                except (ContentTooShortError, ), err:
-                       self.trouble('ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
+                       self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                        return
 
                if success:
                        try:
                                self.post_process(filename, info_dict)
                        except (PostProcessingError), err:
-                               self.trouble('ERROR: postprocessing: %s' % str(err))
+                               self.trouble(u'ERROR: postprocessing: %s' % str(err))
                                return
 
        def download(self, url_list):
@@ -377,7 +456,7 @@ class FileDownloader(object):
                                break
 
                        if not suitable_found:
-                               self.trouble('ERROR: no suitable InfoExtractor: %s' % url)
+                               self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
 
                return self._download_retcode
 
@@ -390,36 +469,104 @@ class FileDownloader(object):
                        if info is None:
                                break
        
-       def _do_download(self, filename, url):
-               stream = None
-               open_mode = 'ab'
+       def _download_with_rtmpdump(self, filename, url, player_url):
+               self.report_destination(filename)
+
+               # Check for rtmpdump first
+               try:
+                       subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
+               except (OSError, IOError):
+                       self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
+                       return False
+
+               # Download using rtmpdump. rtmpdump returns exit code 2 when
+               # the connection was interrumpted and resuming appears to be
+               # possible. This is part of rtmpdump's normal usage, AFAIK.
+               basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', filename]
+               retval = subprocess.call(basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)])
+               while retval == 2 or retval == 1:
+                       prevsize = os.path.getsize(filename)
+                       self.to_stdout(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
+                       time.sleep(5.0) # This seems to be needed
+                       retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
+                       cursize = os.path.getsize(filename)
+                       if prevsize == cursize and retval == 1:
+                               break
+               if retval == 0:
+                       self.to_stdout(u'\r[rtmpdump] %s bytes' % os.path.getsize(filename))
+                       return True
+               else:
+                       self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
+                       return False
+
+       def _do_download(self, filename, url, player_url):
+               # Attempt to download using rtmpdump
+               if url.startswith('rtmp'):
+                       return self._download_with_rtmpdump(filename, url, player_url)
 
+               stream = None
+               open_mode = 'wb'
                basic_request = urllib2.Request(url, None, std_headers)
                request = urllib2.Request(url, None, std_headers)
 
-               # Attempt to resume download with "continuedl" option
+               # Establish possible resume length
                if os.path.isfile(filename):
                        resume_len = os.path.getsize(filename)
                else:
                        resume_len = 0
-               if self.params['continuedl'] and resume_len != 0:
+
+               # Request parameters in case of being able to resume
+               if self.params.get('continuedl', False) and resume_len != 0:
                        self.report_resuming_byte(resume_len)
                        request.add_header('Range','bytes=%d-' % resume_len)
+                       open_mode = 'ab'
 
-               # Establish connection
-               try:
-                       data = urllib2.urlopen(request)
-               except (urllib2.HTTPError, ), err:
-                       if err.code != 416: #  416 is 'Requested range not satisfiable'
-                               raise
-                       data = urllib2.urlopen(basic_request)
-                       content_length = data.info()['Content-Length']
-                       if content_length is not None and long(content_length) == resume_len:
-                               self.report_file_already_downloaded(filename)
-                               return True
-                       else:
-                               self.report_unable_to_resume()
-                               open_mode = 'wb'
+               count = 0
+               retries = self.params.get('retries', 0)
+               while count <= retries:
+                       # Establish connection
+                       try:
+                               data = urllib2.urlopen(request)
+                               break
+                       except (urllib2.HTTPError, ), err:
+                               if (err.code < 500 or err.code >= 600) and err.code != 416:
+                                       # Unexpected HTTP error
+                                       raise
+                               elif err.code == 416:
+                                       # Unable to resume (requested range not satisfiable)
+                                       try:
+                                               # Open the connection again without the range header
+                                               data = urllib2.urlopen(basic_request)
+                                               content_length = data.info()['Content-Length']
+                                       except (urllib2.HTTPError, ), err:
+                                               if err.code < 500 or err.code >= 600:
+                                                       raise
+                                       else:
+                                               # Examine the reported length
+                                               if (content_length is not None and
+                                                   (resume_len - 100 < long(content_length) < resume_len + 100)):
+                                                       # The file had already been fully downloaded.
+                                                       # Explanation to the above condition: in issue #175 it was revealed that
+                                                       # YouTube sometimes adds or removes a few bytes from the end of the file,
+                                                       # changing the file size slightly and causing problems for some users. So
+                                                       # I decided to implement a suggested change and consider the file
+                                                       # completely downloaded if the file size differs less than 100 bytes from
+                                                       # the one in the hard drive.
+                                                       self.report_file_already_downloaded(filename)
+                                                       return True
+                                               else:
+                                                       # The length does not match, we start the download over
+                                                       self.report_unable_to_resume()
+                                                       open_mode = 'wb'
+                                                       break
+                       # Retry
+                       count += 1
+                       if count <= retries:
+                               self.report_retry(count, retries)
+
+               if count > retries:
+                       self.trouble(u'ERROR: giving up after %s retries' % retries)
+                       return False
 
                data_len = data.info().get('Content-length', None)
                data_len_str = self.format_bytes(data_len)
@@ -439,12 +586,16 @@ class FileDownloader(object):
                        # Open file just in time
                        if stream is None:
                                try:
-                                       stream = open(filename, open_mode)
+                                       (stream, filename) = sanitize_open(filename, open_mode)
                                        self.report_destination(filename)
                                except (OSError, IOError), err:
-                                       self.trouble('ERROR: unable to open for writing: %s' % str(err))
+                                       self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
                                        return False
-                       stream.write(data_block)
+                       try:
+                               stream.write(data_block)
+                       except (IOError, OSError), err:
+                               self.trouble(u'\nERROR: unable to write data: %s' % str(err))
+                               return False
                        block_size = self.best_block_size(after - before, data_block_len)
 
                        # Progress message
@@ -479,6 +630,16 @@ class InfoExtractor(object):
        title:          Literal title.
        stitle:         Simplified title.
        ext:            Video filename extension.
+       format:         Video format.
+       player_url:     SWF Player URL (may be None).
+
+       The following fields are optional. Their primary purpose is to allow
+       youtube-dl to serve as the backend for a video search function, such
+       as the one in youtube2mp3.  They are only used when their respective
+       forced printing functions are called:
+
+       thumbnail:      Full URL to a video thumbnail image.
+       description:    One-line video description.
 
        Subclasses of this one should re-define the _real_initialize() and
        _real_extract() methods, as well as the suitable() static method.
@@ -525,46 +686,28 @@ class InfoExtractor(object):
 class YoutubeIE(InfoExtractor):
        """Information extractor for youtube.com."""
 
-       _VALID_URL = r'^((?:http://)?(?:\w+\.)?youtube\.com/(?:(?:v/)|(?:(?:watch(?:\.php)?)?\?(?:.+&)?v=)))?([0-9A-Za-z_-]+)(?(1).+)?$'
-       _LANG_URL = r'http://uk.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
+       _VALID_URL = r'^((?:http://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/(?:(?:v/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))))?([0-9A-Za-z_-]+)(?(1).+)?$'
+       _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
        _LOGIN_URL = 'http://www.youtube.com/signup?next=/&gl=US&hl=en'
        _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
        _NETRC_MACHINE = 'youtube'
-       _available_formats = ['22', '35', '18', '5', '17', '13', None] # listed in order of priority for -b flag
+       # Listed in order of quality
+       _available_formats = ['38', '37', '22', '45', '35', '34', '43', '18', '6', '5', '17', '13']
        _video_extensions = {
                '13': '3gp',
                '17': 'mp4',
                '18': 'mp4',
                '22': 'mp4',
+               '37': 'mp4',
+               '38': 'video', # You actually don't know if this will be MOV, AVI or whatever
+               '43': 'webm',
+               '45': 'webm',
        }
 
        @staticmethod
        def suitable(url):
                return (re.match(YoutubeIE._VALID_URL, url) is not None)
 
-       @staticmethod
-       def htmlentity_transform(matchobj):
-               """Transforms an HTML entity to a Unicode character."""
-               entity = matchobj.group(1)
-
-               # Known non-numeric HTML entity
-               if entity in htmlentitydefs.name2codepoint:
-                       return unichr(htmlentitydefs.name2codepoint[entity])
-
-               # Unicode character
-               mobj = re.match(ur'(?u)#(x?\d+)', entity)
-               if mobj is not None:
-                       numstr = mobj.group(1)
-                       if numstr.startswith(u'x'):
-                               base = 16
-                               numstr = u'0%s' % numstr
-                       else:
-                               base = 10
-                       return unichr(long(numstr, base))
-
-               # Unknown entity in name, return its literal representation
-               return (u'&%s;' % entity)
-
        def report_lang(self):
                """Report attempt to set language."""
                self._downloader.to_stdout(u'[youtube] Setting language')
@@ -577,6 +720,10 @@ class YoutubeIE(InfoExtractor):
                """Report attempt to confirm age."""
                self._downloader.to_stdout(u'[youtube] Confirming age')
        
+       def report_video_webpage_download(self, video_id):
+               """Report attempt to download video webpage."""
+               self._downloader.to_stdout(u'[youtube] %s: Downloading video webpage' % video_id)
+       
        def report_video_info_webpage_download(self, video_id):
                """Report attempt to download video info webpage."""
                self._downloader.to_stdout(u'[youtube] %s: Downloading video info webpage' % video_id)
@@ -589,6 +736,10 @@ class YoutubeIE(InfoExtractor):
                """Report extracted video URL."""
                self._downloader.to_stdout(u'[youtube] %s: Format %s not available' % (video_id, format))
        
+       def report_rtmp_download(self):
+               """Indicate the download will use the RTMP protocol."""
+               self._downloader.to_stdout(u'[youtube] RTMP download detected')
+       
        def _real_initialize(self):
                if self._downloader is None:
                        return
@@ -666,72 +817,119 @@ class YoutubeIE(InfoExtractor):
                        return
                video_id = mobj.group(2)
 
-               # Downloader parameters
-               best_quality = False
-               format_param = None
-               quality_index = 0
-               if self._downloader is not None:
-                       params = self._downloader.params
-                       format_param = params.get('format', None)
-                       if format_param == '0':
-                               format_param = self._available_formats[quality_index]
-                               best_quality = True
+               # Get video webpage
+               self.report_video_webpage_download(video_id)
+               request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en' % video_id, None, std_headers)
+               try:
+                       video_webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+                       return
 
 
-                       # Extension
-                       video_extension = self._video_extensions.get(format_param, 'flv')
+               # Attempt to extract SWF player URL
+               mobj = re.search(r'swfConfig.*"(http://.*?watch.*?-.*?\.swf)"', video_webpage)
+               if mobj is not None:
+                       player_url = mobj.group(1)
+               else:
+                       player_url = None
 
 
-                       video_info_url = 'http://www.youtube.com/get_video_info?&video_id=%s&el=detailpage&ps=default&eurl=&gl=US&hl=en' % video_id
+               # Get video info
+               self.report_video_info_webpage_download(video_id)
+               for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
+                       video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
+                                          % (video_id, el_type))
                        request = urllib2.Request(video_info_url, None, std_headers)
                        try:
-                               self.report_video_info_webpage_download(video_id)
                                video_info_webpage = urllib2.urlopen(request).read()
+                               video_info = parse_qs(video_info_webpage)
+                               if 'token' in video_info:
+                                       break
                        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
                                return
-                       self.report_information_extraction(video_id)
+               if 'token' not in video_info:
+                       if 'reason' in video_info:
+                               self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8'))
+                       else:
+                               self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
+                       return
 
-                       # "t" param
-                       mobj = re.search(r'(?m)&token=([^&]+)(?:&|$)', video_info_webpage)
-                       if mobj is None:
-                               # Attempt to see if YouTube has issued an error message
-                               mobj = re.search(r'(?m)&reason=([^&]+)(?:&|$)', video_info_webpage)
-                               if mobj is None:
-                                       self._downloader.trouble(u'ERROR: unable to extract "t" parameter for unknown reason')
-                                       stream = open('reportme-ydl-%s.dat' % time.time(), 'wb')
-                                       stream.write(video_info_webpage)
-                                       stream.close()
-                               else:
-                                       reason = urllib.unquote_plus(mobj.group(1))
-                                       self._downloader.trouble(u'ERROR: YouTube said: %s' % reason.decode('utf-8'))
-                               return
-                       token = urllib.unquote(mobj.group(1))
-                       video_real_url = 'http://www.youtube.com/get_video?video_id=%s&t=%s&eurl=&el=detailpage&ps=default&gl=US&hl=en' % (video_id, token)
-                       if format_param is not None:
-                               video_real_url = '%s&fmt=%s' % (video_real_url, format_param)
+               # Start extracting information
+               self.report_information_extraction(video_id)
 
-                       # uploader
-                       mobj = re.search(r'(?m)&author=([^&]+)(?:&|$)', video_info_webpage)
-                       if mobj is None:
-                               self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
-                               return
-                       video_uploader = urllib.unquote(mobj.group(1))
+               # uploader
+               if 'author' not in video_info:
+                       self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+                       return
+               video_uploader = urllib.unquote_plus(video_info['author'][0])
 
-                       # title
-                       mobj = re.search(r'(?m)&title=([^&]+)(?:&|$)', video_info_webpage)
-                       if mobj is None:
-                               self._downloader.trouble(u'ERROR: unable to extract video title')
+               # title
+               if 'title' not in video_info:
+                       self._downloader.trouble(u'ERROR: unable to extract video title')
+                       return
+               video_title = urllib.unquote_plus(video_info['title'][0])
+               video_title = video_title.decode('utf-8')
+               video_title = sanitize_title(video_title)
+
+               # simplified title
+               simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+               simple_title = simple_title.strip(ur'_')
+
+               # thumbnail image
+               if 'thumbnail_url' not in video_info:
+                       self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+                       video_thumbnail = ''
+               else:   # don't panic if we can't find it
+                       video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])
+
+               # description
+               video_description = 'No description available.'
+               if self._downloader.params.get('forcedescription', False):
+                       mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
+                       if mobj is not None:
+                               video_description = mobj.group(1)
+
+               # token
+               video_token = urllib.unquote_plus(video_info['token'][0])
+
+               # Decide which formats to download
+               requested_format = self._downloader.params.get('format', None)
+               get_video_template = 'http://www.youtube.com/get_video?video_id=%s&t=%s&eurl=&el=&ps=&asv=&fmt=%%s' % (video_id, video_token)
+
+               if 'fmt_url_map' in video_info:
+                       url_map = dict(tuple(pair.split('|')) for pair in video_info['fmt_url_map'][0].split(','))
+                       format_limit = self._downloader.params.get('format_limit', None)
+                       if format_limit is not None and format_limit in self._available_formats:
+                               format_list = self._available_formats[self._available_formats.index(format_limit):]
+                       else:
+                               format_list = self._available_formats
+                       existing_formats = [x for x in format_list if x in url_map]
+                       if len(existing_formats) == 0:
+                               self._downloader.trouble(u'ERROR: no known formats available for video')
                                return
                                return
-                       video_title = urllib.unquote(mobj.group(1))
-                       video_title = video_title.decode('utf-8')
-                       video_title = re.sub(ur'(?u)&(.+?);', self.htmlentity_transform, video_title)
-                       video_title = video_title.replace(os.sep, u'%')
+                       if requested_format is None:
+                               video_url_list = [(existing_formats[0], get_video_template % existing_formats[0])] # Best quality
+                       elif requested_format == '-1':
+                               video_url_list = [(f, get_video_template % f) for f in existing_formats] # All formats
+                       else:
+                               video_url_list = [(requested_format, get_video_template % requested_format)] # Specific format
+
+               elif 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
+                       self.report_rtmp_download()
+                       video_url_list = [(None, video_info['conn'][0])]
+
+               else:
+                       self._downloader.trouble(u'ERROR: no fmt_url_map or conn information found in video info')
+                       return
+
+               for format_param, video_real_url in video_url_list:
+                       # At this point we have a new video
+                       self._downloader.increment_downloads()
 
-                       # simplified title
-                       simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
-                       simple_title = simple_title.strip(ur'_')
+                       # Extension
+                       video_extension = self._video_extensions.get(format_param, 'flv')
 
+                       # Find the video URL in fmt_url_map or conn paramters
                        try:
                                # Process video information
                                self._downloader.process_info({
@@ -741,24 +939,13 @@ class YoutubeIE(InfoExtractor):
                                        'title':        video_title,
                                        'stitle':       simple_title,
                                        'ext':          video_extension.decode('utf-8'),
+                                       'format':       (format_param is None and u'NA' or format_param.decode('utf-8')),
+                                       'thumbnail':    video_thumbnail.decode('utf-8'),
+                                       'description':  video_description.decode('utf-8'),
+                                       'player_url':   player_url,
                                })
-
-                               return
-
-                       except UnavailableFormatError, err:
-                               if best_quality:
-                                       if quality_index == len(self._available_formats) - 1:
-                                               # I don't ever expect this to happen
-                                               self._downloader.trouble(u'ERROR: no known formats available for video')
-                                               return
-                                       else:
-                                               self.report_unavailable_format(video_id, format_param)
-                                               quality_index += 1
-                                               format_param = self._available_formats[quality_index]
-                                               continue
-                               else: 
-                                       self._downloader.trouble('ERROR: format not available for video')
-                                       return
+                       except UnavailableVideoError, err:
+                               self._downloader.trouble(u'ERROR: unable to download video (format may not be available)')
 
 
 class MetacafeIE(InfoExtractor):
@@ -831,8 +1018,10 @@ class MetacafeIE(InfoExtractor):
                        self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
                        return
 
+               # At this point we have a new video
+               self._downloader.increment_downloads()
+
                simple_title = mobj.group(2).decode('utf-8')
-               video_extension = 'flv'
 
                # Retrieve video webpage to extract further information
                request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
@@ -846,28 +1035,42 @@ class MetacafeIE(InfoExtractor):
                # Extract URL, uploader and title from webpage
                self.report_extraction(video_id)
                mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
-               if mobj is None:
-                       self._downloader.trouble(u'ERROR: unable to extract media URL')
-                       return
-               mediaURL = urllib.unquote(mobj.group(1))
-
-               #mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
-               #if mobj is None:
-               #       self._downloader.trouble(u'ERROR: unable to extract gdaKey')
-               #       return
-               #gdaKey = mobj.group(1)
-               #
-               #video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
-
-               video_url = mediaURL
+               if mobj is not None:
+                       mediaURL = urllib.unquote(mobj.group(1))
+                       video_extension = mediaURL[-3:]
+                       
+                       # Extract gdaKey if available
+                       mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
+                       if mobj is None:
+                               video_url = mediaURL
+                       else:
+                               gdaKey = mobj.group(1)
+                               video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
+               else:
+                       mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
+                       if mobj is None:
+                               self._downloader.trouble(u'ERROR: unable to extract media URL')
+                               return
+                       vardict = parse_qs(mobj.group(1))
+                       if 'mediaData' not in vardict:
+                               self._downloader.trouble(u'ERROR: unable to extract media URL')
+                               return
+                       mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
+                       if mobj is None:
+                               self._downloader.trouble(u'ERROR: unable to extract media URL')
+                               return
+                       mediaURL = mobj.group(1).replace('\\/', '/')
+                       video_extension = mediaURL[-3:]
+                       video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
 
                mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
                if mobj is None:
                        self._downloader.trouble(u'ERROR: unable to extract title')
                        return
                video_title = mobj.group(1).decode('utf-8')
+               video_title = sanitize_title(video_title)
 
-               mobj = re.search(r'(?ms)<li id="ChnlUsr">.*?Submitter:.*?<a .*?>(.*?)<', webpage)
+               mobj = re.search(r'(?ms)By:\s*<a .*?>(.+?)<', webpage)
                if mobj is None:
                        self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
                        return
@@ -882,134 +1085,854 @@ class MetacafeIE(InfoExtractor):
                                'title':        video_title,
                                'stitle':       simple_title,
                                'ext':          video_extension.decode('utf-8'),
+                               'format':       u'NA',
+                               'player_url':   None,
                        })
-               except UnavailableFormatError:
-                       self._downloader.trouble(u'ERROR: format not available for video')
+               except UnavailableVideoError:
+                       self._downloader.trouble(u'ERROR: unable to download video')
 
 
-class YoutubeSearchIE(InfoExtractor):
-       """Information Extractor for YouTube search queries."""
-       _VALID_QUERY = r'ytsearch(\d+|all)?:[\s\S]+'
-       _TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
-       _VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
-       _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
-       _youtube_ie = None
-       _max_youtube_results = 1000
+class DailymotionIE(InfoExtractor):
+       """Information Extractor for Dailymotion"""
 
-       def __init__(self, youtube_ie, downloader=None):
+       _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)'
+
+       def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
-               self._youtube_ie = youtube_ie
-       
+
        @staticmethod
        def suitable(url):
-               return (re.match(YoutubeSearchIE._VALID_QUERY, url) is not None)
+               return (re.match(DailymotionIE._VALID_URL, url) is not None)
 
-       def report_download_page(self, query, pagenum):
-               """Report attempt to download playlist page with given number."""
-               self._downloader.to_stdout(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
+       def report_download_webpage(self, video_id):
+               """Report webpage download."""
+               self._downloader.to_stdout(u'[dailymotion] %s: Downloading webpage' % video_id)
+       
+       def report_extraction(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_stdout(u'[dailymotion] %s: Extracting information' % video_id)
 
        def _real_initialize(self):
-               self._youtube_ie.initialize()
-       
-       def _real_extract(self, query):
-               mobj = re.match(self._VALID_QUERY, query)
+               return
+
+       def _real_extract(self, url):
+               # Extract id and simplified title from URL
+               mobj = re.match(self._VALID_URL, url)
                if mobj is None:
-                       self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+                       self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
                        return
 
-               prefix, query = query.split(':')
-               prefix = prefix[8:]
-               if prefix == '':
-                       self._download_n_results(query, 1)
-                       return
-               elif prefix == 'all':
-                       self._download_n_results(query, self._max_youtube_results)
+               # At this point we have a new video
+               self._downloader.increment_downloads()
+               video_id = mobj.group(1)
+
+               simple_title = mobj.group(2).decode('utf-8')
+               video_extension = 'flv'
+
+               # Retrieve video webpage to extract further information
+               request = urllib2.Request(url)
+               try:
+                       self.report_download_webpage(video_id)
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
                        return
-               else:
-                       try:
-                               n = long(prefix)
-                               if n <= 0:
-                                       self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
-                                       return
-                               elif n > self._max_youtube_results:
-                                       self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)'  % (self._max_youtube_results, n))
-                                       n = self._max_youtube_results
-                               self._download_n_results(query, n)
-                               return
-                       except ValueError: # parsing prefix as integer fails
-                               self._download_n_results(query, 1)
-                               return
 
-       def _download_n_results(self, query, n):
-               """Downloads a specified number of results for a query"""
+               # Extract URL, uploader and title from webpage
+               self.report_extraction(video_id)
+               mobj = re.search(r'(?i)addVariable\(\"video\"\s*,\s*\"([^\"]*)\"\)', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract media URL')
+                       return
+               mediaURL = urllib.unquote(mobj.group(1))
 
-               video_ids = []
-               already_seen = set()
-               pagenum = 1
+               # TODO: prepend http://www.dailymotion.com/ when the extracted media URL is relative
 
-               while True:
-                       self.report_download_page(query, pagenum)
-                       result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
-                       request = urllib2.Request(result_url, None, std_headers)
-                       try:
-                               page = urllib2.urlopen(request).read()
-                       except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                               self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
-                               return
+               video_url = mediaURL
 
-                       # Extract video identifiers
-                       for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-                               video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
-                               if video_id not in already_seen:
-                                       video_ids.append(video_id)
-                                       already_seen.add(video_id)
-                                       if len(video_ids) == n:
-                                               # Specified n videos reached
-                                               for id in video_ids:
-                                                       self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
-                                               return
+               # '<meta\s+name="title"\s+content="Dailymotion\s*[:\-]\s*(.*?)"\s*\/\s*>'
+               mobj = re.search(r'(?im)<title>Dailymotion\s*[\-:]\s*(.+?)</title>', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract title')
+                       return
+               video_title = mobj.group(1).decode('utf-8')
+               video_title = sanitize_title(video_title)
 
-                       if re.search(self._MORE_PAGES_INDICATOR, page) is None:
-                               for id in video_ids:
-                                       self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
-                               return
+               mobj = re.search(r'(?im)<div class="dmco_html owner">.*?<a class="name" href="/.+?">(.+?)</a>', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+                       return
+               video_uploader = mobj.group(1)
 
-                       pagenum = pagenum + 1
+               try:
+                       # Process video information
+                       self._downloader.process_info({
+                               'id':           video_id.decode('utf-8'),
+                               'url':          video_url.decode('utf-8'),
+                               'uploader':     video_uploader.decode('utf-8'),
+                               'title':        video_title,
+                               'stitle':       simple_title,
+                               'ext':          video_extension.decode('utf-8'),
+                               'format':       u'NA',
+                               'player_url':   None,
+                       })
+               except UnavailableVideoError:
+                       self._downloader.trouble(u'ERROR: unable to download video')
 
-class YoutubePlaylistIE(InfoExtractor):
-       """Information Extractor for YouTube playlists."""
+class GoogleIE(InfoExtractor):
+       """Information extractor for video.google.com."""
 
-       _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:view_play_list|my_playlists)\?.*?p=([^&]+).*'
-       _TEMPLATE_URL = 'http://www.youtube.com/view_play_list?p=%s&page=%s&gl=US&hl=en'
-       _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
-       _MORE_PAGES_INDICATOR = r'/view_play_list?p=%s&page=%s'
-       _youtube_ie = None
+       _VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'
 
-       def __init__(self, youtube_ie, downloader=None):
+       def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
-               self._youtube_ie = youtube_ie
-       
+
        @staticmethod
        def suitable(url):
-               return (re.match(YoutubePlaylistIE._VALID_URL, url) is not None)
+               return (re.match(GoogleIE._VALID_URL, url) is not None)
 
-       def report_download_page(self, playlist_id, pagenum):
-               """Report attempt to download playlist page with given number."""
-               self._downloader.to_stdout(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
+       def report_download_webpage(self, video_id):
+               """Report webpage download."""
+               self._downloader.to_stdout(u'[video.google] %s: Downloading webpage' % video_id)
+
+       def report_extraction(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_stdout(u'[video.google] %s: Extracting information' % video_id)
 
        def _real_initialize(self):
-               self._youtube_ie.initialize()
-       
+               return
+
        def _real_extract(self, url):
-               # Extract playlist id
+               # Extract id from URL
                mobj = re.match(self._VALID_URL, url)
                if mobj is None:
-                       self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+                       self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
                        return
 
-               # Download playlist pages
-               playlist_id = mobj.group(1)
-               video_ids = []
+               # At this point we have a new video
+               self._downloader.increment_downloads()
+               video_id = mobj.group(1)
+
+               video_extension = 'mp4'
+
+               # Retrieve video webpage to extract further information
+               request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
+               try:
+                       self.report_download_webpage(video_id)
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+                       return
+
+               # Extract URL, uploader, and title from webpage
+               self.report_extraction(video_id)
+               mobj = re.search(r"download_url:'([^']+)'", webpage)
+               if mobj is None:
+                       video_extension = 'flv'
+                       mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract media URL')
+                       return
+               mediaURL = urllib.unquote(mobj.group(1))
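+               # in the fallback case the page embeds the URL with JS hex escapes: \x3d is '=' and \x26 is '&'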
+               mediaURL = mediaURL.replace('\\x3d', '\x3d')
+               mediaURL = mediaURL.replace('\\x26', '\x26')
+
+               video_url = mediaURL
+
+               mobj = re.search(r'<title>(.*)</title>', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract title')
+                       return
+               video_title = mobj.group(1).decode('utf-8')
+               video_title = sanitize_title(video_title)
+               simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+
+               # Extract video description
+               mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video description')
+                       return
+               video_description = mobj.group(1).decode('utf-8')
+               if not video_description:
+                       video_description = 'No description available.'
+
+               # Extract video thumbnail
+               if self._downloader.params.get('forcethumbnail', False):
+                       request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
+                       try:
+                               webpage = urllib2.urlopen(request).read()
+                       except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                               self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+                               return
+                       mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
+                       if mobj is None:
+                               self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+                               return
+                       video_thumbnail = mobj.group(1)
+               else:   # we need something to pass to process_info
+                       video_thumbnail = ''
+
+
+               try:
+                       # Process video information
+                       self._downloader.process_info({
+                               'id':           video_id.decode('utf-8'),
+                               'url':          video_url.decode('utf-8'),
+                               'uploader':     u'NA',
+                               'title':        video_title,
+                               'stitle':       simple_title,
+                               'ext':          video_extension.decode('utf-8'),
+                               'format':       u'NA',
+                               'player_url':   None,
+                       })
+               except UnavailableVideoError:
+                       self._downloader.trouble(u'ERROR: unable to download video')
+
+
+class PhotobucketIE(InfoExtractor):
+       """Information extractor for photobucket.com."""
+
+       _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+
+       def __init__(self, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+
+       @staticmethod
+       def suitable(url):
+               return (re.match(PhotobucketIE._VALID_URL, url) is not None)
+
+       def report_download_webpage(self, video_id):
+               """Report webpage download."""
+               self._downloader.to_stdout(u'[photobucket] %s: Downloading webpage' % video_id)
+
+       def report_extraction(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_stdout(u'[photobucket] %s: Extracting information' % video_id)
+
+       def _real_initialize(self):
+               return
+
+       def _real_extract(self, url):
+               # Extract id from URL
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+                       return
+
+               # At this point we have a new video
+               self._downloader.increment_downloads()
+               video_id = mobj.group(1)
+
+               video_extension = 'flv'
+
+               # Retrieve video webpage to extract further information
+               request = urllib2.Request(url)
+               try:
+                       self.report_download_webpage(video_id)
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+                       return
+
+               # Extract URL, uploader, and title from webpage
+               self.report_extraction(video_id)
+               mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract media URL')
+                       return
+               mediaURL = urllib.unquote(mobj.group(1))
+
+               video_url = mediaURL
+
+               mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract title')
+                       return
+               video_title = mobj.group(1).decode('utf-8')
+               video_title = sanitize_title(video_title)
+               simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+
+               video_uploader = mobj.group(2).decode('utf-8')
+
+               try:
+                       # Process video information
+                       self._downloader.process_info({
+                               'id':           video_id.decode('utf-8'),
+                               'url':          video_url.decode('utf-8'),
+                               'uploader':     video_uploader,
+                               'title':        video_title,
+                               'stitle':       simple_title,
+                               'ext':          video_extension.decode('utf-8'),
+                               'format':       u'NA',
+                               'player_url':   None,
+                       })
+               except UnavailableVideoError:
+                       self._downloader.trouble(u'ERROR: unable to download video')
+
+
+class YahooIE(InfoExtractor):
+       """Information extractor for video.yahoo.com."""
+
+       # _VALID_URL matches all Yahoo! Video URLs
+       # _VPAGE_URL matches only the extractable '/watch/' URLs
+       _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
+       _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
+
+       def __init__(self, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+
+       @staticmethod
+       def suitable(url):
+               return (re.match(YahooIE._VALID_URL, url) is not None)
+
+       def report_download_webpage(self, video_id):
+               """Report webpage download."""
+               self._downloader.to_stdout(u'[video.yahoo] %s: Downloading webpage' % video_id)
+
+       def report_extraction(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_stdout(u'[video.yahoo] %s: Extracting information' % video_id)
+
+       def _real_initialize(self):
+               return
+
+       def _real_extract(self, url, new_video=True):
+               # Extract ID from URL
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+                       return
+
+               # At this point we have a new video
+               self._downloader.increment_downloads()
+               video_id = mobj.group(2)
+               video_extension = 'flv'
+
+               # Rewrite valid but non-extractable URLs as
+               # extractable English language /watch/ URLs
+               if re.match(self._VPAGE_URL, url) is None:
+                       request = urllib2.Request(url)
+                       try:
+                               webpage = urllib2.urlopen(request).read()
+                       except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                               self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+                               return
+
+                       mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
+                       if mobj is None:
+                               self._downloader.trouble(u'ERROR: Unable to extract id field')
+                               return
+                       yahoo_id = mobj.group(1)
+
+                       mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
+                       if mobj is None:
+                               self._downloader.trouble(u'ERROR: Unable to extract vid field')
+                               return
+                       yahoo_vid = mobj.group(1)
+
+                       url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
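+                       # retry extraction against the canonical /watch/ URL rebuilt from the vid/id pair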
+                       return self._real_extract(url, new_video=False)
+
+               # Retrieve video webpage to extract further information
+               request = urllib2.Request(url)
+               try:
+                       self.report_download_webpage(video_id)
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+                       return
+
+               # Extract uploader and title from webpage
+               self.report_extraction(video_id)
+               mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video title')
+                       return
+               video_title = mobj.group(1).decode('utf-8')
+               simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+
+               mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video uploader')
+                       return
+               video_uploader = mobj.group(2).decode('utf-8')
+
+               # Extract video thumbnail
+               mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+                       return
+               video_thumbnail = mobj.group(1).decode('utf-8')
+
+               # Extract video description
+               mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video description')
+                       return
+               video_description = mobj.group(1).decode('utf-8')
+               if not video_description: video_description = 'No description available.'
+
+               # Extract video height and width
+               mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video height')
+                       return
+               yv_video_height = mobj.group(1)
+
+               mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video width')
+                       return
+               yv_video_width = mobj.group(1)
+
+               # Retrieve video playlist to extract media URL
+               # I'm not completely sure what all these options are, but we
+               # seem to need most of them, otherwise the server sends a 401.
+               yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
+               yv_bitrate = '700'  # according to Wikipedia this is hard-coded
+               request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
+                                         '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
+                                         '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
+               try:
+                       self.report_download_webpage(video_id)
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+                       return
+
+               # Extract media URL from playlist XML
+               mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: Unable to extract media URL')
+                       return
+               video_url = urllib.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
+               video_url = re.sub(r'(?u)&(.+?);', htmlentity_transform, video_url)
+
+               try:
+                       # Process video information
+                       self._downloader.process_info({
+                               'id':           video_id.decode('utf-8'),
+                               'url':          video_url,
+                               'uploader':     video_uploader,
+                               'title':        video_title,
+                               'stitle':       simple_title,
+                               'ext':          video_extension.decode('utf-8'),
+                               'thumbnail':    video_thumbnail,
+                               'description':  video_description,
+                               'player_url':   None,
+                       })
+               except UnavailableVideoError:
+                       self._downloader.trouble(u'ERROR: unable to download video')
+
+
+class GenericIE(InfoExtractor):
+       """Generic last-resort information extractor."""
+
+       def __init__(self, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+
+       @staticmethod
+       def suitable(url):
+               return True
+
+       def report_download_webpage(self, video_id):
+               """Report webpage download."""
+               self._downloader.to_stdout(u'WARNING: Falling back on generic information extractor.')
+               self._downloader.to_stdout(u'[generic] %s: Downloading webpage' % video_id)
+
+       def report_extraction(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_stdout(u'[generic] %s: Extracting information' % video_id)
+
+       def _real_initialize(self):
+               return
+
+       def _real_extract(self, url):
+               # At this point we have a new video
+               self._downloader.increment_downloads()
+
+               video_id = url.split('/')[-1]
+               request = urllib2.Request(url)
+               try:
+                       self.report_download_webpage(video_id)
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+                       return
+               except ValueError, err:
+                       # since this is the last-resort InfoExtractor, if
+                       # this error is thrown, it'll be thrown here
+                       self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+                       return
+
+               # Start with something easy: JW Player in SWFObject
+               mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
+               if mobj is None:
+                       # Broaden the search a little bit
+                       mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+                       return
+
+               # It's possible that one of the regexes
+               # matched, but returned an empty group:
+               if mobj.group(1) is None:
+                       self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+                       return
+
+               video_url = urllib.unquote(mobj.group(1))
+               video_id  = os.path.basename(video_url)
+
+               # here's a fun little line of code for you:
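+               # splitext gives (name, '.ext'): [1][1:] is the extension without the dot, [0] the bare name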
+               video_extension = os.path.splitext(video_id)[1][1:]
+               video_id        = os.path.splitext(video_id)[0]
+
+               # it's tempting to parse this further, but you would
+               # have to take into account all the variations like
+               #   Video Title - Site Name
+               #   Site Name | Video Title
+               #   Video Title - Tagline | Site Name
+               # and so on and so forth; it's just not practical
+               mobj = re.search(r'<title>(.*)</title>', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract title')
+                       return
+               video_title = mobj.group(1).decode('utf-8')
+               video_title = sanitize_title(video_title)
+               simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+
+               # video uploader is domain name
+               mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract uploader name')
+                       return
+               video_uploader = mobj.group(1).decode('utf-8')
+
+               try:
+                       # Process video information
+                       self._downloader.process_info({
+                               'id':           video_id.decode('utf-8'),
+                               'url':          video_url.decode('utf-8'),
+                               'uploader':     video_uploader,
+                               'title':        video_title,
+                               'stitle':       simple_title,
+                               'ext':          video_extension.decode('utf-8'),
+                               'format':       u'NA',
+                               'player_url':   None,
+                       })
+               except UnavailableVideoError:
+                       self._downloader.trouble(u'ERROR: unable to download video')
+
+
+class YoutubeSearchIE(InfoExtractor):
+       """Information Extractor for YouTube search queries."""
+       _VALID_QUERY = r'ytsearch(\d+|all)?:[\s\S]+'
+       _TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
+       _VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
+       _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
+       _youtube_ie = None
+       _max_youtube_results = 1000
+
+       def __init__(self, youtube_ie, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+               self._youtube_ie = youtube_ie
+       
+       @staticmethod
+       def suitable(url):
+               return (re.match(YoutubeSearchIE._VALID_QUERY, url) is not None)
+
+       def report_download_page(self, query, pagenum):
+               """Report attempt to download playlist page with given number."""
+               query = query.decode(preferredencoding())
+               self._downloader.to_stdout(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
+
+       def _real_initialize(self):
+               self._youtube_ie.initialize()
+       
+       def _real_extract(self, query):
+               mobj = re.match(self._VALID_QUERY, query)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+                       return
+
+               prefix, query = query.split(':')
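+               # drop the literal 'ytsearch' prefix (8 characters); what remains is the optional result count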
+               prefix = prefix[8:]
+               query  = query.encode('utf-8')
+               if prefix == '':
+                       self._download_n_results(query, 1)
+                       return
+               elif prefix == 'all':
+                       self._download_n_results(query, self._max_youtube_results)
+                       return
+               else:
+                       try:
+                               n = long(prefix)
+                               if n <= 0:
+                                       self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                                       return
+                               elif n > self._max_youtube_results:
+                                       self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)'  % (self._max_youtube_results, n))
+                                       n = self._max_youtube_results
+                               self._download_n_results(query, n)
+                               return
+                       except ValueError: # parsing prefix as integer fails
+                               self._download_n_results(query, 1)
+                               return
+
+       def _download_n_results(self, query, n):
+               """Downloads a specified number of results for a query"""
+
+               video_ids = []
+               already_seen = set()
+               pagenum = 1
+
+               while True:
+                       self.report_download_page(query, pagenum)
+                       result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
+                       request = urllib2.Request(result_url, None, std_headers)
+                       try:
+                               page = urllib2.urlopen(request).read()
+                       except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                               self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                               return
+
+                       # Extract video identifiers
+                       for mobj in re.finditer(self._VIDEO_INDICATOR, page):
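+                               # the indicator matches href="/watch?v=ID"; split on '=' and strip the closing quote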
+                               video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
+                               if video_id not in already_seen:
+                                       video_ids.append(video_id)
+                                       already_seen.add(video_id)
+                                       if len(video_ids) == n:
+                                               # Specified n videos reached
+                                               for id in video_ids:
+                                                       self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+                                               return
+
+                       if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+                               for id in video_ids:
+                                       self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+                               return
+
+                       pagenum = pagenum + 1
+
+class GoogleSearchIE(InfoExtractor):
+       """Information Extractor for Google Video search queries."""
+       _VALID_QUERY = r'gvsearch(\d+|all)?:[\s\S]+'
+       _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
+       _VIDEO_INDICATOR = r'videoplay\?docid=([^\&>]+)\&'
+       _MORE_PAGES_INDICATOR = r'<span>Next</span>'
+       _google_ie = None
+       _max_google_results = 1000
+
+       def __init__(self, google_ie, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+               self._google_ie = google_ie
+       
+       @staticmethod
+       def suitable(url):
+               return (re.match(GoogleSearchIE._VALID_QUERY, url) is not None)
+
+       def report_download_page(self, query, pagenum):
+               """Report attempt to download playlist page with given number."""
+               query = query.decode(preferredencoding())
+               self._downloader.to_stdout(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
+
+       def _real_initialize(self):
+               self._google_ie.initialize()
+       
+       def _real_extract(self, query):
+               mobj = re.match(self._VALID_QUERY, query)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+                       return
+
+               prefix, query = query.split(':')
+               prefix = prefix[8:]
+               query  = query.encode('utf-8')
+               if prefix == '':
+                       self._download_n_results(query, 1)
+                       return
+               elif prefix == 'all':
+                       self._download_n_results(query, self._max_google_results)
+                       return
+               else:
+                       try:
+                               n = long(prefix)
+                               if n <= 0:
+                                       self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                                       return
+                               elif n > self._max_google_results:
+                                       self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)'  % (self._max_google_results, n))
+                                       n = self._max_google_results
+                               self._download_n_results(query, n)
+                               return
+                       except ValueError: # parsing prefix as integer fails
+                               self._download_n_results(query, 1)
+                               return
+
+       def _download_n_results(self, query, n):
+               """Downloads a specified number of results for a query"""
+
+               video_ids = []
+               already_seen = set()
+               pagenum = 1
+
+               while True:
+                       self.report_download_page(query, pagenum)
+                       result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
+                       request = urllib2.Request(result_url, None, std_headers)
+                       try:
+                               page = urllib2.urlopen(request).read()
+                       except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                               self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                               return
+
+                       # Extract video identifiers
+                       for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+                               video_id = mobj.group(1)
+                               if video_id not in already_seen:
+                                       video_ids.append(video_id)
+                                       already_seen.add(video_id)
+                                       if len(video_ids) == n:
+                                               # Specified n videos reached
+                                               for id in video_ids:
+                                                       self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
+                                               return
+
+                       if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+                               for id in video_ids:
+                                       self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
+                               return
+
+                       pagenum = pagenum + 1
+
+class YahooSearchIE(InfoExtractor):
+       """Information Extractor for Yahoo! Video search queries."""
+       _VALID_QUERY = r'yvsearch(\d+|all)?:[\s\S]+'
+       _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
+       _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
+       _MORE_PAGES_INDICATOR = r'\s*Next'
+       _yahoo_ie = None
+       _max_yahoo_results = 1000
+
+       def __init__(self, yahoo_ie, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+               self._yahoo_ie = yahoo_ie
+       
+       @staticmethod
+       def suitable(url):
+               return (re.match(YahooSearchIE._VALID_QUERY, url) is not None)
+
+       def report_download_page(self, query, pagenum):
+               """Report attempt to download playlist page with given number."""
+               query = query.decode(preferredencoding())
+               self._downloader.to_stdout(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
+
+       def _real_initialize(self):
+               self._yahoo_ie.initialize()
+       
+       def _real_extract(self, query):
+               mobj = re.match(self._VALID_QUERY, query)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+                       return
+
+               prefix, query = query.split(':')
+               prefix = prefix[8:]
+               query  = query.encode('utf-8')
+               if prefix == '':
+                       self._download_n_results(query, 1)
+                       return
+               elif prefix == 'all':
+                       self._download_n_results(query, self._max_yahoo_results)
+                       return
+               else:
+                       try:
+                               n = long(prefix)
+                               if n <= 0:
+                                       self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                                       return
+                               elif n > self._max_yahoo_results:
+                                       self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)'  % (self._max_yahoo_results, n))
+                                       n = self._max_yahoo_results
+                               self._download_n_results(query, n)
+                               return
+                       except ValueError: # parsing prefix as integer fails
+                               self._download_n_results(query, 1)
+                               return
+
+       def _download_n_results(self, query, n):
+               """Downloads a specified number of results for a query"""
+
+               video_ids = []
+               already_seen = set()
+               pagenum = 1
+
+               while True:
+                       self.report_download_page(query, pagenum)
+                       result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
+                       request = urllib2.Request(result_url, None, std_headers)
+                       try:
+                               page = urllib2.urlopen(request).read()
+                       except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                               self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                               return
+
+                       # Extract video identifiers
+                       for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+                               video_id = mobj.group(1)
+                               if video_id not in already_seen:
+                                       video_ids.append(video_id)
+                                       already_seen.add(video_id)
+                                       if len(video_ids) == n:
+                                               # Specified n videos reached
+                                               for id in video_ids:
+                                                       self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
+                                               return
+
+                       if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+                               for id in video_ids:
+                                       self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
+                               return
+
+                       pagenum = pagenum + 1
+
+class YoutubePlaylistIE(InfoExtractor):
+       """Information Extractor for YouTube playlists."""
+
+       _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists)\?.*?p=|user/.*?/user/)([^&]+).*'
+       _TEMPLATE_URL = 'http://www.youtube.com/view_play_list?p=%s&page=%s&gl=US&hl=en'
+       _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
+       _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
+       _youtube_ie = None
+
+       def __init__(self, youtube_ie, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+               self._youtube_ie = youtube_ie
+       
+       @staticmethod
+       def suitable(url):
+               return (re.match(YoutubePlaylistIE._VALID_URL, url) is not None)
+
+       def report_download_page(self, playlist_id, pagenum):
+               """Report attempt to download playlist page with given number."""
+               self._downloader.to_stdout(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
+
+       def _real_initialize(self):
+               self._youtube_ie.initialize()
+       
+       def _real_extract(self, url):
+               # Extract playlist id
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+                       return
+
+               # Download playlist pages
+               playlist_id = mobj.group(1)
+               video_ids = []
                pagenum = 1
 
                while True:
@@ -1028,10 +1951,75 @@ class YoutubePlaylistIE(InfoExtractor):
                                        ids_in_page.append(mobj.group(1))
                        video_ids.extend(ids_in_page)
 
-                       if (self._MORE_PAGES_INDICATOR % (playlist_id.upper(), pagenum + 1)) not in page:
+                       if re.search(self._MORE_PAGES_INDICATOR, page) is None:
                                break
                        pagenum = pagenum + 1
 
+               playliststart = self._downloader.params.get('playliststart', 1)
+               playliststart -= 1 #our arrays are zero-based but the playlist is 1-based
+               if playliststart > 0:
+                       video_ids = video_ids[playliststart:]
+                       
+               for id in video_ids:
+                       self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
+               return
+
+class YoutubeUserIE(InfoExtractor):
+       """Information Extractor for YouTube users."""
+
+       _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/user/(.*)'
+       _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
+       _VIDEO_INDICATOR = r'http://gdata.youtube.com/feeds/api/videos/(.*)' # XXX Fix this.
+       _youtube_ie = None
+
+       def __init__(self, youtube_ie, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+               self._youtube_ie = youtube_ie
+       
+       @staticmethod
+       def suitable(url):
+               return (re.match(YoutubeUserIE._VALID_URL, url) is not None)
+
+       def report_download_page(self, username):
+               """Report attempt to download user page."""
+               self._downloader.to_stdout(u'[youtube] user %s: Downloading page ' % (username))
+
+       def _real_initialize(self):
+               self._youtube_ie.initialize()
+       
+       def _real_extract(self, url):
+               # Extract username
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+                       return
+
+               # Download user page
+               username = mobj.group(1)
+               video_ids = []
+               pagenum = 1
+
+               self.report_download_page(username)
+               request = urllib2.Request(self._TEMPLATE_URL % (username), None, std_headers)
+               try:
+                       page = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+                       return
+
+               # Extract video identifiers
+               ids_in_page = []
+
+               for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+                       if mobj.group(1) not in ids_in_page:
+                               ids_in_page.append(mobj.group(1))
+               video_ids.extend(ids_in_page)
+
+               playliststart = self._downloader.params.get('playliststart', 1)
+               playliststart = playliststart-1 #our arrays are zero-based but the playlist is 1-based
+               if playliststart > 0:
+                       video_ids = video_ids[playliststart:]   
+
                for id in video_ids:
                        self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
                return
@@ -1089,6 +2077,22 @@ if __name__ == '__main__':
                import getpass
                import optparse
 
+               # Function to update the program file with the latest version from bitbucket.org
+               def update_self(downloader, filename):
+                       # Note: downloader only used for options
+                       if not os.access(filename, os.W_OK):
+                               sys.exit('ERROR: no write permissions on %s' % filename)
+
+                       downloader.to_stdout('Updating to latest stable version...')
+                       latest_url = 'http://bitbucket.org/rg3/youtube-dl/raw/tip/LATEST_VERSION'
+                       latest_version = urllib.urlopen(latest_url).read().strip()
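+                       # the version string doubles as the path component used to fetch the matching script below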
+                       prog_url = 'http://bitbucket.org/rg3/youtube-dl/raw/%s/youtube-dl' % latest_version
+                       newcontent = urllib.urlopen(prog_url).read()
+                       stream = open(filename, 'w')
+                       stream.write(newcontent)
+                       stream.close()
+                       downloader.to_stdout('Updated to version %s' % latest_version)
+
                # General configuration
                urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler()))
                urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor()))
@@ -1097,7 +2101,7 @@ if __name__ == '__main__':
                # Parse command line
                parser = optparse.OptionParser(
                        usage='Usage: %prog [options] url...',
-                       version='2009.09.13',
+                       version='2010.10.03',
                        conflict_handler='resolve',
                )
 
@@ -1105,29 +2109,37 @@ if __name__ == '__main__':
                                action='help', help='print this help text and exit')
                parser.add_option('-v', '--version',
                                action='version', help='print program version and exit')
+               parser.add_option('-U', '--update',
+                               action='store_true', dest='update_self', help='update this program to latest stable version')
                parser.add_option('-i', '--ignore-errors',
                                action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
                parser.add_option('-r', '--rate-limit',
-                               dest='ratelimit', metavar='L', help='download rate limit (e.g. 50k or 44.6m)')
+                               dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
+               parser.add_option('-R', '--retries',
+                               dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
+               parser.add_option('--playlist-start',
+                               dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
 
                authentication = optparse.OptionGroup(parser, 'Authentication Options')
                authentication.add_option('-u', '--username',
-                               dest='username', metavar='UN', help='account username')
+                               dest='username', metavar='USERNAME', help='account username')
                authentication.add_option('-p', '--password',
-                               dest='password', metavar='PW', help='account password')
+                               dest='password', metavar='PASSWORD', help='account password')
                authentication.add_option('-n', '--netrc',
                                action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
                parser.add_option_group(authentication)
 
                video_format = optparse.OptionGroup(parser, 'Video Format Options')
                video_format.add_option('-f', '--format',
-                               action='store', dest='format', metavar='FMT', help='video format code')
-               video_format.add_option('-b', '--best-quality',
-                               action='store_const', dest='format', help='download the best quality video possible', const='0')
+                               action='store', dest='format', metavar='FORMAT', help='video format code')
                video_format.add_option('-m', '--mobile-version',
                                action='store_const', dest='format', help='alias for -f 17', const='17')
-               video_format.add_option('-d', '--high-def',
-                               action='store_const', dest='format', help='alias for -f 22', const='22')
+               video_format.add_option('--all-formats',
+                               action='store_const', dest='format', help='download all available video formats', const='-1')
+               video_format.add_option('--max-quality',
+                               action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
+               video_format.add_option('-b', '--best-quality',
+                               action='store_true', dest='bestquality', help='download the best video quality (DEPRECATED)')
                parser.add_option_group(video_format)
 
                verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
@@ -1139,6 +2151,12 @@ if __name__ == '__main__':
                                action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
                verbosity.add_option('-e', '--get-title',
                                action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
+               verbosity.add_option('--get-thumbnail',
+                               action='store_true', dest='getthumbnail', help='simulate, quiet but print thumbnail URL', default=False)
+               verbosity.add_option('--get-description',
+                               action='store_true', dest='getdescription', help='simulate, quiet but print video description', default=False)
+               verbosity.add_option('--no-progress',
+                               action='store_true', dest='noprogress', help='do not print progress bar', default=False)
                parser.add_option_group(verbosity)
 
                filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
@@ -1147,9 +2165,9 @@ if __name__ == '__main__':
                filesystem.add_option('-l', '--literal',
                                action='store_true', dest='useliteral', help='use literal title in file name', default=False)
                filesystem.add_option('-o', '--output',
-                               dest='outtmpl', metavar='TPL', help='output filename template')
+                               dest='outtmpl', metavar='TEMPLATE', help='output filename template')
                filesystem.add_option('-a', '--batch-file',
-                               dest='batchfile', metavar='F', help='file containing URLs to download')
+                               dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
                filesystem.add_option('-w', '--no-overwrites',
                                action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
                filesystem.add_option('-c', '--continue',
@@ -1162,7 +2180,11 @@ if __name__ == '__main__':
                batchurls = []
                if opts.batchfile is not None:
                        try:
-                               batchurls = open(opts.batchfile, 'r').readlines()
+                               if opts.batchfile == '-':
+                                       batchfd = sys.stdin
+                               else:
+                                       batchfd = open(opts.batchfile, 'r')
+                               batchurls = batchfd.readlines()
                                batchurls = [x.strip() for x in batchurls]
                                batchurls = [x for x in batchurls if len(x) > 0]
                        except IOError:
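The hunk above makes '-a -' read the URL list from standard input instead of a file. The following standalone sketch repeats the same read, strip and drop-empty-lines sequence; the explicit close of the handle when it is not stdin is added for illustration only and is not part of the patch:

    import sys

    def read_batch_urls(batchfile):
        # '-' selects stdin, anything else is opened as a regular file
        if batchfile == '-':
            batchfd = sys.stdin
        else:
            batchfd = open(batchfile, 'r')
        try:
            batchurls = [x.strip() for x in batchfd.readlines()]
            return [x for x in batchurls if len(x) > 0]
        finally:
            if batchfd is not sys.stdin:
                batchfd.close()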
@@ -1170,8 +2192,8 @@ if __name__ == '__main__':
                all_urls = batchurls + args
 
                # Conflicting, missing and erroneous options
-               if len(all_urls) < 1:
-                       parser.error(u'you must provide at least one URL')
+               if opts.bestquality:
+                       print >>sys.stderr, u'\nWARNING: -b/--best-quality IS DEPRECATED AS IT IS THE DEFAULT BEHAVIOR NOW\n'
                if opts.usenetrc and (opts.username is not None or opts.password is not None):
                        parser.error(u'using .netrc conflicts with giving username/password')
                if opts.password is not None and opts.username is None:
@@ -1187,36 +2209,85 @@ if __name__ == '__main__':
                        if numeric_limit is None:
                                parser.error(u'invalid rate limit specified')
                        opts.ratelimit = numeric_limit
+               if opts.retries is not None:
+                       try:
+                               opts.retries = long(opts.retries)
+                       except (TypeError, ValueError), err:
+                               parser.error(u'invalid retry count specified')
+               if opts.playliststart is not None:
+                       try:
+                               opts.playliststart = long(opts.playliststart)
+                       except (TypeError, ValueError), err:
+                               parser.error(u'invalid playlist page specified')
 
                # Information extractors
                youtube_ie = YoutubeIE()
                metacafe_ie = MetacafeIE(youtube_ie)
+               dailymotion_ie = DailymotionIE()
                youtube_pl_ie = YoutubePlaylistIE(youtube_ie)
+               youtube_user_ie = YoutubeUserIE(youtube_ie)
                youtube_search_ie = YoutubeSearchIE(youtube_ie)
+               google_ie = GoogleIE()
+               google_search_ie = GoogleSearchIE(google_ie)
+               photobucket_ie = PhotobucketIE()
+               yahoo_ie = YahooIE()
+               yahoo_search_ie = YahooSearchIE(yahoo_ie)
+               generic_ie = GenericIE()
 
                # File downloader
                fd = FileDownloader({
                        'usenetrc': opts.usenetrc,
                        'username': opts.username,
                        'password': opts.password,
-                       'quiet': (opts.quiet or opts.geturl or opts.gettitle),
+                       'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription),
                        'forceurl': opts.geturl,
                        'forcetitle': opts.gettitle,
-                       'simulate': (opts.simulate or opts.geturl or opts.gettitle),
+                       'forcethumbnail': opts.getthumbnail,
+                       'forcedescription': opts.getdescription,
+                       'simulate': (opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription),
                        'format': opts.format,
+                       'format_limit': opts.format_limit,
                        'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
+                               or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
+                               or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
+                               or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
                                or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
                                or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
                                or u'%(id)s.%(ext)s'),
                        'ignoreerrors': opts.ignoreerrors,
                        'ratelimit': opts.ratelimit,
                        'nooverwrites': opts.nooverwrites,
+                       'retries': opts.retries,
                        'continuedl': opts.continue_dl,
+                       'noprogress': opts.noprogress,
+                       'playliststart': opts.playliststart,
                        })
                fd.add_info_extractor(youtube_search_ie)
                fd.add_info_extractor(youtube_pl_ie)
+               fd.add_info_extractor(youtube_user_ie)
                fd.add_info_extractor(metacafe_ie)
+               fd.add_info_extractor(dailymotion_ie)
                fd.add_info_extractor(youtube_ie)
+               fd.add_info_extractor(google_ie)
+               fd.add_info_extractor(google_search_ie)
+               fd.add_info_extractor(photobucket_ie)
+               fd.add_info_extractor(yahoo_ie)
+               fd.add_info_extractor(yahoo_search_ie)
+
+               # This must come last since it's the
+               # fallback if none of the others work
+               fd.add_info_extractor(generic_ie)
+
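The registration order above matters because the downloader offers each URL to the extractors in the order they were added and the catch-all GenericIE is meant to see a URL only when nothing more specific claims it, which is why it is registered last. A rough sketch of that first-match dispatch, assuming each extractor exposes suitable(url) and extract(url) methods (both names are assumptions here, not something shown in this hunk):

    def dispatch(extractors, url):
        # Walk extractors in registration order; the generic fallback,
        # registered last, only runs when every specific one declines.
        for ie in extractors:
            if ie.suitable(url):
                return ie.extract(url)
        raise ValueError('no suitable extractor for %r' % url)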
+               # Update version
+               if opts.update_self:
+                       update_self(fd, sys.argv[0])
+
+               # Maybe do nothing
+               if len(all_urls) < 1:
+                       if not opts.update_self:
+                               parser.error(u'you must provide at least one URL')
+                       else:
+                               sys.exit()
                retcode = fd.download(all_urls)
                sys.exit(retcode)
 
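The outtmpl chain earlier in this hunk picks the first template whose conditions hold: per-format names such as %(id)s-%(format)s.%(ext)s when --all-formats ('-1') is in effect, title-based names with -t or -l, and %(id)s.%(ext)s as the final fallback. Each template is an ordinary Python %-format string expanded against the per-video info dictionary; a small illustration follows, where the keys come from the templates in the patch and the values are invented:

    outtmpl = u'%(stitle)s-%(id)s.%(ext)s'
    info = {
        'stitle': u'some_sanitized_title',  # invented value
        'id': u'abc123xyz',                 # invented value
        'ext': u'flv',                      # invented value
    }
    filename = outtmpl % info
    # filename == u'some_sanitized_title-abc123xyz.flv'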