+               file_url = mobj.group(1)
+               file_extension = os.path.splitext(file_url)[1][1:]
+
+               # Search for file title
+               mobj = re.search(r'<b title="(.*?)">', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract title')
+                       return
+               file_title = mobj.group(1).decode('utf-8')
+
+               try:
+                       # Process file information
+                       self._downloader.process_info({
+                               'id':           file_id.decode('utf-8'),
+                               'url':          file_url.decode('utf-8'),
+                               'uploader':     u'NA',
+                               'upload_date':  u'NA',
+                               'title':        file_title,
+                               'stitle':       file_title,
+                               'ext':          file_extension.decode('utf-8'),
+                               'format':       u'NA',
+                               'player_url':   None,
+                       })
+               except UnavailableVideoError, err:
+                       self._downloader.trouble(u'ERROR: unable to download file')
+
+
+class FacebookIE(InfoExtractor):
+       """Information Extractor for Facebook"""
+
+       _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook.com/video/video.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+       _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
+       _NETRC_MACHINE = 'facebook'
+       _available_formats = ['highqual', 'lowqual']
+       _video_extensions = {
+               'highqual': 'mp4',
+               'lowqual': 'mp4',
+       }
+
+       def __init__(self, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+
+       @staticmethod
+       def suitable(url):
+               return (re.match(FacebookIE._VALID_URL, url) is not None)
+
+       def _reporter(self, message):
+               """Add header and report message."""
+               self._downloader.to_screen(u'[facebook] %s' % message)
+
+       def report_login(self):
+               """Report attempt to log in."""
+               self._reporter(u'Logging in')
+
+       def report_video_webpage_download(self, video_id):
+               """Report attempt to download video webpage."""
+               self._reporter(u'%s: Downloading video webpage' % video_id)
+
+       def report_information_extraction(self, video_id):
+               """Report attempt to extract video information."""
+               self._reporter(u'%s: Extracting video information' % video_id)
+
+       def _parse_page(self, video_webpage):
+               """Extract video information from page"""
+               # General data
+               data = {'title': r'class="video_title datawrap">(.*?)</',
+                       'description': r'<div class="datawrap">(.*?)</div>',
+                       'owner': r'\("video_owner_name", "(.*?)"\)',
+                       'upload_date': r'data-date="(.*?)"',
+                       'thumbnail':  r'\("thumb_url", "(?P<THUMB>.*?)"\)',
+                       }
+               video_info = {}
+               for piece in data.keys():
+                       mobj = re.search(data[piece], video_webpage)
+                       if mobj is not None:
+                               video_info[piece] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
+
+               # Video urls
+               video_urls = {}
+               for fmt in self._available_formats:
+                       mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
+                       if mobj is not None:
+                               # URL is in a Javascript segment inside an escaped Unicode format within
+                               # the generally utf-8 page
+                               video_urls[fmt] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
+               video_info['video_urls'] = video_urls
+
+               return video_info
+
+       def _real_initialize(self):
+               if self._downloader is None:
+                       return
+
+               useremail = None
+               password = None
+               downloader_params = self._downloader.params
+
+               # Attempt to use provided username and password or .netrc data
+               if downloader_params.get('username', None) is not None:
+                       useremail = downloader_params['username']
+                       password = downloader_params['password']
+               elif downloader_params.get('usenetrc', False):
+                       try:
+                               info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+                               if info is not None:
+                                       useremail = info[0]
+                                       password = info[2]
+                               else:
+                                       raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+                       except (IOError, netrc.NetrcParseError), err:
+                               self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
+                               return
+
+               if useremail is None:
+                       return
+
+               # Log in
+               login_form = {
+                       'email': useremail,
+                       'pass': password,
+                       'login': 'Log+In'
+                       }
+               request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
+               try:
+                       self.report_login()
+                       login_results = urllib2.urlopen(request).read()
+                       if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
+                               self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
+                               return
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
+                       return
+
+       def _real_extract(self, url):
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
+               video_id = mobj.group('ID')
+
+               # Get video webpage
+               self.report_video_webpage_download(video_id)
+               request = urllib2.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
+               try:
+                       page = urllib2.urlopen(request)
+                       video_webpage = page.read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+                       return
+
+               # Start extracting information
+               self.report_information_extraction(video_id)
+
+               # Extract information
+               video_info = self._parse_page(video_webpage)
+
+               # uploader
+               if 'owner' not in video_info:
+                       self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+                       return
+               video_uploader = video_info['owner']
+
+               # title
+               if 'title' not in video_info:
+                       self._downloader.trouble(u'ERROR: unable to extract video title')
+                       return
+               video_title = video_info['title']
+               video_title = video_title.decode('utf-8')
+               video_title = sanitize_title(video_title)
+
+               # simplified title
+               simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+               simple_title = simple_title.strip(ur'_')
+
+               # thumbnail image
+               if 'thumbnail' not in video_info:
+                       self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+                       video_thumbnail = ''
+               else:
+                       video_thumbnail = video_info['thumbnail']
+
+               # upload date
+               upload_date = u'NA'
+               if 'upload_date' in video_info:
+                       upload_time = video_info['upload_date']
+                       timetuple = email.utils.parsedate_tz(upload_time)
+                       if timetuple is not None:
+                               try:
+                                       upload_date = time.strftime('%Y%m%d', timetuple[0:9])
+                               except:
+                                       pass
+
+               # description
+               video_description = video_info.get('description', 'No description available.')
+
+               url_map = video_info['video_urls']
+               if len(url_map.keys()) > 0:
+                       # Decide which formats to download
+                       req_format = self._downloader.params.get('format', None)
+                       format_limit = self._downloader.params.get('format_limit', None)
+
+                       if format_limit is not None and format_limit in self._available_formats:
+                               format_list = self._available_formats[self._available_formats.index(format_limit):]
+                       else:
+                               format_list = self._available_formats
+                       existing_formats = [x for x in format_list if x in url_map]
+                       if len(existing_formats) == 0:
+                               self._downloader.trouble(u'ERROR: no known formats available for video')
+                               return
+                       if req_format is None:
+                               video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+                       elif req_format == '-1':
+                               video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+                       else:
+                               # Specific format
+                               if req_format not in url_map:
+                                       self._downloader.trouble(u'ERROR: requested format not available')
+                                       return
+                               video_url_list = [(req_format, url_map[req_format])] # Specific format
+
+               for format_param, video_real_url in video_url_list:
+
+                       # At this point we have a new video
+                       self._downloader.increment_downloads()
+
+                       # Extension
+                       video_extension = self._video_extensions.get(format_param, 'mp4')
+
+                       try:
+                               # Process video information
+                               self._downloader.process_info({
+                                       'id':           video_id.decode('utf-8'),
+                                       'url':          video_real_url.decode('utf-8'),
+                                       'uploader':     video_uploader.decode('utf-8'),
+                                       'upload_date':  upload_date,
+                                       'title':        video_title,
+                                       'stitle':       simple_title,
+                                       'ext':          video_extension.decode('utf-8'),
+                                       'format':       (format_param is None and u'NA' or format_param.decode('utf-8')),
+                                       'thumbnail':    video_thumbnail.decode('utf-8'),
+                                       'description':  video_description.decode('utf-8'),
+                                       'player_url':   None,
+                               })
+                       except UnavailableVideoError, err:
+                               self._downloader.trouble(u'\nERROR: unable to download video')
+
+class BlipTVIE(InfoExtractor):
+       """Information extractor for blip.tv"""
+
+       _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
+       _URL_EXT = r'^.*\.([a-z0-9]+)$'
+
+       @staticmethod
+       def suitable(url):
+               return (re.match(BlipTVIE._VALID_URL, url) is not None)
+
+       def report_extraction(self, file_id):
+               """Report information extraction."""
+               self._downloader.to_screen(u'[blip.tv] %s: Extracting information' % file_id)
+
+       def _simplify_title(self, title):
+               res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
+               res = res.strip(ur'_')
+               return res
+
+       def _real_extract(self, url):
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
+
+               if '?' in url:
+                       cchar = '&'
+               else:
+                       cchar = '?'
+               json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
+               request = urllib2.Request(json_url)
+               self.report_extraction(mobj.group(1))
+               try:
+                       json_code = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
+                       return
+               try:
+                       json_data = json.loads(json_code)
+                       if 'Post' in json_data:
+                               data = json_data['Post']
+                       else:
+                               data = json_data
+
+                       upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
+                       video_url = data['media']['url']
+                       umobj = re.match(self._URL_EXT, video_url)
+                       if umobj is None:
+                               raise ValueError('Can not determine filename extension')
+                       ext = umobj.group(1)
+
+                       self._downloader.increment_downloads()
+
+                       info = {
+                               'id': data['item_id'],
+                               'url': video_url,
+                               'uploader': data['display_name'],
+                               'upload_date': upload_date,
+                               'title': data['title'],
+                               'stitle': self._simplify_title(data['title']),
+                               'ext': ext,
+                               'format': data['media']['mimeType'],
+                               'thumbnail': data['thumbnailUrl'],
+                               'description': data['description'],
+                               'player_url': data['embedUrl']
+                       }
+               except (ValueError,KeyError), err:
+                       self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
+                       return
+
+               try:
+                       self._downloader.process_info(info)
+               except UnavailableVideoError, err:
+                       self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class MyVideoIE(InfoExtractor):
+       """Information Extractor for myvideo.de."""
+
+       _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
+
+       def __init__(self, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+       
+       @staticmethod
+       def suitable(url):
+               return (re.match(MyVideoIE._VALID_URL, url) is not None)
+
+       def report_download_webpage(self, video_id):
+               """Report webpage download."""
+               self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
+
+       def report_extraction(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
+
+       def _real_initialize(self):
+               return
+
+       def _real_extract(self,url):
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._download.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
+
+               video_id = mobj.group(1)
+               simple_title = mobj.group(2).decode('utf-8')
+               # should actually not be necessary
+               simple_title = sanitize_title(simple_title)
+               simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', simple_title)
+
+               # Get video webpage
+               request = urllib2.Request('http://www.myvideo.de/watch/%s' % video_id)
+               try:
+                       self.report_download_webpage(video_id)
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+                       return
+
+               self.report_extraction(video_id)
+               mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
+                                webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract media URL')
+                       return
+               video_url = mobj.group(1) + ('/%s.flv' % video_id)
+
+               mobj = re.search('<title>([^<]+)</title>', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract title')
+                       return
+
+               video_title = mobj.group(1)
+               video_title = sanitize_title(video_title)
+
+               try:
+                       print(video_url)
+                       self._downloader.process_info({
+                               'id':           video_id,
+                               'url':          video_url,
+                               'uploader':     u'NA',
+                               'upload_date':  u'NA',
+                               'title':        video_title,
+                               'stitle':       simple_title,
+                               'ext':          u'flv',
+                               'format':       u'NA',
+                               'player_url':   None,
+                       })
+               except UnavailableVideoError:
+                       self._downloader.trouble(u'\nERROR: Unable to download video')
+
class ComedyCentralIE(InfoExtractor):
	"""Information extractor for The Daily Show and Colbert Report """

	# Matches either a ':shortname' alias (e.g. ':tds', ':colbert') or a
	# full-episodes URL; the 'episode' group may be empty, which means
	# "newest episode".
	_VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$'

	@staticmethod
	def suitable(url):
		"""Return True if this extractor handles the given URL."""
		return (re.match(ComedyCentralIE._VALID_URL, url) is not None)

	def report_extraction(self, episode_id):
		"""Report the start of information extraction."""
		self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)

	def report_config_download(self, episode_id):
		"""Report the download of a per-act media configuration XML."""
		self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)

	def report_player_url(self, episode_id):
		"""Report resolution of the Flash player URL."""
		self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)

	def _simplify_title(self, title):
		"""Return a filesystem-friendly variant of the title."""
		res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
		res = res.strip(ur'_')
		return res

	def _real_extract(self, url):
		"""Download every act (media part) of the requested episode.

		Shortname aliases are rewritten to the show's full-episodes URL
		first; an empty episode part means "newest", resolved by
		following the server redirect. Episodes consist of several acts
		that are fetched and queued individually.
		"""
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
			return

		if mobj.group('shortname'):
			# Rewrite the alias to the corresponding show URL and re-match.
			if mobj.group('shortname') in ('tds', 'thedailyshow'):
				url = 'http://www.thedailyshow.com/full-episodes/'
			else:
				url = 'http://www.colbertnation.com/full-episodes/'
			mobj = re.match(self._VALID_URL, url)
			assert mobj is not None

		# No episode part in the URL means: download the newest episode.
		dlNewest = not mobj.group('episode')
		if dlNewest:
			epTitle = mobj.group('showname')
		else:
			epTitle = mobj.group('episode')

		req = urllib2.Request(url)
		self.report_extraction(epTitle)
		try:
			htmlHandle = urllib2.urlopen(req)
			html = htmlHandle.read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
			return
		if dlNewest:
			# The server redirects /full-episodes/ to the newest episode;
			# re-parse the final URL to recover its episode id.
			url = htmlHandle.geturl()
			mobj = re.match(self._VALID_URL, url)
			if mobj is None:
				self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
				return
			if mobj.group('episode') == '':
				self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
				return
			epTitle = mobj.group('episode')

		# Each <param name="movie"> value carries: (0) the full Flash URL,
		# (1) the episode id prefix, (2) the show id, (3) the number of
		# the episode's first media part.
		mMovieParams = re.findall('<param name="movie" value="(http://media.mtvnservices.com/(.*?:episode:([^:]*):)(.*?))"/>', html)
		if len(mMovieParams) == 0:
			self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
			return
		show_id = mMovieParams[0][2]
		ACT_COUNT = { # TODO: Detect this dynamically
			'thedailyshow.com': 4,
			'colbertnation.com': 3,
		}.get(show_id, 4)
		OFFSET = {
			'thedailyshow.com': 1,
			'colbertnation.com': 1,
		}.get(show_id, 1)

		first_player_url = mMovieParams[0][0]
		startMediaNum = int(mMovieParams[0][3]) + OFFSET
		movieId = mMovieParams[0][1]

		# Resolve the player URL once; it is reused for every act below.
		playerReq = urllib2.Request(first_player_url)
		self.report_player_url(epTitle)
		try:
			playerResponse = urllib2.urlopen(playerReq)
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to download player: %s' % unicode(err))
			return
		player_url = playerResponse.geturl()

		for actNum in range(ACT_COUNT):
			mediaNum = startMediaNum + actNum
			mediaId = movieId + str(mediaNum)
			# The configuration XML lists the available renditions
			# (bitrate/source URL pairs) for this media part.
			configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
						urllib.urlencode({'uri': mediaId}))
			configReq = urllib2.Request(configUrl)
			self.report_config_download(epTitle)
			try:
				configXml = urllib2.urlopen(configReq).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
				return

			cdoc = xml.etree.ElementTree.fromstring(configXml)
			turls = []
			for rendition in cdoc.findall('.//rendition'):
				finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
				turls.append(finfo)

			if len(turls) == 0:
				self._downloader.trouble(u'\nERROR: unable to download ' + str(mediaNum) + ': No videos found')
				continue

			# For now, just pick the highest bitrate
			format,video_url = turls[-1]

			self._downloader.increment_downloads()

			effTitle = show_id.replace('.com', '') + '-' + epTitle
			info = {
				'id': str(mediaNum),
				'url': video_url,
				'uploader': show_id,
				'upload_date': 'NA',
				'title': effTitle,
				'stitle': self._simplify_title(effTitle),
				'ext': 'mp4',
				'format': format,
				'thumbnail': None,
				'description': 'TODO: Not yet supported',
				'player_url': player_url
			}

			try:
				self._downloader.process_info(info)
			except UnavailableVideoError, err:
				self._downloader.trouble(u'\nERROR: unable to download ' + str(mediaNum))
				continue
+
+
class PostProcessor(object):
	"""Base class for post-download processing steps.

	Instances are registered on a downloader through its
	add_post_processor() method. After each successful download the
	downloader walks its processor chain, feeding each run() the
	dictionary produced by the previous processor; a None return value
	(or reaching the end of the chain) stops the walk.

	Like InfoExtractor objects, PostProcessors and their downloader
	reference each other ("mutual registration").
	"""

	# Downloader this processor is attached to; set via the constructor
	# or set_downloader().
	_downloader = None

	def __init__(self, downloader=None):
		self._downloader = downloader

	def set_downloader(self, downloader):
		"""Attach this post processor to the given downloader."""
		self._downloader = downloader

	def run(self, information):
		"""Process one finished download.

		'information' is an InfoExtractor-style dictionary carrying an
		extra 'filepath' key with the location of the downloaded file.

		Return None to stop the postprocessing chain, or an information
		dictionary (possibly the received one with some fields changed)
		to hand to the next processor. May raise PostProcessingError,
		which the calling downloader takes into account.
		"""
		return information # by default, do nothing
+
+
class FFmpegExtractAudioPP(PostProcessor):
	"""Post processor that converts a downloaded video to an audio file.

	ffprobe detects the source audio codec; ffmpeg then either copies
	the stream losslessly (aac/mp3 when acceptable) or transcodes to
	the preferred codec. The original video file is removed on success.
	"""

	def __init__(self, downloader=None, preferredcodec=None):
		PostProcessor.__init__(self, downloader)
		if preferredcodec is None:
			preferredcodec = 'best'
		self._preferredcodec = preferredcodec

	@staticmethod
	def get_audio_codec(path):
		"""Return the audio codec name ffprobe reports for path, or None."""
		try:
			# Bug fix: the devnull handle from file() was never closed,
			# leaking a file descriptor on every call.
			devnull = open(os.path.devnull, 'w')
			try:
				cmd = ['ffprobe', '-show_streams', '--', path]
				handle = subprocess.Popen(cmd, stderr=devnull, stdout=subprocess.PIPE)
				output = handle.communicate()[0]
				if handle.wait() != 0:
					return None
			finally:
				devnull.close()
		except (IOError, OSError):
			return None
		audio_codec = None
		# ffprobe prints codec_name= before codec_type= within a stream
		# section: remember the last codec_name seen and return it once
		# an audio stream is confirmed.
		for line in output.split('\n'):
			if line.startswith('codec_name='):
				audio_codec = line.split('=')[1].strip()
			elif line.strip() == 'codec_type=audio' and audio_codec is not None:
				return audio_codec
		return None

	@staticmethod
	def run_ffmpeg(path, out_path, codec, more_opts):
		"""Run ffmpeg to extract audio from path; return True on success."""
		try:
			# Same fd-leak fix as in get_audio_codec.
			devnull = open(os.path.devnull, 'w')
			try:
				cmd = ['ffmpeg', '-y', '-i', path, '-vn', '-acodec', codec] + more_opts + ['--', out_path]
				ret = subprocess.call(cmd, stdout=devnull, stderr=subprocess.STDOUT)
			finally:
				devnull.close()
			return (ret == 0)
		except (IOError, OSError):
			return False

	def run(self, information):
		"""Convert information['filepath'] to audio and update the path."""
		path = information['filepath']

		filecodec = self.get_audio_codec(path)
		if filecodec is None:
			self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
			return None

		more_opts = []
		if self._preferredcodec == 'best' or self._preferredcodec == filecodec:
			if filecodec == 'aac' or filecodec == 'mp3':
				# Lossless if possible
				acodec = 'copy'
				extension = filecodec
				if filecodec == 'aac':
					# Raw AAC needs the ADTS container format.
					more_opts = ['-f', 'adts']
			else:
				# MP3 otherwise.
				acodec = 'libmp3lame'
				extension = 'mp3'
				more_opts = ['-ab', '128k']
		else:
			# We convert the audio (lossy)
			acodec = {'mp3': 'libmp3lame', 'aac': 'aac'}[self._preferredcodec]
			extension = self._preferredcodec
			more_opts = ['-ab', '128k']
			if self._preferredcodec == 'aac':
				more_opts += ['-f', 'adts']

		(prefix, ext) = os.path.splitext(path)
		new_path = prefix + '.' + extension
		self._downloader.to_screen(u'[ffmpeg] Destination: %s' % new_path)
		status = self.run_ffmpeg(path, new_path, acodec, more_opts)

		if not status:
			self._downloader.to_stderr(u'WARNING: error running ffmpeg')
			return None

		# Remove the source video so only the audio file remains.
		try:
			os.remove(path)
		except (IOError, OSError):
			self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
			return None

		information['filepath'] = new_path
		return information
+
+
+def updateSelf(downloader, filename):
+       ''' Update the program file with the latest version from the repository '''
+       # Note: downloader only used for options
+       if not os.access(filename, os.W_OK):
+               sys.exit('ERROR: no write permissions on %s' % filename)
+
+       downloader.to_screen('Updating to latest version...')
+
+       try:
+               try:
+                       urlh = urllib.urlopen(UPDATE_URL)
+                       newcontent = urlh.read()
+               finally:
+                       urlh.close()
+       except (IOError, OSError), err:
+               sys.exit('ERROR: unable to download latest version')
+
+       try:
+               outf = open(filename, 'wb')
+               try:
+                       outf.write(newcontent)
+               finally:
+                       outf.close()
+       except (IOError, OSError), err:
+               sys.exit('ERROR: unable to overwrite current version')
+
+       downloader.to_screen('Updated youtube-dl. Restart to use the new version.')
+
+def parseOpts():
+       # Deferred imports
+       import getpass
+       import optparse
+
+       def _format_option_string(option):
+               ''' ('-o', '--option') -> -o, --format METAVAR'''
+
+               opts = []
+
+               if option._short_opts: opts.append(option._short_opts[0])
+               if option._long_opts: opts.append(option._long_opts[0])
+               if len(opts) > 1: opts.insert(1, ', ')
+
+               if option.takes_value(): opts.append(' %s' % option.metavar)
+
+               return "".join(opts)
+
+       def _find_term_columns():
+               columns = os.environ.get('COLUMNS', None)
+               if columns:
+                       return int(columns)
+
+               try:
+                       sp = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                       out,err = sp.communicate()
+                       return int(out.split()[1])
+               except:
+                       pass
+               return None
+
+       max_width = 80
+       max_help_position = 80
+
+       # No need to wrap help messages if we're on a wide console
+       columns = _find_term_columns()
+       if columns: max_width = columns
+
+       fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
+       fmt.format_option_strings = _format_option_string
+
+       kw = {
+               'version'   : __version__,
+               'formatter' : fmt,
+               'usage' : '%prog [options] url...',
+               'conflict_handler' : 'resolve',
+       }
+
+       parser = optparse.OptionParser(**kw)
+
+       # option groups
+       general        = optparse.OptionGroup(parser, 'General Options')
+       authentication = optparse.OptionGroup(parser, 'Authentication Options')
+       video_format   = optparse.OptionGroup(parser, 'Video Format Options')
+       postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
+       filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
+       verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
+
+       general.add_option('-h', '--help',
+                       action='help', help='print this help text and exit')
+       general.add_option('-v', '--version',
+                       action='version', help='print program version and exit')
+       general.add_option('-U', '--update',
+                       action='store_true', dest='update_self', help='update this program to latest version')
+       general.add_option('-i', '--ignore-errors',
+                       action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
+       general.add_option('-r', '--rate-limit',
+                       dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
+       general.add_option('-R', '--retries',
+                       dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
+       general.add_option('--playlist-start',
+                       dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
+       general.add_option('--playlist-end',
+                       dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
+       general.add_option('--dump-user-agent',
+                       action='store_true', dest='dump_user_agent',
+                       help='display the current browser identification', default=False)
+
+       authentication.add_option('-u', '--username',
+                       dest='username', metavar='USERNAME', help='account username')
+       authentication.add_option('-p', '--password',
+                       dest='password', metavar='PASSWORD', help='account password')
+       authentication.add_option('-n', '--netrc',
+                       action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
+
+
+       video_format.add_option('-f', '--format',
+                       action='store', dest='format', metavar='FORMAT', help='video format code')
+       video_format.add_option('--all-formats',
+                       action='store_const', dest='format', help='download all available video formats', const='-1')
+       video_format.add_option('--max-quality',
+                       action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
+
+
+       verbosity.add_option('-q', '--quiet',
+                       action='store_true', dest='quiet', help='activates quiet mode', default=False)
+       verbosity.add_option('-s', '--simulate',
+                       action='store_true', dest='simulate', help='do not download video', default=False)
+       verbosity.add_option('-g', '--get-url',
+                       action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
+       verbosity.add_option('-e', '--get-title',
+                       action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
+       verbosity.add_option('--get-thumbnail',
+                       action='store_true', dest='getthumbnail',
+                       help='simulate, quiet but print thumbnail URL', default=False)
+       verbosity.add_option('--get-description',
+                       action='store_true', dest='getdescription',
+                       help='simulate, quiet but print video description', default=False)
+       verbosity.add_option('--get-filename',
+                       action='store_true', dest='getfilename',
+                       help='simulate, quiet but print output filename', default=False)
+       verbosity.add_option('--no-progress',
+                       action='store_true', dest='noprogress', help='do not print progress bar', default=False)
+       verbosity.add_option('--console-title',
+                       action='store_true', dest='consoletitle',
+                       help='display progress in console titlebar', default=False)
+
+
+       filesystem.add_option('-t', '--title',
+                       action='store_true', dest='usetitle', help='use title in file name', default=False)
+       filesystem.add_option('-l', '--literal',
+                       action='store_true', dest='useliteral', help='use literal title in file name', default=False)
+       filesystem.add_option('-A', '--auto-number',
+                       action='store_true', dest='autonumber',
+                       help='number downloaded files starting from 00000', default=False)
+       filesystem.add_option('-o', '--output',
+                       dest='outtmpl', metavar='TEMPLATE', help='output filename template')
+       filesystem.add_option('-a', '--batch-file',
+                       dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
+       filesystem.add_option('-w', '--no-overwrites',
+                       action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
+       filesystem.add_option('-c', '--continue',
+                       action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
+       filesystem.add_option('--cookies',
+                       dest='cookiefile', metavar='FILE', help='file to dump cookie jar to')
+       filesystem.add_option('--no-part',
+                       action='store_true', dest='nopart', help='do not use .part files', default=False)
+       filesystem.add_option('--no-mtime',
+                       action='store_false', dest='updatetime',
+                       help='do not use the Last-modified header to set the file modification time', default=True)
+       filesystem.add_option('--write-description',
+                       action='store_true', dest='writedescription',
+                       help='write video description to a .description file', default=False)
+       filesystem.add_option('--write-info-json',
+                       action='store_true', dest='writeinfojson',
+                       help='write video metadata to a .info.json file', default=False)
+
+
+       postproc.add_option('--extract-audio', action='store_true', dest='extractaudio', default=False,
+                       help='convert video files to audio-only files (requires ffmpeg and ffprobe)')
+       postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
+                       help='"best", "aac" or "mp3"; best by default')
+
+
+       parser.add_option_group(general)
+       parser.add_option_group(filesystem)
+       parser.add_option_group(verbosity)
+       parser.add_option_group(video_format)
+       parser.add_option_group(authentication)
+       parser.add_option_group(postproc)
+
+       opts, args = parser.parse_args()
+
+       return parser, opts, args
+
+def main():
+       parser, opts, args = parseOpts()
+
+       # Open appropriate CookieJar
+       if opts.cookiefile is None:
+               jar = cookielib.CookieJar()
+       else:
+               try:
+                       jar = cookielib.MozillaCookieJar(opts.cookiefile)
+                       if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
+                               jar.load()
+               except (IOError, OSError), err:
+                       sys.exit(u'ERROR: unable to open cookie file')
+
+       # Dump user agent
+       if opts.dump_user_agent:
+               print std_headers['User-Agent']
+               sys.exit(0)
+
+       # General configuration
+       cookie_processor = urllib2.HTTPCookieProcessor(jar)
+       opener = urllib2.build_opener(urllib2.ProxyHandler(), cookie_processor, YoutubeDLHandler())
+       urllib2.install_opener(opener)
+       socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
+
+       # Batch file verification
+       batchurls = []
+       if opts.batchfile is not None:
+               try:
+                       if opts.batchfile == '-':
+                               batchfd = sys.stdin
+                       else:
+                               batchfd = open(opts.batchfile, 'r')
+                       batchurls = batchfd.readlines()
+                       batchurls = [x.strip() for x in batchurls]
+                       batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
+               except IOError:
+                       sys.exit(u'ERROR: batch file could not be read')
+       all_urls = batchurls + args
+
+       # Conflicting, missing and erroneous options
+       if opts.usenetrc and (opts.username is not None or opts.password is not None):
+               parser.error(u'using .netrc conflicts with giving username/password')
+       if opts.password is not None and opts.username is None:
+               parser.error(u'account username missing')
+       if opts.outtmpl is not None and (opts.useliteral or opts.usetitle or opts.autonumber):
+               parser.error(u'using output template conflicts with using title, literal title or auto number')
+       if opts.usetitle and opts.useliteral:
+               parser.error(u'using title conflicts with using literal title')
+       if opts.username is not None and opts.password is None:
+               opts.password = getpass.getpass(u'Type account password and press return:')
+       if opts.ratelimit is not None:
+               numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
+               if numeric_limit is None:
+                       parser.error(u'invalid rate limit specified')
+               opts.ratelimit = numeric_limit
+       if opts.retries is not None:
+               try:
+                       opts.retries = long(opts.retries)
+               except (TypeError, ValueError), err:
+                       parser.error(u'invalid retry count specified')
+       try:
+               opts.playliststart = int(opts.playliststart)
+               if opts.playliststart <= 0:
+                       raise ValueError(u'Playlist start must be positive')
+       except (TypeError, ValueError), err:
+               parser.error(u'invalid playlist start number specified')
+       try:
+               opts.playlistend = int(opts.playlistend)
+               if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
+                       raise ValueError(u'Playlist end must be greater than playlist start')
+       except (TypeError, ValueError), err:
+               parser.error(u'invalid playlist end number specified')
+       if opts.extractaudio:
+               if opts.audioformat not in ['best', 'aac', 'mp3']:
+                       parser.error(u'invalid audio format specified')
+
+       # Information extractors
+       youtube_ie = YoutubeIE()
+       metacafe_ie = MetacafeIE(youtube_ie)
+       dailymotion_ie = DailymotionIE()
+       youtube_pl_ie = YoutubePlaylistIE(youtube_ie)
+       youtube_user_ie = YoutubeUserIE(youtube_ie)
+       youtube_search_ie = YoutubeSearchIE(youtube_ie)
+       google_ie = GoogleIE()
+       google_search_ie = GoogleSearchIE(google_ie)
+       photobucket_ie = PhotobucketIE()
+       yahoo_ie = YahooIE()
+       yahoo_search_ie = YahooSearchIE(yahoo_ie)
+       deposit_files_ie = DepositFilesIE()
+       facebook_ie = FacebookIE()
+       bliptv_ie = BlipTVIE()
+       vimeo_ie = VimeoIE()
+       myvideo_ie = MyVideoIE()
+       comedycentral_ie = ComedyCentralIE()
+
+       generic_ie = GenericIE()
+
+       # File downloader
+       fd = FileDownloader({
+               'usenetrc': opts.usenetrc,
+               'username': opts.username,
+               'password': opts.password,
+               'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename),
+               'forceurl': opts.geturl,
+               'forcetitle': opts.gettitle,
+               'forcethumbnail': opts.getthumbnail,
+               'forcedescription': opts.getdescription,
+               'forcefilename': opts.getfilename,
+               'simulate': (opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename),
+               'format': opts.format,
+               'format_limit': opts.format_limit,
+               'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
+                       or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
+                       or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
+                       or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
+                       or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(stitle)s-%(id)s.%(ext)s')
+                       or (opts.useliteral and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+                       or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
+                       or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
+                       or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
+                       or u'%(id)s.%(ext)s'),
+               'ignoreerrors': opts.ignoreerrors,
+               'ratelimit': opts.ratelimit,
+               'nooverwrites': opts.nooverwrites,
+               'retries': opts.retries,
+               'continuedl': opts.continue_dl,
+               'noprogress': opts.noprogress,
+               'playliststart': opts.playliststart,
+               'playlistend': opts.playlistend,
+               'logtostderr': opts.outtmpl == '-',
+               'consoletitle': opts.consoletitle,
+               'nopart': opts.nopart,
+               'updatetime': opts.updatetime,
+               'writedescription': opts.writedescription,
+               'writeinfojson': opts.writeinfojson,
+               })
+       fd.add_info_extractor(youtube_search_ie)
+       fd.add_info_extractor(youtube_pl_ie)
+       fd.add_info_extractor(youtube_user_ie)
+       fd.add_info_extractor(metacafe_ie)
+       fd.add_info_extractor(dailymotion_ie)
+       fd.add_info_extractor(youtube_ie)
+       fd.add_info_extractor(google_ie)
+       fd.add_info_extractor(google_search_ie)
+       fd.add_info_extractor(photobucket_ie)
+       fd.add_info_extractor(yahoo_ie)
+       fd.add_info_extractor(yahoo_search_ie)
+       fd.add_info_extractor(deposit_files_ie)
+       fd.add_info_extractor(facebook_ie)
+       fd.add_info_extractor(bliptv_ie)
+       fd.add_info_extractor(vimeo_ie)
+       fd.add_info_extractor(myvideo_ie)
+       fd.add_info_extractor(comedycentral_ie)
+
+       # This must come last since it's the
+       # fallback if none of the others work
+       fd.add_info_extractor(generic_ie)
+
+       # PostProcessors
+       if opts.extractaudio:
+               fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat))
+
+       # Update version
+       if opts.update_self:
+               updateSelf(fd, sys.argv[0])
+
+       # Maybe do nothing
+       if len(all_urls) < 1:
+               if not opts.update_self:
+                       parser.error(u'you must provide at least one URL')
+               else:
+                       sys.exit()
+       retcode = fd.download(all_urls)
+
+       # Dump cookie jar if requested
+       if opts.cookiefile is not None:
+               try:
+                       jar.save()
+               except (IOError, OSError), err:
+                       sys.exit(u'ERROR: unable to save cookie jar')
+
+       sys.exit(retcode)
+
+
+if __name__ == '__main__':
+	# Top-level exception boundary: map the program's expected failure
+	# modes to exit statuses instead of tracebacks.
+	try:
+		main()
+	except DownloadError:
+		# Individual download errors were already reported by the
+		# downloader; just signal overall failure.
+		sys.exit(1)
+	except SameFileError:
+		sys.exit(u'ERROR: fixed output name but more than one file to download')
+	except KeyboardInterrupt:
+		# Ctrl-C: exit with a message rather than a KeyboardInterrupt trace.
+		sys.exit(u'\nERROR: Interrupted by user')
+
+# vim: set ts=4 sw=4 sts=4 noet ai si filetype=python: