-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('id')
-        webpage = self._download_webpage(url, video_id)
-        m_id = re.search(r'YUI\.namespace\("Media"\)\.CONTENT_ID = "(?P<new_id>.+?)";', webpage)
-
-        if m_id is None: 
-            # TODO: Check which url parameters are required
-            info_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;lmsoverride=1;outputformat=mrss;cb=974419660;id=%s;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
-            webpage = self._download_webpage(info_url, video_id, u'Downloading info webpage')
-            info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
-                        <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
-                        <media:pubStart><!\[CDATA\[(?P<date>.*?)\ .*\]\]></media:pubStart>.*
-                        <media:content\ medium="image"\ url="(?P<thumb>.*?)"\ name="LARGETHUMB"
-                        '''
-            self.report_extraction(video_id)
-            m_info = re.search(info_re, webpage, re.VERBOSE|re.DOTALL)
-            if m_info is None:
-                raise ExtractorError(u'Unable to extract video info')
-            video_title = m_info.group('title')
-            video_description = m_info.group('description')
-            video_thumb = m_info.group('thumb')
-            video_date = m_info.group('date')
-            video_date = datetime.datetime.strptime(video_date, '%m/%d/%Y').strftime('%Y%m%d')
-    
-            # TODO: Find a way to get mp4 videos
-            rest_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;element=stream;outputformat=mrss;id=%s;lmsoverride=1;bw=375;dynamicstream=1;cb=83521105;tech=flv,mp4;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
-            webpage = self._download_webpage(rest_url, video_id, u'Downloading video url webpage')
-            m_rest = re.search(r'<media:content url="(?P<url>.*?)" path="(?P<path>.*?)"', webpage)
-            video_url = m_rest.group('url')
-            video_path = m_rest.group('path')
-            if m_rest is None:
-                raise ExtractorError(u'Unable to extract video url')
-
-        else: # We have to use a different method if another id is defined
-            long_id = m_id.group('new_id')
-            info_url = 'http://video.query.yahoo.com/v1/public/yql?q=SELECT%20*%20FROM%20yahoo.media.video.streams%20WHERE%20id%3D%22' + long_id + '%22%20AND%20format%3D%22mp4%2Cflv%22%20AND%20protocol%3D%22rtmp%2Chttp%22%20AND%20plrs%3D%2286Gj0vCaSzV_Iuf6hNylf2%22%20AND%20acctid%3D%22389%22%20AND%20plidl%3D%22%22%20AND%20pspid%3D%22792700001%22%20AND%20offnetwork%3D%22false%22%20AND%20site%3D%22ivy%22%20AND%20lang%3D%22en-US%22%20AND%20region%3D%22US%22%20AND%20override%3D%22none%22%3B&env=prod&format=json&callback=YUI.Env.JSONP.yui_3_8_1_1_1368368376830_335'
-            webpage = self._download_webpage(info_url, video_id, u'Downloading info json')
-            json_str = re.search(r'YUI.Env.JSONP.yui.*?\((.*?)\);', webpage).group(1)
-            info = json.loads(json_str)
-            res = info[u'query'][u'results'][u'mediaObj'][0]
-            stream = res[u'streams'][0]
-            video_path = stream[u'path']
-            video_url = stream[u'host']
-            meta = res[u'meta']
-            video_title = meta[u'title']
-            video_description = meta[u'description']
-            video_thumb = meta[u'thumbnail']
-            video_date = None # I can't find it
-
-        info_dict = {
-                     'id': video_id,
-                     'url': video_url,
-                     'play_path': video_path,
-                     'title':video_title,
-                     'description': video_description,
-                     'thumbnail': video_thumb,
-                     'upload_date': video_date,
-                     'ext': 'flv',
-                     }
-        return info_dict
+        display_id = mobj.group('display_id') or self._match_id(url)
+        page_id = mobj.group('id')
+        url = mobj.group('url')
+        host = mobj.group('host')
+        webpage, urlh = self._download_webpage_handle(url, display_id)
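+        # The final URL after redirects contains err=404 when the video has been removed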
+        if 'err=404' in urlh.geturl():
+            raise ExtractorError('Video gone', expected=True)
+
+        # Look for iframed media first
+        entries = []
+        iframe_urls = re.findall(r'<iframe[^>]+src="(/video/.+?-\d+\.html\?format=embed.*?)"', webpage)
+        for iframe_url in iframe_urls:
+            entries.append(self.url_result(host + iframe_url, 'Yahoo'))
+        if entries:
+            return self.playlist_result(entries, page_id)
+
+        # Look for NBCSports iframes
+        nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
+        if nbc_sports_url:
+            return self.url_result(nbc_sports_url, NBCSportsVPlayerIE.ie_key())
+
+        # Look for Brightcove Legacy Studio embeds
+        bc_url = BrightcoveLegacyIE._extract_brightcove_url(webpage)
+        if bc_url:
+            return self.url_result(bc_url, BrightcoveLegacyIE.ie_key())
+
+        # Look for Brightcove New Studio embeds
+        bc_url = BrightcoveNewIE._extract_url(webpage)
+        if bc_url:
+            return self.url_result(bc_url, BrightcoveNewIE.ie_key())
+
+        # The query result is often embedded in the webpage as JSON. Explicit requests
+        # to the video API sometimes fail with a geo-restriction reason, so the
+        # embedded query result is used here whenever it is present.
+        config_json = self._search_regex(
+            r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)',
+            webpage, 'videoplayer applet', default=None)
+        if config_json:
+            config = self._parse_json(config_json, display_id, fatal=False)
+            if config:
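+                # The SAPI data sits under models/applet_model/data in the bootstrap config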
+                sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi')
+                if sapi and 'query' in sapi:
+                    info = self._extract_info(display_id, sapi, webpage)
+                    self._sort_formats(info['formats'])
+                    return info
+
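+        # Some pages embed the media object directly in a mediaItems JS object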
+        items_json = self._search_regex(
+            r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
+            default=None)
+        if items_json is None:
+            alias = self._search_regex(
+                r'"aliases":{"video":"(.*?)"', webpage, 'alias', default=None)
+            if alias is not None:
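+                # The alias lookup returns a list of video objects; the first one carries the id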
+                alias_info = self._download_json(
+                    'https://www.yahoo.com/_td/api/resource/VideoService.videos;video_aliases=["%s"]' % alias,
+                    display_id, 'Downloading alias info')
+                video_id = alias_info[0]['id']
+            else:
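+                # As a last resort, scrape a content/UUID id straight from the page markup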
+                CONTENT_ID_REGEXES = [
+                    r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
+                    r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
+                    r'"first_videoid"\s*:\s*"([^"]+)"',
+                    r'%s[^}]*"ccm_id"\s*:\s*"([^"]+)"' % re.escape(page_id),
+                    r'<article[^>]+data-uuid=["\']([^"\']+)',
+                    r'<meta[^<>]+yahoo://article/view\?.*\buuid=([^&"\']+)',
+                    r'<meta[^<>]+["\']ytwnews://cavideo/(?:[^/]+/)+([\da-fA-F-]+)[&"\']',
+                ]
+                video_id = self._search_regex(
+                    CONTENT_ID_REGEXES, webpage, 'content ID')
+        else:
+            items = json.loads(items_json)
+            info = items['mediaItems']['query']['results']['mediaObj'][0]
+            # The 'meta' field is not always present in the video webpage, so we
+            # request it from another page
+            video_id = info['id']
+        return self._get_info(video_id, display_id, webpage)
+
+    def _extract_info(self, display_id, query, webpage):
+        info = query['query']['results']['mediaObj'][0]
+        meta = info.get('meta')
+        video_id = info.get('id')
+
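+        # A missing 'meta' usually means the SAPI response carried an error status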
+        if not meta:
+            msg = info['status'].get('msg')
+            if msg:
+                raise ExtractorError(
+                    '%s returned error: %s' % (self.IE_NAME, msg), expected=True)
+            raise ExtractorError('Unable to extract media object meta')
+
+        formats = []
+        for s in info['streams']:
+            tbr = int_or_none(s.get('bitrate'))
+            format_info = {
+                'width': int_or_none(s.get('width')),
+                'height': int_or_none(s.get('height')),
+                'tbr': tbr,
+            }
+
+            host = s['host']
+            path = s['path']
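+            # RTMP streams are described by a separate host URL and play path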
+            if host.startswith('rtmp'):
+                fmt = 'rtmp'
+                format_info.update({
+                    'url': host,
+                    'play_path': path,
+                    'ext': 'flv',
+                })
+            else:
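+                # m3u8 playlists are handled by the native HLS downloader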
+                if s.get('format') == 'm3u8_playlist':
+                    fmt = 'hls'
+                    format_info.update({
+                        'protocol': 'm3u8_native',
+                        'ext': 'mp4',
+                    })
+                else:
+                    fmt = format_info['ext'] = determine_ext(path)
+                format_url = compat_urlparse.urljoin(host, path)
+                format_info['url'] = format_url
+            format_info['format_id'] = fmt + ('-%d' % tbr if tbr else '')
+            formats.append(format_info)
+
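+        # Closed caption tracks are embedded in the page as a JSON array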
+        closed_captions = self._html_search_regex(
+            r'"closedcaptions":(\[[^\]]+\])', webpage, 'closed captions',
+            default='[]')
+
+        cc_json = self._parse_json(closed_captions, video_id, fatal=False)
+        subtitles = {}
+        if cc_json:
+            for closed_caption in cc_json:
+                lang = closed_caption['lang']
+                if lang not in subtitles:
+                    subtitles[lang] = []
+                subtitles[lang].append({
+                    'url': closed_caption['url'],
+                    'ext': mimetype2ext(closed_caption['content_type']),
+                })
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': unescapeHTML(meta['title']),
+            'formats': formats,
+            'description': clean_html(meta['description']),
+            'thumbnail': meta.get('thumbnail') or self._og_search_thumbnail(webpage),
+            'duration': int_or_none(meta.get('duration')),
+            'subtitles': subtitles,
+        }
+
+    def _get_info(self, video_id, display_id, webpage):
+        region = self._search_regex(
+            r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
+            webpage, 'region', fatal=False, default='US').upper()
+        formats = []
+        info = {}
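+        # Query the streams endpoint once per container format and merge the resulting formats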
+        for fmt in ('webm', 'mp4'):
+            query_result = self._download_json(
+                'https://video.media.yql.yahoo.com/v1/video/sapi/streams/' + video_id,
+                display_id, 'Downloading %s video info' % fmt, query={
+                    'protocol': 'http',
+                    'region': region,
+                    'format': fmt,
+                })
+            info = self._extract_info(display_id, query_result, webpage)
+            formats.extend(info['formats'])
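+        # Also pick up HLS renditions from the dedicated m3u8 endpoint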
+        formats.extend(self._extract_m3u8_formats(
+            'http://video.media.yql.yahoo.com/v1/hls/%s?region=%s' % (video_id, region),
+            video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
+        self._sort_formats(formats)
+        info['formats'] = formats
+        return info
+