]> Raphaël G. Git Repositories - youtubedl/blobdiff - youtube_dl/extractor/afreecatv.py
d/p/remove-autoupdate-mechanism.patch: Update patch with metadata at the top.
[youtubedl] / youtube_dl / extractor / afreecatv.py
index e6513c7a4d86caabe282fe874826ef5219826085..df2a3fc4a2a24d2feeb81083d2414d8dc09b682b 100644 (file)
@@ -175,10 +175,27 @@ class AfreecaTVIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
+        webpage = self._download_webpage(url, video_id)
+
+        if re.search(r'alert\(["\']This video has been deleted', webpage):
+            raise ExtractorError(
+                'Video %s has been deleted' % video_id, expected=True)
+
+        station_id = self._search_regex(
+            r'nStationNo\s*=\s*(\d+)', webpage, 'station')
+        bbs_id = self._search_regex(
+            r'nBbsNo\s*=\s*(\d+)', webpage, 'bbs')
+        video_id = self._search_regex(
+            r'nTitleNo\s*=\s*(\d+)', webpage, 'title', default=video_id)
+
         video_xml = self._download_xml(
             'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
         video_xml = self._download_xml(
             'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
-            video_id, query={
+            video_id, headers={
+                'Referer': 'http://vod.afreecatv.com/embed.php',
+            }, query={
                 'nTitleNo': video_id,
                 'nTitleNo': video_id,
+                'nStationNo': station_id,
+                'nBbsNo': bbs_id,
                 'partialView': 'SKIP_ADULT',
             })
 
                 'partialView': 'SKIP_ADULT',
             })
 
@@ -187,10 +204,10 @@ class AfreecaTVIE(InfoExtractor):
             raise ExtractorError(
                 '%s said: %s' % (self.IE_NAME, flag), expected=True)
 
             raise ExtractorError(
                 '%s said: %s' % (self.IE_NAME, flag), expected=True)
 
-        video_element = video_xml.findall(compat_xpath('./track/video'))[1]
+        video_element = video_xml.findall(compat_xpath('./track/video'))[-1]
         if video_element is None or video_element.text is None:
         if video_element is None or video_element.text is None:
-            raise ExtractorError('Specified AfreecaTV video does not exist',
-                                 expected=True)
+            raise ExtractorError(
+                'Video %s video does not exist' % video_id, expected=True)
 
         video_url = video_element.text.strip()
 
 
         video_url = video_element.text.strip()
 
@@ -228,10 +245,19 @@ class AfreecaTVIE(InfoExtractor):
                     r'^(\d{8})_', key, 'upload date', default=None)
                 file_duration = int_or_none(file_element.get('duration'))
                 format_id = key if key else '%s_%s' % (video_id, file_num)
                     r'^(\d{8})_', key, 'upload date', default=None)
                 file_duration = int_or_none(file_element.get('duration'))
                 format_id = key if key else '%s_%s' % (video_id, file_num)
-                formats = self._extract_m3u8_formats(
-                    file_url, video_id, 'mp4', entry_protocol='m3u8_native',
-                    m3u8_id='hls',
-                    note='Downloading part %d m3u8 information' % file_num)
+                if determine_ext(file_url) == 'm3u8':
+                    formats = self._extract_m3u8_formats(
+                        file_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                        m3u8_id='hls',
+                        note='Downloading part %d m3u8 information' % file_num)
+                else:
+                    formats = [{
+                        'url': file_url,
+                        'format_id': 'http',
+                    }]
+                if not formats:
+                    continue
+                self._sort_formats(formats)
                 file_info = common_entry.copy()
                 file_info.update({
                     'id': format_id,
                 file_info = common_entry.copy()
                 file_info.update({
                     'id': format_id,