]> Raphaël G. Git Repositories - youtubedl/blobdiff - youtube_dl/extractor/libsyn.py
Annotate changelog with bug closed.
[youtubedl] / youtube_dl / extractor / libsyn.py
index 9ab1416f55e29d69681d0ccf3678957482a3e80c..d375695f5a26dbc072455777487ed239820c1ec6 100644 (file)
@@ -8,9 +8,9 @@ from ..utils import unified_strdate
 
 
 class LibsynIE(InfoExtractor):
 
 
 class LibsynIE(InfoExtractor):
-    _VALID_URL = r'https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+)'
+    _VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))'
 
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://html5-player.libsyn.com/embed/episode/id/3377616/',
         'md5': '443360ee1b58007bc3dcf09b41d093bb',
         'info_dict': {
         'url': 'http://html5-player.libsyn.com/embed/episode/id/3377616/',
         'md5': '443360ee1b58007bc3dcf09b41d093bb',
         'info_dict': {
@@ -19,12 +19,24 @@ class LibsynIE(InfoExtractor):
             'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
             'description': 'md5:601cb790edd05908957dae8aaa866465',
             'upload_date': '20150220',
             'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
             'description': 'md5:601cb790edd05908957dae8aaa866465',
             'upload_date': '20150220',
+            'thumbnail': 're:^https?://.*',
         },
         },
-    }
+    }, {
+        'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/',
+        'md5': '6c5cb21acd622d754d3b1a92b582ce42',
+        'info_dict': {
+            'id': '3727166',
+            'ext': 'mp3',
+            'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career',
+            'upload_date': '20150818',
+            'thumbnail': 're:^https?://.*',
+        }
+    }]
 
     def _real_extract(self, url):
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('id')
+        url = m.group('mainurl')
         webpage = self._download_webpage(url, video_id)
 
         formats = [{
         webpage = self._download_webpage(url, video_id)
 
         formats = [{
@@ -32,20 +44,18 @@ class LibsynIE(InfoExtractor):
         } for media_url in set(re.findall('var\s+mediaURL(?:Libsyn)?\s*=\s*"([^"]+)"', webpage))]
 
         podcast_title = self._search_regex(
         } for media_url in set(re.findall('var\s+mediaURL(?:Libsyn)?\s*=\s*"([^"]+)"', webpage))]
 
         podcast_title = self._search_regex(
-            r'<h2>([^<]+)</h2>', webpage, 'title')
+            r'<h2>([^<]+)</h2>', webpage, 'podcast title', default=None)
         episode_title = self._search_regex(
         episode_title = self._search_regex(
-            r'<h3>([^<]+)</h3>', webpage, 'title', default=None)
+            r'(?:<div class="episode-title">|<h3>)([^<]+)</', webpage, 'episode title')
 
         title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
 
         description = self._html_search_regex(
             r'<div id="info_text_body">(.+?)</div>', webpage,
 
         title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
 
         description = self._html_search_regex(
             r'<div id="info_text_body">(.+?)</div>', webpage,
-            'description', fatal=False)
-
+            'description', default=None)
         thumbnail = self._search_regex(
             r'<img[^>]+class="info-show-icon"[^>]+src="([^"]+)"',
             webpage, 'thumbnail', fatal=False)
         thumbnail = self._search_regex(
             r'<img[^>]+class="info-show-icon"[^>]+src="([^"]+)"',
             webpage, 'thumbnail', fatal=False)
-
         release_date = unified_strdate(self._search_regex(
             r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
 
         release_date = unified_strdate(self._search_regex(
             r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))