debian/{control,copyright,watch}: Change upstream location.
diff --git a/youtube_dl/extractor/libsyn.py b/youtube_dl/extractor/libsyn.py
index 4750b03a3fb2f47818858338b7eb9a8b4889c012..2cf444258a2b57e7509a5ce83efdab0125bf20e3 100644
--- a/youtube_dl/extractor/libsyn.py
+++ b/youtube_dl/extractor/libsyn.py
@@ -4,21 +4,29 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import unified_strdate
+from ..utils import (
+    clean_html,
+    get_element_by_class,
+    parse_duration,
+    strip_or_none,
+    unified_strdate,
+)
 
 
 class LibsynIE(InfoExtractor):
     _VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))'
 
     _TESTS = [{
-        'url': 'http://html5-player.libsyn.com/embed/episode/id/3377616/',
-        'md5': '443360ee1b58007bc3dcf09b41d093bb',
+        'url': 'http://html5-player.libsyn.com/embed/episode/id/6385796/',
+        'md5': '2a55e75496c790cdeb058e7e6c087746',
         'info_dict': {
-            'id': '3377616',
+            'id': '6385796',
             'ext': 'mp3',
-            'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
-            'description': 'md5:601cb790edd05908957dae8aaa866465',
-            'upload_date': '20150220',
+            'title': "Champion Minded - Developing a Growth Mindset",
+            # description fetched using another request:
+            # http://html5-player.libsyn.com/embed/getitemdetails?item_id=6385796
+            # 'description': 'In this episode, Allistair talks about the importance of developing a growth mindset, not only in sports, but in life too.',
+            'upload_date': '20180320',
             'thumbnail': 're:^https?://.*',
         },
     }, {
@@ -34,36 +42,52 @@ class LibsynIE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
-        url = m.group('mainurl')
+        url, video_id = re.match(self._VALID_URL, url).groups()
         webpage = self._download_webpage(url, video_id)
 
-        formats = [{
-            'url': media_url,
-        } for media_url in set(re.findall(r'var\s+mediaURL(?:Libsyn)?\s*=\s*"([^"]+)"', webpage))]
+        data = self._parse_json(self._search_regex(
+            r'var\s+playlistItem\s*=\s*({.+?});',
+            webpage, 'JSON data block'), video_id)
 
-        podcast_title = self._search_regex(
-            r'<h2>([^<]+)</h2>', webpage, 'podcast title', default=None)
-        episode_title = self._search_regex(
-            r'(?:<div class="episode-title">|<h3>)([^<]+)</', webpage, 'episode title')
+        episode_title = data.get('item_title') or get_element_by_class('episode-title', webpage)
+        if not episode_title:
+            episode_title = self._search_regex(
+                [r'data-title="([^"]+)"', r'<title>(.+?)</title>'],
+                webpage, 'episode title')
+        episode_title = episode_title.strip()
+
+        podcast_title = strip_or_none(clean_html(self._search_regex(
+            r'<h3>([^<]+)</h3>', webpage, 'podcast title',
+            default=None) or get_element_by_class('podcast-title', webpage)))
 
         title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
 
+        formats = []
+        for k, format_id in (('media_url_libsyn', 'libsyn'), ('media_url', 'main'), ('download_link', 'download')):
+            f_url = data.get(k)
+            if not f_url:
+                continue
+            formats.append({
+                'url': f_url,
+                'format_id': format_id,
+            })
+
         description = self._html_search_regex(
-            r'<div id="info_text_body">(.+?)</div>', webpage,
+            r'<p\s+id="info_text_body">(.+?)</p>', webpage,
             'description', default=None)
-        thumbnail = self._search_regex(
-            r'<img[^>]+class="info-show-icon"[^>]+src="([^"]+)"',
-            webpage, 'thumbnail', fatal=False)
+        if description:
+            # Strip non-breaking and normal spaces
+            description = description.replace('\u00A0', ' ').strip()
         release_date = unified_strdate(self._search_regex(
-            r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
+            r'<div class="release_date">Released: ([^<]+)<',
+            webpage, 'release date', default=None) or data.get('release_date'))
 
         return {
             'id': video_id,
             'title': title,
             'description': description,
-            'thumbnail': thumbnail,
+            'thumbnail': data.get('thumbnail_url'),
             'upload_date': release_date,
+            'duration': parse_duration(data.get('duration')),
             'formats': formats,
         }
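
For reference, a minimal standalone sketch of the parsing approach the new extractor code relies on: locate the "var playlistItem = {...};" assignment in the embed page and read fields such as the media URL from the parsed JSON. The function name and sample markup below are illustrative assumptions, not part of youtube-dl; inside the extractor this is done with the _search_regex and _parse_json helpers shown in the diff above.

import json
import re


def parse_playlist_item(webpage_html):
    # Find the "var playlistItem = {...};" block emitted by the Libsyn embed page
    # (the same pattern targeted by the extractor's _search_regex call above).
    match = re.search(r'var\s+playlistItem\s*=\s*({.+?});', webpage_html)
    if not match:
        return None
    return json.loads(match.group(1))


# Illustrative input only, not real page data.
sample = 'var playlistItem = {"item_title": "Example Episode", "media_url": "https://example.com/ep.mp3"};'
item = parse_playlist_item(sample)
if item:
    print(item.get('item_title'), item.get('media_url'))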