diff --git a/youtube_dl/extractor/pyvideo.py b/youtube_dl/extractor/pyvideo.py
index 33054591b755b383eb0d216623b99d308073c64d..b8ac93a62c4157ae51335efcadf83ca363272f19 100644
--- a/youtube_dl/extractor/pyvideo.py
+++ b/youtube_dl/extractor/pyvideo.py
@@ -1,51 +1,72 @@
+from __future__ import unicode_literals
+
 import re
-import os
 
 from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import int_or_none
 
 
 class PyvideoIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?pyvideo\.org/video/(?P<id>\d+)/(.*)'
+    _VALID_URL = r'https?://(?:www\.)?pyvideo\.org/(?P<category>[^/]+)/(?P<id>[^/?#&.]+)'
+
     _TESTS = [{
-        u'url': u'http://pyvideo.org/video/1737/become-a-logging-expert-in-30-minutes',
-        u'file': u'24_4WWkSmNo.mp4',
-        u'md5': u'de317418c8bc76b1fd8633e4f32acbc6',
-        u'info_dict': {
-            u"title": u"Become a logging expert in 30 minutes",
-            u"description": u"md5:9665350d466c67fb5b1598de379021f7",
-            u"upload_date": u"20130320",
-            u"uploader": u"NextDayVideo",
-            u"uploader_id": u"NextDayVideo",
+        'url': 'http://pyvideo.org/pycon-us-2013/become-a-logging-expert-in-30-minutes.html',
+        'info_dict': {
+            'id': 'become-a-logging-expert-in-30-minutes',
         },
-        u'add_ie': ['Youtube'],
-    },
-    {
-        u'url': u'http://pyvideo.org/video/2542/gloriajw-spotifywitherikbernhardsson182m4v',
-        u'md5': u'5fe1c7e0a8aa5570330784c847ff6d12',
-        u'info_dict': {
-            u'id': u'2542',
-            u'ext': u'm4v',
-            u'title': u'Gloriajw-SpotifyWithErikBernhardsson182',
+        'playlist_count': 2,
+    }, {
+        'url': 'http://pyvideo.org/pygotham-2012/gloriajw-spotifywitherikbernhardsson182m4v.html',
+        'md5': '5fe1c7e0a8aa5570330784c847ff6d12',
+        'info_dict': {
+            'id': '2542',
+            'ext': 'm4v',
+            'title': 'Gloriajw-SpotifyWithErikBernhardsson182.m4v',
         },
-    },
-    ]
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
+        category = mobj.group('category')
         video_id = mobj.group('id')
-        webpage = self._download_webpage(url, video_id)
-        m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', webpage)
-
-        if m_youtube is not None:
-            return self.url_result(m_youtube.group(1), 'Youtube')
-
-        title = self._html_search_regex(r'<div class="section">.*?<h3>([^>]+?)</h3>',
-            webpage, u'title', flags=re.DOTALL)
-        video_url = self._search_regex([r'<source src="(.*?)"',
-            r'<dt>Download</dt>.*?<a href="(.+?)"'],
-            webpage, u'video url', flags=re.DOTALL)
-        return {
-            'id': video_id,
-            'title': os.path.splitext(title)[0],
-            'url': video_url,
-        }
+
+        entries = []
+
+        data = self._download_json(
+            'https://raw.githubusercontent.com/pyvideo/data/master/%s/videos/%s.json'
+            % (category, video_id), video_id, fatal=False)
+
+        if data:
+            for video in data['videos']:
+                video_url = video.get('url')
+                if video_url:
+                    if video.get('type') == 'youtube':
+                        entries.append(self.url_result(video_url, 'Youtube'))
+                    else:
+                        entries.append({
+                            'id': compat_str(data.get('id') or video_id),
+                            'url': video_url,
+                            'title': data['title'],
+                            'description': data.get('description') or data.get('summary'),
+                            'thumbnail': data.get('thumbnail_url'),
+                            'duration': int_or_none(data.get('duration')),
+                        })
+        else:
+            webpage = self._download_webpage(url, video_id)
+            title = self._og_search_title(webpage)
+            media_urls = self._search_regex(
+                r'(?s)Media URL:(.+?)</li>', webpage, 'media urls')
+            for m in re.finditer(
+                    r'<a[^>]+href=(["\'])(?P<url>http.+?)\1', media_urls):
+                media_url = m.group('url')
+                if re.match(r'https?://www\.youtube\.com/watch\?v=.*', media_url):
+                    entries.append(self.url_result(media_url, 'Youtube'))
+                else:
+                    entries.append({
+                        'id': video_id,
+                        'url': media_url,
+                        'title': title,
+                    })
+
+        return self.playlist_result(entries, video_id)
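
For reference, the rewritten extractor first tries the machine-readable metadata published in the pyvideo/data repository (the raw.githubusercontent.com URL template above) and only falls back to scraping the HTML page when that JSON is unavailable. Below is a minimal standalone sketch of that JSON lookup, outside of youtube-dl; the helper name fetch_pyvideo_entries is hypothetical and not part of the patch, and it uses the Python 3 standard library rather than youtube-dl's compat layer.

    import json
    import re
    from urllib.request import urlopen

    # Same URL pattern as the patched _VALID_URL.
    PYVIDEO_URL_RE = re.compile(
        r'https?://(?:www\.)?pyvideo\.org/(?P<category>[^/]+)/(?P<id>[^/?#&.]+)')

    def fetch_pyvideo_entries(url):
        """Return (type, url) pairs for a pyvideo.org page, using the pyvideo/data JSON."""
        mobj = PYVIDEO_URL_RE.match(url)
        category, video_id = mobj.group('category'), mobj.group('id')
        data_url = (
            'https://raw.githubusercontent.com/pyvideo/data/master/%s/videos/%s.json'
            % (category, video_id))
        with urlopen(data_url) as resp:
            data = json.loads(resp.read().decode('utf-8'))
        # Each entry in data['videos'] carries a 'type' (e.g. 'youtube') and a 'url';
        # the extractor turns these into url_result() entries or direct-URL entries.
        return [(video.get('type'), video.get('url')) for video in data['videos']]

    print(fetch_pyvideo_entries(
        'http://pyvideo.org/pycon-us-2013/become-a-logging-expert-in-30-minutes.html'))

With the patch applied, the same page can simply be handed to the CLI, e.g. youtube-dl "http://pyvideo.org/pycon-us-2013/become-a-logging-expert-in-30-minutes.html".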