diff --git a/youtube_dl/extractor/toypics.py b/youtube_dl/extractor/toypics.py
index 34008afc6b87226d37aee4612a4b782c7c392a57..f705a06c95a7d04645d3ccb50784ff444cc6e934 100644
--- a/youtube_dl/extractor/toypics.py
+++ b/youtube_dl/extractor/toypics.py
@@ -1,48 +1,63 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 from .common import InfoExtractor
 import re
 
 
 class ToypicsIE(InfoExtractor):
-    IE_DESC = 'Toypics user profile'
-    _VALID_URL = r'http://videos\.toypics\.net/view/(?P<id>[0-9]+)/.*'
+    IE_DESC = 'Toypics video'
+    _VALID_URL = r'https?://videos\.toypics\.net/view/(?P<id>[0-9]+)'
     _TEST = {
         'url': 'http://videos.toypics.net/view/514/chancebulged,-2-1/',
         'md5': '16e806ad6d6f58079d210fe30985e08b',
         'info_dict': {
             'id': '514',
             'ext': 'mp4',
-            'title': 'Chance-Bulge\'d, 2',
+            'title': "Chance-Bulge'd, 2",
             'age_limit': 18,
             'uploader': 'kidsune',
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        page = self._download_webpage(url, video_id)
-        video_url = self._html_search_regex(
-            r'src:\s+"(http://static[0-9]+\.toypics\.net/flvideo/[^"]+)"', page, 'video URL')
-        title = self._html_search_regex(
-            r'<title>Toypics - ([^<]+)</title>', page, 'title')
-        username = self._html_search_regex(
-            r'toypics.net/([^/"]+)" class="user-name">', page, 'username')
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        formats = self._parse_html5_media_entries(
+            url, webpage, video_id)[0]['formats']
+        title = self._html_search_regex([
+            r'<h1[^>]+class=["\']view-video-title[^>]+>([^<]+)</h',
+            r'<title>([^<]+) - Toypics</title>',
+        ], webpage, 'title')
+
+        uploader = self._html_search_regex(
+            r'More videos from <strong>([^<]+)</strong>', webpage, 'uploader',
+            fatal=False)
+
         return {
             'id': video_id,
-            'url': video_url,
+            'formats': formats,
             'title': title,
-            'uploader': username,
+            'uploader': uploader,
             'age_limit': 18,
         }
 
 
 class ToypicsUserIE(InfoExtractor):
     IE_DESC = 'Toypics user profile'
-    _VALID_URL = r'http://videos\.toypics\.net/(?P<username>[^/?]+)(?:$|[?#])'
+    _VALID_URL = r'https?://videos\.toypics\.net/(?!view)(?P<id>[^/?#&]+)'
+    _TEST = {
+        'url': 'http://videos.toypics.net/Mikey',
+        'info_dict': {
+            'id': 'Mikey',
+        },
+        'playlist_mincount': 19,
+    }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        username = mobj.group('username')
+        username = self._match_id(url)
 
         profile_page = self._download_webpage(
             url, username, note='Retrieving profile page')
@@ -61,7 +76,7 @@ class ToypicsUserIE(InfoExtractor):
                 note='Downloading page %d/%d' % (n, page_count))
             urls.extend(
                 re.findall(
-                    r'<p class="video-entry-title">\n\s*<a href="(http://videos.toypics.net/view/[^"]+)">',
+                    r'<div[^>]+class=["\']preview[^>]+>\s*<a[^>]+href="(https?://videos\.toypics\.net/view/[^"]+)"',
                     lpage))
 
         return {
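
Note (not part of the diff): a minimal sketch of how the reworked ToypicsIE could be exercised through youtube-dl's embedding API, using the URL from the _TEST block above. The YoutubeDL options and the printed fields are illustrative assumptions, not part of this change.

    from __future__ import unicode_literals

    import youtube_dl

    # Extract metadata only; download=False skips fetching the media itself.
    ydl = youtube_dl.YoutubeDL({'quiet': True})
    info = ydl.extract_info(
        'http://videos.toypics.net/view/514/chancebulged,-2-1/', download=False)

    # 'formats' is now filled by _parse_html5_media_entries() instead of a
    # single hard-coded FLV URL, so more than one HTML5 source may be listed.
    print(info['id'], info['title'])
    for f in info.get('formats', []):
        print(f.get('format_id'), f.get('url'))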