diff --git a/youtube_dl/extractor/toypics.py b/youtube_dl/extractor/toypics.py
index 2579ba8c67498c91aa117c6853b83f391ccb3ba6..f705a06c95a7d04645d3ccb50784ff444cc6e934 100644
--- a/youtube_dl/extractor/toypics.py
+++ b/youtube_dl/extractor/toypics.py
@@ -1,4 +1,4 @@
-# -*- coding:utf-8 -*-
+# coding: utf-8
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
@@ -6,42 +6,48 @@ import re
 
 
 class ToypicsIE(InfoExtractor):
-    IE_DESC = 'Toypics user profile'
-    _VALID_URL = r'https?://videos\.toypics\.net/view/(?P<id>[0-9]+)/.*'
+    IE_DESC = 'Toypics video'
+    _VALID_URL = r'https?://videos\.toypics\.net/view/(?P<id>[0-9]+)'
     _TEST = {
         'url': 'http://videos.toypics.net/view/514/chancebulged,-2-1/',
         'md5': '16e806ad6d6f58079d210fe30985e08b',
         'info_dict': {
             'id': '514',
             'ext': 'mp4',
-            'title': 'Chance-Bulge\'d, 2',
+            'title': "Chance-Bulge'd, 2",
             'age_limit': 18,
             'uploader': 'kidsune',
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        page = self._download_webpage(url, video_id)
-        video_url = self._html_search_regex(
-            r'src:\s+"(http://static[0-9]+\.toypics\.net/flvideo/[^"]+)"', page, 'video URL')
-        title = self._html_search_regex(
-            r'<title>Toypics - ([^<]+)</title>', page, 'title')
-        username = self._html_search_regex(
-            r'toypics.net/([^/"]+)" class="user-name">', page, 'username')
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        formats = self._parse_html5_media_entries(
+            url, webpage, video_id)[0]['formats']
+        title = self._html_search_regex([
+            r'<h1[^>]+class=["\']view-video-title[^>]+>([^<]+)</h',
+            r'<title>([^<]+) - Toypics</title>',
+        ], webpage, 'title')
+
+        uploader = self._html_search_regex(
+            r'More videos from <strong>([^<]+)</strong>', webpage, 'uploader',
+            fatal=False)
+
         return {
             'id': video_id,
-            'url': video_url,
+            'formats': formats,
             'title': title,
-            'uploader': username,
+            'uploader': uploader,
             'age_limit': 18,
         }
 
 
 class ToypicsUserIE(InfoExtractor):
     IE_DESC = 'Toypics user profile'
-    _VALID_URL = r'https?://videos\.toypics\.net/(?P<username>[^/?]+)(?:$|[?#])'
+    _VALID_URL = r'https?://videos\.toypics\.net/(?!view)(?P<id>[^/?#&]+)'
     _TEST = {
         'url': 'http://videos.toypics.net/Mikey',
         'info_dict': {
@@ -51,8 +57,7 @@ class ToypicsUserIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        username = mobj.group('username')
+        username = self._match_id(url)
 
         profile_page = self._download_webpage(
             url, username, note='Retrieving profile page')
@@ -71,7 +76,7 @@ class ToypicsUserIE(InfoExtractor):
                 note='Downloading page %d/%d' % (n, page_count))
             urls.extend(
                 re.findall(
-                    r'<p class="video-entry-title">\s+<a href="(https?://videos.toypics.net/view/[^"]+)">',
+                    r'<div[^>]+class=["\']preview[^>]+>\s*<a[^>]+href="(https?://videos\.toypics\.net/view/[^"]+)"',
                     lpage))
 
         return {
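
As a quick illustration of the reworked _VALID_URL patterns, here is a minimal standalone Python sketch that matches the test URLs from the diff with plain re.match (for illustration only; inside youtube-dl the matching goes through InfoExtractor.suitable() and _match_id()):

import re

# _VALID_URL patterns copied verbatim from the updated extractors above
VIDEO_URL = r'https?://videos\.toypics\.net/view/(?P<id>[0-9]+)'
USER_URL = r'https?://videos\.toypics\.net/(?!view)(?P<id>[^/?#&]+)'

# test URLs taken from the _TEST definitions in the diff
print(re.match(VIDEO_URL, 'http://videos.toypics.net/view/514/chancebulged,-2-1/').group('id'))  # 514
print(re.match(USER_URL, 'http://videos.toypics.net/Mikey').group('id'))                         # Mikey

# the (?!view) negative lookahead keeps the profile pattern from also
# claiming /view/... video URLs, so those stay with ToypicsIE
print(re.match(USER_URL, 'http://videos.toypics.net/view/514/chancebulged,-2-1/'))               # None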