from .common import InfoExtractor
import re


class ToypicsIE(InfoExtractor):
    """Extract a single video page from videos.toypics.net."""
    # Fixed: this IE handles one video, not a profile (the description
    # below had been copy-pasted from ToypicsUserIE).
    IE_DESC = 'Toypics video'
    # NOTE(review): the named group '(?P<id>...)' was stripped to '(?P...)'
    # by HTML extraction; restored from the mobj.group('id') call below.
    _VALID_URL = r'http://videos\.toypics\.net/view/(?P<id>[0-9]+)/.*'
    _TEST = {
        'url': 'http://videos.toypics.net/view/514/chancebulged,-2-1/',
        'md5': '16e806ad6d6f58079d210fe30985e08b',
        'info_dict': {
            'id': '514',
            'ext': 'mp4',
            'title': 'Chance-Bulge\'d, 2',
            'age_limit': 18,
            'uploader': 'kidsune',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        page = self._download_webpage(url, video_id)
        # The player embeds the direct FLV/MP4 URL in a JS 'src:' property.
        video_url = self._html_search_regex(
            r'src:\s+"(http://static[0-9]+\.toypics\.net/flvideo/[^"]+)"',
            page, 'video URL')
        # Page title is '<title>Toypics - NAME</title>'; the tags were lost
        # in the mangled paste and are restored here.
        title = self._html_search_regex(
            r'<title>Toypics - ([^<]+)</title>', page, 'title')
        username = self._html_search_regex(
            r'toypics.net/([^/"]+)" class="user-name">', page, 'username')
        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'uploader': username,
            'age_limit': 18,  # the site hosts adult content
        }


class ToypicsUserIE(InfoExtractor):
    """Extract all public videos of a videos.toypics.net user as a playlist."""
    IE_DESC = 'Toypics user profile'
    # NOTE(review): named group '(?P<username>...)' restored; see
    # mobj.group('username') below.
    _VALID_URL = r'http://videos\.toypics\.net/(?P<username>[^/?]+)(?:$|[?#])'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        username = mobj.group('username')

        profile_page = self._download_webpage(
            url, username, note='Retrieving profile page')

        # Total count shown next to the 'Public Videos (N)' tab link.
        video_count = int(self._search_regex(
            r'public/">Public Videos \(([0-9]+)\)', profile_page,
            'video count'))

        PAGE_SIZE = 8  # videos listed per profile page
        urls = []
        # Ceiling division; the original '(count + PAGE_SIZE + 1)' was an
        # off-by-one that requested a spurious empty trailing page.
        page_count = (video_count + PAGE_SIZE - 1) // PAGE_SIZE
        for n in range(1, page_count + 1):
            lpage_url = url + '/public/%d' % n
            lpage = self._download_webpage(
                lpage_url, username,
                note='Downloading page %d/%d' % (n, page_count))
            # NOTE(review): this pattern was destroyed by the HTML-stripping
            # of the paste; reconstructed to match the listing markup
            # (<p class="video-entry-title"> followed by the /view/ link).
            urls.extend(
                re.findall(
                    r'<p class="video-entry-title">\n\s*'
                    r'<a href="(https?://videos\.toypics\.net/view/[^"]+)">',
                    lpage))

        return {
            '_type': 'playlist',
            'id': username,
            'entries': [{
                '_type': 'url',
                'url': eurl,
                'ie_key': 'Toypics',  # delegate each entry to ToypicsIE
            } for eurl in urls]
        }