]> Raphaƫl G. Git Repositories - youtubedl/blob - youtube_dl/extractor/toypics.py
debian/control: Add recommends on aria2 | wget | curl to use external downloaders.
[youtubedl] / youtube_dl / extractor / toypics.py
1 # -*- coding:utf-8 -*-
2 from __future__ import unicode_literals
3
4 from .common import InfoExtractor
5 import re
6
7
class ToypicsIE(InfoExtractor):
    """Extract a single video from videos.toypics.net.

    Scrapes the video page for the direct FLV/MP4 URL hosted on the
    static*.toypics.net CDN, plus the title and uploader name.
    """
    # NOTE: fixed — the old description said 'Toypics user profile', which
    # duplicated ToypicsUserIE's description; this IE handles one video.
    IE_DESC = 'Toypics video'
    _VALID_URL = r'https?://videos\.toypics\.net/view/(?P<id>[0-9]+)/.*'
    _TEST = {
        'url': 'http://videos.toypics.net/view/514/chancebulged,-2-1/',
        'md5': '16e806ad6d6f58079d210fe30985e08b',
        'info_dict': {
            'id': '514',
            'ext': 'mp4',
            'title': 'Chance-Bulge\'d, 2',
            'age_limit': 18,
            'uploader': 'kidsune',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        page = self._download_webpage(url, video_id)
        video_url = self._html_search_regex(
            r'src:\s+"(http://static[0-9]+\.toypics\.net/flvideo/[^"]+)"', page, 'video URL')
        title = self._html_search_regex(
            r'<title>Toypics - ([^<]+)</title>', page, 'title')
        # NOTE: fixed — the dot in 'toypics.net' was unescaped and matched
        # any character; escaped so the pattern anchors on the literal host.
        username = self._html_search_regex(
            r'toypics\.net/([^/"]+)" class="user-name">', page, 'username')
        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'uploader': username,
            # Site hosts adult content; hard-coded rating.
            'age_limit': 18,
        }
40
41
class ToypicsUserIE(InfoExtractor):
    """Extract all public videos of a Toypics user as a playlist.

    Reads the public-video count from the profile page, then walks the
    paginated '/public/<n>' listing pages (8 entries per page) and emits
    each video URL for the ToypicsIE extractor to resolve.
    """
    IE_DESC = 'Toypics user profile'
    _VALID_URL = r'http://videos\.toypics\.net/(?P<username>[^/?]+)(?:$|[?#])'
    _TEST = {
        'url': 'http://videos.toypics.net/Mikey',
        'info_dict': {
            'id': 'Mikey',
        },
        'playlist_mincount': 19,
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        username = mobj.group('username')

        profile_page = self._download_webpage(
            url, username, note='Retrieving profile page')

        video_count = int(self._search_regex(
            r'public/">Public Videos \(([0-9]+)\)</a></li>', profile_page,
            'video count'))

        PAGE_SIZE = 8
        urls = []
        # NOTE: fixed off-by-one — ceiling division is (n + size - 1) // size.
        # The previous '+ PAGE_SIZE + 1' fetched a spurious extra page
        # whenever video_count was an exact multiple of PAGE_SIZE.
        page_count = (video_count + PAGE_SIZE - 1) // PAGE_SIZE
        for n in range(1, page_count + 1):
            lpage_url = url + '/public/%d' % n
            lpage = self._download_webpage(
                lpage_url, username,
                note='Downloading page %d/%d' % (n, page_count))
            # NOTE: fixed — dots in the hostname were unescaped and matched
            # any character; escaped to anchor on the literal host.
            urls.extend(
                re.findall(
                    r'<p class="video-entry-title">\s+<a href="(https?://videos\.toypics\.net/view/[^"]+)">',
                    lpage))

        return {
            '_type': 'playlist',
            'id': username,
            # Defer per-video extraction to ToypicsIE via url-type entries.
            'entries': [{
                '_type': 'url',
                'url': eurl,
                'ie_key': 'Toypics',
            } for eurl in urls]
        }