X-Git-Url: https://git.rapsys.eu/youtubedl/blobdiff_plain/f19349c26118065acbe21509383c63465df794fe..47d80ec0b18245caeb97018d4c1af18d0b5b972b:/youtube_dl/extractor/xtube.py?ds=sidebyside

diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py
index a1fe240..83bc1fe 100644
--- a/youtube_dl/extractor/xtube.py
+++ b/youtube_dl/extractor/xtube.py
@@ -1,10 +1,12 @@
 from __future__ import unicode_literals
 
+import itertools
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote
 from ..utils import (
+    int_or_none,
+    orderedSet,
     parse_duration,
     sanitized_Request,
     str_to_int,
@@ -12,8 +14,16 @@ from ..utils import (
 
 
 class XTubeIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?(?P<url>xtube\.com/watch\.php\?v=(?P<id>[^/?&#]+))'
-    _TEST = {
+    _VALID_URL = r'''(?x)
+                        (?:
+                            xtube:|
+                            https?://(?:www\.)?xtube\.com/(?:watch\.php\?.*\bv=|video-watch/(?P<display_id>[^/]+)-)
+                        )
+                        (?P<id>[^/?&#]+)
+                    '''
+
+    _TESTS = [{
+        # old URL schema
         'url': 'http://www.xtube.com/watch.php?v=kVTUy_G222_',
         'md5': '092fbdd3cbe292c920ef6fc6a8a9cdab',
         'info_dict': {
@@ -23,110 +33,117 @@ class XTubeIE(InfoExtractor):
             'description': 'contains:an ET kind of thing',
             'uploader': 'greenshowers',
             'duration': 450,
+            'view_count': int,
+            'comment_count': int,
             'age_limit': 18,
         }
-    }
+    }, {
+        # new URL schema
+        'url': 'http://www.xtube.com/video-watch/strange-erotica-625837',
+        'only_matching': True,
+    }, {
+        'url': 'xtube:625837',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id')
+
+        if not display_id:
+            display_id = video_id
+            url = 'http://www.xtube.com/watch.php?v=%s' % video_id
 
         req = sanitized_Request(url)
-        req.add_header('Cookie', 'age_verified=1')
-        webpage = self._download_webpage(req, video_id)
-
-        video_title = self._html_search_regex(
-            r'<p class="title">([^<]+)', webpage, 'title')
([^<]+)', webpage, 'title') - video_uploader = self._html_search_regex( - [r"var\s+contentOwnerId\s*=\s*'([^']+)", - r'By:\s*(?P[^<]+)</h1>', r'videoTitle\s*:\s*(["\'])(?P<title>.+?)\1'), + webpage, 'title', group='title') + description = self._search_regex( + r'</h1>\s*<p>([^<]+)', webpage, 'description', fatal=False) + uploader = self._search_regex( + (r'<input[^>]+name="contentOwnerId"[^>]+value="([^"]+)"', + r'<span[^>]+class="nickname"[^>]*>([^<]+)'), webpage, 'uploader', fatal=False) - video_description = self._html_search_regex( - r'<p class="fieldsDesc">([^<]+)', - webpage, 'description', fatal=False) - duration = parse_duration(self._html_search_regex( - r'<span class="bold">Runtime:</span> ([^<]+)</p>', + duration = parse_duration(self._search_regex( + r'<dt>Runtime:</dt>\s*<dd>([^<]+)</dd>', webpage, 'duration', fatal=False)) - view_count = str_to_int(self._html_search_regex( - r'<span class="bold">Views:</span> ([\d,\.]+)</p>', + view_count = str_to_int(self._search_regex( + r'<dt>Views:</dt>\s*<dd>([\d,\.]+)</dd>', webpage, 'view count', fatal=False)) comment_count = str_to_int(self._html_search_regex( - r'<div id="commentBar">([\d,\.]+) Comments</div>', + r'>Comments? \(([\d,\.]+)\)<', webpage, 'comment count', fatal=False)) - formats = [] - for format_id, video_url in re.findall( - r'flashvars\.quality_(.+?)\s*=\s*"([^"]+)"', webpage): - fmt = { - 'url': compat_urllib_parse_unquote(video_url), - 'format_id': format_id, - } - m = re.search(r'^(?P<height>\d+)[pP]', format_id) - if m: - fmt['height'] = int(m.group('height')) - formats.append(fmt) - - if not formats: - video_url = compat_urllib_parse_unquote(self._search_regex( - r'flashvars\.video_url\s*=\s*"([^"]+)"', - webpage, 'video URL')) - formats.append({'url': video_url}) - - self._sort_formats(formats) - return { 'id': video_id, - 'title': video_title, - 'uploader': video_uploader, - 'description': video_description, + 'display_id': display_id, + 'title': title, + 'description': description, + 'uploader': uploader, 'duration': duration, 'view_count': view_count, 'comment_count': comment_count, - 'formats': formats, 'age_limit': 18, + 'formats': formats, } class XTubeUserIE(InfoExtractor): IE_DESC = 'XTube user profile' - _VALID_URL = r'https?://(?:www\.)?xtube\.com/community/profile\.php\?(.*?)user=(?P<username>[^&#]+)(?:$|[&#])' + _VALID_URL = r'https?://(?:www\.)?xtube\.com/profile/(?P<id>[^/]+-\d+)' _TEST = { - 'url': 'http://www.xtube.com/community/profile.php?user=greenshowers', + 'url': 'http://www.xtube.com/profile/greenshowers-4056496', 'info_dict': { - 'id': 'greenshowers', + 'id': 'greenshowers-4056496', 'age_limit': 18, }, 'playlist_mincount': 155, } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - username = mobj.group('username') - - profile_page = self._download_webpage( - url, username, note='Retrieving profile page') - - video_count = int(self._search_regex( - r'<strong>%s\'s Videos \(([0-9]+)\)</strong>' % username, profile_page, - 'video count')) - - PAGE_SIZE = 25 - urls = [] - page_count = (video_count + PAGE_SIZE + 1) // PAGE_SIZE - for n in range(1, page_count + 1): - lpage_url = 'http://www.xtube.com/user_videos.php?page=%d&u=%s' % (n, username) - lpage = self._download_webpage( - lpage_url, username, - note='Downloading page %d/%d' % (n, page_count)) - urls.extend( - re.findall(r'addthis:url="([^"]+)"', lpage)) - - return { - '_type': 'playlist', - 'id': username, - 'age_limit': 18, - 'entries': [{ - '_type': 'url', - 'url': eurl, - 'ie_key': 'XTube', - } for eurl in urls] 
-        }
+        user_id = self._match_id(url)
+
+        entries = []
+        for pagenum in itertools.count(1):
+            request = sanitized_Request(
+                'http://www.xtube.com/profile/%s/videos/%d' % (user_id, pagenum),
+                headers={
+                    'Cookie': 'popunder=4',
+                    'X-Requested-With': 'XMLHttpRequest',
+                    'Referer': url,
+                })
+
+            page = self._download_json(
+                request, user_id, 'Downloading videos JSON page %d' % pagenum)
+
+            html = page.get('html')
+            if not html:
+                break
+
+            for video_id in orderedSet([video_id for _, video_id in re.findall(
+                    r'data-plid=(["\'])(.+?)\1', html)]):
+                entries.append(self.url_result('xtube:%s' % video_id, XTubeIE.ie_key()))
+
+            page_count = int_or_none(page.get('pageCount'))
+            if not page_count or pagenum == page_count:
+                break
+
+        playlist = self.playlist_result(entries, user_id)
+        playlist['age_limit'] = 18
+        return playlist
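
Note (not part of the patch above): the rewritten XTubeIE._VALID_URL is a verbose (?x) pattern that accepts three input forms, namely the legacy watch.php?v=<id> URLs, the new /video-watch/<slug>-<id> URLs (the slug is captured as display_id), and the internal xtube:<id> form that XTubeUserIE now emits for its playlist entries. A minimal standalone check of the pattern against the URLs from the _TESTS block, using only the Python standard library:

import re

# Copied verbatim from the patched _VALID_URL above.
VALID_URL = r'''(?x)
                    (?:
                        xtube:|
                        https?://(?:www\.)?xtube\.com/(?:watch\.php\?.*\bv=|video-watch/(?P<display_id>[^/]+)-)
                    )
                    (?P<id>[^/?&#]+)
                '''

for url in (
        'http://www.xtube.com/watch.php?v=kVTUy_G222_',             # old schema
        'http://www.xtube.com/video-watch/strange-erotica-625837',  # new schema
        'xtube:625837',                                             # internal form
):
    mobj = re.match(VALID_URL, url)
    # display_id is only captured for the new schema; it stays None otherwise.
    print(mobj.group('id'), mobj.group('display_id'))

For the old schema and the xtube: form no slug is present, so display_id comes back as None, which is why the patched _real_extract falls back to display_id = video_id.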
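
Also not part of the patch: the new XTubeUserIE._real_extract drops the old video-count/PAGE_SIZE arithmetic and instead walks the profile's paginated JSON endpoint until a page returns no 'html' or the reported 'pageCount' is reached, de-duplicating the scraped data-plid values. A rough, self-contained sketch of that paging and termination logic, with a hypothetical fetch_page callable standing in for _download_json; the page payloads, ids and pageCount values below are invented for the demo:

import itertools
import re


def collect_video_ids(fetch_page):
    # Mirrors the stop conditions of the patched loop: page numbers start at 1,
    # iteration ends when a response has no 'html' or when the current page
    # number equals the reported 'pageCount'.
    entries = []
    seen = set()  # stands in for orderedSet(): keep first occurrence only
    for pagenum in itertools.count(1):
        page = fetch_page(pagenum)  # stands in for _download_json(...)
        html = page.get('html')
        if not html:
            break
        for _, video_id in re.findall(r'data-plid=(["\'])(.+?)\1', html):
            if video_id not in seen:
                seen.add(video_id)
                entries.append('xtube:%s' % video_id)
        page_count = page.get('pageCount')
        if not page_count or pagenum == page_count:
            break
    return entries


# Fake two-page paginator; the duplicate id on page 1 is dropped by the dedup step.
fake_pages = {
    1: {'html': '<li data-plid="625837"></li><li data-plid="625837"></li>', 'pageCount': 2},
    2: {'html': '<li data-plid="711214"></li>', 'pageCount': 2},
}
print(collect_video_ids(lambda n: fake_pages.get(n, {})))
# ['xtube:625837', 'xtube:711214']

The real extractor additionally sends the 'Cookie: popunder=4', 'X-Requested-With: XMLHttpRequest' and Referer headers shown in the diff when requesting http://www.xtube.com/profile/<user_id>/videos/<pagenum>; the sketch skips HTTP entirely and only mirrors the loop structure.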