- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('videoid')
- url = 'http://www.' + mobj.group('url')
-
- req = compat_urllib_request.Request(url)
- req.add_header('Cookie', 'age_verified=1')
- webpage = self._download_webpage(req, video_id)
-
- video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
- video_uploader = self._html_search_regex(r'<b>From: </b>(?:\s|<[^>]*>)*(.+?)<', webpage, 'uploader', fatal=False)
- thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
- if thumbnail:
- thumbnail = compat_urllib_parse.unquote(thumbnail)
-
- video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
- if webpage.find('"encrypted":true') != -1:
- password = self._html_search_regex(r'"video_title":"([^"]+)', webpage, 'password').replace('+', ' ')
- video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
-
- formats = []
- for video_url in video_urls:
- path = compat_urllib_parse_urlparse(video_url).path
- extension = os.path.splitext(path)[1][1:]
- format = path.split('/')[5].split('_')[:2]
- format = "-".join(format)
-
- m = re.match(r'^(?P<height>[0-9]+)P-(?P<tbr>[0-9]+)K$', format)
- if m is None:
- height = None
- tbr = None
- else:
- height = int(m.group('height'))
- tbr = int(m.group('tbr'))
-
- formats.append({
- 'url': video_url,
- 'ext': extension,
- 'format': format,
- 'format_id': format,
- 'tbr': tbr,
- 'height': height,
- })
- self._sort_formats(formats)
+ video_id = self._match_id(url)
+
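+ # 'age_verified' bypasses the age gate; the 'platform' cookie selects
+ # which page variant (desktop 'pc' or 'tv') PornHub serves.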
+ def dl_webpage(platform):
+ return self._download_webpage(
+ 'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id,
+ video_id, headers={
+ 'Cookie': 'age_verified=1; platform=%s' % platform,
+ })
+
+ webpage = dl_webpage('pc')
+
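+ # Surface the site's own message for removed or unavailable videos.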
+ error_msg = self._html_search_regex(
+ r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
+ webpage, 'error message', default=None, group='error')
+ if error_msg:
+ error_msg = re.sub(r'\s+', ' ', error_msg)
+ raise ExtractorError(
+ 'PornHub said: %s' % error_msg,
+ expected=True, video_id=video_id)
+
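+ # The 'tv' page variant exposes the video URL through a short inline
+ # script that builds a 'mediastring' variable out of JS string pieces.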
+ tv_webpage = dl_webpage('tv')
+
+ assignments = self._search_regex(
+ r'(var.+?mediastring.+?)</script>', tv_webpage,
+ 'encoded url').split(';')
+
+ js_vars = {}
+
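+ # Evaluate a simple JavaScript string expression: strip /* */ comments,
+ # resolve '+' concatenation recursively, substitute previously assigned
+ # variables from js_vars and unquote string literals.
+ # Hypothetical example: 'a12 + /*junk*/ "mp4"' -> js_vars['a12'] + 'mp4'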
+ def parse_js_value(inp):
+ inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
+ if '+' in inp:
+ inps = inp.split('+')
+ return functools.reduce(
+ operator.concat, map(parse_js_value, inps))
+ inp = inp.strip()
+ if inp in js_vars:
+ return js_vars[inp]
+ return remove_quotes(inp)
+
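+ # Evaluate each 'var name = value' assignment in order; after the loop
+ # js_vars['mediastring'] holds the reassembled video URL.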
+ for assn in assignments:
+ assn = assn.strip()
+ if not assn:
+ continue
+ assn = re.sub(r'var\s+', '', assn)
+ vname, value = assn.split('=', 1)
+ js_vars[vname] = parse_js_value(value)
+
+ video_url = js_vars['mediastring']
+
+ title = self._search_regex(
+ r'<h1>([^>]+)</h1>', tv_webpage, 'title', default=None)
+
+ # video_title from flashvars contains whitespace instead of non-ASCII (see
+ # http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
+ # on that anymore.
+ title = title or self._html_search_meta(
+ 'twitter:title', webpage, default=None) or self._search_regex(
+ (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
+ r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
+ r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
+ webpage, 'title', group='title')
+
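+ # flashvars still provides the thumbnail and duration, even though the
+ # title is no longer taken from it (see comment above).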
+ flashvars = self._parse_json(
+ self._search_regex(
+ r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
+ video_id)
+ if flashvars:
+ thumbnail = flashvars.get('image_url')
+ duration = int_or_none(flashvars.get('video_duration'))
+ else:
+ thumbnail, duration = [None] * 2
+
+ video_uploader = self._html_search_regex(
+ r'(?s)From: .+?<(?:a\b[^>]+\bhref=["\']/(?:user|channel)s/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
+ webpage, 'uploader', fatal=False)
+
+ view_count = self._extract_count(
+ r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
+ like_count = self._extract_count(
+ r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
+ dislike_count = self._extract_count(
+ r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
+ comment_count = self._extract_count(
+ r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
+
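+ # page_params.zoneDetails[...] is a JS object literal; convert it with
+ # js_to_json to read the comma-separated 'tags' and 'categories' lists.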
+ page_params = self._parse_json(self._search_regex(
+ r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})',
+ webpage, 'page parameters', group='data', default='{}'),
+ video_id, transform_source=js_to_json, fatal=False)
+ tags = categories = None
+ if page_params:
+ tags = page_params.get('tags', '').split(',')
+ categories = page_params.get('categories', '').split(',')