X-Git-Url: https://git.rapsys.eu/youtubedl/blobdiff_plain/6d835d4d6903cf5e18ad01844736929e06d16004..03342304420e5daeb428ffdcc7bbd2bbfecfa61a:/youtube_dl/extractor/aparat.py?ds=sidebyside

diff --git a/youtube_dl/extractor/aparat.py b/youtube_dl/extractor/aparat.py
index 7486088..6342978 100644
--- a/youtube_dl/extractor/aparat.py
+++ b/youtube_dl/extractor/aparat.py
@@ -1,5 +1,4 @@
-#coding: utf-8
-
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -21,13 +20,13 @@ class AparatIE(InfoExtractor):
             'id': 'wP8On',
             'ext': 'mp4',
             'title': 'تیم گلکسی 11 - زومیت',
+            'age_limit': 0,
         },
         # 'skip': 'Extremely unreliable',
     }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = self._match_id(url)
 
         # Note: There is an easier-to-parse configuration at
         # http://www.aparat.com/video/video/config/videohash/%video_id
@@ -36,19 +35,20 @@ class AparatIE(InfoExtractor):
                      video_id + '/vt/frame')
         webpage = self._download_webpage(embed_url, video_id)
 
-        video_urls = re.findall(r'fileList\[[0-9]+\]\s*=\s*"([^"]+)"', webpage)
+        video_urls = [video_url.replace('\\/', '/') for video_url in re.findall(
+            r'(?:fileList\[[0-9]+\]\s*=|"file"\s*:)\s*"([^"]+)"', webpage)]
         for i, video_url in enumerate(video_urls):
             req = HEADRequest(video_url)
             res = self._request_webpage(
-                req, video_id, note=u'Testing video URL %d' % i, errnote=False)
+                req, video_id, note='Testing video URL %d' % i, errnote=False)
             if res:
                 break
         else:
-            raise ExtractorError(u'No working video URLs found')
+            raise ExtractorError('No working video URLs found')
 
-        title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, u'title')
+        title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, 'title')
         thumbnail = self._search_regex(
-            r'\s+image:\s*"([^"]+)"', webpage, u'thumbnail', fatal=False)
+            r'image:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,
@@ -56,4 +56,5 @@
             'url': video_url,
             'ext': 'mp4',
             'thumbnail': thumbnail,
+            'age_limit': self._family_friendly_search(webpage),
         }
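
The main functional change in this diff is the broadened video URL extraction: the regex now accepts both the legacy fileList[N] = "..." assignments and "file": "..." JSON-style entries, and the backslash-escaped slashes are unescaped afterwards. Below is a minimal standalone sketch of just that step; the webpage string is a made-up embed-page fragment used only to exercise the regex, not real Aparat markup.

# Standalone sketch of the updated URL extraction step (not part of the diff).
# The webpage string below is a hypothetical embed-page fragment for illustration.
import re

webpage = '''
    fileList[0] = "http:\\/\\/example.com\\/low.mp4";
    var options = {"file": "http:\\/\\/example.com\\/high.mp4"};
'''

# Match both fileList[N] = "..." assignments and "file": "..." entries,
# then turn the escaped \/ sequences back into plain slashes.
video_urls = [video_url.replace('\\/', '/') for video_url in re.findall(
    r'(?:fileList\[[0-9]+\]\s*=|"file"\s*:)\s*"([^"]+)"', webpage)]

print(video_urls)
# ['http://example.com/low.mp4', 'http://example.com/high.mp4']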