# coding: utf-8
from __future__ import unicode_literals

import re
import json
import itertools

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
    compat_str,
    compat_urllib_request,
    ExtractorError,
    int_or_none,
    orderedSet,
    str_to_int,
    unescapeHTML,
)


class DailymotionBaseInfoExtractor(InfoExtractor):
    @staticmethod
    def _build_request(url):
        """Build a request with the family filter disabled"""
        request = compat_urllib_request.Request(url)
        request.add_header('Cookie', 'family_filter=off')
        request.add_header('Cookie', 'ff=off')
        return request
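
    # Subclasses pass requests built here to _download_webpage whenever the
    # family filter needs to stay off (see DailymotionIE._real_extract and
    # DailymotionPlaylistIE._extract_entries below); disabling the filter is
    # presumably what lets the age-restricted test video be fetched at all.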


class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
    """Information Extractor for Dailymotion"""

    _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
    IE_NAME = 'dailymotion'

    # (key in the embed page's info object, format_id), ordered from lowest
    # to highest quality
    _FORMATS = [
        ('stream_h264_ld_url', 'ld'),
        ('stream_h264_url', 'standard'),
        ('stream_h264_hq_url', 'hq'),
        ('stream_h264_hd_url', 'hd'),
        ('stream_h264_hd1080_url', 'hd180'),
    ]

    _TESTS = [
        {
            'url': 'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
            'md5': '392c4b85a60a90dc4792da41ce3144eb',
            'info_dict': {
                'uploader': 'Amphora Alex and Van .',
                'title': 'Tutoriel de Youtubeur"DL DES VIDEO DE YOUTUBE"',
            },
        },
        # Vevo video
        {
            'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
            'info_dict': {
                'title': 'Roar (Official)',
                'uploader': 'Katy Perry',
                'upload_date': '20130905',
            },
            'params': {
                'skip_download': True,
            },
            'skip': 'VEVO is only available in some countries',
        },
        # age-restricted video
        {
            'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
            'md5': '0d667a7b9cebecc3c89ee93099c4159d',
            'info_dict': {
                'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
                'uploader': 'HotWaves1012',
            },
        },
    ]
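
    # A quick illustration of _VALID_URL (a sketch, not part of the original
    # test suite): plain watch URLs and embed URLs expose the same id group.
    #
    #     >>> import re
    #     >>> re.match(DailymotionIE._VALID_URL,
    #     ...          'http://www.dailymotion.com/embed/video/x33vw9').group('id')
    #     'x33vw9'
    #     >>> re.match(DailymotionIE._VALID_URL,
    #     ...          'http://touch.dailymotion.com/video/x33vw9').group('id')
    #     'x33vw9'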

    def _real_extract(self, url):
        video_id = self._match_id(url)
        url = 'http://www.dailymotion.com/video/%s' % video_id

        # Retrieve video webpage to extract further information
        request = self._build_request(url)
        webpage = self._download_webpage(request, video_id)

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)

        # It may just embed a vevo video:
        m_vevo = re.search(
            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
            webpage)
        if m_vevo is not None:
            vevo_id = m_vevo.group('id')
            self.to_screen('Vevo video detected: %s' % vevo_id)
            return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
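
        # (url_result hands the embedded video off to the Vevo extractor, so
        # none of the Dailymotion-specific parsing below runs in that case.)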

        age_limit = self._rta_search(webpage)

        video_upload_date = None
        mobj = re.search(
            r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>',
            webpage)
        if mobj is not None:
            # the page shows DD-MM-YYYY; reorder into YYYYMMDD
            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)

        embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
        embed_page = self._download_webpage(embed_url, video_id,
                                            'Downloading embed page')
        info = self._search_regex(r'var info = ({.*?}),$', embed_page,
                                  'video info', flags=re.MULTILINE)
        info = json.loads(info)
        if info.get('error') is not None:
            msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
            raise ExtractorError(msg, expected=True)
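
        # The remaining metadata comes out of this embedded "info" object; the
        # keys read below ('stream_h264_*_url', 'owner.screenname',
        # 'thumbnail_url') are Dailymotion's own and may change without notice.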
        formats = []
        for (key, format_id) in self._FORMATS:
            video_url = info.get(key)
            if video_url is not None:
                m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
                if m_size is not None:
                    width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
                else:
                    width, height = None, None
                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'width': width,
                    'height': height,
                })
        if not formats:
            raise ExtractorError('Unable to extract video URL')

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, webpage)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, webpage)
            return

        view_count = str_to_int(self._search_regex(
            r'video_views_count[^>]+>\s+([\d\.,]+)',
            webpage, 'view count', fatal=False))

        title = self._og_search_title(webpage, default=None)
        if title is None:
            # fall back to the explicit title element when Open Graph data is missing
            title = self._html_search_regex(
                r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
                'title')

        return {
            'id': video_id,
            'formats': formats,
            'uploader': info['owner.screenname'],
            'upload_date': video_upload_date,
            'title': title,
            'subtitles': video_subtitles,
            'thumbnail': info['thumbnail_url'],
            'age_limit': age_limit,
            'view_count': view_count,
        }

    def _get_available_subtitles(self, video_id, webpage):
        try:
            sub_list = self._download_webpage(
                'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
            return {}
        info = json.loads(sub_list)
        if info['total'] > 0:
            sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
            return sub_lang_list
        self._downloader.report_warning('video doesn\'t have subtitles')
        return {}
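
    # For reference, the subtitles API response parsed above looks roughly like
    # the following (an illustrative shape inferred from the requested fields,
    # not an official schema):
    #
    #     {"total": 1,
    #      "list": [{"id": "...", "language": "en", "url": "https://..."}]}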


class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
    IE_NAME = 'dailymotion:playlist'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
    _MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
    _PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
    _TESTS = [{
        'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
        'playlist_mincount': 20,
    }]

    def _extract_entries(self, id):
        video_ids = []
        for pagenum in itertools.count(1):
            request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
            webpage = self._download_webpage(request,
                                             id, 'Downloading page %s' % pagenum)

            video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))

            if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                break
        return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
                for video_id in orderedSet(video_ids)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')
        webpage = self._download_webpage(url, playlist_id)

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': self._og_search_title(webpage),
            'entries': self._extract_entries(playlist_id),
        }


class DailymotionUserIE(DailymotionPlaylistIE):
    IE_NAME = 'dailymotion:user'
    _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
    _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
    _TESTS = [{
        'url': 'https://www.dailymotion.com/user/nqtv',
        'info_dict': {
            'title': 'Rémi Gaillard',
        },
        'playlist_mincount': 100,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        webpage = self._download_webpage(url, user)
        full_user = unescapeHTML(self._html_search_regex(
            r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
            webpage, 'user'))

        return {
            '_type': 'playlist',
            'id': user,
            'title': full_user,
            'entries': self._extract_entries(user),
        }