# coding: utf-8
from __future__ import unicode_literals

import re
import json
import itertools

from .common import InfoExtractor

from ..compat import (
    compat_str,
    compat_urllib_request,
)
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    parse_iso8601,
    str_to_int,
    unescapeHTML,
)


class DailymotionBaseInfoExtractor(InfoExtractor):
    @staticmethod
    def _build_request(url):
        """Build a request with the family filter disabled"""
        request = compat_urllib_request.Request(url)
        request.add_header('Cookie', 'family_filter=off; ff=off')
        return request

    def _download_webpage_handle_no_ff(self, url, *args, **kwargs):
        request = self._build_request(url)
        return self._download_webpage_handle(request, *args, **kwargs)

    def _download_webpage_no_ff(self, url, *args, **kwargs):
        request = self._build_request(url)
        return self._download_webpage(request, *args, **kwargs)


class DailymotionIE(DailymotionBaseInfoExtractor):
    _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
    IE_NAME = 'dailymotion'

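    # Stream URL keys exposed by the legacy embed player's info object, mapped
    # to format ids; used by the old-player fallback in _real_extract below.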
    _FORMATS = [
        ('stream_h264_ld_url', 'ld'),
        ('stream_h264_url', 'standard'),
        ('stream_h264_hq_url', 'hq'),
        ('stream_h264_hd_url', 'hd'),
        ('stream_h264_hd1080_url', 'hd180'),
    ]

    _TESTS = [
        {
            'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames',
            'md5': '2137c41a8e78554bb09225b8eb322406',
            'info_dict': {
                'id': 'x2iuewm',
                'ext': 'mp4',
                'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
                'description': 'Several come bundled with the Steam Controller.',
                'thumbnail': 're:^https?:.*\.(?:jpg|png)$',
                'timestamp': 1425657362,
                'upload_date': '20150306',
                'uploader_id': 'xijv66',
            },
        },
        # Vevo video
        {
            'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
            'info_dict': {
                'title': 'Roar (Official)',
                'uploader': 'Katy Perry',
                'upload_date': '20130905',
            },
            'params': {
                'skip_download': True,
            },
            'skip': 'VEVO is only available in some countries',
        },
        # age-restricted video
        {
            'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
            'md5': '0d667a7b9cebecc3c89ee93099c4159d',
            'info_dict': {
                'id': 'xyh2zz',
                'ext': 'mp4',
                'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
                'uploader': 'HotWaves1012',
            },
        },
        # geo-restricted, player v5
        {
            'url': 'http://www.dailymotion.com/video/xhza0o',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage_no_ff(
            'https://www.dailymotion.com/video/%s' % video_id, video_id)

        age_limit = self._rta_search(webpage)

        description = self._og_search_description(webpage) or self._html_search_meta(
            'description', webpage, 'description')

        view_count = str_to_int(self._search_regex(
            [r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:(\d+)"',
             r'video_views_count[^>]+>\s+([\d\.,]+)'],
            webpage, 'view count', fatal=False))
        comment_count = int_or_none(self._search_regex(
            r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
            webpage, 'comment count', fatal=False))

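        # Newer pages embed the full player configuration as JSON ("player v5");
        # when present, formats and metadata are extracted from it directly.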
        player_v5 = self._search_regex(
            [r'buildPlayer\(({.+?})\);', r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);'],
            webpage, 'player v5', default=None)
        if player_v5:
            player = self._parse_json(player_v5, video_id)
            metadata = player['metadata']

            self._check_error(metadata)

            formats = []
            for quality, media_list in metadata['qualities'].items():
                for media in media_list:
                    media_url = media.get('url')
                    if not media_url:
                        continue
                    type_ = media.get('type')
                    if type_ == 'application/vnd.lumberjack.manifest':
                        continue
                    ext = determine_ext(media_url)
                    if type_ == 'application/x-mpegURL' or ext == 'm3u8':
                        m3u8_formats = self._extract_m3u8_formats(
                            media_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
                        if m3u8_formats:
                            formats.extend(m3u8_formats)
                    elif type_ == 'application/f4m' or ext == 'f4m':
                        f4m_formats = self._extract_f4m_formats(
                            media_url, video_id, preference=-1, f4m_id='hds', fatal=False)
                        if f4m_formats:
                            formats.extend(f4m_formats)
                    else:
                        f = {
                            'url': media_url,
                            'format_id': quality,
                        }
                        m = re.search(r'H264-(?P<width>\d+)x(?P<height>\d+)', media_url)
                        if m:
                            f.update({
                                'width': int(m.group('width')),
                                'height': int(m.group('height')),
                            })
                        formats.append(f)
            self._sort_formats(formats)

            title = metadata['title']
            duration = int_or_none(metadata.get('duration'))
            timestamp = int_or_none(metadata.get('created_time'))
            thumbnail = metadata.get('poster_url')
            uploader = metadata.get('owner', {}).get('screenname')
            uploader_id = metadata.get('owner', {}).get('id')

            subtitles = {}
            for subtitle_lang, subtitle in metadata.get('subtitles', {}).get('data', {}).items():
                subtitles[subtitle_lang] = [{
                    'ext': determine_ext(subtitle_url),
                    'url': subtitle_url,
                } for subtitle_url in subtitle.get('urls', [])]

            return {
                'id': video_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'timestamp': timestamp,
                'uploader': uploader,
                'uploader_id': uploader_id,
                'age_limit': age_limit,
                'view_count': view_count,
                'comment_count': comment_count,
                'formats': formats,
                'subtitles': subtitles,
            }

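        # No player v5 config was found: look for a Vevo embed before falling
        # back to the legacy embed player.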
        vevo_id = self._search_regex(
            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
            webpage, 'vevo embed', default=None)
        if vevo_id:
            return self.url_result('vevo:%s' % vevo_id, 'Vevo')

        # fallback old player
        embed_page = self._download_webpage_no_ff(
            'https://www.dailymotion.com/embed/video/%s' % video_id,
            video_id, 'Downloading embed page')

        timestamp = parse_iso8601(self._html_search_meta(
            'video:release_date', webpage, 'upload date'))

        info = self._parse_json(
            self._search_regex(
                r'var info = ({.*?}),$', embed_page,
                'video info', flags=re.MULTILINE),
            video_id)

        self._check_error(info)

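        # The legacy embed exposes one direct stream URL per quality; map each
        # known _FORMATS key onto a format entry.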
        formats = []
        for (key, format_id) in self._FORMATS:
            video_url = info.get(key)
            if video_url is not None:
                m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
                if m_size is not None:
                    width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
                else:
                    width, height = None, None
                formats.append({
                    'url': video_url,
                    'ext': 'mp4',
                    'format_id': format_id,
                    'width': width,
                    'height': height,
                })
        self._sort_formats(formats)

        video_subtitles = self.extract_subtitles(video_id, webpage)

        title = self._og_search_title(webpage, default=None)
        if title is None:
            title = self._html_search_regex(
                r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
                'title')

        return {
            'id': video_id,
            'formats': formats,
            'uploader': info['owner.screenname'],
            'timestamp': timestamp,
            'title': title,
            'description': description,
            'subtitles': video_subtitles,
            'thumbnail': info['thumbnail_url'],
            'age_limit': age_limit,
            'view_count': view_count,
            'duration': info['duration']
        }

    def _check_error(self, info):
        if info.get('error') is not None:
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, info['error']['title']), expected=True)

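    # Subtitles are fetched from the Dailymotion API (api.dailymotion.com)
    # rather than scraped from the watch page.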
    def _get_subtitles(self, video_id, webpage):
        try:
            sub_list = self._download_webpage(
                'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
            return {}
        info = json.loads(sub_list)
        if (info['total'] > 0):
            sub_lang_list = dict((l['language'], [{'url': l['url'], 'ext': 'srt'}]) for l in info['list'])
            return sub_lang_list
        self._downloader.report_warning('video doesn\'t have subtitles')
        return {}


class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
    IE_NAME = 'dailymotion:playlist'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
    _MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
    _PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
    _TESTS = [{
        'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
        'info_dict': {
            'id': 'xv4bw_nqtv_sport',
        },
        'playlist_mincount': 20,
    }]

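    # Listing pages are fetched one by one; pagination stops when the
    # "next page" arrow is gone or when a page URL repeats, which is treated
    # as the end of the listing.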
    def _extract_entries(self, id):
        video_ids = set()
        processed_urls = set()
        for pagenum in itertools.count(1):
            page_url = self._PAGE_TEMPLATE % (id, pagenum)
            webpage, urlh = self._download_webpage_handle_no_ff(
                page_url, id, 'Downloading page %s' % pagenum)
            if urlh.geturl() in processed_urls:
                self.report_warning('Stopped at duplicated page %s, which is the same as %s' % (
                    page_url, urlh.geturl()), id)
                break

            processed_urls.add(urlh.geturl())

            for video_id in re.findall(r'data-xid="(.+?)"', webpage):
                if video_id not in video_ids:
                    yield self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
                    video_ids.add(video_id)

            if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                break

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')
        webpage = self._download_webpage(url, playlist_id)

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': self._og_search_title(webpage),
            'entries': self._extract_entries(playlist_id),
        }


class DailymotionUserIE(DailymotionPlaylistIE):
    IE_NAME = 'dailymotion:user'
    _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
    _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
    _TESTS = [{
        'url': 'https://www.dailymotion.com/user/nqtv',
        'info_dict': {
            'id': 'nqtv',
            'title': 'Rémi Gaillard',
        },
        'playlist_mincount': 100,
    }, {
        'url': 'http://www.dailymotion.com/user/UnderProject',
        'info_dict': {
            'id': 'UnderProject',
            'title': 'UnderProject',
        },
        'playlist_mincount': 1800,
        'expected_warnings': [
            'Stopped at duplicated page',
        ],
        'skip': 'Takes too long time',
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        webpage = self._download_webpage(
            'https://www.dailymotion.com/user/%s' % user, user)
        full_user = unescapeHTML(self._html_search_regex(
            r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
            webpage, 'user'))

        return {
            '_type': 'playlist',
            'id': user,
            'title': full_user,
            'entries': self._extract_entries(user),
        }


class DailymotionCloudIE(DailymotionBaseInfoExtractor):
    _VALID_URL_PREFIX = r'http://api\.dmcloud\.net/(?:player/)?embed/'
    _VALID_URL = r'%s[^/]+/(?P<id>[^/?]+)' % _VALID_URL_PREFIX
    _VALID_EMBED_URL = r'%s[^/]+/[^\'"]+' % _VALID_URL_PREFIX

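    # Dailymotion Cloud players are typically embedded in third-party pages
    # (see the francetvinfo.fr test URLs below); _extract_dmcloud_url finds
    # such embeds in an arbitrary webpage.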
    _TESTS = [{
        # From http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html
        # Tested at FranceTvInfo_2
        'url': 'http://api.dmcloud.net/embed/4e7343f894a6f677b10006b4/556e03339473995ee145930c?auth=1464865870-0-jyhsm84b-ead4c701fb750cf9367bf4447167a3db&autoplay=1',
        'only_matching': True,
    }, {
        # http://www.francetvinfo.fr/societe/larguez-les-amarres-le-cobaturage-se-developpe_980101.html
        'url': 'http://api.dmcloud.net/player/embed/4e7343f894a6f677b10006b4/559545469473996d31429f06?auth=1467430263-0-90tglw2l-a3a4b64ed41efe48d7fccad85b8b8fda&autoplay=1',
        'only_matching': True,
    }]

    @classmethod
    def _extract_dmcloud_url(self, webpage):
        mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % self._VALID_EMBED_URL, webpage)
        if mobj:
            return mobj.group(1)

        mobj = re.search(
            r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % self._VALID_EMBED_URL,
            webpage)
        if mobj:
            return mobj.group(1)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage_no_ff(url, video_id)

        title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title')

        video_info = self._parse_json(self._search_regex(
            r'var\s+info\s*=\s*([^;]+);', webpage, 'video info'), video_id)

        # TODO: parse ios_url, which is in fact a manifest
        video_url = video_info['mp4_url']

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': video_info.get('thumbnail_url'),
        }