youtube_dl/extractor/playplustv.py
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
    clean_html,
    ExtractorError,
    int_or_none,
    PUTRequest,
)


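# Extractor for VOD pages on playplus.tv / playplus.com; content is geo-restricted to Brazil and a site account is required.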
class PlayPlusTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?playplus\.(?:com|tv)/VOD/(?P<project_id>[0-9]+)/(?P<id>[0-9a-f]{32})'
    _TEST = {
        'url': 'https://www.playplus.tv/VOD/7572/db8d274a5163424e967f35a30ddafb8e',
        'md5': 'd078cb89d7ab6b9df37ce23c647aef72',
        'info_dict': {
            'id': 'db8d274a5163424e967f35a30ddafb8e',
            'ext': 'mp4',
            'title': 'Capítulo 179 - Final',
            'description': 'md5:01085d62d8033a1e34121d3c3cabc838',
            'timestamp': 1529992740,
            'upload_date': '20180626',
        },
        'skip': 'Requires account credential',
    }
    _NETRC_MACHINE = 'playplustv'
    _GEO_COUNTRIES = ['BR']
    _token = None
    _profile_id = None

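    # Thin wrapper around the media API; every request carries the bearer token obtained at login.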
    def _call_api(self, resource, video_id=None, query=None):
        return self._download_json('https://api.playplus.tv/api/media/v2/get' + resource, video_id, headers={
            'Authorization': 'Bearer ' + self._token,
        }, query=query)

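    # Logging in is mandatory; obtain a bearer token and a profile id before any extraction.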
    def _real_initialize(self):
        email, password = self._get_login_info()
        if email is None:
            self.raise_login_required()

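        # Send the credentials as a JSON body to the login endpoint using the PUT method.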
        req = PUTRequest(
            'https://api.playplus.tv/api/web/login', json.dumps({
                'email': email,
                'password': password,
            }).encode(), {
                'Content-Type': 'application/json; charset=utf-8',
            })

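        # A 401 response carries a JSON body with an 'errorMessage'; report it as an expected error.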
        try:
            self._token = self._download_json(req, None)['token']
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                raise ExtractorError(self._parse_json(
                    e.cause.read(), None)['errorMessage'], expected=True)
            raise

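        # Media lookups need a profile id; use the first profile on the account.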
        self._profile_id = self._call_api('Profiles')['list'][0]['_id']

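    # Fetch the metadata for the requested VOD and assemble the info dict from it.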
    def _real_extract(self, url):
        project_id, media_id = re.match(self._VALID_URL, url).groups()
        media = self._call_api(
            'Media', media_id, {
                'profileId': self._profile_id,
                'projectId': project_id,
                'mediaId': media_id,
            })['obj']
        title = media['title']

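        # Each entry in 'files' carries a media URL plus optional dimensions under 'fileInfo'.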
        formats = []
        for f in media.get('files', []):
            f_url = f.get('url')
            if not f_url:
                continue
            file_info = f.get('fileInfo') or {}
            formats.append({
                'url': f_url,
                'width': int_or_none(file_info.get('width')),
                'height': int_or_none(file_info.get('height')),
            })
        self._sort_formats(formats)

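        # Thumbnails follow the same shape: a URL with optional width and height.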
        thumbnails = []
        for thumb in media.get('thumbs', []):
            thumb_url = thumb.get('url')
            if not thumb_url:
                continue
            thumbnails.append({
                'url': thumb_url,
                'width': int_or_none(thumb.get('width')),
                'height': int_or_none(thumb.get('height')),
            })

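        # 'publishDate' is in milliseconds, hence the scale of 1000 when deriving the timestamp.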
        return {
            'id': media_id,
            'title': title,
            'formats': formats,
            'thumbnails': thumbnails,
            'description': clean_html(media.get('description')) or media.get('shortDescription'),
            'timestamp': int_or_none(media.get('publishDate'), 1000),
            'view_count': int_or_none(media.get('numberOfViews')),
            'comment_count': int_or_none(media.get('numberOfComments')),
            'tags': media.get('tags'),
        }