]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/hearthisat.py
debian/control: Add recommends on aria2 | wget | curl to use external downloaders.
[youtubedl] / youtube_dl / extractor / hearthisat.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..compat import (
8 compat_urllib_request,
9 compat_urlparse,
10 )
11 from ..utils import (
12 HEADRequest,
13 str_to_int,
14 urlencode_postdata,
15 urlhandle_detect_ext,
16 )
17
18
class HearThisAtIE(InfoExtractor):
    """Extractor for single audio tracks hosted on hearthis.at.

    Extraction is two-step: the track's HTML page yields the numeric
    track ID plus page-level metadata (counters, og: tags), and the
    site's playlist.php endpoint returns a JSON record with the
    canonical artist/title and category for that ID.
    """
    _VALID_URL = r'https?://(?:www\.)?hearthis\.at/(?P<artist>[^/]+)/(?P<title>[A-Za-z0-9\-]+)/?$'
    _PLAYLIST_URL = 'https://hearthis.at/playlist.php'
    _TEST = {
        'url': 'https://hearthis.at/moofi/dr-kreep',
        'md5': 'ab6ec33c8fed6556029337c7885eb4e0',
        'info_dict': {
            'id': '150939',
            'ext': 'wav',
            'title': 'Moofi - Dr. Kreep',
            # Raw string: '\.' is an invalid escape sequence in a plain
            # string literal (SyntaxWarning on modern CPython).
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1421564134,
            'description': 'Creepy Patch. Mutable Instruments Braids Vowel + Formant Mode.',
            'upload_date': '20150118',
            'comment_count': int,
            'view_count': int,
            'like_count': int,
            'duration': 71,
            'categories': ['Experimental'],
        }
    }

    def _real_extract(self, url):
        # _VALID_URL has matched already (the core calls suitable()
        # first), so m is never None here.
        m = re.match(self._VALID_URL, url)
        display_id = '{artist:s} - {title:s}'.format(**m.groupdict())

        webpage = self._download_webpage(url, display_id)
        track_id = self._search_regex(
            r'intTrackId\s*=\s*(\d+)', webpage, 'track ID')

        # playlist.php answers a form-encoded POST of track IDs with a
        # JSON array, one object per requested track; we send just one.
        payload = urlencode_postdata({'tracks[]': track_id})
        req = compat_urllib_request.Request(self._PLAYLIST_URL, payload)
        req.add_header('Content-type', 'application/x-www-form-urlencoded')

        track = self._download_json(req, track_id, 'Downloading playlist')[0]
        title = '{artist:s} - {title:s}'.format(**track)

        # 'category' may be absent or empty; only then leave categories
        # unset rather than emitting [''].
        categories = None
        if track.get('category'):
            categories = [track['category']]

        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        # Counters are rendered as
        # <span class="..."><i>icon</i>VALUE</span>; the class name is
        # substituted per counter below.
        meta_span = r'<span[^>]+class="%s".*?</i>([^<]+)</span>'
        view_count = str_to_int(self._search_regex(
            meta_span % 'plays_count', webpage, 'view count', fatal=False))
        like_count = str_to_int(self._search_regex(
            meta_span % 'likes_count', webpage, 'like count', fatal=False))
        comment_count = str_to_int(self._search_regex(
            meta_span % 'comment_count', webpage, 'comment count', fatal=False))
        duration = str_to_int(self._search_regex(
            r'data-length="(\d+)', webpage, 'duration', fatal=False))
        timestamp = str_to_int(self._search_regex(
            r'<span[^>]+class="calctime"[^>]+data-time="(\d+)', webpage, 'timestamp', fatal=False))

        formats = []
        # Streaming MP3 exposed on the in-page player link.
        mp3_url = self._search_regex(
            r'(?s)<a class="player-link"\s+(?:[a-zA-Z0-9_:-]+="[^"]+"\s+)*?data-mp3="([^"]+)"',
            webpage, 'mp3 URL', fatal=False)
        if mp3_url:
            formats.append({
                'format_id': 'mp3',
                'vcodec': 'none',
                'acodec': 'mp3',
                'url': mp3_url,
            })
        # Optional original-quality download link; its container format
        # is unknown up front, so probe it with a HEAD request.
        download_path = self._search_regex(
            r'<a class="[^"]*download_fct[^"]*"\s+href="([^"]+)"',
            webpage, 'download URL', default=None)
        if download_path:
            download_url = compat_urlparse.urljoin(url, download_path)
            ext_req = HEADRequest(download_url)
            ext_handle = self._request_webpage(
                ext_req, display_id, note='Determining extension')
            ext = urlhandle_detect_ext(ext_handle)
            formats.append({
                'format_id': 'download',
                'vcodec': 'none',
                'ext': ext,
                'url': download_url,
                'preference': 2,  # Usually better quality
            })
        self._sort_formats(formats)

        return {
            'id': track_id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'description': description,
            'duration': duration,
            'timestamp': timestamp,
            'view_count': view_count,
            'comment_count': comment_count,
            'like_count': like_count,
            'categories': categories,
        }
117 }