Raphaël G. Git Repositories - youtubedl/blobdiff - youtube_dl/extractor/bandcamp.py
debian/{control,copyright,watch}: Change upstream location.
[youtubedl] / youtube_dl / extractor / bandcamp.py
index 489d0ba53f672363213c7f788e83b692eb11894d..f14b407dc82cf8f945f581ab059bd6218866eb57 100644 (file)
@@ -1,6 +1,5 @@
 from __future__ import unicode_literals
 
-import json
 import random
 import re
 import time
@@ -14,14 +13,20 @@ from ..utils import (
     ExtractorError,
     float_or_none,
     int_or_none,
+    KNOWN_EXTENSIONS,
     parse_filesize,
+    str_or_none,
+    try_get,
     unescapeHTML,
     update_url_query,
+    unified_strdate,
+    unified_timestamp,
+    url_or_none,
 )
 
 
 class BandcampIE(InfoExtractor):
-    _VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
+    _VALID_URL = r'https?://[^/]+\.bandcamp\.com/track/(?P<title>[^/?#&]+)'
     _TESTS = [{
         'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
         'md5': 'c557841d5e50261777a6585648adf439',
@@ -33,13 +38,44 @@ class BandcampIE(InfoExtractor):
         },
         '_skip': 'There is a limit of 200 free downloads / month for the test song'
     }, {
+        # free download
         'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
-        'md5': '0369ace6b939f0927e62c67a1a8d9fa7',
+        'md5': '853e35bf34aa1d6fe2615ae612564b36',
         'info_dict': {
             'id': '2650410135',
             'ext': 'aiff',
             'title': 'Ben Prunty - Lanius (Battle)',
+            'thumbnail': r're:^https?://.*\.jpg$',
             'uploader': 'Ben Prunty',
+            'timestamp': 1396508491,
+            'upload_date': '20140403',
+            'release_date': '20140403',
+            'duration': 260.877,
+            'track': 'Lanius (Battle)',
+            'track_number': 1,
+            'track_id': '2650410135',
+            'artist': 'Ben Prunty',
+            'album': 'FTL: Advanced Edition Soundtrack',
+        },
+    }, {
+        # no free download, mp3 128
+        'url': 'https://relapsealumni.bandcamp.com/track/hail-to-fire',
+        'md5': 'fec12ff55e804bb7f7ebeb77a800c8b7',
+        'info_dict': {
+            'id': '2584466013',
+            'ext': 'mp3',
+            'title': 'Mastodon - Hail to Fire',
+            'thumbnail': r're:^https?://.*\.jpg$',
+            'uploader': 'Mastodon',
+            'timestamp': 1322005399,
+            'upload_date': '20111122',
+            'release_date': '20040207',
+            'duration': 120.79,
+            'track': 'Hail to Fire',
+            'track_number': 5,
+            'track_id': '2584466013',
+            'artist': 'Mastodon',
+            'album': 'Call of the Mastodon',
         },
     }]
 
@@ -48,19 +84,23 @@ class BandcampIE(InfoExtractor):
         title = mobj.group('title')
         webpage = self._download_webpage(url, title)
         thumbnail = self._html_search_meta('og:image', webpage, default=None)
-        m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
-        if not m_download:
-            m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
-            if m_trackinfo:
-                json_code = m_trackinfo.group(1)
-                data = json.loads(json_code)[0]
-                track_id = compat_str(data['id'])
-
-                if not data.get('file'):
-                    raise ExtractorError('Not streamable', video_id=track_id, expected=True)
-
-                formats = []
-                for format_id, format_url in data['file'].items():
+
+        track_id = None
+        track = None
+        track_number = None
+        duration = None
+
+        formats = []
+        track_info = self._parse_json(
+            self._search_regex(
+                r'trackinfo\s*:\s*\[\s*({.+?})\s*\]\s*,\s*?\n',
+                webpage, 'track info', default='{}'), title)
+        if track_info:
+            file_ = track_info.get('file')
+            if isinstance(file_, dict):
+                for format_id, format_url in file_.items():
+                    if not url_or_none(format_url):
+                        continue
                     ext, abr_str = format_id.split('-', 1)
                     formats.append({
                         'format_id': format_id,
@@ -70,92 +110,117 @@ class BandcampIE(InfoExtractor):
                         'acodec': ext,
                         'abr': int_or_none(abr_str),
                     })
+            track = track_info.get('title')
+            track_id = str_or_none(track_info.get('track_id') or track_info.get('id'))
+            track_number = int_or_none(track_info.get('track_num'))
+            duration = float_or_none(track_info.get('duration'))
 
-                self._sort_formats(formats)
+        def extract(key):
+            return self._search_regex(
+                r'\b%s\s*["\']?\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1' % key,
+                webpage, key, default=None, group='value')
 
-                return {
-                    'id': track_id,
-                    'title': data['title'],
-                    'thumbnail': thumbnail,
-                    'formats': formats,
-                    'duration': float_or_none(data.get('duration')),
-                }
-            else:
-                raise ExtractorError('No free songs found')
+        artist = extract('artist')
+        album = extract('album_title')
+        timestamp = unified_timestamp(
+            extract('publish_date') or extract('album_publish_date'))
+        release_date = unified_strdate(extract('album_release_date'))
 
-        download_link = m_download.group(1)
-        video_id = self._search_regex(
-            r'(?ms)var TralbumData = .*?[{,]\s*id: (?P<id>\d+),?$',
-            webpage, 'video id')
+        download_link = self._search_regex(
+            r'freeDownloadPage\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
+            'download link', default=None, group='url')
+        if download_link:
+            track_id = self._search_regex(
+                r'(?ms)var TralbumData = .*?[{,]\s*id: (?P<id>\d+),?$',
+                webpage, 'track id')
 
-        download_webpage = self._download_webpage(
-            download_link, video_id, 'Downloading free downloads page')
-
-        blob = self._parse_json(
-            self._search_regex(
-                r'data-blob=(["\'])(?P<blob>{.+?})\1', download_webpage,
-                'blob', group='blob'),
-            video_id, transform_source=unescapeHTML)
+            download_webpage = self._download_webpage(
+                download_link, track_id, 'Downloading free downloads page')
 
-        info = blob['digital_items'][0]
+            blob = self._parse_json(
+                self._search_regex(
+                    r'data-blob=(["\'])(?P<blob>{.+?})\1', download_webpage,
+                    'blob', group='blob'),
+                track_id, transform_source=unescapeHTML)
 
-        downloads = info['downloads']
-        track = info['title']
+            info = try_get(
+                blob, (lambda x: x['digital_items'][0],
+                       lambda x: x['download_items'][0]), dict)
+            if info:
+                downloads = info.get('downloads')
+                if isinstance(downloads, dict):
+                    if not track:
+                        track = info.get('title')
+                    if not artist:
+                        artist = info.get('artist')
+                    if not thumbnail:
+                        thumbnail = info.get('thumb_url')
 
-        artist = info.get('artist')
-        title = '%s - %s' % (artist, track) if artist else track
+                    download_formats = {}
+                    download_formats_list = blob.get('download_formats')
+                    if isinstance(download_formats_list, list):
+                        for f in blob['download_formats']:
+                            name, ext = f.get('name'), f.get('file_extension')
+                            if all(isinstance(x, compat_str) for x in (name, ext)):
+                                download_formats[name] = ext.strip('.')
 
-        download_formats = {}
-        for f in blob['download_formats']:
-            name, ext = f.get('name'), f.get('file_extension')
-            if all(isinstance(x, compat_str) for x in (name, ext)):
-                download_formats[name] = ext.strip('.')
+                    for format_id, f in downloads.items():
+                        format_url = f.get('url')
+                        if not format_url:
+                            continue
+                        # Stat URL generation algorithm is reverse engineered from
+                        # download_*_bundle_*.js
+                        stat_url = update_url_query(
+                            format_url.replace('/download/', '/statdownload/'), {
+                                '.rand': int(time.time() * 1000 * random.random()),
+                            })
+                        format_id = f.get('encoding_name') or format_id
+                        stat = self._download_json(
+                            stat_url, track_id, 'Downloading %s JSON' % format_id,
+                            transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1],
+                            fatal=False)
+                        if not stat:
+                            continue
+                        retry_url = url_or_none(stat.get('retry_url'))
+                        if not retry_url:
+                            continue
+                        formats.append({
+                            'url': self._proto_relative_url(retry_url, 'http:'),
+                            'ext': download_formats.get(format_id),
+                            'format_id': format_id,
+                            'format_note': f.get('description'),
+                            'filesize': parse_filesize(f.get('size_mb')),
+                            'vcodec': 'none',
+                        })
 
-        formats = []
-        for format_id, f in downloads.items():
-            format_url = f.get('url')
-            if not format_url:
-                continue
-            # Stat URL generation algorithm is reverse engineered from
-            # download_*_bundle_*.js
-            stat_url = update_url_query(
-                format_url.replace('/download/', '/statdownload/'), {
-                    '.rand': int(time.time() * 1000 * random.random()),
-                })
-            format_id = f.get('encoding_name') or format_id
-            stat = self._download_json(
-                stat_url, video_id, 'Downloading %s JSON' % format_id,
-                transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1],
-                fatal=False)
-            if not stat:
-                continue
-            retry_url = stat.get('retry_url')
-            if not isinstance(retry_url, compat_str):
-                continue
-            formats.append({
-                'url': self._proto_relative_url(retry_url, 'http:'),
-                'ext': download_formats.get(format_id),
-                'format_id': format_id,
-                'format_note': f.get('description'),
-                'filesize': parse_filesize(f.get('size_mb')),
-                'vcodec': 'none',
-            })
         self._sort_formats(formats)
 
+        title = '%s - %s' % (artist, track) if artist else track
+
+        if not duration:
+            duration = float_or_none(self._html_search_meta(
+                'duration', webpage, default=None))
+
         return {
-            'id': video_id,
+            'id': track_id,
             'title': title,
-            'thumbnail': info.get('thumb_url') or thumbnail,
-            'uploader': info.get('artist'),
-            'artist': artist,
+            'thumbnail': thumbnail,
+            'uploader': artist,
+            'timestamp': timestamp,
+            'release_date': release_date,
+            'duration': duration,
             'track': track,
+            'track_number': track_number,
+            'track_id': track_id,
+            'artist': artist,
+            'album': album,
             'formats': formats,
         }
 
 
 class BandcampAlbumIE(InfoExtractor):
     IE_NAME = 'Bandcamp:album'
-    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<album_id>[^?#]+)|/?(?:$|[?#]))'
+    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<album_id>[^/?#&]+))?'
 
     _TESTS = [{
         'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
@@ -222,6 +287,12 @@ class BandcampAlbumIE(InfoExtractor):
         'playlist_count': 2,
     }]
 
+    @classmethod
+    def suitable(cls, url):
+        return (False
+                if BandcampWeeklyIE.suitable(url) or BandcampIE.suitable(url)
+                else super(BandcampAlbumIE, cls).suitable(url))
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         uploader_id = mobj.group('subdomain')
@@ -234,7 +305,12 @@ class BandcampAlbumIE(InfoExtractor):
             raise ExtractorError('The page doesn\'t contain any tracks')
         # Only tracks with duration info have songs
         entries = [
-            self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key())
+            self.url_result(
+                compat_urlparse.urljoin(url, t_path),
+                ie=BandcampIE.ie_key(),
+                video_title=self._search_regex(
+                    r'<span\b[^>]+\bitemprop=["\']name["\'][^>]*>([^<]+)',
+                    elem_content, 'track title', fatal=False))
             for elem_content, t_path in track_elements
             if self._html_search_meta('duration', elem_content, default=None)]
 
@@ -250,3 +326,92 @@ class BandcampAlbumIE(InfoExtractor):
             'title': title,
             'entries': entries,
         }
+
+
+class BandcampWeeklyIE(InfoExtractor):
+    IE_NAME = 'Bandcamp:weekly'
+    _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://bandcamp.com/?show=224',
+        'md5': 'b00df799c733cf7e0c567ed187dea0fd',
+        'info_dict': {
+            'id': '224',
+            'ext': 'opus',
+            'title': 'BC Weekly April 4th 2017 - Magic Moments',
+            'description': 'md5:5d48150916e8e02d030623a48512c874',
+            'duration': 5829.77,
+            'release_date': '20170404',
+            'series': 'Bandcamp Weekly',
+            'episode': 'Magic Moments',
+            'episode_number': 208,
+            'episode_id': '224',
+        }
+    }, {
+        'url': 'https://bandcamp.com/?blah/blah@&show=228',
+        'only_matching': True
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        blob = self._parse_json(
+            self._search_regex(
+                r'data-blob=(["\'])(?P<blob>{.+?})\1', webpage,
+                'blob', group='blob'),
+            video_id, transform_source=unescapeHTML)
+
+        show = blob['bcw_show']
+
+        # This is desired because any invalid show id redirects to `bandcamp.com`
+        # which happens to expose the latest Bandcamp Weekly episode.
+        show_id = int_or_none(show.get('show_id')) or int_or_none(video_id)
+
+        formats = []
+        for format_id, format_url in show['audio_stream'].items():
+            if not url_or_none(format_url):
+                continue
+            for known_ext in KNOWN_EXTENSIONS:
+                if known_ext in format_id:
+                    ext = known_ext
+                    break
+            else:
+                ext = None
+            formats.append({
+                'format_id': format_id,
+                'url': format_url,
+                'ext': ext,
+                'vcodec': 'none',
+            })
+        self._sort_formats(formats)
+
+        title = show.get('audio_title') or 'Bandcamp Weekly'
+        subtitle = show.get('subtitle')
+        if subtitle:
+            title += ' - %s' % subtitle
+
+        episode_number = None
+        seq = blob.get('bcw_seq')
+
+        if seq and isinstance(seq, list):
+            try:
+                episode_number = next(
+                    int_or_none(e.get('episode_number'))
+                    for e in seq
+                    if isinstance(e, dict) and int_or_none(e.get('id')) == show_id)
+            except StopIteration:
+                pass
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': show.get('desc') or show.get('short_desc'),
+            'duration': float_or_none(show.get('audio_duration')),
+            'is_live': False,
+            'release_date': unified_strdate(show.get('published_date')),
+            'series': 'Bandcamp Weekly',
+            'episode': show.get('subtitle'),
+            'episode_number': episode_number,
+            'episode_id': compat_str(video_id),
+            'formats': formats
+        }