]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/bliptv.py
Clarify use of mplayer.
[youtubedl] / youtube_dl / extractor / bliptv.py
1 import datetime
2 import json
3 import os
4 import re
5 import socket
6
7 from .common import InfoExtractor
8 from ..utils import (
9 compat_http_client,
10 compat_parse_qs,
11 compat_str,
12 compat_urllib_error,
13 compat_urllib_parse_urlparse,
14 compat_urllib_request,
15
16 ExtractorError,
17 unescapeHTML,
18 )
19
20
class BlipTVIE(InfoExtractor):
    """Information extractor for blip.tv videos.

    Handles three URL shapes: regular video pages, ``/play/`` embed URLs
    (resolved by following their redirect and restarting extraction), and
    ``api.swf#<id>`` fragment URLs (rewritten to the equivalent ``/play/``
    form). Metadata normally comes from the site's JSON API
    (``skin=json``); if the URL turns out to point directly at a media
    file, a minimal info dict is built from the URL instead.
    """

    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
    # Captures the filename extension at the end of a media URL.
    _URL_EXT = r'^.*\.([a-z0-9]+)$'
    IE_NAME = u'blip.tv'

    def report_direct_download(self, title):
        """Report that the URL points directly at a media file."""
        self.to_screen(u'%s: Direct download detected' % title)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # See https://github.com/rg3/youtube-dl/issues/857
        # api.swf#<id> URLs are equivalent to /play/g_<id> embed URLs.
        api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
        if api_mobj is not None:
            url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')
        urlp = compat_urllib_parse_urlparse(url)
        if urlp.path.startswith('/play/'):
            # /play/ URLs redirect to a page whose URL fragment carries the
            # real file id; resolve it and restart extraction on the
            # canonical /a/a-<id> URL.
            request = compat_urllib_request.Request(url)
            response = compat_urllib_request.urlopen(request)
            redirecturl = response.geturl()
            rurlp = compat_urllib_parse_urlparse(redirecturl)
            file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
            url = 'http://blip.tv/a/a-' + file_id
            return self._real_extract(url)

        if '?' in url:
            cchar = '&'
        else:
            cchar = '?'
        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
        request = compat_urllib_request.Request(json_url)
        # blip.tv serves the JSON metadata to the iTunes client.
        request.add_header('User-Agent', 'iTunes/10.6.1')
        self.report_extraction(mobj.group(1))
        info = None
        try:
            urlh = compat_urllib_request.urlopen(request)
            if urlh.headers.get('Content-Type', '').startswith('video/'):  # Direct download
                basename = url.split('/')[-1]
                title, ext = os.path.splitext(basename)
                if isinstance(title, bytes):
                    # Python 2 URL components are byte strings; Python 3
                    # str objects have no decode method, so only decode
                    # when we actually hold bytes.
                    title = title.decode('UTF-8')
                ext = ext.replace('.', '')
                self.report_direct_download(title)
                info = {
                    'id': title,
                    'url': url,
                    'uploader': None,
                    'upload_date': None,
                    'title': title,
                    'ext': ext,
                    'urlhandle': urlh
                }
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
        if info is None:  # Regular URL: urlh holds the JSON API response
            try:
                json_code_bytes = urlh.read()
                json_code = json_code_bytes.decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))

            try:
                json_data = json.loads(json_code)
                if 'Post' in json_data:
                    data = json_data['Post']
                else:
                    data = json_data

                # NOTE: %p is ignored when combined with %H, but only the
                # date portion is kept, so the hour field does not matter;
                # %H is also more permissive than %I for 24-hour stamps.
                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
                video_url = data['media']['url']
                umobj = re.match(self._URL_EXT, video_url)
                if umobj is None:
                    raise ValueError('Can not determine filename extension')
                ext = umobj.group(1)

                info = {
                    'id': data['item_id'],
                    'url': video_url,
                    'uploader': data['display_name'],
                    'upload_date': upload_date,
                    'title': data['title'],
                    'ext': ext,
                    'format': data['media']['mimeType'],
                    'thumbnail': data['thumbnailUrl'],
                    'description': data['description'],
                    'player_url': data['embedUrl'],
                    'user_agent': 'iTunes/10.6.1',
                }
            except (ValueError, KeyError) as err:
                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))

        return [info]
119
class BlipTVUserIE(InfoExtractor):
    """Information Extractor for blip.tv user (channel) pages.

    Resolves the user page to a numeric user id, then pages through the
    mobile AJAX episode-list endpoint collecting video ids, and returns a
    single playlist result of blip.tv video URLs.
    """

    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
    # Maximum number of results the AJAX endpoint returns per page.
    _PAGE_SIZE = 12
    IE_NAME = u'blip.tv:user'

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'

        page = self._download_webpage(url, username, u'Downloading user page')
        mobj = re.search(r'data-users-id="([^"]+)"', page)
        if mobj is None:
            # Fail with a descriptive ExtractorError instead of an
            # AttributeError when the page layout lacks the attribute.
            raise ExtractorError(u'Unable to extract user id for %s' % username)
        page_base = page_base % mobj.group(1)

        # Download video ids using blip.tv AJAX calls. Result size per
        # query is limited (currently to _PAGE_SIZE videos), so query
        # page by page until a short page signals we have all of them.
        video_ids = []
        pagenum = 1

        while True:
            url = page_base + "&page=" + str(pagenum)
            page = self._download_webpage(url, username,
                                          u'Downloading video ids from page %d' % pagenum)

            # Extract video identifiers, preserving first-seen order and
            # deduplicating on the unescaped value actually stored.
            ids_in_page = []
            for mobj in re.finditer(r'href="/([^"]+)"', page):
                video_id = unescapeHTML(mobj.group(1))
                if video_id not in ids_in_page:
                    ids_in_page.append(video_id)

            video_ids.extend(ids_in_page)

            # A little optimization - a page with fewer than _PAGE_SIZE
            # ids must be the last one; no need to query again.
            if len(ids_in_page) < self._PAGE_SIZE:
                break

            pagenum += 1

        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
        url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
        return [self.playlist_result(url_entries, playlist_title=username)]