from __future__ import unicode_literals

import errno
import os
import random
import re
import socket
import time

from .common import FileDownloader
from ..compat import (
    compat_str,
    compat_urllib_error,
)
from ..utils import (
    ContentTooShortError,
    encodeFilename,
    int_or_none,
    sanitize_open,
    sanitized_Request,
    write_xattr,
    XAttrMetadataError,
    XAttrUnavailableError,
)


class HttpFD(FileDownloader):
    def real_download(self, filename, info_dict):
        url = info_dict['url']

        class DownloadContext(dict):
            __getattr__ = dict.get
            __setattr__ = dict.__setitem__
            __delattr__ = dict.__delitem__
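        # Attribute access proxies straight to the dict: ctx.resume_len is
        # shorthand for ctx['resume_len'], and a key that was never set reads
        # back as None (dict.get semantics) instead of raising AttributeError.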

        ctx = DownloadContext()
        ctx.filename = filename
        ctx.tmpfilename = self.temp_name(filename)
        ctx.stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        add_headers = info_dict.get('http_headers')
        if add_headers:
            headers.update(add_headers)

        is_test = self.params.get('test', False)
        chunk_size = self._TEST_FILE_SIZE if is_test else (
            info_dict.get('downloader_options', {}).get('http_chunk_size') or
            self.params.get('http_chunk_size') or 0)
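        # downloader_options from the extractor take precedence over the
        # user-level http_chunk_size parameter; a result of 0 disables
        # chunked downloading.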

        ctx.open_mode = 'wb'
        ctx.resume_len = 0
        ctx.data_len = None
        ctx.block_size = self.params.get('buffersize', 1024)
        ctx.start_time = time.time()

        if self.params.get('continuedl', True):
            # Establish possible resume length
            if os.path.isfile(encodeFilename(ctx.tmpfilename)):
                ctx.resume_len = os.path.getsize(
                    encodeFilename(ctx.tmpfilename))

        ctx.is_resume = ctx.resume_len > 0

        count = 0
        retries = self.params.get('retries', 0)

        class SucceedDownload(Exception):
            pass

        class RetryDownload(Exception):
            def __init__(self, source_error):
                self.source_error = source_error

        class NextFragment(Exception):
            pass

        def set_range(req, start, end):
            range_header = 'bytes=%d-' % start
            if end:
                range_header += compat_str(end)
            req.add_header('Range', range_header)
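        # e.g. set_range(req, 500, 999) adds 'Range: bytes=500-999'; with
        # end=None only the open-ended 'Range: bytes=500-' is sent.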

        def establish_connection():
            ctx.chunk_size = (random.randint(int(chunk_size * 0.95), chunk_size)
                              if not is_test and chunk_size else chunk_size)
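            # Each chunk's size is jittered by up to 5%: with an
            # http_chunk_size of 10485760 the request asks for between
            # 9961472 and 10485760 bytes, so successive Range requests do
            # not all have identical lengths.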
            if ctx.resume_len > 0:
                range_start = ctx.resume_len
                if ctx.is_resume:
                    self.report_resuming_byte(ctx.resume_len)
                ctx.open_mode = 'ab'
            elif ctx.chunk_size > 0:
                range_start = 0
            else:
                range_start = None
            ctx.is_resume = False
            range_end = range_start + ctx.chunk_size - 1 if ctx.chunk_size else None
            if range_end and ctx.data_len is not None and range_end >= ctx.data_len:
                range_end = ctx.data_len - 1
            has_range = range_start is not None
            ctx.has_range = has_range
            request = sanitized_Request(url, None, headers)
            if has_range:
                set_range(request, range_start, range_end)
            # Establish connection
            try:
                ctx.data = self.ydl.urlopen(request)
                # When trying to resume, Content-Range HTTP header of response has to be checked
                # to match the value of requested Range HTTP header. This is due to webservers
                # that don't support resuming and serve a whole file with no Content-Range
                # set in response despite the requested Range (see
                # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
                if has_range:
                    content_range = ctx.data.headers.get('Content-Range')
                    if content_range:
                        content_range_m = re.search(r'bytes (\d+)-(\d+)?(?:/(\d+))?', content_range)
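                        # e.g. 'bytes 500-999/1234' parses to start 500,
                        # end 999 and a total length of 1234; end and total
                        # are optional in the pattern.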
                        # Content-Range is present and matches requested Range, resume is possible
                        if content_range_m:
                            if range_start == int(content_range_m.group(1)):
                                content_range_end = int_or_none(content_range_m.group(2))
                                content_len = int_or_none(content_range_m.group(3))
                                accept_content_len = (
                                    # Non-chunked download
                                    not ctx.chunk_size or
                                    # Chunked download and requested piece or
                                    # its part is promised to be served
                                    content_range_end == range_end or
                                    content_len < range_end)
                                if accept_content_len:
                                    ctx.data_len = content_len
                                    return
                    # Content-Range is either not present or invalid. Assuming the remote
                    # webserver is trying to send the whole file, resume is not possible,
                    # so wipe the local file and perform an entire redownload.
                    self.report_unable_to_resume()
                    ctx.resume_len = 0
                    ctx.open_mode = 'wb'
                ctx.data_len = int_or_none(ctx.data.info().get('Content-length', None))
                return
            except (compat_urllib_error.HTTPError, ) as err:
                if err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        ctx.data = self.ydl.urlopen(
                            sanitized_Request(url, None, headers))
                        content_length = ctx.data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation of the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs by less than 100 bytes from
                            # the one on the hard drive.
                            self.report_file_already_downloaded(ctx.filename)
                            self.try_rename(ctx.tmpfilename, ctx.filename)
                            self._hook_progress({
                                'filename': ctx.filename,
                                'status': 'finished',
                                'downloaded_bytes': ctx.resume_len,
                                'total_bytes': ctx.resume_len,
                            })
                            raise SucceedDownload()
                        else:
                            # The length does not match, so start the download over
                            self.report_unable_to_resume()
                            ctx.resume_len = 0
                            ctx.open_mode = 'wb'
                            return
                elif err.code < 500 or err.code >= 600:
                    # Unexpected HTTP error
                    raise
                raise RetryDownload(err)
            except socket.error as err:
                if err.errno != errno.ECONNRESET:
                    # Connection reset is no problem, just retry
                    raise
                raise RetryDownload(err)

        def download():
            data_len = ctx.data.info().get('Content-length', None)

            # Range HTTP header may be ignored/unsupported by a webserver
            # (e.g. extractor/scivee.py, extractor/bambuser.py).
            # However, for a test we still would like to download just a piece of a file.
            # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
            # block size when downloading a file.
            if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
                data_len = self._TEST_FILE_SIZE

            if data_len is not None:
                data_len = int(data_len) + ctx.resume_len
                min_data_len = self.params.get('min_filesize')
                max_data_len = self.params.get('max_filesize')
                if min_data_len is not None and data_len < min_data_len:
                    self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                    return False
                if max_data_len is not None and data_len > max_data_len:
                    self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                    return False

            byte_counter = 0 + ctx.resume_len
            block_size = ctx.block_size
            start = time.time()

            # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
            now = None  # needed for slow_down() in the first loop run
            before = start  # start measuring

            def retry(e):
                to_stdout = ctx.tmpfilename == '-'
                if ctx.stream is not None:
                    if not to_stdout:
                        ctx.stream.close()
                    ctx.stream = None
                ctx.resume_len = byte_counter if to_stdout else os.path.getsize(encodeFilename(ctx.tmpfilename))
                raise RetryDownload(e)
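            # retry() tears down the current stream and re-raises the error
            # as RetryDownload; resume_len is refreshed from what actually
            # reached disk (or from byte_counter when streaming to stdout),
            # so the next attempt resumes at the correct offset.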

            while True:
                try:
                    data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
                # socket.timeout is a subclass of socket.error but may not have
                # errno set
                except socket.timeout as e:
                    retry(e)
                except socket.error as e:
                    if e.errno not in (errno.ECONNRESET, errno.ETIMEDOUT):
                        raise
                    retry(e)

                byte_counter += len(data_block)

                # exit loop when download is finished
                if len(data_block) == 0:
                    break

                # Open destination file just in time
                if ctx.stream is None:
                    try:
                        ctx.stream, ctx.tmpfilename = sanitize_open(
                            ctx.tmpfilename, ctx.open_mode)
                        assert ctx.stream is not None
                        ctx.filename = self.undo_temp_name(ctx.tmpfilename)
                        self.report_destination(ctx.filename)
                    except (OSError, IOError) as err:
                        self.report_error('unable to open for writing: %s' % str(err))
                        return False

                    if self.params.get('xattr_set_filesize', False) and data_len is not None:
                        try:
                            write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8'))
                        except (XAttrUnavailableError, XAttrMetadataError) as err:
                            self.report_error('unable to set filesize xattr: %s' % str(err))

                try:
                    ctx.stream.write(data_block)
                except (IOError, OSError) as err:
                    self.to_stderr('\n')
                    self.report_error('unable to write data: %s' % str(err))
                    return False

                self.slow_down(start, now, byte_counter - ctx.resume_len)
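
                # Timing bookkeeping feeds best_block_size(), which steers the
                # next read size toward the measured transfer rate, at most
                # halving or doubling it per iteration (skipped when
                # --no-resize-buffer is given).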
                # end measuring of one loop run
                now = time.time()
                after = now

                if not self.params.get('noresizebuffer', False):
                    block_size = self.best_block_size(after - before, len(data_block))

                before = after

                speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
                if ctx.data_len is None:
                    eta = None
                else:
                    eta = self.calc_eta(start, time.time(), ctx.data_len - ctx.resume_len, byte_counter - ctx.resume_len)

                self._hook_progress({
                    'status': 'downloading',
                    'downloaded_bytes': byte_counter,
                    'total_bytes': ctx.data_len,
                    'tmpfilename': ctx.tmpfilename,
                    'filename': ctx.filename,
                    'eta': eta,
                    'speed': speed,
                    'elapsed': now - ctx.start_time,
                })

                if is_test and byte_counter == data_len:
                    break
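
            # In chunked mode, finishing a chunk short of the full file raises
            # NextFragment, which the outer loop turns into another
            # establish_connection() call; resume_len is advanced so the next
            # Range request starts where this chunk ended.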
            if not is_test and ctx.chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:
                ctx.resume_len = byte_counter
                # ctx.block_size = block_size
                raise NextFragment()

            if ctx.stream is None:
                self.to_stderr('\n')
                self.report_error('Did not get any data blocks')
                return False
            if ctx.tmpfilename != '-':
                ctx.stream.close()

            if data_len is not None and byte_counter != data_len:
                err = ContentTooShortError(byte_counter, int(data_len))
                if count <= retries:
                    retry(err)
                raise err

            self.try_rename(ctx.tmpfilename, ctx.filename)

            # Update file modification time
            if self.params.get('updatetime', True):
                info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.info().get('last-modified', None))

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': byte_counter,
                'filename': ctx.filename,
                'status': 'finished',
                'elapsed': time.time() - ctx.start_time,
            })

            return True

        while count <= retries:
            try:
                establish_connection()
                return download()
            except RetryDownload as e:
                count += 1
                if count <= retries:
                    self.report_retry(e.source_error, count, retries)
                continue
            except NextFragment:
                continue
            except SucceedDownload:
                return True

        self.report_error('giving up after %s retries' % retries)
        return False