# youtube_dl/downloader/http.py
from __future__ import unicode_literals

import errno
import os
import re
import socket
import time

from .common import FileDownloader
from ..compat import compat_urllib_error
from ..utils import (
    ContentTooShortError,
    encodeFilename,
    sanitize_open,
    sanitized_Request,
)

class HttpFD(FileDownloader):
    def real_download(self, filename, info_dict):
        url = info_dict['url']
        tmpfilename = self.temp_name(filename)
        stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
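        # (Note: this is an internal marker header; the YoutubeDL HTTP handler
        #  strips it and omits Accept-encoding, so the byte counts and resume
        #  offsets below refer to the raw, uncompressed file.)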
        add_headers = info_dict.get('http_headers')
        if add_headers:
            headers.update(add_headers)
        basic_request = sanitized_Request(url, None, headers)
        request = sanitized_Request(url, None, headers)

        is_test = self.params.get('test', False)

        if is_test:
            request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))
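            # (In test mode only the first _TEST_FILE_SIZE bytes are requested,
            #  e.g. 'Range: bytes=0-10240' if _TEST_FILE_SIZE is 10241.)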
        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', True):
                self.report_resuming_byte(resume_len)
                request.add_header('Range', 'bytes=%d-' % resume_len)
                open_mode = 'ab'
            else:
                resume_len = 0
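        # (A resume request asks for everything from the current file size
        #  onward, e.g. 'Range: bytes=123456-' when 123456 bytes are already on
        #  disk; open_mode 'ab' then appends to that partial file.)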
        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                data = self.ydl.urlopen(request)
                # When trying to resume, the Content-Range HTTP header of the response has
                # to be checked to match the value of the requested Range HTTP header. This
                # is because some webservers don't support resuming and serve the whole file
                # with no Content-Range set in the response despite the requested Range (see
                # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
                if resume_len > 0:
                    content_range = data.headers.get('Content-Range')
                    if content_range:
                        content_range_m = re.search(r'bytes (\d+)-', content_range)
                        # Content-Range is present and matches requested Range, resume is possible
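                        # (e.g. a cooperating server answers '206 Partial Content' with
                        #  'Content-Range: bytes 123456-999999/1000000'; the captured
                        #  start offset must equal resume_len.)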
                        if content_range_m and resume_len == int(content_range_m.group(1)):
                            break
                    # Content-Range is either not present or invalid. Assuming the remote
                    # webserver is trying to send the whole file, resume is not possible,
                    # so wipe the local file and perform an entire redownload
                    self.report_unable_to_resume()
                    resume_len = 0
                    open_mode = 'wb'
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
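                    # (A 416 response usually means the requested range starts
                    #  at or beyond the end of the resource, e.g. when the local
                    #  copy is already complete.)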
                    try:
                        # Open the connection again without the range header
                        data = self.ydl.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation of the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs by less than 100 bytes from
                            # the one on the hard drive.
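                            # (e.g. resume_len == 10000 with a reported
                            #  Content-Length of 9990 or 10060 still counts as
                            #  fully downloaded.)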
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                                'downloaded_bytes': resume_len,
                                'total_bytes': resume_len,
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            resume_len = 0
                            open_mode = 'wb'
                            break
            except socket.error as e:
                if e.errno != errno.ECONNRESET:
                    # Connection reset is no problem, just retry
                    raise

            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.report_error('giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)

        # Range HTTP header may be ignored/unsupported by a webserver
        # (e.g. extractor/scivee.py, extractor/bambuser.py).
        # However, for a test we still would like to download just a piece of a file.
        # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
        # block size when downloading a file.
        if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
            data_len = self._TEST_FILE_SIZE

        if data_len is not None:
            data_len = int(data_len) + resume_len
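            # (When resuming, Content-Length only covers the bytes still to be
            #  fetched, so the bytes already on disk are added back to get the
            #  full expected size.)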
 
            min_data_len = self.params.get('min_filesize')
            max_data_len = self.params.get('max_filesize')
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()

        # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
        now = None  # needed for slow_down() in the first loop run
        before = start  # start measuring
        while True:

            # Download and write
            data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
            byte_counter += len(data_block)

            # exit loop when download is finished
            if len(data_block) == 0:
                break

            # Open destination file just in time
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error('unable to open for writing: %s' % str(err))
                    return False

                if self.params.get('xattr_set_filesize', False) and data_len is not None:
                    try:
                        import xattr
                        xattr.setxattr(tmpfilename, 'user.ytdl.filesize', str(data_len))
                    except (OSError, IOError, ImportError) as err:
                        self.report_error('unable to set filesize xattr: %s' % str(err))

            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr('\n')
                self.report_error('unable to write data: %s' % str(err))
                return False

            # Apply rate limit
            self.slow_down(start, now, byte_counter - resume_len)

            # end measuring of one loop run
            now = time.time()
            after = now

            # Adjust block size
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))
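            # (best_block_size() adapts the next read size to the throughput
            #  observed for this block; --no-resize-buffer disables this.)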
            before = after

            # Progress message
            speed = self.calc_speed(start, now, byte_counter - resume_len)
            if data_len is None:
                eta = None
            else:
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)

            self._hook_progress({
                'status': 'downloading',
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'eta': eta,
                'speed': speed,
                'elapsed': now - start,
            })

            if is_test and byte_counter == data_len:
                break

        if stream is None:
            self.to_stderr('\n')
            self.report_error('Did not get any data blocks')
            return False

        if tmpfilename != '-':
            stream.close()

        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
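        # (ContentTooShortError propagates to the caller, which reports the
        #  download as "content too short"; the partial .part file is left on
        #  disk so a later run can resume it.)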
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
            'elapsed': time.time() - start,
        })

        return True