]>
Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/downloader/http.py
a29f5cf31fddf3f3b15f47311fafeef9ab4f3d7a
   1 from __future__ 
import unicode_literals
 
   9 from .common 
import FileDownloader
 
  10 from ..compat 
import ( 
  11     compat_urllib_request
, 
  21 class HttpFD(FileDownloader
): 
  22     def real_download(self
, filename
, info_dict
): 
  23         url 
= info_dict
['url'] 
  24         tmpfilename 
= self
.temp_name(filename
) 
  27         # Do not include the Accept-Encoding header 
  28         headers 
= {'Youtubedl-no-compression': 'True'} 
  29         add_headers 
= info_dict
.get('http_headers') 
  31             headers
.update(add_headers
) 
  32         basic_request 
= compat_urllib_request
.Request(url
, None, headers
) 
  33         request 
= compat_urllib_request
.Request(url
, None, headers
) 
  35         is_test 
= self
.params
.get('test', False) 
  38             request
.add_header('Range', 'bytes=0-%s' % str(self
._TEST
_FILE
_SIZE 
- 1)) 
  40         # Establish possible resume length 
  41         if os
.path
.isfile(encodeFilename(tmpfilename
)): 
  42             resume_len 
= os
.path
.getsize(encodeFilename(tmpfilename
)) 
  48             if self
.params
.get('continuedl', True): 
  49                 self
.report_resuming_byte(resume_len
) 
  50                 request
.add_header('Range', 'bytes=%d-' % resume_len
) 
  56         retries 
= self
.params
.get('retries', 0) 
  57         while count 
<= retries
: 
  58             # Establish connection 
  60                 data 
= self
.ydl
.urlopen(request
) 
  61                 # When trying to resume, Content-Range HTTP header of response has to be checked 
  62                 # to match the value of requested Range HTTP header. This is due to a webservers 
  63                 # that don't support resuming and serve a whole file with no Content-Range 
  64                 # set in response despite of requested Range (see 
  65                 # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799) 
  67                     content_range 
= data
.headers
.get('Content-Range') 
  69                         content_range_m 
= re
.search(r
'bytes (\d+)-', content_range
) 
  70                         # Content-Range is present and matches requested Range, resume is possible 
  71                         if content_range_m 
and resume_len 
== int(content_range_m
.group(1)): 
  73                     # Content-Range is either not present or invalid. Assuming remote webserver is 
  74                     # trying to send the whole file, resume is not possible, so wiping the local file 
  75                     # and performing entire redownload 
  76                     self
.report_unable_to_resume() 
  80             except (compat_urllib_error
.HTTPError
, ) as err
: 
  81                 if (err
.code 
< 500 or err
.code 
>= 600) and err
.code 
!= 416: 
  82                     # Unexpected HTTP error 
  85                     # Unable to resume (requested range not satisfiable) 
  87                         # Open the connection again without the range header 
  88                         data 
= self
.ydl
.urlopen(basic_request
) 
  89                         content_length 
= data
.info()['Content-Length'] 
  90                     except (compat_urllib_error
.HTTPError
, ) as err
: 
  91                         if err
.code 
< 500 or err
.code 
>= 600: 
  94                         # Examine the reported length 
  95                         if (content_length 
is not None and 
  96                                 (resume_len 
- 100 < int(content_length
) < resume_len 
+ 100)): 
  97                             # The file had already been fully downloaded. 
  98                             # Explanation to the above condition: in issue #175 it was revealed that 
  99                             # YouTube sometimes adds or removes a few bytes from the end of the file, 
 100                             # changing the file size slightly and causing problems for some users. So 
 101                             # I decided to implement a suggested change and consider the file 
 102                             # completely downloaded if the file size differs less than 100 bytes from 
 103                             # the one in the hard drive. 
 104                             self
.report_file_already_downloaded(filename
) 
 105                             self
.try_rename(tmpfilename
, filename
) 
 106                             self
._hook
_progress
({ 
 107                                 'filename': filename
, 
 108                                 'status': 'finished', 
 109                                 'downloaded_bytes': resume_len
, 
 110                                 'total_bytes': resume_len
, 
 114                             # The length does not match, we start the download over 
 115                             self
.report_unable_to_resume() 
 119             except socket
.error 
as e
: 
 120                 if e
.errno 
!= errno
.ECONNRESET
: 
 121                     # Connection reset is no problem, just retry 
 127                 self
.report_retry(count
, retries
) 
 130             self
.report_error('giving up after %s retries' % retries
) 
 133         data_len 
= data
.info().get('Content-length', None) 
 135         # Range HTTP header may be ignored/unsupported by a webserver 
 136         # (e.g. extractor/scivee.py, extractor/bambuser.py). 
 137         # However, for a test we still would like to download just a piece of a file. 
 138         # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control 
 139         # block size when downloading a file. 
 140         if is_test 
and (data_len 
is None or int(data_len
) > self
._TEST
_FILE
_SIZE
): 
 141             data_len 
= self
._TEST
_FILE
_SIZE
 
 143         if data_len 
is not None: 
 144             data_len 
= int(data_len
) + resume_len
 
 145             min_data_len 
= self
.params
.get("min_filesize", None) 
 146             max_data_len 
= self
.params
.get("max_filesize", None) 
 147             if min_data_len 
is not None and data_len 
< min_data_len
: 
 148                 self
.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len
, min_data_len
)) 
 150             if max_data_len 
is not None and data_len 
> max_data_len
: 
 151                 self
.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len
, max_data_len
)) 
 154         byte_counter 
= 0 + resume_len
 
 155         block_size 
= self
.params
.get('buffersize', 1024) 
 158         # measure time over whole while-loop, so slow_down() and best_block_size() work together properly 
 159         now 
= None  # needed for slow_down() in the first loop run 
 160         before 
= start  
# start measuring 
 164             data_block 
= data
.read(block_size 
if not is_test 
else min(block_size
, data_len 
- byte_counter
)) 
 165             byte_counter 
+= len(data_block
) 
 167             # exit loop when download is finished 
 168             if len(data_block
) == 0: 
 171             # Open destination file just in time 
 174                     (stream
, tmpfilename
) = sanitize_open(tmpfilename
, open_mode
) 
 175                     assert stream 
is not None 
 176                     filename 
= self
.undo_temp_name(tmpfilename
) 
 177                     self
.report_destination(filename
) 
 178                 except (OSError, IOError) as err
: 
 179                     self
.report_error('unable to open for writing: %s' % str(err
)) 
 182                 if self
.params
.get('xattr_set_filesize', False) and data_len 
is not None: 
 185                         xattr
.setxattr(tmpfilename
, 'user.ytdl.filesize', str(data_len
)) 
 186                     except(OSError, IOError, ImportError) as err
: 
 187                         self
.report_error('unable to set filesize xattr: %s' % str(err
)) 
 190                 stream
.write(data_block
) 
 191             except (IOError, OSError) as err
: 
 193                 self
.report_error('unable to write data: %s' % str(err
)) 
 197             self
.slow_down(start
, now
, byte_counter 
- resume_len
) 
 199             # end measuring of one loop run 
 204             if not self
.params
.get('noresizebuffer', False): 
 205                 block_size 
= self
.best_block_size(after 
- before
, len(data_block
)) 
 210             speed 
= self
.calc_speed(start
, now
, byte_counter 
- resume_len
) 
 214                 eta 
= self
.calc_eta(start
, time
.time(), data_len 
- resume_len
, byte_counter 
- resume_len
) 
 216             self
._hook
_progress
({ 
 217                 'status': 'downloading', 
 218                 'downloaded_bytes': byte_counter
, 
 219                 'total_bytes': data_len
, 
 220                 'tmpfilename': tmpfilename
, 
 221                 'filename': filename
, 
 224                 'elapsed': now 
- start
, 
 227             if is_test 
and byte_counter 
== data_len
: 
 232             self
.report_error('Did not get any data blocks') 
 234         if tmpfilename 
!= '-': 
 237         if data_len 
is not None and byte_counter 
!= data_len
: 
 238             raise ContentTooShortError(byte_counter
, int(data_len
)) 
 239         self
.try_rename(tmpfilename
, filename
) 
 241         # Update file modification time 
 242         if self
.params
.get('updatetime', True): 
 243             info_dict
['filetime'] = self
.try_utime(filename
, data
.info().get('last-modified', None)) 
 245         self
._hook
_progress
({ 
 246             'downloaded_bytes': byte_counter
, 
 247             'total_bytes': byte_counter
, 
 248             'filename': filename
, 
 249             'status': 'finished', 
 250             'elapsed': time
.time() - start
,