]>
Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/downloader/http.py
   1 from __future__ 
import unicode_literals
 
   9 from .common 
import FileDownloader
 
  10 from ..compat 
import compat_urllib_error
 
  18     XAttrUnavailableError
, 
  22 class HttpFD(FileDownloader
): 
  23     def real_download(self
, filename
, info_dict
): 
  24         url 
= info_dict
['url'] 
  25         tmpfilename 
= self
.temp_name(filename
) 
  28         # Do not include the Accept-Encoding header 
  29         headers 
= {'Youtubedl-no-compression': 'True'} 
  30         add_headers 
= info_dict
.get('http_headers') 
  32             headers
.update(add_headers
) 
  33         basic_request 
= sanitized_Request(url
, None, headers
) 
  34         request 
= sanitized_Request(url
, None, headers
) 
  36         is_test 
= self
.params
.get('test', False) 
  39             request
.add_header('Range', 'bytes=0-%s' % str(self
._TEST
_FILE
_SIZE 
- 1)) 
  41         # Establish possible resume length 
  42         if os
.path
.isfile(encodeFilename(tmpfilename
)): 
  43             resume_len 
= os
.path
.getsize(encodeFilename(tmpfilename
)) 
  49             if self
.params
.get('continuedl', True): 
  50                 self
.report_resuming_byte(resume_len
) 
  51                 request
.add_header('Range', 'bytes=%d-' % resume_len
) 
  57         retries 
= self
.params
.get('retries', 0) 
  58         while count 
<= retries
: 
  59             # Establish connection 
  61                 data 
= self
.ydl
.urlopen(request
) 
  62                 # When trying to resume, Content-Range HTTP header of response has to be checked 
  63                 # to match the value of requested Range HTTP header. This is due to a webservers 
  64                 # that don't support resuming and serve a whole file with no Content-Range 
  65                 # set in response despite of requested Range (see 
  66                 # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799) 
  68                     content_range 
= data
.headers
.get('Content-Range') 
  70                         content_range_m 
= re
.search(r
'bytes (\d+)-', content_range
) 
  71                         # Content-Range is present and matches requested Range, resume is possible 
  72                         if content_range_m 
and resume_len 
== int(content_range_m
.group(1)): 
  74                     # Content-Range is either not present or invalid. Assuming remote webserver is 
  75                     # trying to send the whole file, resume is not possible, so wiping the local file 
  76                     # and performing entire redownload 
  77                     self
.report_unable_to_resume() 
  81             except (compat_urllib_error
.HTTPError
, ) as err
: 
  82                 if (err
.code 
< 500 or err
.code 
>= 600) and err
.code 
!= 416: 
  83                     # Unexpected HTTP error 
  86                     # Unable to resume (requested range not satisfiable) 
  88                         # Open the connection again without the range header 
  89                         data 
= self
.ydl
.urlopen(basic_request
) 
  90                         content_length 
= data
.info()['Content-Length'] 
  91                     except (compat_urllib_error
.HTTPError
, ) as err
: 
  92                         if err
.code 
< 500 or err
.code 
>= 600: 
  95                         # Examine the reported length 
  96                         if (content_length 
is not None and 
  97                                 (resume_len 
- 100 < int(content_length
) < resume_len 
+ 100)): 
  98                             # The file had already been fully downloaded. 
  99                             # Explanation to the above condition: in issue #175 it was revealed that 
 100                             # YouTube sometimes adds or removes a few bytes from the end of the file, 
 101                             # changing the file size slightly and causing problems for some users. So 
 102                             # I decided to implement a suggested change and consider the file 
 103                             # completely downloaded if the file size differs less than 100 bytes from 
 104                             # the one in the hard drive. 
 105                             self
.report_file_already_downloaded(filename
) 
 106                             self
.try_rename(tmpfilename
, filename
) 
 107                             self
._hook
_progress
({ 
 108                                 'filename': filename
, 
 109                                 'status': 'finished', 
 110                                 'downloaded_bytes': resume_len
, 
 111                                 'total_bytes': resume_len
, 
 115                             # The length does not match, we start the download over 
 116                             self
.report_unable_to_resume() 
 120             except socket
.error 
as e
: 
 121                 if e
.errno 
!= errno
.ECONNRESET
: 
 122                     # Connection reset is no problem, just retry 
 128                 self
.report_retry(count
, retries
) 
 131             self
.report_error('giving up after %s retries' % retries
) 
 134         data_len 
= data
.info().get('Content-length', None) 
 136         # Range HTTP header may be ignored/unsupported by a webserver 
 137         # (e.g. extractor/scivee.py, extractor/bambuser.py). 
 138         # However, for a test we still would like to download just a piece of a file. 
 139         # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control 
 140         # block size when downloading a file. 
 141         if is_test 
and (data_len 
is None or int(data_len
) > self
._TEST
_FILE
_SIZE
): 
 142             data_len 
= self
._TEST
_FILE
_SIZE
 
 144         if data_len 
is not None: 
 145             data_len 
= int(data_len
) + resume_len
 
 146             min_data_len 
= self
.params
.get('min_filesize') 
 147             max_data_len 
= self
.params
.get('max_filesize') 
 148             if min_data_len 
is not None and data_len 
< min_data_len
: 
 149                 self
.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len
, min_data_len
)) 
 151             if max_data_len 
is not None and data_len 
> max_data_len
: 
 152                 self
.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len
, max_data_len
)) 
 155         byte_counter 
= 0 + resume_len
 
 156         block_size 
= self
.params
.get('buffersize', 1024) 
 159         # measure time over whole while-loop, so slow_down() and best_block_size() work together properly 
 160         now 
= None  # needed for slow_down() in the first loop run 
 161         before 
= start  
# start measuring 
 165             data_block 
= data
.read(block_size 
if not is_test 
else min(block_size
, data_len 
- byte_counter
)) 
 166             byte_counter 
+= len(data_block
) 
 168             # exit loop when download is finished 
 169             if len(data_block
) == 0: 
 172             # Open destination file just in time 
 175                     (stream
, tmpfilename
) = sanitize_open(tmpfilename
, open_mode
) 
 176                     assert stream 
is not None 
 177                     filename 
= self
.undo_temp_name(tmpfilename
) 
 178                     self
.report_destination(filename
) 
 179                 except (OSError, IOError) as err
: 
 180                     self
.report_error('unable to open for writing: %s' % str(err
)) 
 183                 if self
.params
.get('xattr_set_filesize', False) and data_len 
is not None: 
 185                         write_xattr(tmpfilename
, 'user.ytdl.filesize', str(data_len
).encode('utf-8')) 
 186                     except (XAttrUnavailableError
, XAttrMetadataError
) as err
: 
 187                         self
.report_error('unable to set filesize xattr: %s' % str(err
)) 
 190                 stream
.write(data_block
) 
 191             except (IOError, OSError) as err
: 
 193                 self
.report_error('unable to write data: %s' % str(err
)) 
 197             self
.slow_down(start
, now
, byte_counter 
- resume_len
) 
 199             # end measuring of one loop run 
 204             if not self
.params
.get('noresizebuffer', False): 
 205                 block_size 
= self
.best_block_size(after 
- before
, len(data_block
)) 
 210             speed 
= self
.calc_speed(start
, now
, byte_counter 
- resume_len
) 
 214                 eta 
= self
.calc_eta(start
, time
.time(), data_len 
- resume_len
, byte_counter 
- resume_len
) 
 216             self
._hook
_progress
({ 
 217                 'status': 'downloading', 
 218                 'downloaded_bytes': byte_counter
, 
 219                 'total_bytes': data_len
, 
 220                 'tmpfilename': tmpfilename
, 
 221                 'filename': filename
, 
 224                 'elapsed': now 
- start
, 
 227             if is_test 
and byte_counter 
== data_len
: 
 232             self
.report_error('Did not get any data blocks') 
 234         if tmpfilename 
!= '-': 
 237         if data_len 
is not None and byte_counter 
!= data_len
: 
 238             raise ContentTooShortError(byte_counter
, int(data_len
)) 
 239         self
.try_rename(tmpfilename
, filename
) 
 241         # Update file modification time 
 242         if self
.params
.get('updatetime', True): 
 243             info_dict
['filetime'] = self
.try_utime(filename
, data
.info().get('last-modified', None)) 
 245         self
._hook
_progress
({ 
 246             'downloaded_bytes': byte_counter
, 
 247             'total_bytes': byte_counter
, 
 248             'filename': filename
, 
 249             'status': 'finished', 
 250             'elapsed': time
.time() - start
,