]>
Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/downloader/http.py
8491cee8aa2769e8465176411a92ded85b07ad13
   1 from __future__ 
import unicode_literals
 
   6 from .common 
import FileDownloader
 
  18 class HttpFD(FileDownloader
): 
  19     def real_download(self
, filename
, info_dict
): 
  20         url 
= info_dict
['url'] 
  21         tmpfilename 
= self
.temp_name(filename
) 
  24         # Do not include the Accept-Encoding header 
  25         headers 
= {'Youtubedl-no-compression': 'True'} 
  26         if 'user_agent' in info_dict
: 
  27             headers
['Youtubedl-user-agent'] = info_dict
['user_agent'] 
  28         if 'http_referer' in info_dict
: 
  29             headers
['Referer'] = info_dict
['http_referer'] 
  30         add_headers 
= info_dict
.get('http_headers') 
  32             headers
.update(add_headers
) 
  33         data 
= info_dict
.get('http_post_data') 
  34         http_method 
= info_dict
.get('http_method') 
  35         basic_request 
= compat_urllib_request
.Request(url
, data
, headers
) 
  36         request 
= compat_urllib_request
.Request(url
, data
, headers
) 
  37         if http_method 
is not None: 
  38             basic_request
.get_method 
= lambda: http_method
 
  39             request
.get_method 
= lambda: http_method
 
  41         is_test 
= self
.params
.get('test', False) 
  44             request
.add_header('Range', 'bytes=0-%s' % str(self
._TEST
_FILE
_SIZE 
- 1)) 
  46         # Establish possible resume length 
  47         if os
.path
.isfile(encodeFilename(tmpfilename
)): 
  48             resume_len 
= os
.path
.getsize(encodeFilename(tmpfilename
)) 
  54             if self
.params
.get('continuedl', False): 
  55                 self
.report_resuming_byte(resume_len
) 
  56                 request
.add_header('Range', 'bytes=%d-' % resume_len
) 
  62         retries 
= self
.params
.get('retries', 0) 
  63         while count 
<= retries
: 
  64             # Establish connection 
  66                 data 
= self
.ydl
.urlopen(request
) 
  68             except (compat_urllib_error
.HTTPError
, ) as err
: 
  69                 if (err
.code 
< 500 or err
.code 
>= 600) and err
.code 
!= 416: 
  70                     # Unexpected HTTP error 
  73                     # Unable to resume (requested range not satisfiable) 
  75                         # Open the connection again without the range header 
  76                         data 
= self
.ydl
.urlopen(basic_request
) 
  77                         content_length 
= data
.info()['Content-Length'] 
  78                     except (compat_urllib_error
.HTTPError
, ) as err
: 
  79                         if err
.code 
< 500 or err
.code 
>= 600: 
  82                         # Examine the reported length 
  83                         if (content_length 
is not None and 
  84                                 (resume_len 
- 100 < int(content_length
) < resume_len 
+ 100)): 
  85                             # The file had already been fully downloaded. 
  86                             # Explanation to the above condition: in issue #175 it was revealed that 
  87                             # YouTube sometimes adds or removes a few bytes from the end of the file, 
  88                             # changing the file size slightly and causing problems for some users. So 
  89                             # I decided to implement a suggested change and consider the file 
  90                             # completely downloaded if the file size differs less than 100 bytes from 
  91                             # the one in the hard drive. 
  92                             self
.report_file_already_downloaded(filename
) 
  93                             self
.try_rename(tmpfilename
, filename
) 
 100                             # The length does not match, we start the download over 
 101                             self
.report_unable_to_resume() 
 108                 self
.report_retry(count
, retries
) 
 111             self
.report_error('giving up after %s retries' % retries
) 
 114         data_len 
= data
.info().get('Content-length', None) 
 116         # Range HTTP header may be ignored/unsupported by a webserver 
 117         # (e.g. extractor/scivee.py, extractor/bambuser.py). 
 118         # However, for a test we still would like to download just a piece of a file. 
 119         # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control 
 120         # block size when downloading a file. 
 121         if is_test 
and (data_len 
is None or int(data_len
) > self
._TEST
_FILE
_SIZE
): 
 122             data_len 
= self
._TEST
_FILE
_SIZE
 
 124         if data_len 
is not None: 
 125             data_len 
= int(data_len
) + resume_len
 
 126             min_data_len 
= self
.params
.get("min_filesize", None) 
 127             max_data_len 
= self
.params
.get("max_filesize", None) 
 128             if min_data_len 
is not None and data_len 
< min_data_len
: 
 129                 self
.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len
, min_data_len
)) 
 131             if max_data_len 
is not None and data_len 
> max_data_len
: 
 132                 self
.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len
, max_data_len
)) 
 135         data_len_str 
= format_bytes(data_len
) 
 136         byte_counter 
= 0 + resume_len
 
 137         block_size 
= self
.params
.get('buffersize', 1024) 
 142             data_block 
= data
.read(block_size 
if not is_test 
else min(block_size
, data_len 
- byte_counter
)) 
 144             if len(data_block
) == 0: 
 146             byte_counter 
+= len(data_block
) 
 148             # Open file just in time 
 151                     (stream
, tmpfilename
) = sanitize_open(tmpfilename
, open_mode
) 
 152                     assert stream 
is not None 
 153                     filename 
= self
.undo_temp_name(tmpfilename
) 
 154                     self
.report_destination(filename
) 
 155                 except (OSError, IOError) as err
: 
 156                     self
.report_error('unable to open for writing: %s' % str(err
)) 
 159                 stream
.write(data_block
) 
 160             except (IOError, OSError) as err
: 
 162                 self
.report_error('unable to write data: %s' % str(err
)) 
 164             if not self
.params
.get('noresizebuffer', False): 
 165                 block_size 
= self
.best_block_size(after 
- before
, len(data_block
)) 
 168             speed 
= self
.calc_speed(start
, time
.time(), byte_counter 
- resume_len
) 
 172                 percent 
= self
.calc_percent(byte_counter
, data_len
) 
 173                 eta 
= self
.calc_eta(start
, time
.time(), data_len 
- resume_len
, byte_counter 
- resume_len
) 
 174             self
.report_progress(percent
, data_len_str
, speed
, eta
) 
 176             self
._hook
_progress
({ 
 177                 'downloaded_bytes': byte_counter
, 
 178                 'total_bytes': data_len
, 
 179                 'tmpfilename': tmpfilename
, 
 180                 'filename': filename
, 
 181                 'status': 'downloading', 
 186             if is_test 
and byte_counter 
== data_len
: 
 190             self
.slow_down(start
, byte_counter 
- resume_len
) 
 194             self
.report_error('Did not get any data blocks') 
 196         if tmpfilename 
!= '-': 
 198         self
.report_finish(data_len_str
, (time
.time() - start
)) 
 199         if data_len 
is not None and byte_counter 
!= data_len
: 
 200             raise ContentTooShortError(byte_counter
, int(data_len
)) 
 201         self
.try_rename(tmpfilename
, filename
) 
 203         # Update file modification time 
 204         if self
.params
.get('updatetime', True): 
 205             info_dict
['filetime'] = self
.try_utime(filename
, data
.info().get('last-modified', None)) 
 207         self
._hook
_progress
({ 
 208             'downloaded_bytes': byte_counter
, 
 209             'total_bytes': byte_counter
, 
 210             'filename': filename
, 
 211             'status': 'finished',