# youtube_dl/downloader/http.py
import os
import time

from .common import FileDownloader
from ..utils import (
    compat_urllib_error,
    compat_urllib_request,
    ContentTooShortError,
    encodeFilename,
    format_bytes,
    sanitize_open,
)


class HttpFD(FileDownloader):
    def real_download(self, filename, info_dict):
        url = info_dict['url']
        tmpfilename = self.temp_name(filename)
        stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        if 'user_agent' in info_dict:
            headers['Youtubedl-user-agent'] = info_dict['user_agent']
        if 'http_referer' in info_dict:
            headers['Referer'] = info_dict['http_referer']
        add_headers = info_dict.get('http_headers')
        if add_headers:
            headers.update(add_headers)
        data = info_dict.get('http_post_data')
        http_method = info_dict.get('http_method')
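        # Two requests are prepared: 'request' may later receive a Range header
        # (for resuming or test downloads), while 'basic_request' stays
        # range-free so the connection can be reopened from scratch if the
        # server rejects the requested range.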
        basic_request = compat_urllib_request.Request(url, data, headers)
        request = compat_urllib_request.Request(url, data, headers)
        if http_method is not None:
            basic_request.get_method = lambda: http_method
            request.get_method = lambda: http_method
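
        # In test mode only the first _TEST_FILE_SIZE bytes are requested,
        # so the whole file never has to be downloaded.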
        is_test = self.params.get('test', False)

        if is_test:
            request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', False):
                self.report_resuming_byte(resume_len)
                request.add_header('Range', 'bytes=%d-' % resume_len)
                open_mode = 'ab'
            else:
                # A partial file exists but resuming was not requested
                resume_len = 0
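
        # The connection is attempted up to 'retries' + 1 times; HTTP 416
        # (Requested Range Not Satisfiable) is handled separately below as a
        # failed resume rather than a retryable error.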
        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                data = self.ydl.urlopen(request)
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = self.ydl.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation of the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one on the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            open_mode = 'wb'
                            resume_len = 0
                            break
            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.report_error(u'giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)

        # Range HTTP header may be ignored/unsupported by a webserver
        # (e.g. extractor/scivee.py, extractor/bambuser.py).
        # However, for a test we still would like to download just a piece of a file.
        # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
        # block size when downloading a file.
        if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
            data_len = self._TEST_FILE_SIZE

        if data_len is not None:
            data_len = int(data_len) + resume_len
            min_data_len = self.params.get("min_filesize", None)
            max_data_len = self.params.get("max_filesize", None)
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False
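
        # Main download loop: read the stream in blocks, timing each read so
        # the buffer size can be adapted to the observed throughput.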
        data_len_str = format_bytes(data_len)
        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()
        while True:
            before = time.time()
            data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
            after = time.time()
            if len(data_block) == 0:
                break
            byte_counter += len(data_block)

            # Open file just in time
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error(u'unable to open for writing: %s' % str(err))
                    return False
            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr(u"\n")
                self.report_error(u'unable to write data: %s' % str(err))
                return False
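
            # Adjust the block size unless buffer resizing has been disabled
            # via the 'noresizebuffer' parameter.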
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            # Progress message
            speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
            if data_len is None:
                eta = percent = None
            else:
                percent = self.calc_percent(byte_counter, data_len)
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
            self.report_progress(percent, data_len_str, speed, eta)

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'status': 'downloading',
                'eta': eta,
                'speed': speed,
            })

            if is_test and byte_counter == data_len:
                break

            # Apply rate limit
            self.slow_down(start, byte_counter - resume_len)

        if stream is None:
            self.to_stderr(u"\n")
            self.report_error(u'Did not get any data blocks')
            return False
        if tmpfilename != u'-':
            stream.close()
        self.report_finish(data_len_str, (time.time() - start))
        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
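
        # Report the final totals to any attached progress hooks.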
        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
        })

        return True