@@ -9,6 +9,7 @@
 import time
 import copy
 import json
+import threading
 import xml.dom.minidom
 import xml.etree.ElementTree
 from requests import Request, Session, ConnectionError, Timeout
@@ -2908,7 +2909,7 @@ def list_buckets(self, **kwargs):
         return data
 
     # Advanced interface
-    def _upload_part(self, bucket, key, local_path, offset, size, part_num, uploadid, md5_lst, resumable_flag, already_exist_parts, enable_md5, traffic_limit):
+    def _upload_part(self, bucket, key, local_path, offset, size, part_num, uploadid, md5_lst, resumable_flag, already_exist_parts, enable_md5, traffic_limit, progress_callback=None):
         """Read a part from the local file, upload that single part, and record the result in md5_lst
 
         :param bucket(string): Bucket name.
@@ -2933,6 +2934,8 @@ def _upload_part(self, bucket, key, local_path, offset, size, part_num, uploadid
                 data = fp.read(size)
             rt = self.upload_part(bucket, key, data, part_num, uploadid, enable_md5, TrafficLimit=traffic_limit)
             md5_lst.append({'PartNumber': part_num, 'ETag': rt['ETag']})
+            if progress_callback:
+                progress_callback.report(size)
         return None
 
     def _get_resumable_uploadid(self, bucket, key):
@@ -3045,7 +3048,7 @@ def download_file(self, Bucket, Key, DestFilePath, PartSize=20, MAXThread=5, Ena
         downloader = ResumableDownLoader(self, Bucket, Key, DestFilePath, object_info, PartSize, MAXThread, EnableCRC, **Kwargs)
         downloader.start()
 
-    def upload_file(self, Bucket, Key, LocalFilePath, PartSize=1, MAXThread=5, EnableMD5=False, **kwargs):
+    def upload_file(self, Bucket, Key, LocalFilePath, PartSize=1, MAXThread=5, EnableMD5=False, progress_callback=None, **kwargs):
         """Files of 20MB or less use simple upload; files larger than 20MB use multipart upload
 
         :param Bucket(string): Bucket name.
@@ -3115,12 +3118,14 @@ def upload_file(self, Bucket, Key, LocalFilePath, PartSize=1, MAXThread=5, Enabl
             offset = 0  # Track the file offset
             lst = list()  # Record part info
             pool = SimpleThreadPool(MAXThread)
-
+            callback = None
+            if progress_callback:
+                callback = ProgressCallback(file_size, progress_callback)
             for i in range(1, parts_num + 1):
                 if i == parts_num:  # The last part
-                    pool.add_task(self._upload_part, Bucket, Key, LocalFilePath, offset, file_size - offset, i, uploadid, lst, resumable_flag, already_exist_parts, EnableMD5, traffic_limit)
+                    pool.add_task(self._upload_part, Bucket, Key, LocalFilePath, offset, file_size - offset, i, uploadid, lst, resumable_flag, already_exist_parts, EnableMD5, traffic_limit, callback)
                 else:
-                    pool.add_task(self._upload_part, Bucket, Key, LocalFilePath, offset, part_size, i, uploadid, lst, resumable_flag, already_exist_parts, EnableMD5, traffic_limit)
+                    pool.add_task(self._upload_part, Bucket, Key, LocalFilePath, offset, part_size, i, uploadid, lst, resumable_flag, already_exist_parts, EnableMD5, traffic_limit, callback)
                 offset += part_size
 
             pool.wait_completion()
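
The ProgressCallback object constructed in upload_file is not defined in these hunks, so its exact shape is not visible here. Below is a minimal sketch of what such a helper might look like, assuming it only aggregates the byte counts reported by the upload workers and forwards a running total to the user-supplied callable; the threading import added at the top of the file suggests the counter is lock-protected, since _upload_part runs on SimpleThreadPool worker threads. Only the report(size) call and the (file_size, progress_callback) constructor arguments are taken from the diff; everything else is illustrative.

import threading


class ProgressCallback(object):
    """Illustrative sketch of a thread-safe progress aggregator (not the SDK's actual class)."""

    def __init__(self, total_size, progress_callback):
        self._total_size = total_size                # total bytes of the object being uploaded
        self._consumed_size = 0                      # bytes reported so far by finished parts
        self._progress_callback = progress_callback  # user-supplied callable
        self._lock = threading.Lock()                # parts complete concurrently on pool threads

    def report(self, size):
        # Called once per uploaded part with that part's size; the lock keeps the
        # running total consistent when several parts finish at the same time.
        with self._lock:
            self._consumed_size += size
            self._progress_callback(self._consumed_size, self._total_size)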
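
Under that assumption the user callback receives (consumed_bytes, total_bytes), so a caller could pass a plain function. The client is assumed to be an already-configured CosS3Client instance; the bucket name and paths below are placeholders.

import logging

def print_progress(consumed_bytes, total_bytes):
    # Hypothetical user callback: log how much of the file has been uploaded.
    percent = 100.0 * consumed_bytes / total_bytes if total_bytes else 100.0
    logging.info("uploaded %d/%d bytes (%.1f%%)", consumed_bytes, total_bytes, percent)

# client is an existing CosS3Client; bucket, key and local path are placeholders.
client.upload_file(
    Bucket='examplebucket-1250000000',
    Key='exampleobject',
    LocalFilePath='/tmp/exampleobject',
    PartSize=10,
    MAXThread=5,
    progress_callback=print_progress)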