|
| 1 | +# -*- coding: utf-8 -*- |
| 2 | + |
| 3 | +import json |
| 4 | +import os |
| 5 | +import sys |
| 6 | +import threading |
| 7 | +import logging |
| 8 | +import uuid |
| 9 | +import hashlib |
| 10 | +import crcmod |
| 11 | +from .cos_comm import * |
| 12 | +from .streambody import StreamBody |
| 13 | +from .cos_threadpool import SimpleThreadPool |
| 14 | +logger = logging.getLogger(__name__) |
| 15 | + |
class ResumableDownLoader(object):
    """Download a COS object to a local file with resumable, concurrent,
    ranged (multi-part) GET requests.

    Progress is persisted as a JSON "record" file under
    ``~/.cos_download_tmp_file`` keyed by (bucket, key, destination path),
    so an interrupted download can resume without re-fetching parts that
    already finished.
    """

    def __init__(self, cos_client, bucket, key, dest_filename, object_info, part_size=20, max_thread=5, enable_crc=False, **kwargs):
        """
        :param cos_client: client used to issue get_object requests.
        :param bucket: COS bucket name.
        :param key: COS object key.
        :param dest_filename: local destination path (made absolute here).
        :param object_info: HEAD-object response headers; must contain
            'Content-Length', 'Last-Modified' and 'ETag'.
        :param part_size: requested part size in MB (may be enlarged, see
            __determine_part_size_internal).
        :param max_thread: number of concurrent download workers.
        :param enable_crc: verify CRC64-ECMA of the final file when True.
        :param kwargs: extra per-request headers forwarded to get_object.
        """
        self.__cos_client = cos_client
        self.__bucket = bucket
        self.__key = key
        self.__dest_file_path = os.path.abspath(dest_filename)
        self.__object_info = object_info
        self.__max_thread = max_thread
        self.__enable_crc = enable_crc
        self.__headers = kwargs

        self.__max_part_count = 100  # depends on any server-side limit on concurrent ranged reads
        self.__min_part_size = 1024 * 1024  # 1MB
        self.__part_size = self.__determine_part_size_internal(int(object_info['Content-Length']), part_size)
        self.__finished_parts = []
        self.__lock = threading.Lock()  # guards __finished_parts / __record across worker threads
        self.__record = None  # resume context, mirrors the on-disk record file
        self.__dump_record_dir = os.path.join(os.path.expanduser('~'), '.cos_download_tmp_file')

        record_filename = self.__get_record_filename(bucket, key, self.__dest_file_path)
        self.__record_filepath = os.path.join(self.__dump_record_dir, record_filename)
        self.__tmp_file = None  # assigned by __load_record()

        if not os.path.exists(self.__dump_record_dir):
            os.makedirs(self.__dump_record_dir)

        logger.debug('resumable downloader init finish, bucket: {0}, key: {1}'.format(bucket, key))

    def start(self):
        """Run the download.

        Resumes from the record file when valid, fetches the missing parts
        concurrently, atomically renames the temp file over the destination,
        optionally verifies CRC, then removes the record file.

        :raises CosClientError: when some part still fails after retries, or
            the CRC check mismatches.
        """
        logger.debug('start resumable download, bucket: {0}, key: {1}'.format(self.__bucket, self.__key))
        self.__load_record()  # restore context from the record file

        assert self.__tmp_file
        # Ensure the temp file exists so workers can open it with 'rb+'.
        open(self.__tmp_file, 'a').close()

        parts_need_to_download = self.__get_parts_need_to_download()
        logger.debug('parts_need_to_download: {0}'.format(parts_need_to_download))
        pool = SimpleThreadPool(self.__max_thread)
        for part in parts_need_to_download:
            part_range = "bytes=" + str(part.start) + "-" + str(part.start + part.length - 1)
            headers = dict.copy(self.__headers)  # per-task copy: each task gets its own Range
            headers["Range"] = part_range
            pool.add_task(self.__download_part, part, headers)

        pool.wait_completion()
        result = pool.get_result()
        if not result['success_all']:
            raise CosClientError('some download_part fail after max_retry, please download_file again')

        # Replace the destination atomically (remove first: os.rename cannot
        # overwrite an existing file on Windows).
        if os.path.exists(self.__dest_file_path):
            os.remove(self.__dest_file_path)
        os.rename(self.__tmp_file, self.__dest_file_path)

        if self.__enable_crc:
            self.__check_crc()

        self.__del_record()
        logger.debug('download success, bucket: {0}, key: {1}'.format(self.__bucket, self.__key))

    def __get_record_filename(self, bucket, key, dest_file_path):
        """Build a unique record file name for (bucket, key, destination)."""
        # hashlib.md5 requires bytes on Python 3; key/path may be text.
        key_bytes = key if isinstance(key, bytes) else key.encode('utf-8')
        path_bytes = dest_file_path if isinstance(dest_file_path, bytes) else dest_file_path.encode('utf-8')
        dest_file_path_md5 = hashlib.md5(path_bytes).hexdigest()
        key_md5 = hashlib.md5(key_bytes).hexdigest()
        return '{0}_{1}.{2}'.format(bucket, key_md5, dest_file_path_md5)

    def __determine_part_size_internal(self, file_size, part_size):
        """Return the effective part size in bytes.

        Starts from the requested size (MB), clamps to the 1MB minimum, then
        doubles until at most __max_part_count parts cover the whole object.
        """
        real_part_size = part_size * 1024 * 1024  # caller passes MB
        if real_part_size < self.__min_part_size:
            real_part_size = self.__min_part_size

        while real_part_size * self.__max_part_count < file_size:
            real_part_size = real_part_size * 2
        logger.debug('finish to determine part size, file_size: {0}, part_size: {1}'.format(file_size, real_part_size))
        return real_part_size

    def __splite_to_parts(self):
        """Split the object into PartInfo entries covering every byte."""
        parts = []
        file_size = int(self.__object_info['Content-Length'])
        # Floor division: '/' would produce a float on Python 3 and break range().
        num_parts = (file_size + self.__part_size - 1) // self.__part_size
        for i in range(num_parts):
            start = i * self.__part_size
            if i == num_parts - 1:
                length = file_size - start  # last part takes the remainder
            else:
                length = self.__part_size

            parts.append(PartInfo(i + 1, start, length))
        return parts

    def __get_parts_need_to_download(self):
        """Return the parts not yet finished (set difference on PartInfo)."""
        all_set = set(self.__splite_to_parts())
        logger.debug('all_set: {0}'.format(len(all_set)))
        finished_set = set(self.__finished_parts)
        logger.debug('finished_set: {0}'.format(len(finished_set)))
        return list(all_set - finished_set)

    def __download_part(self, part, headers):
        """Fetch one part with a ranged GET and write it at its offset in the
        temp file, then mark the part finished. Runs on a worker thread."""
        with open(self.__tmp_file, 'rb+') as f:
            f.seek(part.start, 0)
            # Pull these out only for logging; don't shadow the builtin 'range'.
            part_range = headers.get('Range')
            traffic_limit = headers.get('TrafficLimit')
            logger.debug("part_id: {0}, part_range: {1}, traffic_limit:{2}".format(part.part_id, part_range, traffic_limit))
            result = self.__cos_client.get_object(Bucket=self.__bucket, Key=self.__key, **headers)
            result["Body"].pget_stream_to_file(f, part.start, part.length)

        self.__finish_part(part)

    def __finish_part(self, part):
        """Record a completed part (thread-safe) and persist the record file."""
        logger.debug('download part finished,bucket: {0}, key: {1}, part_id: {2}'.
                     format(self.__bucket, self.__key, part.part_id))
        with self.__lock:
            self.__finished_parts.append(part)
            self.__record['parts'].append({'part_id': part.part_id,
                                           'start': part.start,
                                           'length': part.length})
            self.__dump_record(self.__record)

    def __dump_record(self, record):
        """Serialize the resume context to the record file as JSON."""
        with open(self.__record_filepath, 'w') as f:
            json.dump(record, f)
            logger.debug('dump record to {0}, bucket: {1}, key: {2}'.
                         format(self.__record_filepath, self.__bucket, self.__key))

    def __load_record(self):
        """Restore the resume context from the record file.

        Discards the record when it no longer matches the object's HEAD meta
        or its temp file disappeared; otherwise restores part size, temp file
        and finished parts. Starts (and persists) a fresh context when no
        usable record exists.
        """
        record = None

        if os.path.exists(self.__record_filepath):
            with open(self.__record_filepath, 'r') as f:
                record = json.load(f)

            ret = self.__check_record(record)
            # The record must match the current HEAD meta; otherwise drop it.
            if not ret:
                self.__del_record()
                record = None
            else:
                self.__part_size = record['part_size']
                self.__tmp_file = record['tmp_filename']
                if not os.path.exists(self.__tmp_file):
                    # Temp file vanished: the recorded progress is unusable.
                    record = None
                    self.__tmp_file = None
                    self.__del_record()
                else:
                    self.__finished_parts = list(PartInfo(p['part_id'], p['start'], p['length']) for p in record['parts'])
                    logger.debug('load record: finished parts nums: {0}'.format(len(self.__finished_parts)))
                    self.__record = record

        if not record:
            self.__tmp_file = "{file_name}_{uuid}".format(file_name=self.__dest_file_path, uuid=uuid.uuid4().hex)
            record = {'bucket': self.__bucket, 'key': self.__key, 'tmp_filename': self.__tmp_file,
                      'mtime': self.__object_info['Last-Modified'], 'etag': self.__object_info['ETag'],
                      'file_size': self.__object_info['Content-Length'], 'part_size': self.__part_size, 'parts': []}
            self.__record = record
            self.__dump_record(record)

    def __check_record(self, record):
        """Return True when the record still matches the remote object
        (same ETag, Last-Modified and size), i.e. it is safe to resume."""
        return record['etag'] == self.__object_info['ETag'] and \
            record['mtime'] == self.__object_info['Last-Modified'] and \
            record['file_size'] == self.__object_info['Content-Length']

    def __del_record(self):
        """Remove the record file (download finished or record invalid)."""
        os.remove(self.__record_filepath)
        logger.debug('ResumableDownLoader delete record_file, path: {0}'.format(self.__record_filepath))

    def __check_crc(self):
        """Verify the downloaded file's CRC64-ECMA against the object meta.

        :raises CosClientError: when both checksums are present and differ.
        """
        logger.debug('start to check crc')
        # Plain int literals work on Python 2 and 3 (the old 'L' suffix is
        # Python-2-only syntax).
        c64 = crcmod.mkCrcFun(0x142F0E1EBA9EA3693, initCrc=0, xorOut=0xffffffffffffffff, rev=True)
        with open(self.__dest_file_path, 'rb') as f:
            local_crc64 = str(c64(f.read()))
        # .get: the header may be absent, in which case the check is skipped
        # (the None-guard below was dead code with a plain [] lookup).
        object_crc64 = self.__object_info.get('x-cos-hash-crc64ecma')
        if local_crc64 is not None and object_crc64 is not None and local_crc64 != object_crc64:
            raise CosClientError('crc of client: {0} is mismatch with cos: {1}'.format(local_crc64, object_crc64))
| 192 | + |
class PartInfo(object):
    """Descriptor of one download part.

    Attributes:
        part_id: 1-based part number.
        start: byte offset of the part within the object.
        length: part size in bytes.

    Equality and hashing use the (part_id, start, length) triple so parts can
    be stored in sets and compared across runs when resuming.
    """

    def __init__(self, part_id, start, length):
        self.part_id = part_id
        self.start = start
        self.length = length

    def __eq__(self, other):
        # Tolerate comparison against arbitrary objects instead of raising
        # AttributeError on other.__key().
        if not isinstance(other, PartInfo):
            return NotImplemented
        return self.__key() == other.__key()

    def __ne__(self, other):
        # Python 2 does not derive != from __eq__; define it explicitly.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        return hash(self.__key())

    def __repr__(self):
        return 'PartInfo(part_id={0}, start={1}, length={2})'.format(self.part_id, self.start, self.length)

    def __key(self):
        return self.part_id, self.start, self.length
0 commit comments