rclone-api 1.4.6__py2.py3-none-any.whl → 1.4.8__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rclone_api/s3/s3_multipart_uploader_by_copy.py ADDED
@@ -0,0 +1,395 @@
+ """
+ https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3/client/upload_part_copy.html
+ * client.upload_part_copy
+
+ This module provides functionality for S3 multipart uploads, including copying parts
+ from existing S3 objects using upload_part_copy.
+ """
+
+ from concurrent.futures import Future, ThreadPoolExecutor
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Optional
+
+ from botocore.client import BaseClient
+
+ from rclone_api.s3.multipart.finished_piece import FinishedPiece
+ from rclone_api.util import locked_print
+
+
+ @dataclass
+ class MultipartUploadInfo:
+     """Simplified upload information for multipart uploads."""
+
+     s3_client: BaseClient
+     bucket_name: str
+     object_name: str
+     upload_id: str
+     chunk_size: int
+     retries: int
+     file_size: Optional[int] = None
+     src_file_path: Optional[Path] = None
+
+
+ # Example shape of the boto3 call (from the docs linked above):
+ # response = client.upload_part_copy(
+ #     Bucket='string',
+ #     CopySource='string' or {'Bucket': 'string', 'Key': 'string', 'VersionId': 'string'},
+ #     CopySourceIfMatch='string',
+ #     CopySourceIfModifiedSince=datetime(2015, 1, 1),
+ #     CopySourceIfNoneMatch='string',
+ #     CopySourceIfUnmodifiedSince=datetime(2015, 1, 1),
+ #     CopySourceRange='string',
+ #     Key='string',
+ #     PartNumber=123,
+ #     UploadId='string',
+ #     SSECustomerAlgorithm='string',
+ #     SSECustomerKey='string',
+ #     CopySourceSSECustomerAlgorithm='string',
+ #     CopySourceSSECustomerKey='string',
+ #     RequestPayer='requester',
+ #     ExpectedBucketOwner='string',
+ #     ExpectedSourceBucketOwner='string'
+ # )
+
+ # import _thread
+ # import os
+ # import traceback
+ # import warnings
+ # from concurrent.futures import Future, ThreadPoolExecutor
+ # from pathlib import Path
+ # from queue import Queue
+ # from threading import Event, Thread
+ # from typing import Any, Callable
+
+ # from botocore.client import BaseClient
+
+ # from rclone_api.mount_read_chunker import FilePart
+ # from rclone_api.s3.chunk_task import S3FileInfo, file_chunker
+ # from rclone_api.s3.chunk_types import (
+ #     FinishedPiece,
+ #     UploadInfo,
+ #     UploadState,
+ # )
+ # from rclone_api.s3.types import MultiUploadResult
+ # from rclone_api.types import EndOfStream
+ # from rclone_api.util import locked_print
+
+ # This is how you upload large parts through a multipart upload: the parts are
+ # uploaded (or copied) individually, and the final call assembles them by
+ # passing their ETags to complete_multipart_upload.
+
+
+ # def upload_task(
+ #     info: UploadInfo,
+ #     chunk: FilePart,
+ #     part_number: int,
+ #     retries: int,
+ # ) -> FinishedPiece:
+ #     file_or_err: Path | Exception = chunk.get_file()
+ #     if isinstance(file_or_err, Exception):
+ #         raise file_or_err
+ #     file: Path = file_or_err
+ #     size = os.path.getsize(file)
+ #     retries = retries + 1  # Add one for the initial attempt
+ #     for retry in range(retries):
+ #         try:
+ #             if retry > 0:
+ #                 locked_print(f"Retrying part {part_number} for {info.src_file_path}")
+ #             locked_print(
+ #                 f"Uploading part {part_number} for {info.src_file_path} of size {size}"
+ #             )
+
+ #             with open(file, "rb") as f:
+ #                 part = info.s3_client.upload_part(
+ #                     Bucket=info.bucket_name,
+ #                     Key=info.object_name,
+ #                     PartNumber=part_number,
+ #                     UploadId=info.upload_id,
+ #                     Body=f,
+ #                 )
+ #             out: FinishedPiece = FinishedPiece(
+ #                 etag=part["ETag"], part_number=part_number
+ #             )
+ #             chunk.dispose()
+ #             return out
+ #         except Exception as e:
+ #             if retry == retries - 1:
+ #                 locked_print(f"Error uploading part {part_number}: {e}")
+ #                 chunk.dispose()
+ #                 raise e
+ #             else:
+ #                 locked_print(f"Error uploading part {part_number}: {e}, retrying")
+ #                 continue
+ #     raise Exception("Should not reach here")
+
+
+ # def prepare_upload_file_multipart(
+ #     s3_client: BaseClient,
+ #     bucket_name: str,
+ #     file_path: Path,
+ #     file_size: int | None,
+ #     object_name: str,
+ #     chunk_size: int,
+ #     retries: int,
+ # ) -> UploadInfo:
+ #     """Upload a file to the bucket using multipart upload with customizable chunk size."""
+
+ #     # Initiate multipart upload
+ #     locked_print(
+ #         f"Creating multipart upload for {file_path} to {bucket_name}/{object_name}"
+ #     )
+ #     mpu = s3_client.create_multipart_upload(Bucket=bucket_name, Key=object_name)
+ #     upload_id = mpu["UploadId"]
+
+ #     file_size = file_size if file_size is not None else os.path.getsize(file_path)
+
+ #     upload_info: UploadInfo = UploadInfo(
+ #         s3_client=s3_client,
+ #         bucket_name=bucket_name,
+ #         object_name=object_name,
+ #         src_file_path=file_path,
+ #         upload_id=upload_id,
+ #         retries=retries,
+ #         chunk_size=chunk_size,
+ #         file_size=file_size,
+ #     )
+ #     return upload_info
+
+
+ def upload_part_copy_task(
+     info: MultipartUploadInfo,
+     source_bucket: str,
+     source_key: str,
+     part_number: int,
+     retries: int = 3,
+ ) -> FinishedPiece:
+     """
+     Upload a part by copying from an existing S3 object.
+
+     Args:
+         info: Upload information
+         source_bucket: Source bucket name
+         source_key: Source object key
+         part_number: Part number (1-10000)
+         retries: Number of retry attempts
+
+     Returns:
+         FinishedPiece with ETag and part number
+     """
+     copy_source = {"Bucket": source_bucket, "Key": source_key}
+
+     retries = retries + 1  # Add one for the initial attempt
+     for retry in range(retries):
+         try:
+             if retry > 0:
+                 locked_print(f"Retrying part copy {part_number} for {info.object_name}")
+
+             locked_print(
+                 f"Copying part {part_number} for {info.object_name} from {source_bucket}/{source_key}"
+             )
+
+             # Prepare the upload_part_copy parameters
+             params = {
+                 "Bucket": info.bucket_name,
+                 "CopySource": copy_source,
+                 "Key": info.object_name,
+                 "PartNumber": part_number,
+                 "UploadId": info.upload_id,
+             }
+
+             # Execute the copy operation
+             part = info.s3_client.upload_part_copy(**params)
+
+             # Extract ETag from the response
+             etag = part["CopyPartResult"]["ETag"]
+
+             return FinishedPiece(etag=etag, part_number=part_number)
+
+         except Exception as e:
+             if retry == retries - 1:
+                 locked_print(f"Error copying part {part_number}: {e}")
+                 raise e
+             else:
+                 locked_print(f"Error copying part {part_number}: {e}, retrying")
+                 continue
+
+     raise Exception("Should not reach here")
+
+
+ def complete_multipart_upload_from_parts(
+     info: MultipartUploadInfo, parts: list[FinishedPiece]
+ ) -> str:
+     """
+     Complete a multipart upload using the provided parts.
+
+     Args:
+         info: Upload information
+         parts: List of finished pieces with ETags
+
+     Returns:
+         The URL of the completed object
+     """
+     # Sort parts by part number to ensure correct order
+     parts.sort(key=lambda x: x.part_number)
+
+     # Prepare the parts list for the complete_multipart_upload call
+     multipart_parts = [
+         {"ETag": part.etag, "PartNumber": part.part_number} for part in parts
+     ]
+
+     # Complete the multipart upload
+     response = info.s3_client.complete_multipart_upload(
+         Bucket=info.bucket_name,
+         Key=info.object_name,
+         UploadId=info.upload_id,
+         MultipartUpload={"Parts": multipart_parts},
+     )
+
+     # Return the URL of the completed object
+     return response.get("Location", f"s3://{info.bucket_name}/{info.object_name}")
+
+
+ def finish_multipart_upload_from_keys(
+     s3_client: BaseClient,
+     source_bucket: str,
+     parts: list[tuple[int, str]],
+     final_size: int,
+     destination_bucket: str,
+     destination_key: str,
+     chunk_size: int,  # Size of each part in bytes
+     max_workers: int = 100,
+     retries: int = 3,
+ ) -> str:
+     """
+     Finish a multipart upload by copying parts from existing S3 objects.
+
+     Args:
+         s3_client: Boto3 S3 client
+         source_bucket: Source bucket name
+         parts: List of (part_number, source_key) tuples to copy from
+         final_size: Total size of the assembled object in bytes
+         destination_bucket: Destination bucket name
+         destination_key: Destination object key
+         chunk_size: Size of each part in bytes
+         max_workers: Maximum number of concurrent part copies
+         retries: Number of retry attempts
+
+     Returns:
+         The URL of the completed object
+     """
+
+     # Initiate multipart upload
+     locked_print(
+         f"Creating multipart upload for {destination_bucket}/{destination_key} from {len(parts)} source objects"
+     )
+     mpu = s3_client.create_multipart_upload(
+         Bucket=destination_bucket, Key=destination_key
+     )
+     upload_id = mpu["UploadId"]
+
+     # Create upload info
+     upload_info = MultipartUploadInfo(
+         s3_client=s3_client,
+         bucket_name=destination_bucket,
+         object_name=destination_key,
+         upload_id=upload_id,
+         retries=retries,
+         chunk_size=chunk_size,
+         file_size=final_size,
+     )
+
+     futures: list[Future[FinishedPiece]] = []
+
+     with ThreadPoolExecutor(max_workers=max_workers) as executor:
+         for part_number, source_key in parts:
+
+             # Default arguments bind the loop variables at definition time,
+             # so each submitted task sees its own part_number/source_key.
+             def task(
+                 info=upload_info,
+                 source_bucket=source_bucket,
+                 source_key=source_key,
+                 part_number=part_number,
+                 retries=retries,
+             ):
+                 return upload_part_copy_task(
+                     info=info,
+                     source_bucket=source_bucket,
+                     source_key=source_key,
+                     part_number=part_number,
+                     retries=retries,
+                 )
+
+             fut = executor.submit(task)
+             futures.append(fut)
+
+         # Upload parts by copying from source objects
+         finished_parts = []
+
+         for fut in futures:
+             finished_part = fut.result()
+             finished_parts.append(finished_part)
+
+     # Complete the multipart upload
+     return complete_multipart_upload_from_parts(upload_info, finished_parts)
+
+
+ class S3MultiPartUploader:
+     def __init__(self, s3_client: BaseClient, verbose: bool) -> None:
+         self.s3_client = s3_client
+         self.verbose = verbose
+
+     def finish_from_keys(
+         self,
+         source_bucket: str,
+         parts: list[tuple[int, str]],
+         destination_bucket: str,
+         destination_key: str,
+         chunk_size: int,
+         final_size: int,
+         retries: int = 3,
+     ) -> str:
+         """
+         Finish a multipart upload by copying parts from existing S3 objects.
+
+         Args:
+             source_bucket: Source bucket name
+             parts: List of (part_number, source_key) tuples to copy from
+             destination_bucket: Destination bucket name
+             destination_key: Destination object key
+             chunk_size: Size of each part in bytes
+             final_size: Total size of the assembled object in bytes
+             retries: Number of retry attempts
+
+         Returns:
+             The URL of the completed object
+         """
+         return finish_multipart_upload_from_keys(
+             s3_client=self.s3_client,
+             source_bucket=source_bucket,
+             parts=parts,
+             destination_bucket=destination_bucket,
+             destination_key=destination_key,
+             chunk_size=chunk_size,
+             final_size=final_size,
+             retries=retries,
+         )
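
Taken together, the new module assembles a large object server-side: each (part_number, source_key) pair is copied into the destination with upload_part_copy, and the collected ETags are stitched together with complete_multipart_upload. A minimal usage sketch follows; the bucket names, keys, and sizes are hypothetical, and the module path is inferred from the RECORD below:

```python
# Hypothetical usage sketch of the copy-based uploader (names are examples).
import boto3

from rclone_api.s3.s3_multipart_uploader_by_copy import S3MultiPartUploader

client = boto3.client("s3")  # endpoint/credentials configuration omitted
uploader = S3MultiPartUploader(s3_client=client, verbose=True)

# Each tuple is (part_number, source_key); every part except the last must be
# at least 5 MiB, per the S3 multipart upload rules.
parts = [
    (1, "chunks/part.1_0-5242880"),
    (2, "chunks/part.2_5242880-10485760"),
]

url = uploader.finish_from_keys(
    source_bucket="my-bucket",
    parts=parts,
    destination_bucket="my-bucket",
    destination_key="final/object.bin",
    chunk_size=5 * 1024 * 1024,
    final_size=10 * 1024 * 1024,
)
print(url)
```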
rclone_api/s3/types.py CHANGED
@@ -26,6 +26,7 @@ class S3Provider(Enum):
  class S3Credentials:
      """Credentials for accessing S3."""
  
+     bucket_name: str
      provider: S3Provider
      access_key_id: str
      secret_access_key: str
rclone_api/types.py CHANGED
@@ -309,6 +309,13 @@ class Range:
          val = f"bytes={self.start.as_int()}-{last.as_int()}"
          return {"Range": val}
  
+     def __repr__(self) -> str:
+         length = self.end - self.start
+         return f"Range(start={self.start}, length={length})"
+
+     def __str__(self) -> str:
+         return self.__repr__()
+
  
  _MAX_PART_NUMBER = 10000
  
@@ -387,3 +394,9 @@ class PartInfo:
          end = SizeSuffix(self.range.end._size).as_int()
          dst_name = f"part.{partnumber}_{offset}-{end}"
          return dst_name
+
+     def __repr__(self) -> str:
+         return f"PartInfo(part_number={self.part_number}, range={self.range})"
+
+     def __str__(self) -> str:
+         return self.__repr__()
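
The new dunder methods report a length rather than a raw end offset, which reads better in logs of part ranges. A standalone illustration with simplified integer stand-ins (the real fields are SizeSuffix values, not plain ints):

```python
# Illustration only: simplified stand-in for the Range repr added above.
# @dataclass does not override a __repr__ defined in the class body.
from dataclasses import dataclass


@dataclass
class Range:
    start: int
    end: int

    def __repr__(self) -> str:
        length = self.end - self.start
        return f"Range(start={self.start}, length={length})"

    def __str__(self) -> str:
        return self.__repr__()


print(Range(start=0, end=5 * 1024 * 1024))
# -> Range(start=0, length=5242880)
```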
rclone_api/util.py CHANGED
@@ -1,10 +1,11 @@
+ import atexit
  import os
  import random
  import shutil
+ import signal
  import subprocess
  import warnings
  from pathlib import Path
- from tempfile import TemporaryDirectory
  from threading import Lock
  from typing import Any
  
@@ -18,6 +19,53 @@ from rclone_api.types import S3PathInfo
  
  _PRINT_LOCK = Lock()
  
+ _TMP_CONFIG_DIR = Path(".") / ".rclone" / "tmp_config"
+ _RCLONE_CONFIGS_LIST: list[Path] = []
+ _DO_CLEANUP = os.getenv("RCLONE_API_CLEANUP", "1") == "1"
+
+
+ def _clean_configs(signum=None, frame=None) -> None:
+     if not _DO_CLEANUP:
+         return
+     for config in _RCLONE_CONFIGS_LIST:
+         try:
+             config.unlink()
+         except Exception as e:
+             print(f"Error deleting config file: {config}, {e}")
+     _RCLONE_CONFIGS_LIST.clear()
+     if signum is not None:
+         # Restore the default handler and re-raise the signal so the
+         # process still exits with the conventional signal status.
+         signal.signal(signum, signal.SIG_DFL)
+         os.kill(os.getpid(), signum)
+
+
+ def _init_cleanup() -> None:
+     atexit.register(_clean_configs)
+
+     for sig in (signal.SIGINT, signal.SIGTERM):
+         signal.signal(sig, _clean_configs)
+
+
+ _init_cleanup()
+
+
+ def make_temp_config_file() -> Path:
+     from rclone_api.util import random_str
+
+     tmpdir = _TMP_CONFIG_DIR / random_str(32)
+     tmpdir.mkdir(parents=True, exist_ok=True)
+     tmpfile = tmpdir / "rclone.conf"
+     _RCLONE_CONFIGS_LIST.append(tmpfile)
+     return tmpfile
+
+
+ def clear_temp_config_file(path: Path | None) -> None:
+     if (path is None) or (not path.exists()) or (not _DO_CLEANUP):
+         return
+     try:
+         path.unlink()
+     except Exception as e:
+         print(f"Error deleting config file: {path}, {e}")
+
  
  def locked_print(*args, **kwargs):
      with _PRINT_LOCK:
@@ -116,7 +164,7 @@ def rclone_execute(
      capture: bool | Path | None = None,
      verbose: bool | None = None,
  ) -> subprocess.CompletedProcess:
-     tempdir: TemporaryDirectory | None = None
+     tmpfile: Path | None = None
      verbose = get_verbose(verbose)
  
      # Handle the Path case for capture
@@ -131,8 +179,7 @@ def rclone_execute(
  
      try:
          if isinstance(rclone_conf, Config):
-             tempdir = TemporaryDirectory()
-             tmpfile = Path(tempdir.name) / "rclone.conf"
+             tmpfile = make_temp_config_file()
              tmpfile.write_text(rclone_conf.text, encoding="utf-8")
              rclone_conf = tmpfile
          cmd = (
@@ -168,11 +215,7 @@ def rclone_execute(
          )
          return cp
      finally:
-         if tempdir:
-             try:
-                 tempdir.cleanup()
-             except Exception as e:
-                 print(f"Error cleaning up tempdir: {e}")
+         clear_temp_config_file(tmpfile)
  
  
  def split_s3_path(path: str) -> S3PathInfo:
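
The temp configs now live under ./.rclone/tmp_config and are reaped by an atexit hook plus SIGINT/SIGTERM handlers; re-raising the signal after cleanup preserves the conventional exit status. A minimal, self-contained sketch of that pattern (generic file list, not the rclone_api internals):

```python
# Minimal sketch of the atexit + signal cleanup pattern used above.
import atexit
import os
import signal
from pathlib import Path

_FILES: list[Path] = []  # files to delete on exit


def _cleanup(signum=None, frame=None) -> None:
    for f in _FILES:
        f.unlink(missing_ok=True)
    _FILES.clear()
    if signum is not None:
        # Restore the default handler and re-raise so the process still
        # dies with the expected signal exit status.
        signal.signal(signum, signal.SIG_DFL)
        os.kill(os.getpid(), signum)


atexit.register(_cleanup)
for _sig in (signal.SIGINT, signal.SIGTERM):
    signal.signal(_sig, _cleanup)
```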
rclone_api-1.4.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: rclone_api
- Version: 1.4.6
+ Version: 1.4.8
  Summary: rclone api in python
  Home-page: https://github.com/zackees/rclone-api
  License: BSD 3-Clause License
rclone_api-1.4.8.dist-info/RECORD CHANGED
@@ -17,22 +17,23 @@ rclone_api/group_files.py,sha256=H92xPW9lQnbNw5KbtZCl00bD6iRh9yRbCuxku4j_3dg,803
  rclone_api/http_server.py,sha256=3fPBV6l50erTe32DyeJBNmsDrn5KuujsbmEAbx13T-c,8720
  rclone_api/log.py,sha256=VZHM7pNSXip2ZLBKMP7M1u-rp_F7zoafFDuR8CPUoKI,1271
  rclone_api/mount.py,sha256=TE_VIBMW7J1UkF_6HRCt8oi_jGdMov4S51bm2OgxFAM,10045
- rclone_api/process.py,sha256=BGXJTZVT__jeaDyjN8_kRycliOhkBErMPdHO1hKRvJE,5271
- rclone_api/rclone_impl.py,sha256=HTvWbD0cWhqrIfUPzqCDp3Av1uCEBOoVO0BQrBYmjQM,48205
+ rclone_api/process.py,sha256=I7B4arAIbFcTBsek27cZ0t-l5YRWVHJJPji7G6ZLGjQ,4989
+ rclone_api/rclone_impl.py,sha256=xTTriz6-zn_aSrkY8B7wzT-zRXax7Og7ns6xu6-7O6g,48769
  rclone_api/remote.py,sha256=mTgMTQTwxUmbLjTpr-AGTId2ycXKI9mLX5L7PPpDIoc,520
  rclone_api/rpath.py,sha256=Y1JjQWcie39EgQrq-UtbfDz5yDLCwwfu27W7AQXllSE,2860
  rclone_api/scan_missing_folders.py,sha256=-8NCwpCaHeHrX-IepCoAEsX1rl8S-GOCxcIhTr_w3gA,4747
- rclone_api/types.py,sha256=gpEYVHNkxB7X3p_B4nEc1ls1kF_ykjNuUyZXCsvU-Cs,11788
- rclone_api/util.py,sha256=j252WPB-UMiz6zQcLvsZe9XvMq-Z2FIeZrjb-y01eL4,5947
+ rclone_api/types.py,sha256=HkpEZgZWhr5Gb04iHq5NxMRXxieWoN-PKmOfJFrg5Qg,12155
+ rclone_api/util.py,sha256=9w_m6W62l_X42Jw5q8p_p30h-QoxAqufvnCLI4PTMOE,7056
  rclone_api/assets/example.txt,sha256=lTBovRjiz0_TgtAtbA1C5hNi2ffbqnNPqkKg6UiKCT8,54
  rclone_api/cmd/analyze.py,sha256=RHbvk1G5ZUc3qLqlm1AZEyQzd_W_ZjcbCNDvW4YpTKQ,1252
- rclone_api/cmd/copy_large_s3.py,sha256=w1aCaq9EQ84aRoMj9mHtdr0Svjflkx6KTKcoldzRSo8,3788
+ rclone_api/cmd/copy_large_s3.py,sha256=B17GliDQyAauNglJCpsey0d3eArT2DAcT9g684TMQk8,3514
+ rclone_api/cmd/copy_large_s3_finish.py,sha256=ybZWaRa_89lyq7WpXUjpbhc_TQctUC-fKCZmdffPpKo,6731
  rclone_api/cmd/list_files.py,sha256=x8FHODEilwKqwdiU1jdkeJbLwOqUkUQuDWPo2u_zpf0,741
  rclone_api/cmd/save_to_db.py,sha256=ylvnhg_yzexM-m6Zr7XDiswvoDVSl56ELuFAdb9gqBY,1957
  rclone_api/db/__init__.py,sha256=OSRUdnSWUlDTOHmjdjVmxYTUNpTbtaJ5Ll9sl-PfZg0,40
  rclone_api/db/db.py,sha256=YRnYrCaXHwytQt07uEZ_mMpvPHo9-0IWcOb95fVOOfs,10086
  rclone_api/db/models.py,sha256=v7qaXUehvsDvU51uk69JI23fSIs9JFGcOa-Tv1c_wVs,1600
- rclone_api/detail/copy_file_parts.py,sha256=wO0IZoZS8360dR3S3TyuPTPJOyqd-z0QpSz88-9tjio,12727
+ rclone_api/detail/copy_file_parts.py,sha256=CXiFuCJKSAdopDzigdO0j8uYrNl1N2Y9X9sbeGffqDU,15919
  rclone_api/detail/walk.py,sha256=-54NVE8EJcCstwDoaC_UtHm73R2HrZwVwQmsnv55xNU,3369
  rclone_api/experimental/flags.py,sha256=qCVD--fSTmzlk9hloRLr0q9elzAOFzPsvVpKM3aB1Mk,2739
  rclone_api/experimental/flags_base.py,sha256=ajU_czkTcAxXYU-SlmiCfHY7aCQGHvpCLqJ-Z8uZLk0,2102
@@ -40,16 +41,16 @@ rclone_api/s3/api.py,sha256=PafsIEyWDpLWAXsZAjFm9CY14vJpsDr9lOsn0kGRLZ0,4009
  rclone_api/s3/basic_ops.py,sha256=hK3366xhVEzEcjz9Gk_8lFx6MRceAk72cax6mUrr6ko,2104
  rclone_api/s3/chunk_task.py,sha256=waEYe-iYQ1_BR3NCS4BrzVrK9UANvH1EcbXx2I6Z_NM,6839
  rclone_api/s3/create.py,sha256=wgfkapv_j904CfKuWyiBIWJVxfAx_ftemFSUV14aT68,3149
- rclone_api/s3/s3_multipart_uploader.py,sha256=KY9k585Us0VW8uqgS5jdSVrYynxlnO8Wzb52pcvR06M,4570
- rclone_api/s3/types.py,sha256=VqnvH0qhvb3_4wngqk0vSSStH-TgVQxNNxo9slz9-p8,1595
+ rclone_api/s3/s3_multipart_uploader_by_copy.py,sha256=kXKAqpxi6WuyTjJ4BsBQ0wH3Px8aJEc8CH9Ugcni82A,12944
+ rclone_api/s3/types.py,sha256=cYI5MbXRNdT-ps5kGIRQaYrseHyx_ozT4AcwBABTKwk,1616
  rclone_api/s3/upload_file_multipart.py,sha256=V7syKjFyVIe4U9Ahl5XgqVTzt9akiew3MFjGmufLo2w,12503
  rclone_api/s3/multipart/file_info.py,sha256=8v_07_eADo0K-Nsv7F0Ac1wcv3lkIsrR3MaRCmkYLTQ,105
- rclone_api/s3/multipart/finished_piece.py,sha256=TcwA58-qgKBiskfHrePoCWaSSep6Za9psZEpzrLUUhE,1199
+ rclone_api/s3/multipart/finished_piece.py,sha256=9nMWnVZ8S99wi2VFQsm1h1ZHqmebkhMGgd2s56wNj9w,1331
  rclone_api/s3/multipart/upload_info.py,sha256=d6_OfzFR_vtDzCEegFfzCfWi2kUBUV4aXZzqAEVp1c4,1874
  rclone_api/s3/multipart/upload_state.py,sha256=f-Aq2NqtAaMUMhYitlICSNIxCKurWAl2gDEUVizLIqw,6019
- rclone_api-1.4.6.dist-info/LICENSE,sha256=b6pOoifSXiUaz_lDS84vWlG3fr4yUKwB8fzkrH9R8bQ,1064
- rclone_api-1.4.6.dist-info/METADATA,sha256=Wq4cALgVpPO9GCGDE6YkFfZiFwbZWglP5-uwqNWuwjQ,4627
- rclone_api-1.4.6.dist-info/WHEEL,sha256=rF4EZyR2XVS6irmOHQIJx2SUqXLZKRMUrjsg8UwN-XQ,109
- rclone_api-1.4.6.dist-info/entry_points.txt,sha256=fJteOlYVwgX3UbNuL9jJ0zUTuX2O79JFAeNgK7Sw7EQ,255
- rclone_api-1.4.6.dist-info/top_level.txt,sha256=EvZ7uuruUpe9RiUyEp25d1Keq7PWYNT0O_-mr8FCG5g,11
- rclone_api-1.4.6.dist-info/RECORD,,
+ rclone_api-1.4.8.dist-info/LICENSE,sha256=b6pOoifSXiUaz_lDS84vWlG3fr4yUKwB8fzkrH9R8bQ,1064
+ rclone_api-1.4.8.dist-info/METADATA,sha256=GfvhP_JxvsCbW0r6NVyiEHzkac5QhkVjasG7vyP8IMo,4627
+ rclone_api-1.4.8.dist-info/WHEEL,sha256=rF4EZyR2XVS6irmOHQIJx2SUqXLZKRMUrjsg8UwN-XQ,109
+ rclone_api-1.4.8.dist-info/entry_points.txt,sha256=fJteOlYVwgX3UbNuL9jJ0zUTuX2O79JFAeNgK7Sw7EQ,255
+ rclone_api-1.4.8.dist-info/top_level.txt,sha256=EvZ7uuruUpe9RiUyEp25d1Keq7PWYNT0O_-mr8FCG5g,11
+ rclone_api-1.4.8.dist-info/RECORD,,