rclone-api 1.2.8__tar.gz → 1.2.9__tar.gz
This diff shows the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those package versions.
- {rclone_api-1.2.8 → rclone_api-1.2.9}/PKG-INFO +1 -1
- {rclone_api-1.2.8 → rclone_api-1.2.9}/pyproject.toml +1 -1
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/mount.py +34 -27
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/profile/mount_copy_bytes.py +10 -7
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/rclone.py +15 -12
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/s3/chunk_file.py +35 -32
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/s3/chunk_types.py +4 -73
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/s3/types.py +4 -2
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/s3/upload_file_multipart.py +27 -16
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/types.py +117 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api.egg-info/PKG-INFO +1 -1
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_s3.py +10 -4
- {rclone_api-1.2.8 → rclone_api-1.2.9}/.aiderignore +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/.github/workflows/lint.yml +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/.github/workflows/push_macos.yml +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/.github/workflows/push_ubuntu.yml +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/.github/workflows/push_win.yml +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/.gitignore +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/.pylintrc +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/.vscode/launch.json +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/.vscode/settings.json +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/.vscode/tasks.json +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/LICENSE +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/MANIFEST.in +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/README.md +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/clean +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/install +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/lint +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/requirements.testing.txt +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/setup.cfg +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/setup.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/__init__.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/assets/example.txt +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/cli.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/cmd/copy_large_s3.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/cmd/list_files.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/completed_process.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/config.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/convert.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/deprecated.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/diff.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/dir.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/dir_listing.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/exec.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/experimental/flags.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/experimental/flags_base.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/file.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/filelist.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/group_files.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/process.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/remote.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/rpath.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/s3/api.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/s3/basic_ops.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/s3/create.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/scan_missing_folders.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/util.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api/walk.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api.egg-info/SOURCES.txt +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api.egg-info/dependency_links.txt +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api.egg-info/entry_points.txt +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api.egg-info/requires.txt +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/src/rclone_api.egg-info/top_level.txt +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/test +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/archive/test_paramiko.py.disabled +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_cmd_list_files.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_copy.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_copy_bytes.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_copy_file_resumable_s3.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_copy_files.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_diff.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_group_files.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_is_synced.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_ls.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_mount.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_mount_s3.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_obscure.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_rclone_config.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_remote_control.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_remotes.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_scan_missing_folders.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_size_files.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_size_suffix.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tests/test_walk.py +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/tox.ini +0 -0
- {rclone_api-1.2.8 → rclone_api-1.2.9}/upload_package.sh +0 -0
src/rclone_api/mount.py:

```diff
@@ -14,7 +14,7 @@ from threading import Lock, Semaphore
 from typing import Any

 from rclone_api.process import Process
-from rclone_api.types import
+from rclone_api.types import FilePart

 _SYSTEM = platform.system()  # "Linux", "Darwin", "Windows", etc.

```
```diff
@@ -34,7 +34,7 @@ def _cleanup_mounts() -> None:
     with ThreadPoolExecutor() as executor:
         mount: Mount
         for mount in _MOUNTS_FOR_GC:
-            executor.submit(mount.close
+            executor.submit(mount.close)


 atexit.register(_cleanup_mounts)
```
```diff
@@ -296,14 +296,13 @@ def _read_from_mount_task(
     try:
         with path.open("rb") as f:
             f.seek(offset)
-
-            assert len(
-            return
+            payload = f.read(size)
+            assert len(payload) == size, f"Invalid read size: {len(payload)}"
+            return payload
+
     except KeyboardInterrupt as e:
         import _thread

-        warnings.warn(f"Error fetching file chunk: {e}")
-
         _thread.interrupt_main()
         return Exception(e)
     except Exception as e:
```
```diff
@@ -319,7 +318,6 @@ class MultiMountFileChunker:
         self,
         filename: str,
         filesize: int,
-        chunk_size: SizeSuffix,
         mounts: list[Mount],
         executor: ThreadPoolExecutor,
         verbose: bool | None,
```
```diff
@@ -328,7 +326,6 @@
 
         self.filename = filename
         self.filesize = filesize
-        self.chunk_size = chunk_size
         self.executor = executor
         self.mounts_processing: list[Mount] = []
         self.mounts_availabe: list[Mount] = mounts
```
```diff
@@ -336,13 +333,26 @@
         self.lock = Lock()
         self.verbose = get_verbose(verbose)

-    def
+    def shutdown(self) -> None:
         self.executor.shutdown(wait=True, cancel_futures=True)
         with ThreadPoolExecutor() as executor:
             for mount in self.mounts_processing:
                 executor.submit(lambda: mount.close())

-    def
+    def _acquire_mount(self) -> Mount:
+        self.semaphore.acquire()
+        with self.lock:
+            mount = self.mounts_availabe.pop()
+            self.mounts_processing.append(mount)
+            return mount
+
+    def _release_mount(self, mount: Mount) -> None:
+        with self.lock:
+            self.mounts_processing.remove(mount)
+            self.mounts_availabe.append(mount)
+            self.semaphore.release()
+
+    def fetch(self, offset: int, size: int, extra: Any) -> Future[FilePart]:
         if self.verbose:
             print(f"Fetching data range: offset={offset}, size={size}")

```
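The new `_acquire_mount`/`_release_mount` pair is a classic semaphore-bounded resource pool: the semaphore caps how many fetches may hold a mount at once, while the lock guards the two bookkeeping lists. A minimal standalone sketch of the pattern (generic names, not part of rclone-api):

```python
from threading import Lock, Semaphore


class Pool:
    """Semaphore-bounded pool: acquire() blocks while all resources are in use."""

    def __init__(self, resources: list[object]) -> None:
        self._free = list(resources)
        self._busy: list[object] = []
        self._lock = Lock()                    # guards the two lists
        self._sem = Semaphore(len(resources))  # counts free resources

    def acquire(self) -> object:
        self._sem.acquire()  # block until a resource is free
        with self._lock:
            res = self._free.pop()
            self._busy.append(res)
            return res

    def release(self, res: object) -> None:
        with self._lock:
            self._busy.remove(res)
            self._free.append(res)
        self._sem.release()  # wake one waiting acquirer
```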
```diff
@@ -353,28 +363,25 @@
         ), f"Invalid offset + size: {offset} + {size} ({offset+size}) <= {self.filesize}"

         try:
-            self.
-            with self.lock:
-                mount = self.mounts_availabe.pop()
-                self.mounts_processing.append(mount)
-
+            mount = self._acquire_mount()
             path = mount.mount_path / self.filename

-            def
-
-            ) ->
-
+            def task_fetch_file_range(
+                size=size, path=path, mount=mount, verbose=self.verbose
+            ) -> FilePart:
+                bytes_or_err = _read_from_mount_task(
                     offset=offset, size=size, path=path, verbose=verbose
                 )
-
-
-
-
+                self._release_mount(mount)
+
+                if isinstance(bytes_or_err, Exception):
+                    return FilePart(payload=bytes_or_err, extra=extra)
+                out = FilePart(payload=bytes_or_err, extra=extra)
                 return out

-            fut = self.executor.submit(
+            fut = self.executor.submit(task_fetch_file_range)
             return fut
         except Exception as e:
             warnings.warn(f"Error fetching file chunk: {e}")
-
-            return self.executor.submit(lambda:
+            fp = FilePart(payload=e, extra=extra)
+            return self.executor.submit(lambda: fp)
```
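With this change, `fetch` returns a `Future[FilePart]` instead of raw bytes, and the caller-supplied `extra` object rides along on the result. A hedged usage sketch (chunker construction elided; names follow the diff):

```python
# Assumes `chunker` is an already-constructed MultiMountFileChunker.
fut = chunker.fetch(offset=0, size=1024 * 1024, extra="part-1")

fp = fut.result()  # FilePart; its payload is a temp-file Path or an Exception
if fp.is_error():
    raise RuntimeError(f"fetch failed: {fp.payload}")
data = fp.load()   # read the chunk back from its temp file
fp.close()         # delete the backing temp file
```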
src/rclone_api/profile/mount_copy_bytes.py:

```diff
@@ -15,6 +15,7 @@ from dotenv import load_dotenv

 from rclone_api import Config, Rclone, SizeSuffix
 from rclone_api.mount import MultiMountFileChunker
+from rclone_api.types import FilePart

 os.environ["RCLONE_API_VERBOSE"] = "1"

```
```diff
@@ -130,16 +131,18 @@ def _run_profile(
     )
     bytes_count = 0

-    futures: list[Future[
+    futures: list[Future[FilePart]] = []
     for i in range(num):
         offset = SizeSuffix(i * chunk_size.as_int()) + offset
-        future = filechunker.fetch(offset.as_int(), size.as_int())
+        future = filechunker.fetch(offset.as_int(), size.as_int(), "TEST OBJECT")
         futures.append(future)

     for future in futures:
-
-        if isinstance(
-        assert False, f"Error: {
+        filepart_or_err = future.result()
+        if isinstance(filepart_or_err, Exception):
+            assert False, f"Error: {filepart_or_err}"
+        bytes_count += filepart_or_err.n_bytes()
+        filepart_or_err.close()
     futures.clear()

     start = time.time()
```
```diff
@@ -149,14 +152,14 @@ def _run_profile(

     for i in range(num):
         offset = SizeSuffix(i * chunk_size.as_int()) + offset
-        future = filechunker.fetch(offset.as_int(), size.as_int())
+        future = filechunker.fetch(offset.as_int(), size.as_int(), "TEST OBJECT")
         futures.append(future)

     for future in futures:
         bytes_or_err = future.result()
         if isinstance(bytes_or_err, Exception):
             assert False, f"Error: {bytes_or_err}"
-        bytes_count +=
+        bytes_count += bytes_or_err.n_bytes()

     diff = (time.time() - start) / num
     net_io_end = psutil.net_io_counters()
```
src/rclone_api/rclone.py:

```diff
@@ -4,6 +4,7 @@ Unit test file.

 import os
 import random
+import shutil
 import subprocess
 import time
 import traceback
```
```diff
@@ -825,7 +826,7 @@ class Rclone:
             traceback.print_exc()
             raise
         finally:
-            chunk_fetcher.
+            chunk_fetcher.shutdown()

     def _copy_bytes(
         self,
```
```diff
@@ -984,7 +985,6 @@ class Rclone:
         filechunker: MultiMountFileChunker = MultiMountFileChunker(
             filename=filename,
             filesize=filesize,
-            chunk_size=chunk_size,
             mounts=mounts,
             executor=executor,
             verbose=mount_log is not None,
```
```diff
@@ -1004,6 +1004,8 @@ class Rclone:
         direct_io: bool = True,
     ) -> bytes | Exception:
         """Copy bytes from a file to another file."""
+        from rclone_api.types import FilePart
+
         # determine number of threads from chunk size
         threads = max(1, min(max_threads, length // chunk_size.as_int()))
         filechunker = self.get_multi_mount_file_chunker(
```
```diff
@@ -1014,23 +1016,24 @@
             direct_io=direct_io,
         )
         try:
-            fut = filechunker.fetch(offset, length)
-
-
-
-
+            fut = filechunker.fetch(offset, length, extra=None)
+            fp: FilePart = fut.result()
+            payload = fp.payload
+            if isinstance(payload, Exception):
+                return payload
             if outfile is None:
-
-
-                out
-
+                out = payload.read_bytes()
+                payload.unlink()
+                return out
+            shutil.move(payload, outfile)
             return bytes(0)
+
         except Exception as e:
             warnings.warn(f"Error copying bytes: {e}")
             return e
         finally:
             try:
-                filechunker.
+                filechunker.shutdown()
             except Exception as e:
                 warnings.warn(f"Error closing filechunker: {e}")

```
src/rclone_api/s3/chunk_file.py:

```diff
@@ -1,12 +1,15 @@
 import time
 import warnings
 from concurrent.futures import Future
+from dataclasses import dataclass
 from pathlib import Path
 from queue import Queue
 from threading import Event
-from typing import Callable
+from typing import Any, Callable

-from rclone_api.
+from rclone_api.mount import FilePart
+from rclone_api.s3.chunk_types import UploadState
+from rclone_api.types import Finished
 from rclone_api.util import locked_print


```
```diff
@@ -27,12 +30,18 @@ def _get_file_size(file_path: Path, timeout: int = 60) -> int:
     raise TimeoutError(f"File {file_path} not found after {timeout} seconds")


+@dataclass
+class S3FileInfo:
+    upload_id: str
+    part_number: int
+
+
 def file_chunker(
     upload_state: UploadState,
-
+    fetcher: Callable[[int, int, Any], Future[FilePart]],
     max_chunks: int | None,
     cancel_signal: Event,
-
+    queue_upload: Queue[FilePart | Finished],
 ) -> None:
     count = 0

```
```diff
@@ -49,7 +58,7 @@ def file_chunker(
     upload_info = upload_state.upload_info
     file_path = upload_info.src_file_path
     chunk_size = upload_info.chunk_size
-    src = Path(file_path)
+    # src = Path(file_path)

     try:
         part_number = 1
```
```diff
@@ -73,12 +82,12 @@ def file_chunker(
                 return

         while not should_stop():
-
-            if
+            curr_part_number = next_part_number()
+            if curr_part_number is None:
                 locked_print(f"File {file_path} has completed chunking all parts")
                 break
-            assert
-            offset = (
+            assert curr_part_number is not None
+            offset = (curr_part_number - 1) * chunk_size
             file_size = upload_info.file_size

             assert offset < file_size, f"Offset {offset} is greater than file size"
```
```diff
@@ -90,44 +99,38 @@ def file_chunker(

             # data = chunk_fetcher(offset, chunk_size).result()

-            assert
-            cpn: int =
+            assert curr_part_number is not None
+            cpn: int = curr_part_number

-            def on_complete(
-
-
-                file_path: Path = file_path,
-            ) -> None:
-                data: bytes | Exception = fut.result()
-                if isinstance(data, Exception):
+            def on_complete(fut: Future[FilePart]) -> None:
+                fp: FilePart = fut.result()
+                if fp.is_error():
                     warnings.warn(
-                        f"Error reading file: {
+                        f"Error reading file: {fp}, skipping part {part_number}"
                     )
                     return

-                if
+                if fp.n_bytes() == 0:
                     warnings.warn(f"Empty data for part {part_number} of {file_path}")
                     raise ValueError(
                         f"Empty data for part {part_number} of {file_path}"
                     )

-
-
-
-
-                    data=data,  # After this, data should not be reused.
-                )
+                if isinstance(fp.payload, Exception):
+                    warnings.warn(f"Error reading file because of error: {fp.payload}")
+                    return
+
                 done_part_numbers.add(part_number)
-
+                queue_upload.put(fp)

-            offset = (
-            fut =
+            offset = (curr_part_number - 1) * chunk_size
+            fut = fetcher(offset, file_size, S3FileInfo(upload_info.upload_id, cpn))
             fut.add_done_callback(on_complete)
-            # wait until the
-            while
+            # wait until the queue_upload queue can accept the next chunk
+            while queue_upload.full():
                 time.sleep(0.1)
     except Exception as e:

         warnings.warn(f"Error reading file: {e}")
     finally:
-
+        queue_upload.put(Finished())
```
src/rclone_api/s3/chunk_types.py:

```diff
@@ -1,7 +1,6 @@
 import hashlib
 import json
 import os
-import time
 from dataclasses import dataclass, field, fields
 from pathlib import Path
 from threading import Lock
```
The block removed below does not disappear from the package: the temp-dir helpers reappear, reworked, in src/rclone_api/types.py (with `_get_chunk_tmpdir` renamed to `get_chunk_tmpdir`), and the `FileChunk` class is superseded by the new `FilePart` class there.

```diff
@@ -11,79 +10,9 @@ from botocore.client import BaseClient
 from rclone_api.types import SizeSuffix
 from rclone_api.util import locked_print

-_MIN_UPLOAD_CHUNK_SIZE = 5 * 1024 * 1024  # 5MB
+# _MIN_UPLOAD_CHUNK_SIZE = 5 * 1024 * 1024  # 5MB
 _SAVE_STATE_LOCK = Lock()

-_TMP_DIR_ACCESS_LOCK = Lock()
-
-
-def _clean_old_files(out: Path) -> None:
-    # clean up files older than 1 day
-
-    now = time.time()
-    # Erase all stale files and then purge empty directories.
-    for root, dirs, files in os.walk(out):
-        for name in files:
-            f = Path(root) / name
-            filemod = f.stat().st_mtime
-            diff_secs = now - filemod
-            diff_days = diff_secs / (60 * 60 * 24)
-            if diff_days > 1:
-                locked_print(f"Removing old file: {f}")
-                f.unlink()
-
-    for root, dirs, _ in os.walk(out):
-        for dir in dirs:
-            d = Path(root) / dir
-            if not list(d.iterdir()):
-                locked_print(f"Removing empty directory: {d}")
-                d.rmdir()
-
-
-def _get_chunk_tmpdir() -> Path:
-    with _TMP_DIR_ACCESS_LOCK:
-        dat = _get_chunk_tmpdir.__dict__
-        if "out" in dat:
-            return dat["out"]  # Folder already validated.
-        out = Path("chunk_store")
-        if out.exists():
-            # first access, clean up directory
-            _clean_old_files(out)
-        out.mkdir(exist_ok=True, parents=True)
-        dat["out"] = out
-        return out
-
-
-class FileChunk:
-    def __init__(self, src: Path, upload_id: str, part_number: int, data: bytes):
-        assert data is not None, f"{src}: Data must not be None"
-        self.upload_id = upload_id
-        self.src = src
-        self.part_number = part_number
-        name = src.name
-        self.tmpdir = _get_chunk_tmpdir()
-        self.filepart = self.tmpdir / f"{name}_{upload_id}.part_{part_number}.tmp"
-        self.filepart.write_bytes(data)
-        del data  # free up memory
-
-    @property
-    def data(self) -> bytes:
-        assert self.filepart is not None
-        with open(self.filepart, "rb") as f:
-            return f.read()
-        return b""
-
-    def close(self):
-        import traceback
-
-        stacktrace = traceback.format_stack()
-        locked_print(f"Closing file chunk: {self.filepart}\n{stacktrace}")
-        if self.filepart.exists():
-            self.filepart.unlink()
-
-    def __del__(self):
-        self.close()
-

 @dataclass
 class UploadInfo:
```
```diff
@@ -220,11 +149,13 @@ class UploadState:
         self._save_no_lock()

     def __post_init__(self):
+        from rclone_api.types import get_chunk_tmpdir
+
         if self.peristant is None:
             # upload_id = self.upload_info.upload_id
             object_name = self.upload_info.object_name
             chunk_size = self.upload_info.chunk_size
-            parent =
+            parent = get_chunk_tmpdir()
             self.peristant = parent / f"{object_name}_chunk_size_{chunk_size}_.json"

     def save(self) -> None:
```
src/rclone_api/s3/types.py:

```diff
@@ -2,7 +2,9 @@ from concurrent.futures import Future
 from dataclasses import dataclass
 from enum import Enum
 from pathlib import Path
-from typing import Callable
+from typing import Any, Callable
+
+from rclone_api.mount import FilePart


 class S3Provider(Enum):
```
```diff
@@ -48,7 +50,7 @@ class S3MutliPartUploadConfig:

     chunk_size: int
     retries: int
-    chunk_fetcher: Callable[[int, int], Future[
+    chunk_fetcher: Callable[[int, int, Any], Future[FilePart]]
     resume_path_json: Path
     max_write_threads: int
     max_chunks_before_suspension: int | None = None
```
src/rclone_api/s3/upload_file_multipart.py:

```diff
@@ -6,13 +6,19 @@ from concurrent.futures import Future, ThreadPoolExecutor
 from pathlib import Path
 from queue import Queue
 from threading import Event, Thread
-from typing import Callable
+from typing import Any, Callable

 from botocore.client import BaseClient

-from rclone_api.
-from rclone_api.s3.
+from rclone_api.mount import FilePart
+from rclone_api.s3.chunk_file import S3FileInfo, file_chunker
+from rclone_api.s3.chunk_types import (
+    FinishedPiece,
+    UploadInfo,
+    UploadState,
+)
 from rclone_api.s3.types import MultiUploadResult
+from rclone_api.types import Finished
 from rclone_api.util import locked_print

 _MIN_UPLOAD_CHUNK_SIZE = 5 * 1024 * 1024  # 5MB
```
```diff
@@ -52,20 +58,22 @@ def upload_task(


 def handle_upload(
-    upload_info: UploadInfo,
+    upload_info: UploadInfo, fp: FilePart
 ) -> FinishedPiece | Exception | None:
-    if
+    if fp is None:
         return None
-
-
+    assert isinstance(fp.extra, S3FileInfo)
+    extra: S3FileInfo = fp.extra
+    part_number = extra.part_number
+    print(f"Handling upload for {part_number}, size {fp.size}")
     try:
         part: FinishedPiece = upload_task(
             info=upload_info,
-            chunk=
+            chunk=fp.load(),
             part_number=part_number,
             retries=upload_info.retries,
         )
-
+        fp.close()
         return part
     except Exception as e:
         stacktrace = traceback.format_exc()
```
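`handle_upload` recovers the part metadata from `fp.extra`, which `file_chunker` filled with an `S3FileInfo` when it called the fetcher; `fp.load()` then reads the chunk bytes back from disk and `fp.close()` deletes the temp file once the part is uploaded. A toy stand-in showing the `extra` channel (not the library's real fetcher):

```python
from concurrent.futures import Future, ThreadPoolExecutor
from dataclasses import dataclass


@dataclass
class S3FileInfo:  # mirrors the new dataclass in chunk_file.py
    upload_id: str
    part_number: int


pool = ThreadPoolExecutor(max_workers=1)


def fetch(offset: int, size: int, extra: object) -> Future:
    """Toy fetcher: returns (bytes, extra); `extra` rides along untouched."""
    return pool.submit(lambda: (b"x" * size, extra))


# Producer attaches part metadata; consumer recovers it from the result.
fut = fetch(0, 4, S3FileInfo(upload_id="abc123", part_number=7))
data, extra = fut.result()
assert isinstance(extra, S3FileInfo) and extra.part_number == 7
pool.shutdown()
```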
```diff
@@ -121,7 +129,7 @@ def _abort_previous_upload(upload_state: UploadState) -> None:

 def upload_file_multipart(
     s3_client: BaseClient,
-    chunk_fetcher: Callable[[int, int], Future[
+    chunk_fetcher: Callable[[int, int, Any], Future[FilePart]],
     bucket_name: str,
     file_path: Path,
     file_size: int | None,
```
```diff
@@ -178,7 +186,6 @@ def upload_file_multipart(

     work_que_max = upload_threads // 2 + 2

-    filechunks: Queue[FileChunk | None] = Queue(work_que_max)
     new_state = make_new_state()
     loaded_state = get_upload_state()

```
```diff
@@ -215,13 +222,14 @@
     started_new_upload = finished == 0
     upload_info = upload_state.upload_info

+    queue_upload: Queue[FilePart | Finished] = Queue(work_que_max)
     chunker_errors: Queue[Exception] = Queue()
     cancel_chunker_event = Event()

     def chunker_task(
         upload_state=upload_state,
         chunk_fetcher=chunk_fetcher,
-
+        queue_upload=queue_upload,
         max_chunks=max_chunks_before_suspension,
         cancel_signal=cancel_chunker_event,
         queue_errors=chunker_errors,
```
```diff
@@ -229,8 +237,8 @@
         try:
             file_chunker(
                 upload_state=upload_state,
-
-
+                fetcher=chunk_fetcher,
+                queue_upload=queue_upload,
                 max_chunks=max_chunks,
                 cancel_signal=cancel_signal,
             )
```
```diff
@@ -246,8 +254,11 @@
     with ThreadPoolExecutor(max_workers=upload_threads) as executor:
         try:
             while True:
-                file_chunk:
-                if file_chunk is
+                file_chunk: FilePart | Finished = queue_upload.get()
+                if file_chunk is Finished:
+                    break
+
+                if isinstance(file_chunk, Finished):
                     break

                 def task(upload_info=upload_info, file_chunk=file_chunk):
```
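Note the two sentinel checks in this hunk: `file_chunk is Finished` is an identity comparison against the class object itself, which is never true for the `Finished()` instances that `file_chunker` enqueues, so only the following `isinstance` test actually terminates the loop. A quick illustration of the difference:

```python
class Finished:
    pass


sentinel = Finished()                  # what file_chunker puts on the queue
print(sentinel is Finished)            # False: an instance is not the class object
print(isinstance(sentinel, Finished))  # True: this is the check that ends the loop
```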
src/rclone_api/types.py:

```diff
@@ -1,6 +1,12 @@
+import os
 import re
+import time
+import warnings
 from dataclasses import dataclass
 from enum import Enum
+from pathlib import Path
+from threading import Lock
+from typing import Any


 class ModTimeStrategy(Enum):
```
```diff
@@ -214,3 +220,114 @@

     def __int__(self) -> int:
         return self._size
+
+
+_TMP_DIR_ACCESS_LOCK = Lock()
+
+
+def _clean_old_files(out: Path) -> None:
+    # clean up files older than 1 day
+    from rclone_api.util import locked_print
+
+    now = time.time()
+    # Erase all stale files and then purge empty directories.
+    for root, dirs, files in os.walk(out):
+        for name in files:
+            f = Path(root) / name
+            filemod = f.stat().st_mtime
+            diff_secs = now - filemod
+            diff_days = diff_secs / (60 * 60 * 24)
+            if diff_days > 1:
+                locked_print(f"Removing old file: {f}")
+                f.unlink()
+
+    for root, dirs, _ in os.walk(out):
+        for dir in dirs:
+            d = Path(root) / dir
+            if not list(d.iterdir()):
+                locked_print(f"Removing empty directory: {d}")
+                d.rmdir()
+
+
+def get_chunk_tmpdir() -> Path:
+    with _TMP_DIR_ACCESS_LOCK:
+        dat = get_chunk_tmpdir.__dict__
+        if "out" in dat:
+            return dat["out"]  # Folder already validated.
+        out = Path("chunk_store")
+        if out.exists():
+            # first access, clean up directory
+            _clean_old_files(out)
+        out.mkdir(exist_ok=True, parents=True)
+        dat["out"] = out
+        return out
+
+
+class Finished:
+    pass
+
+
+class FilePart:
+    def __init__(self, payload: bytes | Exception, extra: Any) -> None:
+        from rclone_api.util import random_str
+
+        self.extra = extra
+        self._lock = Lock()
+        self.payload: Path | Exception
+        if isinstance(payload, Exception):
+            self.payload = payload
+            return
+        self.payload = get_chunk_tmpdir() / f"{random_str(12)}.chunk"
+        with _TMP_DIR_ACCESS_LOCK:
+            if not self.payload.parent.exists():
+                self.payload.parent.mkdir(parents=True)
+        self.payload.write_bytes(payload)
+
+    @property
+    def size(self) -> int:
+        with self._lock:
+            if isinstance(self.payload, Path):
+                return self.payload.stat().st_size
+            return -1
+
+    def n_bytes(self) -> int:
+        with self._lock:
+            if isinstance(self.payload, Path):
+                return self.payload.stat().st_size
+            return -1
+
+    def load(self) -> bytes:
+        with self._lock:
+            if isinstance(self.payload, Path):
+                with open(self.payload, "rb") as f:
+                    return f.read()
+            raise ValueError("Cannot load from error")
+
+    def __post_init__(self):
+        if isinstance(self.payload, Path):
+            assert self.payload.exists(), f"File part {self.payload} does not exist"
+            assert self.payload.is_file(), f"File part {self.payload} is not a file"
+            assert self.payload.stat().st_size > 0, f"File part {self.payload} is empty"
+        elif isinstance(self.payload, Exception):
+            warnings.warn(f"File part error: {self.payload}")
+        print(f"File part created with payload: {self.payload}")
+
+    def is_error(self) -> bool:
+        return isinstance(self.payload, Exception)
+
+    def close(self) -> None:
+        with self._lock:
+            if isinstance(self.payload, Exception):
+                warnings.warn(
+                    f"Cannot close file part because the payload represents an error: {self.payload}"
+                )
+                return
+            if self.payload.exists():
+                try:
+                    self.payload.unlink()
+                    print(f"File part {self.payload} deleted")
+                except Exception as e:
+                    warnings.warn(f"Cannot close file part because of error: {e}")
+
+    def __del__(self):
+        self.close()
```
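Taken together, `FilePart` spills each fetched chunk to a file under `chunk_store/` (created in the working directory) so that large transfers do not hold every chunk in RAM; an error payload short-circuits the disk write. A minimal round-trip, assuming the package is installed:

```python
from rclone_api.types import FilePart

fp = FilePart(payload=b"hello chunk", extra={"part": 1})
print(fp.n_bytes())  # 11: size of the backing temp file
print(fp.load())     # b'hello chunk', read back from disk
fp.close()           # unlink the temp file (also runs via __del__)

bad = FilePart(payload=ValueError("boom"), extra=None)
print(bad.is_error())  # True; bad.load() would raise ValueError
```

One caveat visible in the diff itself: `__post_init__` is a dataclass hook, and `FilePart` is a plain class with its own `__init__`, so that validation block is never invoked automatically as written.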
tests/test_s3.py:

```diff
@@ -3,6 +3,7 @@ import tempfile
 import unittest
 from concurrent.futures import Future, ThreadPoolExecutor
 from pathlib import Path
+from typing import Any

 from dotenv import load_dotenv

```
```diff
@@ -10,6 +11,7 @@ from rclone_api.s3.api import S3Client
 from rclone_api.s3.create import S3Provider
 from rclone_api.s3.types import S3Credentials, S3MutliPartUploadConfig, S3UploadTarget
 from rclone_api.s3.upload_file_multipart import MultiUploadResult
+from rclone_api.types import FilePart

 load_dotenv()

```
```diff
@@ -67,14 +69,18 @@ class RcloneS3Tester(unittest.TestCase):
             state_json = Path(tempdir) / "state.json"

             def simple_fetcher(
-                offset: int, chunk_size: int
-            ) -> Future[
+                offset: int, chunk_size: int, extra: Any
+            ) -> Future[FilePart]:
                 with ThreadPoolExecutor() as executor:

-                    def task(
+                    def task(
+                        tmpfile=tmpfile, offset=offset, chunk_size=chunk_size
+                    ) -> FilePart:
                         with open(str(tmpfile), "rb") as f:
                             f.seek(offset)
-
+                            data = f.read(chunk_size)
+                            fp = FilePart(payload=data, extra=extra)
+                            return fp

                     fut = executor.submit(task)
                     return fut
```
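One subtlety in `simple_fetcher`: `with ThreadPoolExecutor() as executor:` calls `executor.shutdown(wait=True)` on exit, so the function blocks until `task` finishes and the returned future is always already resolved. That is fine for a test fetcher, but it is synchronous in practice. A sketch of the behavior:

```python
import time
from concurrent.futures import ThreadPoolExecutor


def slow() -> str:
    time.sleep(0.2)
    return "done"


with ThreadPoolExecutor() as executor:
    fut = executor.submit(slow)
# The with-block joins the worker thread on exit, so the future is resolved here.
print(fut.done())    # True
print(fut.result())  # "done", returned immediately
```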