rclone-api 1.2.8-py2.py3-none-any.whl → 1.2.9-py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rclone_api/mount.py CHANGED
@@ -14,7 +14,7 @@ from threading import Lock, Semaphore
 from typing import Any

 from rclone_api.process import Process
-from rclone_api.types import SizeSuffix
+from rclone_api.types import FilePart

 _SYSTEM = platform.system()  # "Linux", "Darwin", "Windows", etc.

@@ -34,7 +34,7 @@ def _cleanup_mounts() -> None:
     with ThreadPoolExecutor() as executor:
         mount: Mount
         for mount in _MOUNTS_FOR_GC:
-            executor.submit(mount.close, wait=False)
+            executor.submit(mount.close)


 atexit.register(_cleanup_mounts)
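
Note: this fix matters because `ThreadPoolExecutor.submit(fn, *args, **kwargs)` forwards extra arguments to `fn` itself, so the old call actually invoked `mount.close(wait=False)` rather than configuring the executor. A minimal standalone sketch of the forwarding rule (not from this package):

    from concurrent.futures import ThreadPoolExecutor

    def close(wait: bool = True) -> str:
        return f"closed(wait={wait})"

    with ThreadPoolExecutor() as ex:
        # submit() passes positional/keyword args straight through to the callable.
        print(ex.submit(close).result())              # closed(wait=True)
        print(ex.submit(close, wait=False).result())  # closed(wait=False)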
@@ -296,14 +296,13 @@ def _read_from_mount_task(
     try:
         with path.open("rb") as f:
             f.seek(offset)
-            sz = f.read(size)
-            assert len(sz) == size, f"Invalid read size: {len(sz)}"
-            return sz
+            payload = f.read(size)
+            assert len(payload) == size, f"Invalid read size: {len(payload)}"
+            return payload
+
     except KeyboardInterrupt as e:
         import _thread

-        warnings.warn(f"Error fetching file chunk: {e}")
-
         _thread.interrupt_main()
         return Exception(e)
     except Exception as e:
@@ -319,7 +318,6 @@ class MultiMountFileChunker:
         self,
         filename: str,
         filesize: int,
-        chunk_size: SizeSuffix,
         mounts: list[Mount],
         executor: ThreadPoolExecutor,
         verbose: bool | None,
@@ -328,7 +326,6 @@ class MultiMountFileChunker:

         self.filename = filename
         self.filesize = filesize
-        self.chunk_size = chunk_size
         self.executor = executor
         self.mounts_processing: list[Mount] = []
         self.mounts_availabe: list[Mount] = mounts
@@ -336,13 +333,26 @@ class MultiMountFileChunker:
         self.lock = Lock()
         self.verbose = get_verbose(verbose)

-    def close(self) -> None:
+    def shutdown(self) -> None:
         self.executor.shutdown(wait=True, cancel_futures=True)
         with ThreadPoolExecutor() as executor:
             for mount in self.mounts_processing:
                 executor.submit(lambda: mount.close())

-    def fetch(self, offset: int, size: int) -> Future[bytes | Exception]:
+    def _acquire_mount(self) -> Mount:
+        self.semaphore.acquire()
+        with self.lock:
+            mount = self.mounts_availabe.pop()
+            self.mounts_processing.append(mount)
+        return mount
+
+    def _release_mount(self, mount: Mount) -> None:
+        with self.lock:
+            self.mounts_processing.remove(mount)
+            self.mounts_availabe.append(mount)
+        self.semaphore.release()
+
+    def fetch(self, offset: int, size: int, extra: Any) -> Future[FilePart]:
         if self.verbose:
             print(f"Fetching data range: offset={offset}, size={size}")

@@ -353,28 +363,25 @@ class MultiMountFileChunker:
         ), f"Invalid offset + size: {offset} + {size} ({offset+size}) <= {self.filesize}"

         try:
-            self.semaphore.acquire()
-            with self.lock:
-                mount = self.mounts_availabe.pop()
-                self.mounts_processing.append(mount)
-
+            mount = self._acquire_mount()
             path = mount.mount_path / self.filename

-            def task(
-                offset=offset, size=size, path=path, mount=mount, verbose=self.verbose
-            ) -> bytes | Exception:
-                out = _read_from_mount_task(
+            def task_fetch_file_range(
+                size=size, path=path, mount=mount, verbose=self.verbose
+            ) -> FilePart:
+                bytes_or_err = _read_from_mount_task(
                     offset=offset, size=size, path=path, verbose=verbose
                 )
-                with self.lock:
-                    self.mounts_processing.remove(mount)
-                    self.mounts_availabe.append(mount)
-                    self.semaphore.release()
+                self._release_mount(mount)
+
+                if isinstance(bytes_or_err, Exception):
+                    return FilePart(payload=bytes_or_err, extra=extra)
+                out = FilePart(payload=bytes_or_err, extra=extra)
                 return out

-            fut = self.executor.submit(task)
+            fut = self.executor.submit(task_fetch_file_range)
             return fut
         except Exception as e:
             warnings.warn(f"Error fetching file chunk: {e}")
-            err = Exception(e)
-            return self.executor.submit(lambda: err)
+            fp = FilePart(payload=e, extra=extra)
+            return self.executor.submit(lambda: fp)
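
The `_acquire_mount`/`_release_mount` pair introduced above is a classic bounded-pool pattern: the semaphore caps concurrent readers at the number of mounts, while the lock guards the two bookkeeping lists. The same idea in isolation, as a standalone sketch with a hypothetical `Pool` class (the real chunker additionally spools results into `FilePart`):

    from threading import Lock, Semaphore

    class Pool:
        def __init__(self, resources: list[str]) -> None:
            self._available = list(resources)
            self._busy: list[str] = []
            self._lock = Lock()
            self._sem = Semaphore(len(resources))  # caps concurrent acquisitions

        def acquire(self) -> str:
            self._sem.acquire()  # blocks when every resource is busy
            with self._lock:
                r = self._available.pop()
                self._busy.append(r)
            return r

        def release(self, r: str) -> None:
            with self._lock:
                self._busy.remove(r)
                self._available.append(r)
            self._sem.release()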
rclone_api/profile/mount_copy_bytes.py CHANGED
@@ -15,6 +15,7 @@ from dotenv import load_dotenv

 from rclone_api import Config, Rclone, SizeSuffix
 from rclone_api.mount import MultiMountFileChunker
+from rclone_api.types import FilePart

 os.environ["RCLONE_API_VERBOSE"] = "1"

@@ -130,16 +131,18 @@ def _run_profile(
     )
     bytes_count = 0

-    futures: list[Future[bytes | Exception]] = []
+    futures: list[Future[FilePart]] = []
     for i in range(num):
         offset = SizeSuffix(i * chunk_size.as_int()) + offset
-        future = filechunker.fetch(offset.as_int(), size.as_int())
+        future = filechunker.fetch(offset.as_int(), size.as_int(), "TEST OBJECT")
         futures.append(future)

     for future in futures:
-        bytes_or_err = future.result()
-        if isinstance(bytes_or_err, Exception):
-            assert False, f"Error: {bytes_or_err}"
+        filepart_or_err = future.result()
+        if isinstance(filepart_or_err, Exception):
+            assert False, f"Error: {filepart_or_err}"
+        bytes_count += filepart_or_err.n_bytes()
+        filepart_or_err.close()
     futures.clear()

     start = time.time()
@@ -149,14 +152,14 @@ def _run_profile(

     for i in range(num):
         offset = SizeSuffix(i * chunk_size.as_int()) + offset
-        future = filechunker.fetch(offset.as_int(), size.as_int())
+        future = filechunker.fetch(offset.as_int(), size.as_int(), "TEST OBJECT")
         futures.append(future)

     for future in futures:
         bytes_or_err = future.result()
         if isinstance(bytes_or_err, Exception):
             assert False, f"Error: {bytes_or_err}"
-        bytes_count += len(bytes_or_err)
+        bytes_count += bytes_or_err.n_bytes()

     diff = (time.time() - start) / num
     net_io_end = psutil.net_io_counters()
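
Taken together, the profiling changes above show the new calling convention: `fetch(offset, size, extra)` returns a `Future[FilePart]`, and callers account for bytes with `n_bytes()` and release the backing temp file with `close()`. A consolidated sketch of one fetch round-trip, assuming an already-constructed `MultiMountFileChunker` named `chunker`:

    from rclone_api.types import FilePart

    def read_chunk(chunker, offset: int, size: int) -> bytes:
        # `extra` is an opaque tag that rides along on the resulting FilePart.
        fut = chunker.fetch(offset, size, "EXAMPLE TAG")
        fp: FilePart = fut.result()
        if fp.is_error():
            raise RuntimeError(f"chunk fetch failed: {fp.payload}")
        try:
            return fp.load()  # read the spooled temp file back into memory
        finally:
            fp.close()        # unlink the temp file backing this part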
rclone_api/rclone.py CHANGED
@@ -4,6 +4,7 @@ Unit test file.

 import os
 import random
+import shutil
 import subprocess
 import time
 import traceback
@@ -825,7 +826,7 @@ class Rclone:
             traceback.print_exc()
             raise
         finally:
-            chunk_fetcher.close()
+            chunk_fetcher.shutdown()

     def _copy_bytes(
         self,
@@ -984,7 +985,6 @@ class Rclone:
         filechunker: MultiMountFileChunker = MultiMountFileChunker(
             filename=filename,
             filesize=filesize,
-            chunk_size=chunk_size,
             mounts=mounts,
             executor=executor,
             verbose=mount_log is not None,
@@ -1004,6 +1004,8 @@ class Rclone:
         direct_io: bool = True,
     ) -> bytes | Exception:
         """Copy bytes from a file to another file."""
+        from rclone_api.types import FilePart
+
         # determine number of threads from chunk size
         threads = max(1, min(max_threads, length // chunk_size.as_int()))
         filechunker = self.get_multi_mount_file_chunker(
@@ -1014,23 +1016,24 @@ class Rclone:
             direct_io: bool = True,
         )
         try:
-            fut = filechunker.fetch(offset, length)
-            data = fut.result()
-            if isinstance(data, Exception):
-                warnings.warn(f"Error copying bytes: {data}")
-                raise data
+            fut = filechunker.fetch(offset, length, extra=None)
+            fp: FilePart = fut.result()
+            payload = fp.payload
+            if isinstance(payload, Exception):
+                return payload
             if outfile is None:
-                return data
-            with open(outfile, "wb") as out:
-                out.write(data)
-            del data
+                out = payload.read_bytes()
+                payload.unlink()
+                return out
+            shutil.move(payload, outfile)
             return bytes(0)
+
         except Exception as e:
             warnings.warn(f"Error copying bytes: {e}")
             return e
         finally:
             try:
-                filechunker.close()
+                filechunker.shutdown()
             except Exception as e:
                 warnings.warn(f"Error closing filechunker: {e}")

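Because a successful `FilePart` payload is now a `Path` to a spooled temp file rather than in-memory `bytes`, the rewritten `_copy_bytes` either reads the file back (`read_bytes` plus `unlink`) or hands it to the destination with `shutil.move`. A rough standalone equivalent of that branch, assuming `fp.payload` follows the `Path | Exception` contract shown above:

    import shutil
    from pathlib import Path

    def deliver(fp, outfile: Path | None) -> bytes:
        payload = fp.payload  # Path on success, Exception on failure
        if isinstance(payload, Exception):
            raise payload
        if outfile is None:
            data = payload.read_bytes()  # load the spooled chunk into memory
            payload.unlink()             # then drop the temp file
            return data
        shutil.move(payload, outfile)    # move the temp file into place
        return b""                       # mirrors the `bytes(0)` sentinel above
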
rclone_api/s3/chunk_file.py CHANGED
@@ -1,12 +1,15 @@
 import time
 import warnings
 from concurrent.futures import Future
+from dataclasses import dataclass
 from pathlib import Path
 from queue import Queue
 from threading import Event
-from typing import Callable
+from typing import Any, Callable

-from rclone_api.s3.chunk_types import FileChunk, UploadState
+from rclone_api.mount import FilePart
+from rclone_api.s3.chunk_types import UploadState
+from rclone_api.types import Finished
 from rclone_api.util import locked_print


@@ -27,12 +30,18 @@ def _get_file_size(file_path: Path, timeout: int = 60) -> int:
     raise TimeoutError(f"File {file_path} not found after {timeout} seconds")


+@dataclass
+class S3FileInfo:
+    upload_id: str
+    part_number: int
+
+
 def file_chunker(
     upload_state: UploadState,
-    chunk_fetcher: Callable[[int, int], Future[bytes | Exception]],
+    fetcher: Callable[[int, int, Any], Future[FilePart]],
     max_chunks: int | None,
     cancel_signal: Event,
-    output: Queue[FileChunk | None],
+    queue_upload: Queue[FilePart | Finished],
 ) -> None:
     count = 0

@@ -49,7 +58,7 @@ def file_chunker(
     upload_info = upload_state.upload_info
     file_path = upload_info.src_file_path
     chunk_size = upload_info.chunk_size
-    src = Path(file_path)
+    # src = Path(file_path)

     try:
         part_number = 1
@@ -73,12 +82,12 @@ def file_chunker(
             return

         while not should_stop():
-            curr_parth_num = next_part_number()
-            if curr_parth_num is None:
+            curr_part_number = next_part_number()
+            if curr_part_number is None:
                 locked_print(f"File {file_path} has completed chunking all parts")
                 break
-            assert curr_parth_num is not None
-            offset = (curr_parth_num - 1) * chunk_size
+            assert curr_part_number is not None
+            offset = (curr_part_number - 1) * chunk_size
             file_size = upload_info.file_size

             assert offset < file_size, f"Offset {offset} is greater than file size"
@@ -90,44 +99,38 @@ def file_chunker(

             # data = chunk_fetcher(offset, chunk_size).result()

-            assert curr_parth_num is not None
-            cpn: int = curr_parth_num
+            assert curr_part_number is not None
+            cpn: int = curr_part_number

-            def on_complete(
-                fut: Future[bytes | Exception],
-                part_number: int = cpn,
-                file_path: Path = file_path,
-            ) -> None:
-                data: bytes | Exception = fut.result()
-                if isinstance(data, Exception):
+            def on_complete(fut: Future[FilePart]) -> None:
+                fp: FilePart = fut.result()
+                if fp.is_error():
                     warnings.warn(
-                        f"Error reading file: {data}, skipping part {part_number}"
+                        f"Error reading file: {fp}, skipping part {part_number}"
                     )
                     return

-                if not data or len(data) == 0:
+                if fp.n_bytes() == 0:
                     warnings.warn(f"Empty data for part {part_number} of {file_path}")
                     raise ValueError(
                         f"Empty data for part {part_number} of {file_path}"
                     )

-                file_chunk = FileChunk(
-                    src,
-                    upload_id=upload_info.upload_id,
-                    part_number=part_number,
-                    data=data,  # After this, data should not be reused.
-                )
+                if isinstance(fp.payload, Exception):
+                    warnings.warn(f"Error reading file because of error: {fp.payload}")
+                    return
+
                 done_part_numbers.add(part_number)
-                output.put(file_chunk)
+                queue_upload.put(fp)

-            offset = (curr_parth_num - 1) * chunk_size
-            fut = chunk_fetcher(offset, chunk_size)
+            offset = (curr_part_number - 1) * chunk_size
+            fut = fetcher(offset, file_size, S3FileInfo(upload_info.upload_id, cpn))
             fut.add_done_callback(on_complete)
-            # wait until the output queue can accept the next chunk
-            while output.full():
+            # wait until the queue_upload queue can accept the next chunk
+            while queue_upload.full():
                 time.sleep(0.1)
     except Exception as e:

         warnings.warn(f"Error reading file: {e}")
     finally:
-        output.put(None)
+        queue_upload.put(Finished())
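
The queue sentinel changed from `None` to a dedicated `Finished` instance, which keeps the queue typed as `Queue[FilePart | Finished]` and makes the end-of-stream check an `isinstance` test. A minimal consumer sketch under that contract (the `drain` helper is illustrative, not part of the package):

    from queue import Queue

    from rclone_api.types import FilePart, Finished

    def drain(queue_upload: "Queue[FilePart | Finished]") -> list[FilePart]:
        # file_chunker always enqueues Finished() in its finally block,
        # so the consumer can loop until the sentinel arrives.
        parts: list[FilePart] = []
        while True:
            item = queue_upload.get()
            if isinstance(item, Finished):
                break
            parts.append(item)
        return parts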
rclone_api/s3/chunk_types.py CHANGED
@@ -1,7 +1,6 @@
 import hashlib
 import json
 import os
-import time
 from dataclasses import dataclass, field, fields
 from pathlib import Path
 from threading import Lock
@@ -11,79 +10,9 @@ from botocore.client import BaseClient
 from rclone_api.types import SizeSuffix
 from rclone_api.util import locked_print

-_MIN_UPLOAD_CHUNK_SIZE = 5 * 1024 * 1024  # 5MB
+# _MIN_UPLOAD_CHUNK_SIZE = 5 * 1024 * 1024  # 5MB
 _SAVE_STATE_LOCK = Lock()

-_TMP_DIR_ACCESS_LOCK = Lock()
-
-
-def _clean_old_files(out: Path) -> None:
-    # clean up files older than 1 day
-
-    now = time.time()
-    # Erase all stale files and then purge empty directories.
-    for root, dirs, files in os.walk(out):
-        for name in files:
-            f = Path(root) / name
-            filemod = f.stat().st_mtime
-            diff_secs = now - filemod
-            diff_days = diff_secs / (60 * 60 * 24)
-            if diff_days > 1:
-                locked_print(f"Removing old file: {f}")
-                f.unlink()
-
-    for root, dirs, _ in os.walk(out):
-        for dir in dirs:
-            d = Path(root) / dir
-            if not list(d.iterdir()):
-                locked_print(f"Removing empty directory: {d}")
-                d.rmdir()
-
-
-def _get_chunk_tmpdir() -> Path:
-    with _TMP_DIR_ACCESS_LOCK:
-        dat = _get_chunk_tmpdir.__dict__
-        if "out" in dat:
-            return dat["out"]  # Folder already validated.
-        out = Path("chunk_store")
-        if out.exists():
-            # first access, clean up directory
-            _clean_old_files(out)
-        out.mkdir(exist_ok=True, parents=True)
-        dat["out"] = out
-        return out
-
-
-class FileChunk:
-    def __init__(self, src: Path, upload_id: str, part_number: int, data: bytes):
-        assert data is not None, f"{src}: Data must not be None"
-        self.upload_id = upload_id
-        self.src = src
-        self.part_number = part_number
-        name = src.name
-        self.tmpdir = _get_chunk_tmpdir()
-        self.filepart = self.tmpdir / f"{name}_{upload_id}.part_{part_number}.tmp"
-        self.filepart.write_bytes(data)
-        del data  # free up memory
-
-    @property
-    def data(self) -> bytes:
-        assert self.filepart is not None
-        with open(self.filepart, "rb") as f:
-            return f.read()
-        return b""
-
-    def close(self):
-        import traceback
-
-        stacktrace = traceback.format_stack()
-        locked_print(f"Closing file chunk: {self.filepart}\n{stacktrace}")
-        if self.filepart.exists():
-            self.filepart.unlink()
-
-    def __del__(self):
-        self.close()
-

 @dataclass
 class UploadInfo:
@@ -220,11 +149,13 @@ class UploadState:
         self._save_no_lock()

     def __post_init__(self):
+        from rclone_api.types import get_chunk_tmpdir
+
         if self.peristant is None:
             # upload_id = self.upload_info.upload_id
             object_name = self.upload_info.object_name
             chunk_size = self.upload_info.chunk_size
-            parent = _get_chunk_tmpdir()
+            parent = get_chunk_tmpdir()
             self.peristant = parent / f"{object_name}_chunk_size_{chunk_size}_.json"

     def save(self) -> None:
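
`_get_chunk_tmpdir` moved out of this module into `rclone_api.types` as `get_chunk_tmpdir` (full definition in the types.py section below); `__post_init__` imports it lazily, presumably to break the import cycle between the modules. A small usage sketch (the state-file name here is illustrative only):

    from rclone_api.types import get_chunk_tmpdir

    # First call validates ./chunk_store (cleaning files older than a day) and
    # memoizes the Path on the function object; later calls return the cache.
    tmpdir = get_chunk_tmpdir()
    state_file = tmpdir / "example-object_chunk_size_67108864_.json"  # hypothetical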
rclone_api/s3/types.py CHANGED
@@ -2,7 +2,9 @@ from concurrent.futures import Future
 from dataclasses import dataclass
 from enum import Enum
 from pathlib import Path
-from typing import Callable
+from typing import Any, Callable
+
+from rclone_api.mount import FilePart


 class S3Provider(Enum):
@@ -48,7 +50,7 @@ class S3MutliPartUploadConfig:

     chunk_size: int
     retries: int
-    chunk_fetcher: Callable[[int, int], Future[bytes | Exception]]
+    chunk_fetcher: Callable[[int, int, Any], Future[FilePart]]
     resume_path_json: Path
     max_write_threads: int
     max_chunks_before_suspension: int | None = None
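
With this change every fetcher must satisfy `Callable[[int, int, Any], Future[FilePart]]`. A conforming stub, useful for exercising the upload pipeline without real mounts (the names here are hypothetical, not part of the package):

    from concurrent.futures import Future
    from typing import Any

    from rclone_api.types import FilePart

    def fake_chunk_fetcher(offset: int, size: int, extra: Any) -> "Future[FilePart]":
        # Resolve immediately with zero bytes; a real fetcher reads a mount.
        fut: Future[FilePart] = Future()
        fut.set_result(FilePart(payload=b"\x00" * size, extra=extra))
        return fut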
rclone_api/s3/upload_file_multipart.py CHANGED
@@ -6,13 +6,19 @@ from concurrent.futures import Future, ThreadPoolExecutor
 from pathlib import Path
 from queue import Queue
 from threading import Event, Thread
-from typing import Callable
+from typing import Any, Callable

 from botocore.client import BaseClient

-from rclone_api.s3.chunk_file import file_chunker
-from rclone_api.s3.chunk_types import FileChunk, FinishedPiece, UploadInfo, UploadState
+from rclone_api.mount import FilePart
+from rclone_api.s3.chunk_file import S3FileInfo, file_chunker
+from rclone_api.s3.chunk_types import (
+    FinishedPiece,
+    UploadInfo,
+    UploadState,
+)
 from rclone_api.s3.types import MultiUploadResult
+from rclone_api.types import Finished
 from rclone_api.util import locked_print

 _MIN_UPLOAD_CHUNK_SIZE = 5 * 1024 * 1024  # 5MB
@@ -52,20 +58,22 @@ def upload_task(


 def handle_upload(
-    upload_info: UploadInfo, file_chunk: FileChunk | None
+    upload_info: UploadInfo, fp: FilePart
 ) -> FinishedPiece | Exception | None:
-    if file_chunk is None:
+    if fp is None:
         return None
-    print(f"Handling upload for {file_chunk.part_number}, size {len(file_chunk.data)}")
-    chunk, part_number = file_chunk.data, file_chunk.part_number
+    assert isinstance(fp.extra, S3FileInfo)
+    extra: S3FileInfo = fp.extra
+    part_number = extra.part_number
+    print(f"Handling upload for {part_number}, size {fp.size}")
     try:
         part: FinishedPiece = upload_task(
             info=upload_info,
-            chunk=chunk,
+            chunk=fp.load(),
             part_number=part_number,
             retries=upload_info.retries,
         )
-        file_chunk.close()
+        fp.close()
         return part
     except Exception as e:
         stacktrace = traceback.format_exc()
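
Metadata that used to live on `FileChunk` (upload id, part number) now rides along in `fp.extra` as the `S3FileInfo` dataclass stamped by `file_chunker`. A sketch of how a consumer recovers it, mirroring the assertion above (`part_number_of` is an illustrative helper, not a package function):

    from rclone_api.s3.chunk_file import S3FileInfo
    from rclone_api.types import FilePart

    def part_number_of(fp: FilePart) -> int:
        # file_chunker attaches S3FileInfo(upload_id, part_number) to each part.
        assert isinstance(fp.extra, S3FileInfo)
        return fp.extra.part_number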
@@ -121,7 +129,7 @@ def _abort_previous_upload(upload_state: UploadState) -> None:

 def upload_file_multipart(
     s3_client: BaseClient,
-    chunk_fetcher: Callable[[int, int], Future[bytes | Exception]],
+    chunk_fetcher: Callable[[int, int, Any], Future[FilePart]],
     bucket_name: str,
     file_path: Path,
     file_size: int | None,
@@ -178,7 +186,6 @@ def upload_file_multipart(

     work_que_max = upload_threads // 2 + 2

-    filechunks: Queue[FileChunk | None] = Queue(work_que_max)
     new_state = make_new_state()
     loaded_state = get_upload_state()

@@ -215,13 +222,14 @@ def upload_file_multipart(
     started_new_upload = finished == 0
     upload_info = upload_state.upload_info

+    queue_upload: Queue[FilePart | Finished] = Queue(work_que_max)
     chunker_errors: Queue[Exception] = Queue()
     cancel_chunker_event = Event()

     def chunker_task(
         upload_state=upload_state,
         chunk_fetcher=chunk_fetcher,
-        output=filechunks,
+        queue_upload=queue_upload,
         max_chunks=max_chunks_before_suspension,
         cancel_signal=cancel_chunker_event,
         queue_errors=chunker_errors,
@@ -229,8 +237,8 @@ def upload_file_multipart(
         try:
             file_chunker(
                 upload_state=upload_state,
-                chunk_fetcher=chunk_fetcher,
-                output=output,
+                fetcher=chunk_fetcher,
+                queue_upload=queue_upload,
                 max_chunks=max_chunks,
                 cancel_signal=cancel_signal,
             )
@@ -246,8 +254,11 @@ def upload_file_multipart(
     with ThreadPoolExecutor(max_workers=upload_threads) as executor:
         try:
             while True:
-                file_chunk: FileChunk | None = filechunks.get()
-                if file_chunk is None:
+                file_chunk: FilePart | Finished = queue_upload.get()
+                if file_chunk is Finished:
+                    break
+
+                if isinstance(file_chunk, Finished):
                     break

                 def task(upload_info=upload_info, file_chunk=file_chunk):
rclone_api/types.py CHANGED
@@ -1,6 +1,12 @@
+import os
 import re
+import time
+import warnings
 from dataclasses import dataclass
 from enum import Enum
+from pathlib import Path
+from threading import Lock
+from typing import Any


 class ModTimeStrategy(Enum):
@@ -214,3 +220,114 @@ class SizeSuffix:

     def __int__(self) -> int:
         return self._size
+
+
+_TMP_DIR_ACCESS_LOCK = Lock()
+
+
+def _clean_old_files(out: Path) -> None:
+    # clean up files older than 1 day
+    from rclone_api.util import locked_print
+
+    now = time.time()
+    # Erase all stale files and then purge empty directories.
+    for root, dirs, files in os.walk(out):
+        for name in files:
+            f = Path(root) / name
+            filemod = f.stat().st_mtime
+            diff_secs = now - filemod
+            diff_days = diff_secs / (60 * 60 * 24)
+            if diff_days > 1:
+                locked_print(f"Removing old file: {f}")
+                f.unlink()
+
+    for root, dirs, _ in os.walk(out):
+        for dir in dirs:
+            d = Path(root) / dir
+            if not list(d.iterdir()):
+                locked_print(f"Removing empty directory: {d}")
+                d.rmdir()
+
+
+def get_chunk_tmpdir() -> Path:
+    with _TMP_DIR_ACCESS_LOCK:
+        dat = get_chunk_tmpdir.__dict__
+        if "out" in dat:
+            return dat["out"]  # Folder already validated.
+        out = Path("chunk_store")
+        if out.exists():
+            # first access, clean up directory
+            _clean_old_files(out)
+        out.mkdir(exist_ok=True, parents=True)
+        dat["out"] = out
+        return out
+
+
+class Finished:
+    pass
+
+
+class FilePart:
+    def __init__(self, payload: bytes | Exception, extra: Any) -> None:
+        from rclone_api.util import random_str
+
+        self.extra = extra
+        self._lock = Lock()
+        self.payload: Path | Exception
+        if isinstance(payload, Exception):
+            self.payload = payload
+            return
+        self.payload = get_chunk_tmpdir() / f"{random_str(12)}.chunk"
+        with _TMP_DIR_ACCESS_LOCK:
+            if not self.payload.parent.exists():
+                self.payload.parent.mkdir(parents=True)
+        self.payload.write_bytes(payload)
+
+    @property
+    def size(self) -> int:
+        with self._lock:
+            if isinstance(self.payload, Path):
+                return self.payload.stat().st_size
+            return -1
+
+    def n_bytes(self) -> int:
+        with self._lock:
+            if isinstance(self.payload, Path):
+                return self.payload.stat().st_size
+            return -1
+
+    def load(self) -> bytes:
+        with self._lock:
+            if isinstance(self.payload, Path):
+                with open(self.payload, "rb") as f:
+                    return f.read()
+            raise ValueError("Cannot load from error")
+
+    def __post_init__(self):
+        if isinstance(self.payload, Path):
+            assert self.payload.exists(), f"File part {self.payload} does not exist"
+            assert self.payload.is_file(), f"File part {self.payload} is not a file"
+            assert self.payload.stat().st_size > 0, f"File part {self.payload} is empty"
+        elif isinstance(self.payload, Exception):
+            warnings.warn(f"File part error: {self.payload}")
+        print(f"File part created with payload: {self.payload}")
+
+    def is_error(self) -> bool:
+        return isinstance(self.payload, Exception)
+
+    def close(self) -> None:
+        with self._lock:
+            if isinstance(self.payload, Exception):
+                warnings.warn(
+                    f"Cannot close file part because the payload represents an error: {self.payload}"
+                )
+                return
+            if self.payload.exists():
+                try:
+                    self.payload.unlink()
+                    print(f"File part {self.payload} deleted")
+                except Exception as e:
+                    warnings.warn(f"Cannot close file part because of error: {e}")
+
+    def __del__(self):
+        self.close()
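
In short, `FilePart` replaces the old in-memory `FileChunk`: construction spools the bytes into `chunk_store/<random>.chunk`, and the object then acts as a handle exposing `n_bytes()`, `load()`, and `close()` (also invoked by `__del__`). A minimal lifecycle sketch:

    from rclone_api.types import FilePart

    fp = FilePart(payload=b"hello world", extra=None)  # spools to chunk_store/
    try:
        assert not fp.is_error()
        assert fp.n_bytes() == 11  # size of the spooled temp file
        data = fp.load()           # read the bytes back from disk
    finally:
        fp.close()                 # unlink the temp file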
rclone_api-1.2.9.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: rclone_api
-Version: 1.2.8
+Version: 1.2.9
 Summary: rclone api in python
 Home-page: https://github.com/zackees/rclone-api
 License: BSD 3-Clause License
rclone_api-1.2.9.dist-info/RECORD CHANGED
@@ -11,13 +11,13 @@ rclone_api/exec.py,sha256=Pd7pUBd8ib5MzqvMybG2DQISPRbDRu20VjVRL2mLAVY,1076
 rclone_api/file.py,sha256=EP5yT2dZ0H2p7CY5n0y5k5pHhIliV25pm8KOwBklUTk,1863
 rclone_api/filelist.py,sha256=xbiusvNgaB_b_kQOZoHMJJxn6TWGtPrWd2J042BI28o,767
 rclone_api/group_files.py,sha256=H92xPW9lQnbNw5KbtZCl00bD6iRh9yRbCuxku4j_3dg,8036
-rclone_api/mount.py,sha256=qdfPatPKGAAaNexJQCvunq_MdBJL6NSlGmin8cHCXGg,13255
+rclone_api/mount.py,sha256=tjWMBEz67p4d6wR4fwZZltEvsReLKzdikL3KOKUCqII,13522
 rclone_api/process.py,sha256=rBj_S86jC6nqCYop-jq8r9eMSteKeObxUrJMgH8LZvI,5084
-rclone_api/rclone.py,sha256=V4oeepsleic2llCA7JekMw1iOwN7uYE3ktJ-gEA7wcw,49277
+rclone_api/rclone.py,sha256=4_eoKkC0eZDT2VnpMlrF5EkDZam5WWaiiWFxNntqs2k,49323
 rclone_api/remote.py,sha256=O9WDUFQy9f6oT1HdUbTixK2eg0xtBBm8k4Xl6aa6K00,431
 rclone_api/rpath.py,sha256=8ZA_1wxWtskwcy0I8V2VbjKDmzPkiWd8Q2JQSvh-sYE,2586
 rclone_api/scan_missing_folders.py,sha256=Kulca2Q6WZodt00ATFHkmqqInuoPvBkhTcS9703y6po,4740
-rclone_api/types.py,sha256=Gchc24Ze0QFUoUZpF_H1pwtP37OT8MKLUpMk3RwXSTs,6151
+rclone_api/types.py,sha256=bmLu9EnX5Q2DyUkoyi6ExgywkfOkb_YNJdTQfj8YYrk,9923
 rclone_api/util.py,sha256=_Z-GUMVXnHYOGdo2dy2ie2P5fGgyg8KdGjHKicx68Ko,4573
 rclone_api/walk.py,sha256=-54NVE8EJcCstwDoaC_UtHm73R2HrZwVwQmsnv55xNU,3369
 rclone_api/assets/example.txt,sha256=lTBovRjiz0_TgtAtbA1C5hNi2ffbqnNPqkKg6UiKCT8,54
@@ -25,17 +25,17 @@ rclone_api/cmd/copy_large_s3.py,sha256=fYHyHq2YZT_dfMbS7SCpEeLCaWD-BU-jcpKP9eKf1
 rclone_api/cmd/list_files.py,sha256=x8FHODEilwKqwdiU1jdkeJbLwOqUkUQuDWPo2u_zpf0,741
 rclone_api/experimental/flags.py,sha256=qCVD--fSTmzlk9hloRLr0q9elzAOFzPsvVpKM3aB1Mk,2739
 rclone_api/experimental/flags_base.py,sha256=ajU_czkTcAxXYU-SlmiCfHY7aCQGHvpCLqJ-Z8uZLk0,2102
-rclone_api/profile/mount_copy_bytes.py,sha256=_bd9oergTuCdj1P6AVh_pC6GmtCpLFQYwiTdhwXeYZE,8404
+rclone_api/profile/mount_copy_bytes.py,sha256=HK11knsykKO1WNi8LYOkQbipQBpZn7uhV5CVEQDRCJs,8558
 rclone_api/s3/api.py,sha256=PafsIEyWDpLWAXsZAjFm9CY14vJpsDr9lOsn0kGRLZ0,4009
 rclone_api/s3/basic_ops.py,sha256=hK3366xhVEzEcjz9Gk_8lFx6MRceAk72cax6mUrr6ko,2104
-rclone_api/s3/chunk_file.py,sha256=2YLWPzQnVVtdBUOlZkLATcRcQGE18cI2TJZzfj4LAoc,4402
-rclone_api/s3/chunk_types.py,sha256=2n9U1BZ_5mcpoLLbSqkhvzgKj814jtSMnih9CWKcChU,10592
+rclone_api/s3/chunk_file.py,sha256=lZh4oCEo87jn5oEWHzK786_G9Y2RMmJz6cuL-ypAIT0,4402
+rclone_api/s3/chunk_types.py,sha256=NOdMz9lvoOi2DzNIaIZVY4-nGa-00P6eLSsPMak0gh8,8498
 rclone_api/s3/create.py,sha256=wgfkapv_j904CfKuWyiBIWJVxfAx_ftemFSUV14aT68,3149
-rclone_api/s3/types.py,sha256=ZUw9s164wljCEMTS4CoHXNzFIhYJEgKD6NDAu-RXyr8,1551
-rclone_api/s3/upload_file_multipart.py,sha256=aDaLF2FWSvU_jurxiFphGZBncvkPqWN47VFNzYSSTWM,10906
-rclone_api-1.2.8.dist-info/LICENSE,sha256=b6pOoifSXiUaz_lDS84vWlG3fr4yUKwB8fzkrH9R8bQ,1064
-rclone_api-1.2.8.dist-info/METADATA,sha256=eWhq_lLs9m_Vp01kxODo85kaTye0kNaizIDMFXeIlmY,4536
-rclone_api-1.2.8.dist-info/WHEEL,sha256=rF4EZyR2XVS6irmOHQIJx2SUqXLZKRMUrjsg8UwN-XQ,109
-rclone_api-1.2.8.dist-info/entry_points.txt,sha256=TV8kwP3FRzYwUEr0RLC7aJh0W03SAefIJNXTJ-FdMIQ,200
-rclone_api-1.2.8.dist-info/top_level.txt,sha256=EvZ7uuruUpe9RiUyEp25d1Keq7PWYNT0O_-mr8FCG5g,11
-rclone_api-1.2.8.dist-info/RECORD,,
+rclone_api/s3/types.py,sha256=FkUNNAk8fjSbLhDA45YgNQk9BTjmJZHT3fExiCKcDt0,1591
+rclone_api/s3/upload_file_multipart.py,sha256=scsGOxMpg2j9qK2oX_Xe4vHI6u0IHX0xtp9mHnt7Ksc,11113
+rclone_api-1.2.9.dist-info/LICENSE,sha256=b6pOoifSXiUaz_lDS84vWlG3fr4yUKwB8fzkrH9R8bQ,1064
+rclone_api-1.2.9.dist-info/METADATA,sha256=mSCgGU2Hwr87w4wlc8ZxlqG4oJ1F0t49olLS6I51wDc,4536
+rclone_api-1.2.9.dist-info/WHEEL,sha256=rF4EZyR2XVS6irmOHQIJx2SUqXLZKRMUrjsg8UwN-XQ,109
+rclone_api-1.2.9.dist-info/entry_points.txt,sha256=TV8kwP3FRzYwUEr0RLC7aJh0W03SAefIJNXTJ-FdMIQ,200
+rclone_api-1.2.9.dist-info/top_level.txt,sha256=EvZ7uuruUpe9RiUyEp25d1Keq7PWYNT0O_-mr8FCG5g,11
+rclone_api-1.2.9.dist-info/RECORD,,