rclone-api 1.5.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. rclone_api/__init__.py +951 -0
  2. rclone_api/assets/example.txt +1 -0
  3. rclone_api/cli.py +15 -0
  4. rclone_api/cmd/analyze.py +51 -0
  5. rclone_api/cmd/copy_large_s3.py +111 -0
  6. rclone_api/cmd/copy_large_s3_finish.py +81 -0
  7. rclone_api/cmd/list_files.py +27 -0
  8. rclone_api/cmd/save_to_db.py +77 -0
  9. rclone_api/completed_process.py +60 -0
  10. rclone_api/config.py +87 -0
  11. rclone_api/convert.py +31 -0
  12. rclone_api/db/__init__.py +3 -0
  13. rclone_api/db/db.py +277 -0
  14. rclone_api/db/models.py +57 -0
  15. rclone_api/deprecated.py +24 -0
  16. rclone_api/detail/copy_file_parts_resumable.py +42 -0
  17. rclone_api/detail/walk.py +116 -0
  18. rclone_api/diff.py +164 -0
  19. rclone_api/dir.py +113 -0
  20. rclone_api/dir_listing.py +66 -0
  21. rclone_api/exec.py +40 -0
  22. rclone_api/experimental/flags.py +89 -0
  23. rclone_api/experimental/flags_base.py +58 -0
  24. rclone_api/file.py +205 -0
  25. rclone_api/file_item.py +68 -0
  26. rclone_api/file_part.py +198 -0
  27. rclone_api/file_stream.py +52 -0
  28. rclone_api/filelist.py +30 -0
  29. rclone_api/group_files.py +256 -0
  30. rclone_api/http_server.py +244 -0
  31. rclone_api/install.py +95 -0
  32. rclone_api/log.py +44 -0
  33. rclone_api/mount.py +55 -0
  34. rclone_api/mount_util.py +247 -0
  35. rclone_api/process.py +187 -0
  36. rclone_api/rclone_impl.py +1285 -0
  37. rclone_api/remote.py +21 -0
  38. rclone_api/rpath.py +102 -0
  39. rclone_api/s3/api.py +109 -0
  40. rclone_api/s3/basic_ops.py +61 -0
  41. rclone_api/s3/chunk_task.py +187 -0
  42. rclone_api/s3/create.py +107 -0
  43. rclone_api/s3/multipart/file_info.py +7 -0
  44. rclone_api/s3/multipart/finished_piece.py +69 -0
  45. rclone_api/s3/multipart/info_json.py +239 -0
  46. rclone_api/s3/multipart/merge_state.py +147 -0
  47. rclone_api/s3/multipart/upload_info.py +62 -0
  48. rclone_api/s3/multipart/upload_parts_inline.py +356 -0
  49. rclone_api/s3/multipart/upload_parts_resumable.py +304 -0
  50. rclone_api/s3/multipart/upload_parts_server_side_merge.py +546 -0
  51. rclone_api/s3/multipart/upload_state.py +165 -0
  52. rclone_api/s3/types.py +67 -0
  53. rclone_api/scan_missing_folders.py +153 -0
  54. rclone_api/types.py +402 -0
  55. rclone_api/util.py +324 -0
  56. rclone_api-1.5.8.dist-info/LICENSE +21 -0
  57. rclone_api-1.5.8.dist-info/METADATA +969 -0
  58. rclone_api-1.5.8.dist-info/RECORD +61 -0
  59. rclone_api-1.5.8.dist-info/WHEEL +5 -0
  60. rclone_api-1.5.8.dist-info/entry_points.txt +5 -0
  61. rclone_api-1.5.8.dist-info/top_level.txt +1 -0
@@ -0,0 +1,244 @@
1
+ """
2
+ Unit test file for testing rclone mount functionality.
3
+ """
4
+
5
+ import tempfile
6
+ import time
7
+ import warnings
8
+ from concurrent.futures import Future, ThreadPoolExecutor
9
+ from pathlib import Path
10
+ from threading import Semaphore
11
+ from typing import Any
12
+
13
+ import httpx
14
+
15
+ from rclone_api.file_part import FilePart
16
+ from rclone_api.process import Process
17
+ from rclone_api.types import Range, SizeSuffix, get_chunk_tmpdir
18
+
19
+ _TIMEOUT = 10 * 60 # 10 minutes
20
+
21
+
22
+ _range = range
23
+
24
+
25
class HttpServer:
    """Client for an rclone-served HTTP endpoint.

    Wraps the `rclone serve http` subprocess and provides helpers to query
    file sizes and download files, either whole or as ranged chunks fetched
    by a thread pool.  Errors are returned (not raised) so callers can
    decide how to react.
    """

    def __init__(self, url: str, subpath: str, process: Process) -> None:
        self.url = url  # base URL, e.g. "http://localhost:8080"
        self.subpath = subpath  # remote subpath (currently unused in URL building)
        self.process: Process | None = process  # serving subprocess

    def _get_file_url(self, path: str | Path) -> str:
        """Return the absolute URL serving *path* (posix-style separators)."""
        path = Path(path).as_posix()
        return f"{self.url}/{path}"

    def get_fetcher(self, path: str, n_threads: int = 16) -> "HttpFetcher":
        """Create a thread-pooled chunk fetcher bound to *path* on this server."""
        return HttpFetcher(self, path, n_threads=n_threads)

    def get(self, path: str, range: Range | None = None) -> bytes | Exception:
        """Get bytes from the server, or the download Exception on failure.

        BUGFIX: the previous implementation downloaded into
        Path(tempfile.TemporaryFile().name); on POSIX `.name` is a bare file
        descriptor number, so the data was written to a different file than
        the handle being read back (always yielding b"").  Download to a real
        named path inside a temporary directory instead, and propagate
        download errors instead of ignoring them.
        """
        with tempfile.TemporaryDirectory() as tmpdir:
            dst = Path(tmpdir) / "download.bin"
            out = self.download(path, dst, range)
            if isinstance(out, Exception):
                return out
            return dst.read_bytes()

    def size(self, path: str) -> int | Exception:
        """Get size of the file from the server via a HEAD request."""
        try:
            assert self.process is not None
            url = self._get_file_url(path)
            response = httpx.head(url)
            response.raise_for_status()
            # Server reports the payload length; rclone always sets this.
            return int(response.headers["Content-Length"])
        except Exception as e:
            warnings.warn(f"Failed to get size of {self.url}/{path}: {e}")
            return e

    def download(
        self, path: str, dst: Path, range: Range | None = None
    ) -> Path | Exception:
        """Stream *path* from the server into *dst*, retrying up to 3 times.

        Args:
            path: Server-relative file path.
            dst: Destination file (parent dirs created as needed).
            range: Optional byte range; sent as an HTTP Range header.

        Returns:
            *dst* on success, or an Exception once retries are exhausted.
        """

        def attempt() -> Path | Exception:
            # One download attempt; returns dst or the exception raised.
            if not dst.parent.exists():
                dst.parent.mkdir(parents=True, exist_ok=True)
            headers: dict[str, str] = {}
            if range:
                headers.update(range.to_header())
            url = self._get_file_url(path)
            try:
                with httpx.stream(
                    "GET", url, headers=headers, timeout=_TIMEOUT
                ) as response:
                    response.raise_for_status()
                    with open(dst, "wb") as file:
                        for chunk in response.iter_bytes(chunk_size=8192):
                            if chunk:
                                file.write(chunk)
                if range:
                    length = range.end - range.start
                    print(
                        f"Downloaded bytes starting at {range.start} with size {length} to {dst}"
                    )
                else:
                    size = dst.stat().st_size
                    print(f"Downloaded {size} bytes to {dst}")
                return dst
            except Exception as e:
                warnings.warn(f"Failed to download {url} to {dst}: {e}")
                return e

        retries = 3
        for i in _range(retries):
            out = attempt()
            if not isinstance(out, Exception):
                return out
            warnings.warn(f"Failed to download {path} to {dst}: {out}, retrying ({i})")
            if i + 1 < retries:
                # Back off before retrying, but don't sleep after the final
                # failed attempt (the old code wasted 10s here).
                time.sleep(10)
        return Exception(f"Failed to download {path} to {dst}")

    def download_multi_threaded(
        self,
        src_path: str,
        dst_path: Path,
        chunk_size: int = 32 * 1024 * 1024,
        n_threads: int = 16,
        range: Range | None = None,
    ) -> Path | Exception:
        """Download *src_path* in parallel ranged chunks, then stitch them.

        Each chunk lands in "<dst_path>.<offset>"; on success the parts are
        concatenated (in offset order) into *dst_path* and deleted.  On any
        chunk failure all partial files are removed and an Exception is
        returned.
        """

        finished: list[Path] = []
        errors: list[Exception] = []

        if range is None:
            sz = self.size(src_path)
            if isinstance(sz, Exception):
                return sz
            range = Range(0, sz)

        with ThreadPoolExecutor(max_workers=n_threads) as executor:
            try:
                futures: list[Future[Path | Exception]] = []
                start: int
                for start in _range(
                    range.start.as_int(), range.end.as_int(), chunk_size
                ):
                    end = min(
                        SizeSuffix(start + chunk_size).as_int(), range.end.as_int()
                    )
                    r = Range(start=start, end=end)

                    # Bind r as a default arg to dodge the late-binding
                    # closure pitfall.
                    def task(r: Range = r) -> Path | Exception:
                        dst = dst_path.with_suffix(f".{r.start}")
                        out = self.download(src_path, dst, r)
                        if isinstance(out, Exception):
                            warnings.warn(f"Failed to download chunked: {out}")
                        return out

                    fut = executor.submit(task, r)
                    futures.append(fut)
                # Futures are collected in submission order, which keeps
                # `finished` sorted by chunk offset for the merge below.
                for fut in futures:
                    result = fut.result()
                    if isinstance(result, Exception):
                        errors.append(result)
                    else:
                        finished.append(result)
                if errors:
                    # Clean up whatever partial chunks did succeed.
                    for finished_file in finished:
                        try:
                            finished_file.unlink()
                        except Exception as e:
                            warnings.warn(f"Failed to delete file {finished_file}: {e}")
                    return Exception(f"Failed to download chunked: {errors}")

                if not dst_path.parent.exists():
                    dst_path.parent.mkdir(parents=True, exist_ok=True)

                count = 0
                with open(dst_path, "wb") as file:
                    for f in finished:
                        print(f"Appending {f} to {dst_path}")
                        with open(f, "rb") as part:
                            while chunk := part.read(8192 * 4):
                                count += len(chunk)
                                file.write(chunk)
                        print(f"Removing {f}")
                        f.unlink()
                return dst_path
            except Exception as e:
                warnings.warn(f"Failed to copy chunked: {e}")
                for f in finished:
                    try:
                        if f.exists():
                            f.unlink()
                    except Exception as ee:
                        warnings.warn(f"Failed to delete file {f}: {ee}")
                return e

    def __enter__(self) -> "HttpServer":
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        self.shutdown()

    def shutdown(self) -> None:
        """Shutdown the server: terminate the process and close its pipes."""
        if self.process:
            self.process.terminate()
            if self.process.stdout:
                self.process.stdout.close()
            if self.process.stderr:
                self.process.stderr.close()
207
+
208
+
209
class HttpFetcher:
    """Thread-pooled, semaphore-throttled chunk fetcher for one served file."""

    def __init__(self, server: "HttpServer", path: str, n_threads: int) -> None:
        self.server = server
        self.path = path
        self.executor = ThreadPoolExecutor(max_workers=n_threads)
        # Semaphore throttles the number of concurrent fetches.
        # TODO this is kind of a hack.
        self.semaphore = Semaphore(n_threads)

    def bytes_fetcher(
        self, offset: int | SizeSuffix, size: int | SizeSuffix, extra: Any
    ) -> Future[FilePart]:
        """Schedule a ranged download of *size* bytes starting at *offset*.

        Returns a Future resolving to a FilePart whose payload is a temp
        chunk file; *extra* is carried through unchanged.
        """
        offset_int = offset.as_int() if isinstance(offset, SizeSuffix) else offset
        size_int = size.as_int() if isinstance(size, SizeSuffix) else size

        def fetch_chunk() -> FilePart:
            from rclone_api.util import random_str

            try:
                byte_range = Range(offset_int, offset_int + size_int)
                chunk_path = get_chunk_tmpdir() / f"{random_str(12)}.chunk"
                result = self.server.download(self.path, chunk_path, byte_range)
                if isinstance(result, Exception):
                    raise result
                return FilePart(payload=chunk_path, extra=extra)
            finally:
                # Always free the throttle slot, even on failure.
                self.semaphore.release()

        self.semaphore.acquire()
        return self.executor.submit(fetch_chunk)

    def shutdown(self) -> None:
        """Block until all scheduled fetches finish, then release the pool."""
        self.executor.shutdown(wait=True)
rclone_api/install.py ADDED
@@ -0,0 +1,95 @@
1
+ import os
2
+ import platform
3
+ import shutil
4
+ from pathlib import Path
5
+ from tempfile import TemporaryDirectory
6
+ from warnings import warn
7
+
8
+ from download import download
9
+
10
URL_WINDOWS = "https://downloads.rclone.org/rclone-current-windows-amd64.zip"
URL_LINUX = "https://downloads.rclone.org/rclone-current-linux-amd64.zip"
URL_MACOS_ARM = "https://downloads.rclone.org/rclone-current-osx-arm64.zip"
URL_MACOS_X86 = "https://downloads.rclone.org/rclone-current-osx-amd64.zip"


def rclone_download_url() -> str:
    """Return the rclone release URL for the current OS/architecture.

    Raises:
        Exception: on an unsupported system or architecture.  (The original
        used `assert`, which is silently stripped under `python -O`; raise
        explicitly so the check always runs.)
    """
    system = platform.system()
    arch = platform.machine()
    if system == "Windows":
        if "arm" in arch:
            raise Exception(f"Unsupported arch: {arch}")
        return URL_WINDOWS
    elif system == "Linux":
        if "arm" in arch:
            raise Exception(f"Unsupported arch: {arch}")
        return URL_LINUX
    elif system == "Darwin":
        if "x86" in arch:
            return URL_MACOS_X86
        elif "arm" in arch:
            return URL_MACOS_ARM
        else:
            raise Exception(f"Unsupported arch: {arch}")
    else:
        raise Exception("Unsupported system")
34
+
35
+
36
+ def _remove_signed_binary_requirements(out: Path) -> None:
37
+ if platform.system() == "Windows":
38
+ return
39
+ # mac os
40
+ if platform.system() == "Darwin":
41
+ # remove signed binary requirements
42
+ #
43
+ # xattr -d com.apple.quarantine rclone
44
+ import subprocess
45
+
46
+ subprocess.run(
47
+ ["xattr", "-d", "com.apple.quarantine", str(out)],
48
+ capture_output=True,
49
+ check=False,
50
+ )
51
+ return
52
+
53
+
54
+ def _make_executable(out: Path) -> None:
55
+ if platform.system() == "Windows":
56
+ return
57
+ # linux and mac os
58
+ os.chmod(out, 0o755)
59
+
60
+
61
+ def _find_rclone_exe(start: Path) -> Path | None:
62
+ for root, dirs, files in os.walk(start):
63
+ if platform.system() == "Windows":
64
+ if "rclone.exe" in files:
65
+ return Path(root) / "rclone.exe"
66
+ else:
67
+ if "rclone" in files:
68
+ return Path(root) / "rclone"
69
+ return None
70
+
71
+
72
def rclone_download(out: Path, replace: bool = False) -> Exception | None:
    """Download the platform-appropriate rclone binary to *out*.

    Args:
        out: Destination path for the rclone executable.
        replace: Re-download even if *out* already exists.

    Returns:
        None on success (or when *out* exists and replace is False);
        the Exception on failure (also emitted as a warning with traceback).
    """
    if out.exists() and not replace:
        return None
    try:
        url = rclone_download_url()
        with TemporaryDirectory() as tmpdir:
            tmp = Path(tmpdir)
            # `download` (third-party package) fetches and unpacks the zip.
            download(url, tmp, kind="zip", replace=True)
            exe = _find_rclone_exe(tmp)
            if exe is None:
                raise FileNotFoundError("rclone executable not found")
            # Remove any stale binary before moving the fresh one in place.
            if os.path.exists(out):
                os.remove(out)
            out.parent.mkdir(parents=True, exist_ok=True)
            shutil.move(exe, out)
            # Clear macOS quarantine and set the exec bit (no-ops on Windows).
            _remove_signed_binary_requirements(out)
            _make_executable(out)
            return None
    except Exception as e:
        import traceback

        stacktrace = traceback.format_exc()
        warn(f"Failed to download rclone: {e}\n{stacktrace}")
        return e
rclone_api/log.py ADDED
@@ -0,0 +1,44 @@
1
+ import logging
2
+ import sys
3
+
4
# Module-level guard so default configuration only happens once per process.
_INITIALISED = False


def setup_default_logging() -> None:
    """Set up default logging configuration if none exists.

    Installs an INFO-level stdout handler on the root logger, but only if
    no handlers are configured yet; existing configuration is respected.
    """
    global _INITIALISED
    if _INITIALISED:
        return
    if not logging.root.handlers:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
            handlers=[
                logging.StreamHandler(sys.stdout),
                # Uncomment to add file logging
                # logging.FileHandler('rclone_api.log')
            ],
        )
    # BUGFIX: the flag was never set, so the run-once guard above had no
    # effect and every call re-ran the handler check.
    _INITIALISED = True
22
+
23
+
24
def configure_logging(level=logging.INFO, log_file=None):
    """Configure logging for the rclone_api package.

    Args:
        level: The logging level (default: logging.INFO)
        log_file: Optional path to a log file
    """
    active_handlers: list[logging.Handler] = [logging.StreamHandler(sys.stdout)]
    if log_file:
        active_handlers.append(logging.FileHandler(log_file))

    logging.basicConfig(
        handlers=active_handlers,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        level=level,
        force=True,  # Override any existing configuration
    )
41
+
42
+
43
+ # Call setup_default_logging when this module is imported
44
+ setup_default_logging()
rclone_api/mount.py ADDED
@@ -0,0 +1,55 @@
1
+ from dataclasses import dataclass
2
+ from pathlib import Path
3
+
4
+ from rclone_api.process import Process
5
+
6
+
7
@dataclass
class Mount:
    """Mount information.

    Represents a live rclone mount of remote `src` at local `mount_path`,
    backed by the `process` subprocess.  Construction blocks until the mount
    is usable; instances unmount on close(), context exit, or finalization.
    """

    src: str  # remote spec being mounted
    mount_path: Path  # local mount point directory
    process: Process  # rclone mount subprocess
    read_only: bool  # whether the mount was created read-only
    cache_dir: Path | None = None  # optional VFS cache directory
    cache_dir_delete_on_exit: bool | None = None  # delete cache_dir on close()
    _closed: bool = False  # guards against double-close

    def __post_init__(self):
        # Imported lazily to avoid a circular import with mount_util.
        from rclone_api.mount_util import add_mount_for_gc, wait_for_mount

        assert isinstance(self.mount_path, Path)
        assert self.process is not None
        # Block until the mount point is populated, then register it so it
        # gets cleaned up at interpreter exit even if close() is never called.
        wait_for_mount(self)
        add_mount_for_gc(self)

    def close(self, wait: bool = True) -> None:
        """Clean up the mount: terminate the process, unmount, and optionally
        delete the cache directory.  Idempotent."""
        from rclone_api.mount_util import (
            cache_dir_delete_on_exit,
            clean_mount,
            remove_mount_for_gc,
        )

        if self._closed:
            return
        self._closed = True
        self.process.terminate()
        clean_mount(self, verbose=False, wait=wait)
        if self.cache_dir and self.cache_dir_delete_on_exit:
            cache_dir_delete_on_exit(self.cache_dir)
        remove_mount_for_gc(self)

    def __enter__(self) -> "Mount":
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        self.close(wait=True)

    def __del__(self):
        # Best-effort cleanup if the caller forgot close(); wait=False so
        # garbage collection is not blocked on unmount settling.
        self.close(wait=False)

    # make this a hashable object (keyed by mount point, for the GC WeakSet)
    def __hash__(self):
        return hash(self.mount_path)
@@ -0,0 +1,247 @@
1
+ import atexit
2
+ import os
3
+ import platform
4
+ import shutil
5
+ import subprocess
6
+ import time
7
+ import warnings
8
+ import weakref
9
+ from concurrent.futures import ThreadPoolExecutor
10
+ from pathlib import Path
11
+
12
+ from rclone_api.mount import Mount
13
+ from rclone_api.process import Process
14
+
15
+ _SYSTEM = platform.system() # "Linux", "Darwin", "Windows", etc.
16
+
17
+
18
+ _MOUNTS_FOR_GC: weakref.WeakSet = weakref.WeakSet()
19
+
20
+
21
+ def _cleanup_mounts() -> None:
22
+ with ThreadPoolExecutor() as executor:
23
+ mount: Mount
24
+ for mount in _MOUNTS_FOR_GC:
25
+ executor.submit(mount.close)
26
+
27
+
28
+ def _run_command(cmd: str, verbose: bool) -> int:
29
+ """Run a shell command and print its output if verbose is True."""
30
+ if verbose:
31
+ print(f"Executing: {cmd}")
32
+ try:
33
+ result = subprocess.run(
34
+ cmd, shell=True, capture_output=True, text=True, check=False
35
+ )
36
+ if result.returncode != 0 and verbose:
37
+ print(f"Command failed: {cmd}\nStdErr: {result.stderr.strip()}")
38
+ return result.returncode
39
+ except Exception as e:
40
+ warnings.warn(f"Error running command '{cmd}': {e}")
41
+ return -1
42
+
43
+
44
+ atexit.register(_cleanup_mounts)
45
+
46
+
47
def cache_dir_delete_on_exit(cache_dir: Path) -> None:
    """Best-effort recursive removal of *cache_dir*; a missing dir is a no-op.

    Removal failures are reported as warnings rather than raised, since this
    runs during teardown.
    """
    if not cache_dir.exists():
        return
    try:
        shutil.rmtree(cache_dir)
    except Exception as e:
        warnings.warn(f"Error removing cache directory {cache_dir}: {e}")
53
+
54
+
55
def add_mount_for_gc(mount: Mount) -> None:
    """Register *mount* for cleanup at interpreter exit."""
    # weak reference to avoid circular references
    _MOUNTS_FOR_GC.add(mount)
58
+
59
+
60
def remove_mount_for_gc(mount: Mount) -> None:
    """Deregister *mount* (already closed); discard ignores missing entries."""
    _MOUNTS_FOR_GC.discard(mount)
62
+
63
+
64
def prepare_mount(outdir: Path, verbose: bool) -> None:
    """Create the directories rclone needs before mounting at *outdir*.

    On Windows only the parents are created (the mount point itself must not
    pre-exist); on other platforms the mount directory is created too.
    """
    if _SYSTEM == "Windows":
        if verbose:
            print(f"Creating parent directories for {outdir}")
        target = outdir.parent
    else:
        if verbose:
            print(f"Creating directories for {outdir}")
        target = outdir
    target.mkdir(parents=True, exist_ok=True)
75
+
76
+
77
def wait_for_mount(
    mount: Mount,
    timeout: int = 20,
    post_mount_delay: int = 5,
    poll_interval: float = 1.0,
    check_mount_flag: bool = False,
) -> None:
    """
    Wait for a mount point to become available by checking if the directory exists,
    optionally verifying that it is a mount point, and confirming that it contains files.
    This function periodically polls for the mount status, ensures the mount process
    is still running, and applies an extra delay after detecting content for stabilization.

    Args:
        mount (Mount): The mount to wait for; supplies both the mount path
            and the Process running the mount.
        timeout (int): Maximum time in seconds to wait for the mount to become available.
        post_mount_delay (int): Additional seconds to wait after detecting files.
        poll_interval (float): Seconds between each poll iteration.
        check_mount_flag (bool): If True, verifies that the path is recognized as a mount point.

    Raises:
        subprocess.CalledProcessError: If the mount process exits unexpectedly.
        Exception: The last access error seen, if polling timed out with one.
        TypeError: If mount.process is not an instance of Process.

    NOTE(review): on timeout with no recorded access error this returns
    silently instead of raising TimeoutError (see the commented-out raise
    below) — confirm that is the intended contract.
    """

    mount_process = mount.process
    src = mount.mount_path

    if not isinstance(mount_process, Process):
        raise TypeError("mount_process must be an instance of Process")

    expire_time = time.time() + timeout
    last_error = None

    while time.time() < expire_time:
        # Check if the mount process has terminated unexpectedly.
        rtn = mount_process.poll()
        if rtn is not None:
            cmd_str = subprocess.list2cmdline(mount_process.cmd)
            print(f"Mount process terminated unexpectedly: {cmd_str}")
            raise subprocess.CalledProcessError(rtn, cmd_str)

        # Check if the mount path exists.
        if src.exists():
            # Optionally check if path is a mount point.
            if check_mount_flag:
                try:
                    if not os.path.ismount(str(src)):
                        print(
                            f"{src} exists but is not recognized as a mount point yet."
                        )
                        time.sleep(poll_interval)
                        continue
                except Exception as e:
                    # Non-fatal: fall through and rely on the content check.
                    print(f"Could not verify mount point status for {src}: {e}")

            try:
                # Check for at least one entry in the directory.
                if any(src.iterdir()):
                    print(
                        f"Mount point {src} appears available with files. Waiting {post_mount_delay} seconds for stabilization."
                    )
                    time.sleep(post_mount_delay)
                    return
                else:
                    print(f"Mount point {src} is empty. Waiting for files to appear.")
            except Exception as e:
                # Remember the error; it is re-raised if the timeout expires.
                last_error = e
                print(f"Error accessing {src}: {e}")
        else:
            print(f"Mount point {src} does not exist yet.")

        time.sleep(poll_interval)

    # raise TimeoutError(
    #     f"Mount point {src} did not become available within {timeout} seconds. Last error: {last_error}"
    # )
    if last_error is not None:
        raise last_error
158
+
159
+
160
def clean_mount(mount: Mount | Path, verbose: bool = False, wait: bool = True) -> None:
    """
    Clean up a mount path across Linux, macOS, and Windows.

    The function attempts to unmount the mount at mount_path, then, if the
    directory is empty, removes it. On Linux it uses 'fusermount -u' (for FUSE mounts)
    and 'umount'. On macOS it uses 'umount' (and optionally 'diskutil unmount'),
    while on Windows it attempts to remove the mount point via 'mountvol /D'.

    Args:
        mount: Either a Mount (whose process is killed first) or a bare path.
        verbose: Print progress messages.
        wait: Sleep briefly before and after unmounting to let it settle.
    """

    def verbose_print(msg: str):
        # Local helper: print only when verbose was requested.
        if verbose:
            print(msg)

    # If given a Mount, make sure its rclone process is dead before unmounting.
    proc = mount.process if isinstance(mount, Mount) else None
    if proc is not None and proc.poll() is None:
        verbose_print(f"Terminating mount process {proc.pid}")
        proc.kill()

    # Check if the mount path exists; if an OSError occurs, assume it exists.
    # (A dead FUSE mount can make exists() itself raise.)
    mount_path = mount.mount_path if isinstance(mount, Mount) else mount
    try:
        mount_exists = mount_path.exists()
    except OSError:
        # warnings.warn(f"Error checking {mount_path}: {e}")
        mount_exists = True

    # Give the system a moment (if unmount is in progress, etc.)
    if wait:
        time.sleep(2)

    if not mount_exists:
        verbose_print(f"{mount_path} does not exist; nothing to clean up.")
        return

    verbose_print(f"{mount_path} still exists, attempting to unmount and remove.")

    # Platform-specific unmount procedures
    if _SYSTEM == "Linux":
        # Try FUSE unmount first (if applicable), then the regular umount.
        _run_command(f"fusermount -u {mount_path}", verbose)
        _run_command(f"umount {mount_path}", verbose)
    elif _SYSTEM == "Darwin":
        # On macOS, use umount; optionally try diskutil for stubborn mounts.
        _run_command(f"umount {mount_path}", verbose)
        # Optionally: uncomment the next line if diskutil unmount is preferred.
        # _run_command(f"diskutil unmount {mount_path}", verbose)
    elif _SYSTEM == "Windows":
        # On Windows, remove the mount point using mountvol.
        _run_command(f"mountvol {mount_path} /D", verbose)
        # If that does not work, try to remove the directory directly.
        try:
            mount_path.rmdir()
            if verbose:
                print(f"Successfully removed mount directory {mount_path}")
        except Exception:
            # warnings.warn(f"Failed to remove mount {mount_path}: {e}")
            pass
    else:
        warnings.warn(f"Unsupported platform: {_SYSTEM}")

    # Allow some time for the unmount commands to take effect.
    if wait:
        time.sleep(2)

    # Re-check if the mount path still exists.
    try:
        still_exists = mount_path.exists()
    except OSError as e:
        warnings.warn(f"Error re-checking {mount_path}: {e}")
        still_exists = True

    if still_exists:
        verbose_print(f"{mount_path} still exists after unmount attempt.")
        # Attempt to remove the directory if it is empty.
        try:
            # Only remove if the directory is empty.
            if not any(mount_path.iterdir()):
                mount_path.rmdir()
                if verbose:
                    verbose_print(f"Removed empty mount directory {mount_path}")
            else:
                warnings.warn(f"{mount_path} is not empty; cannot remove.")
                # NOTE(review): this raise is caught by the except below,
                # producing a second warning — presumably intentional to
                # route through the common failure path; confirm.
                raise OSError(f"{mount_path} is not empty")
        except Exception as e:
            warnings.warn(f"Failed during cleanup of {mount_path}: {e}")
    else:
        verbose_print(f"{mount_path} successfully cleaned up.")