redundanet 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redundanet/__init__.py +16 -0
- redundanet/__main__.py +6 -0
- redundanet/auth/__init__.py +9 -0
- redundanet/auth/gpg.py +323 -0
- redundanet/auth/keyserver.py +219 -0
- redundanet/cli/__init__.py +5 -0
- redundanet/cli/main.py +247 -0
- redundanet/cli/network.py +194 -0
- redundanet/cli/node.py +305 -0
- redundanet/cli/storage.py +267 -0
- redundanet/core/__init__.py +31 -0
- redundanet/core/config.py +200 -0
- redundanet/core/exceptions.py +84 -0
- redundanet/core/manifest.py +325 -0
- redundanet/core/node.py +135 -0
- redundanet/network/__init__.py +11 -0
- redundanet/network/discovery.py +218 -0
- redundanet/network/dns.py +180 -0
- redundanet/network/validation.py +279 -0
- redundanet/storage/__init__.py +13 -0
- redundanet/storage/client.py +306 -0
- redundanet/storage/furl.py +196 -0
- redundanet/storage/introducer.py +175 -0
- redundanet/storage/storage.py +195 -0
- redundanet/utils/__init__.py +15 -0
- redundanet/utils/files.py +165 -0
- redundanet/utils/logging.py +93 -0
- redundanet/utils/process.py +226 -0
- redundanet/vpn/__init__.py +12 -0
- redundanet/vpn/keys.py +173 -0
- redundanet/vpn/mesh.py +201 -0
- redundanet/vpn/tinc.py +323 -0
- redundanet-2.0.0.dist-info/LICENSE +674 -0
- redundanet-2.0.0.dist-info/METADATA +265 -0
- redundanet-2.0.0.dist-info/RECORD +37 -0
- redundanet-2.0.0.dist-info/WHEEL +4 -0
- redundanet-2.0.0.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
"""Network validation for RedundaNet."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from typing import TYPE_CHECKING
|
|
7
|
+
|
|
8
|
+
from redundanet.utils.logging import get_logger
|
|
9
|
+
from redundanet.utils.process import run_command
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from redundanet.core.manifest import Manifest
|
|
13
|
+
|
|
14
|
+
logger = get_logger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class ValidationResult:
    """Result of a single validation check.

    Instances are produced by NetworkValidator check methods and collected
    into a NetworkValidationReport.
    """

    # Machine-readable check identifier, e.g. "vpn_interface".
    name: str
    # True when the check succeeded.
    passed: bool
    # Human-readable summary of the outcome.
    message: str
    # Optional structured context (addresses, error lists, counts, ...).
    details: dict[str, object] = field(default_factory=dict)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
class NetworkValidationReport:
    """Aggregated outcome of a full set of network validation checks."""

    # Individual check results, kept in the order they were recorded.
    checks: list[ValidationResult] = field(default_factory=list)

    @property
    def passed(self) -> bool:
        """True when no recorded check failed (vacuously true when empty)."""
        return not self.failed_checks

    @property
    def failed_checks(self) -> list[ValidationResult]:
        """Every check that did not pass, preserving recording order."""
        failures: list[ValidationResult] = []
        for check in self.checks:
            if not check.passed:
                failures.append(check)
        return failures

    def add_check(self, result: ValidationResult) -> None:
        """Record one check result on the report."""
        self.checks.append(result)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class NetworkValidator:
    """Validates network configuration and connectivity.

    Runs a set of independent checks (VPN interface state, local node
    configuration, manifest validity, peer and introducer reachability)
    and collects the results into a NetworkValidationReport.
    """

    def __init__(
        self,
        manifest: Manifest,
        local_node_name: str,
        vpn_interface: str = "tinc0",
    ) -> None:
        """Initialize network validator.

        Args:
            manifest: Network manifest
            local_node_name: Name of the local node
            vpn_interface: VPN interface name
        """
        self.manifest = manifest
        self.local_node_name = local_node_name
        self.vpn_interface = vpn_interface

    def validate_all(self) -> NetworkValidationReport:
        """Run all validation checks.

        Returns:
            Complete validation report
        """
        report = NetworkValidationReport()

        # Checks are independent: all of them run even if earlier ones fail,
        # so the report always covers every category.
        report.add_check(self.check_vpn_interface())
        report.add_check(self.check_local_node_config())
        report.add_check(self.check_manifest_validity())
        report.add_check(self.check_peer_connectivity())
        report.add_check(self.check_introducer_reachability())

        return report

    def check_vpn_interface(self) -> ValidationResult:
        """Check if the VPN interface exists, is up, and has an address."""
        result = run_command(f"ip link show {self.vpn_interface}", check=False)

        if not result.success:
            return ValidationResult(
                name="vpn_interface",
                passed=False,
                message=f"VPN interface {self.vpn_interface} does not exist",
            )

        # NOTE(review): this substring test also matches flags like LOWER_UP;
        # those only appear on interfaces that are up, so it is acceptable.
        if "UP" not in result.stdout:
            return ValidationResult(
                name="vpn_interface",
                passed=False,
                message=f"VPN interface {self.vpn_interface} is not up",
            )

        # An interface can be up without an address; require an "inet " entry.
        result = run_command(f"ip addr show {self.vpn_interface}", check=False)
        ip_configured = "inet " in result.stdout

        return ValidationResult(
            name="vpn_interface",
            passed=ip_configured,
            message="VPN interface is up and configured"
            if ip_configured
            else "VPN interface has no IP",
            details={"interface": self.vpn_interface},
        )

    def check_local_node_config(self) -> ValidationResult:
        """Check if the local node is properly configured in the manifest."""
        node = self.manifest.get_node(self.local_node_name)

        if not node:
            return ValidationResult(
                name="local_node_config",
                passed=False,
                message=f"Local node {self.local_node_name} not found in manifest",
            )

        issues = []

        if not node.vpn_ip and not node.internal_ip:
            issues.append("No VPN IP configured")

        if not node.roles:
            issues.append("No roles assigned")

        if issues:
            return ValidationResult(
                name="local_node_config",
                passed=False,
                message=f"Local node configuration issues: {', '.join(issues)}",
                details={"node_name": self.local_node_name, "issues": issues},
            )

        return ValidationResult(
            name="local_node_config",
            passed=True,
            message="Local node properly configured",
            details={"node_name": self.local_node_name, "roles": [r.value for r in node.roles]},
        )

    def check_manifest_validity(self) -> ValidationResult:
        """Check if the manifest passes its own validation."""
        errors = self.manifest.validate()

        if errors:
            return ValidationResult(
                name="manifest_validity",
                passed=False,
                message=f"Manifest has {len(errors)} validation error(s)",
                details={"errors": errors},
            )

        return ValidationResult(
            name="manifest_validity",
            passed=True,
            message="Manifest is valid",
            details={"node_count": len(self.manifest.nodes)},
        )

    def check_peer_connectivity(self) -> ValidationResult:
        """Ping every peer node and summarize reachability.

        Peers with no configured address are counted as unreachable. The
        check passes as long as at least one peer responds (or there are
        no peers at all).
        """
        peers = [n for n in self.manifest.nodes if n.name != self.local_node_name]

        if not peers:
            return ValidationResult(
                name="peer_connectivity",
                passed=True,
                message="No peers to check",
            )

        reachable = 0
        unreachable = []

        for peer in peers:
            ip = peer.vpn_ip or peer.internal_ip
            # Fix: a peer with neither a VPN nor an internal IP cannot be
            # pinged; previously this ran `ping ... None` and reported a
            # spurious name-resolution failure. Mark it unreachable directly.
            if not ip:
                unreachable.append(peer.name)
                continue
            result = run_command(f"ping -c 1 -W 2 {ip}", check=False)
            if result.success:
                reachable += 1
            else:
                unreachable.append(peer.name)

        if len(unreachable) == len(peers):
            return ValidationResult(
                name="peer_connectivity",
                passed=False,
                message="Cannot reach any peers",
                details={"unreachable": unreachable},
            )

        if unreachable:
            return ValidationResult(
                name="peer_connectivity",
                passed=True,  # Some peers reachable is OK
                message=f"Reached {reachable}/{len(peers)} peers",
                details={"reachable": reachable, "unreachable": unreachable},
            )

        return ValidationResult(
            name="peer_connectivity",
            passed=True,
            message=f"All {reachable} peers reachable",
            details={"reachable": reachable},
        )

    def check_introducer_reachability(self) -> ValidationResult:
        """Check if the first introducer is reachable on its service port."""
        introducers = self.manifest.get_introducers()

        if not introducers:
            return ValidationResult(
                name="introducer_reachability",
                passed=False,
                message="No introducer configured in manifest",
            )

        # Only the first introducer is probed.
        introducer = introducers[0]
        ip = introducer.vpn_ip or introducer.internal_ip

        # Fix: guard against an introducer with no configured address, which
        # previously produced `nc -z -w 5 None <port>`.
        if not ip:
            return ValidationResult(
                name="introducer_reachability",
                passed=False,
                message=f"Introducer {introducer.name} has no IP address configured",
                details={"introducer": introducer.name},
            )

        port = introducer.ports.tahoe_introducer

        # Check if port is open
        result = run_command(f"nc -z -w 5 {ip} {port}", check=False)

        if result.success:
            return ValidationResult(
                name="introducer_reachability",
                passed=True,
                message=f"Introducer {introducer.name} is reachable",
                details={"introducer": introducer.name, "address": f"{ip}:{port}"},
            )

        # Port not open, but the host itself may still answer pings.
        ping_result = run_command(f"ping -c 1 -W 2 {ip}", check=False)

        if ping_result.success:
            return ValidationResult(
                name="introducer_reachability",
                passed=False,
                message=f"Introducer {introducer.name} reachable but port {port} closed",
                details={"introducer": introducer.name, "address": f"{ip}:{port}"},
            )

        return ValidationResult(
            name="introducer_reachability",
            passed=False,
            message=f"Introducer {introducer.name} is not reachable",
            details={"introducer": introducer.name, "address": f"{ip}:{port}"},
        )

    def wait_for_vpn(self, timeout: float = 60.0, interval: float = 5.0) -> bool:
        """Wait for the VPN interface to come up.

        Args:
            timeout: Maximum wait time in seconds
            interval: Check interval in seconds

        Returns:
            True if VPN came up within timeout
        """
        import time

        # Fix: use a monotonic clock so wall-clock adjustments (NTP steps,
        # DST) cannot lengthen or cut short the wait.
        start = time.monotonic()

        while time.monotonic() - start < timeout:
            if self.check_vpn_interface().passed:
                return True
            time.sleep(interval)

        return False
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""Tahoe-LAFS storage module for RedundaNet."""
|
|
2
|
+
|
|
3
|
+
from redundanet.storage.client import TahoeClient
|
|
4
|
+
from redundanet.storage.furl import FURLManager
|
|
5
|
+
from redundanet.storage.introducer import TahoeIntroducer
|
|
6
|
+
from redundanet.storage.storage import TahoeStorage
|
|
7
|
+
|
|
8
|
+
__all__ = [
|
|
9
|
+
"TahoeClient",
|
|
10
|
+
"TahoeStorage",
|
|
11
|
+
"TahoeIntroducer",
|
|
12
|
+
"FURLManager",
|
|
13
|
+
]
|
|
@@ -0,0 +1,306 @@
|
|
|
1
|
+
"""Tahoe-LAFS client operations for RedundaNet."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
from jinja2 import Template
|
|
10
|
+
|
|
11
|
+
from redundanet.core.config import TahoeConfig
|
|
12
|
+
from redundanet.core.exceptions import StorageError
|
|
13
|
+
from redundanet.utils.files import ensure_dir, write_file
|
|
14
|
+
from redundanet.utils.logging import get_logger
|
|
15
|
+
from redundanet.utils.process import is_command_available, run_command
|
|
16
|
+
|
|
17
|
+
logger = get_logger(__name__)
|
|
18
|
+
|
|
19
|
+
TAHOE_CLIENT_CFG_TEMPLATE = """# Tahoe-LAFS client configuration
|
|
20
|
+
# Generated by RedundaNet
|
|
21
|
+
|
|
22
|
+
[node]
|
|
23
|
+
nickname = {{ nickname }}
|
|
24
|
+
web.port = tcp:{{ web_port }}:interface=127.0.0.1
|
|
25
|
+
tub.port = tcp:{{ tub_port }}
|
|
26
|
+
tub.location = {{ tub_location }}
|
|
27
|
+
|
|
28
|
+
[client]
|
|
29
|
+
introducer.furl = {{ introducer_furl }}
|
|
30
|
+
shares.needed = {{ shares_needed }}
|
|
31
|
+
shares.happy = {{ shares_happy }}
|
|
32
|
+
shares.total = {{ shares_total }}
|
|
33
|
+
|
|
34
|
+
[storage]
|
|
35
|
+
enabled = false
|
|
36
|
+
|
|
37
|
+
[helper]
|
|
38
|
+
enabled = false
|
|
39
|
+
"""
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@dataclass
class TahoeClientConfig:
    """Configuration for a Tahoe-LAFS client node."""

    # Node nickname written into tahoe.cfg.
    nickname: str
    # Base directory holding tahoe.cfg and node state.
    node_dir: Path
    # fURL of the introducer this client connects to.
    introducer_furl: str
    # Local-only web UI port (bound to 127.0.0.1 in the template).
    web_port: int = 3456
    # Tub (Foolscap) listening port.
    tub_port: int = 34560
    # Advertised tub location; "AUTO" lets Tahoe detect it.
    tub_location: str = "AUTO"
    # Share parameters written into the [client] section of tahoe.cfg.
    shares_needed: int = 3
    shares_happy: int = 7
    shares_total: int = 10

    @classmethod
    def from_tahoe_config(
        cls,
        nickname: str,
        introducer_furl: str,
        tahoe_config: TahoeConfig,
        node_dir: Path | None = None,
    ) -> TahoeClientConfig:
        """Build a client config, copying share parameters from tahoe_config."""
        if node_dir is None:
            node_dir = Path("/var/lib/tahoe-client")
        return cls(
            nickname=nickname,
            node_dir=node_dir,
            introducer_furl=introducer_furl,
            shares_needed=tahoe_config.shares_needed,
            shares_happy=tahoe_config.shares_happy,
            shares_total=tahoe_config.shares_total,
        )
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class TahoeClient:
    """Manages a Tahoe-LAFS client node: creation, lifecycle, and grid I/O."""

    def __init__(self, config: TahoeClientConfig) -> None:
        """Bind the client to its configuration and derive the config path."""
        self.config = config
        # tahoe.cfg lives directly inside the node directory.
        self._tahoe_cfg = config.node_dir / "tahoe.cfg"

    def ensure_tahoe_installed(self) -> bool:
        """Verify the `tahoe` binary is available.

        Returns:
            True when available.

        Raises:
            StorageError: if Tahoe-LAFS is not installed.
        """
        if is_command_available("tahoe"):
            return True
        raise StorageError("Tahoe-LAFS is not installed. Please install tahoe-lafs first.")

    def create_node(self) -> None:
        """Create a new Tahoe-LAFS client node and write its configuration.

        Raises:
            StorageError: if Tahoe is missing or node creation fails.
        """
        logger.info("Creating Tahoe client node", nickname=self.config.nickname)

        self.ensure_tahoe_installed()
        ensure_dir(self.config.node_dir, mode=0o700)

        # Let the tahoe CLI lay down the node skeleton; an existing node
        # directory is not treated as an error.
        create_cmd = (
            f"tahoe create-client --basedir={self.config.node_dir} "
            f"--nickname={self.config.nickname}"
        )
        creation = run_command(create_cmd, check=False)

        if not creation.success and "already exists" not in creation.stderr:
            raise StorageError(f"Failed to create client node: {creation.stderr}")

        # Overwrite the generated tahoe.cfg with our own settings.
        self._write_config()

        logger.info("Tahoe client node created", node_dir=str(self.config.node_dir))

    def _write_config(self) -> None:
        """Render and write the tahoe.cfg configuration file (mode 0600)."""
        settings = {
            "nickname": self.config.nickname,
            "web_port": self.config.web_port,
            "tub_port": self.config.tub_port,
            "tub_location": self.config.tub_location,
            "introducer_furl": self.config.introducer_furl,
            "shares_needed": self.config.shares_needed,
            "shares_happy": self.config.shares_happy,
            "shares_total": self.config.shares_total,
        }
        content = Template(TAHOE_CLIENT_CFG_TEMPLATE).render(**settings)

        write_file(self._tahoe_cfg, content, mode=0o600)
        logger.debug("Wrote tahoe.cfg", path=str(self._tahoe_cfg))

    def start(self) -> bool:
        """Start the Tahoe client node; True on success.

        NOTE(review): `tahoe start` is deprecated/removed in recent
        Tahoe-LAFS releases in favor of `tahoe run` — confirm against the
        deployed version.
        """
        logger.info("Starting Tahoe client", nickname=self.config.nickname)

        outcome = run_command(f"tahoe start {self.config.node_dir}", check=False)

        if not outcome.success:
            logger.error("Failed to start Tahoe client", error=outcome.stderr)
            return False

        logger.info("Tahoe client started")
        return True

    def stop(self) -> bool:
        """Stop the Tahoe client node; True on success."""
        logger.info("Stopping Tahoe client", nickname=self.config.nickname)

        outcome = run_command(f"tahoe stop {self.config.node_dir}", check=False)

        if not outcome.success:
            logger.error("Failed to stop Tahoe client", error=outcome.stderr)
            return False

        logger.info("Tahoe client stopped")
        return True

    def restart(self) -> bool:
        """Restart the Tahoe client node; True on success."""
        return run_command(f"tahoe restart {self.config.node_dir}", check=False).success

    def is_running(self) -> bool:
        """Report whether the Tahoe client is currently running."""
        status = run_command(f"tahoe status {self.config.node_dir}", check=False)
        return status.success and "RUNNING" in status.stdout

    def get_node_url(self) -> str:
        """Return the local web UI URL for this node."""
        return f"http://127.0.0.1:{self.config.web_port}"

    def upload_file(self, local_path: Path, format_: str = "CHK") -> str:
        """Upload a file to the grid.

        Args:
            local_path: Path to the file to upload
            format_: Upload format (CHK, SDMF, MDMF)

        Returns:
            The capability string for the uploaded file

        Raises:
            StorageError: if the file is missing or the upload fails.
        """
        if not local_path.exists():
            raise StorageError(f"File not found: {local_path}")

        # NOTE(review): the path is interpolated unquoted; paths containing
        # spaces depend on how run_command tokenizes — verify.
        outcome = run_command(
            f"tahoe put --format={format_} {local_path}",
            cwd=self.config.node_dir,
            check=False,
        )

        if not outcome.success:
            raise StorageError(f"Upload failed: {outcome.stderr}")

        # The capability is the last line of output.
        lines = outcome.stdout.strip().split("\n")
        return lines[-1]

    def download_file(self, cap: str, local_path: Path) -> Path:
        """Download a file from the grid.

        Args:
            cap: The capability string
            local_path: Where to save the file

        Returns:
            Path to the downloaded file

        Raises:
            StorageError: if the download fails.
        """
        ensure_dir(local_path.parent)

        outcome = run_command(
            f"tahoe get {cap} {local_path}",
            cwd=self.config.node_dir,
            check=False,
        )

        if not outcome.success:
            raise StorageError(f"Download failed: {outcome.stderr}")

        return local_path

    def list_directory(self, cap: str) -> list[dict[str, Any]]:
        """List contents of a directory capability.

        Args:
            cap: Directory capability

        Returns:
            List of entries with name, type, size, and mutability; empty
            when the command output is not valid JSON.

        Raises:
            StorageError: if the `tahoe ls` command fails.
        """
        import json

        outcome = run_command(
            f"tahoe ls --json {cap}",
            cwd=self.config.node_dir,
            check=False,
        )

        if not outcome.success:
            raise StorageError(f"List failed: {outcome.stderr}")

        try:
            data = json.loads(outcome.stdout)
        except json.JSONDecodeError:
            return []

        # Each child is encoded as a [type, metadata] pair in the JSON.
        return [
            {
                "name": name,
                "type": entry_type,
                "size": metadata.get("size", 0),
                "mutable": metadata.get("mutable", False),
            }
            for name, (entry_type, metadata) in data.get("children", {}).items()
        ]

    def check_file(self, cap: str, repair: bool = False) -> dict[str, Any]:
        """Check the health of a file.

        Args:
            cap: Capability to check
            repair: Whether to repair if unhealthy

        Returns:
            Dict with "healthy" (command success) and raw "output".
        """
        parts = ["tahoe check"]
        if repair:
            parts.append("--repair")
        parts.append(cap)

        outcome = run_command(" ".join(parts), cwd=self.config.node_dir, check=False)

        return {
            "healthy": outcome.success,
            "output": outcome.stdout,
        }

    def mount_filesystem(self, mountpoint: Path, cap: str | None = None) -> bool:
        """Mount the Tahoe filesystem using FUSE.

        Args:
            mountpoint: Where to mount
            cap: Optional root capability (uses alias if not provided)

        Returns:
            True if mount succeeded
        """
        ensure_dir(mountpoint)

        # Note: tahoe-lafs FUSE mounting is complex, this is simplified.
        base = f"tahoe start {self.config.node_dir}"
        cmd = f"{base} --mountpoint={mountpoint} --cap={cap}" if cap else f"{base} --mountpoint={mountpoint}"

        return run_command(cmd, check=False).success

    def unmount_filesystem(self, mountpoint: Path) -> bool:
        """Unmount the Tahoe filesystem.

        Args:
            mountpoint: Mountpoint to unmount

        Returns:
            True if unmount succeeded
        """
        return run_command(f"fusermount -u {mountpoint}", check=False).success
|