wnm 0.0.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of wnm might be problematic. Click here for more details.

wnm/models.py ADDED
@@ -0,0 +1,459 @@
1
+ # Turn a class into a storable object with ORM
2
+ from typing import Optional
3
+
4
+ import json_fix
5
+ from sqlalchemy import (
6
+ Float,
7
+ ForeignKey,
8
+ Integer,
9
+ Unicode,
10
+ UnicodeText,
11
+ create_engine,
12
+ insert,
13
+ select,
14
+ update,
15
+ )
16
+ from sqlalchemy.orm import (
17
+ DeclarativeBase,
18
+ Mapped,
19
+ mapped_column,
20
+ relationship,
21
+ scoped_session,
22
+ sessionmaker,
23
+ )
24
+
25
+
26
+ # create a Base class bound to sqlalchemy
27
class Base(DeclarativeBase):
    """Shared SQLAlchemy declarative base for all wnm ORM models."""

    pass
29
+
30
+
31
+ # Extend the Base class to create our Host info
32
class Machine(Base):
    """One row per wnm instance (single physical machine)"""

    __tablename__ = "machine"
    # No schema in sqlite3
    # __table_args__ = {"schema": "colony"}
    id: Mapped[int] = mapped_column(Integer, primary_key=True)

    # System configuration
    cpu_count: Mapped[int] = mapped_column(Integer)
    node_cap: Mapped[int] = mapped_column(Integer)

    # Resource thresholds for adding nodes
    cpu_less_than: Mapped[int] = mapped_column(Integer)
    mem_less_than: Mapped[int] = mapped_column(Integer)
    hd_less_than: Mapped[int] = mapped_column(Integer)
    hdio_read_less_than: Mapped[int] = mapped_column(Integer)
    hdio_write_less_than: Mapped[int] = mapped_column(Integer)
    netio_read_less_than: Mapped[int] = mapped_column(Integer)
    netio_write_less_than: Mapped[int] = mapped_column(Integer)

    # Resource thresholds for removing nodes
    cpu_remove: Mapped[int] = mapped_column(Integer)
    mem_remove: Mapped[int] = mapped_column(Integer)
    hd_remove: Mapped[int] = mapped_column(Integer)
    hdio_read_remove: Mapped[int] = mapped_column(Integer)
    hdio_write_remove: Mapped[int] = mapped_column(Integer)
    netio_read_remove: Mapped[int] = mapped_column(Integer)
    netio_write_remove: Mapped[int] = mapped_column(Integer)

    # Load average thresholds
    max_load_average_allowed: Mapped[float] = mapped_column(Float)
    desired_load_average: Mapped[float] = mapped_column(Float)

    # Delay timers (in seconds, changed from minutes)
    delay_start: Mapped[int] = mapped_column(Integer)
    delay_upgrade: Mapped[int] = mapped_column(Integer)
    delay_remove: Mapped[int] = mapped_column(Integer)

    # Node configuration
    node_storage: Mapped[str] = mapped_column(UnicodeText)
    rewards_address: Mapped[str] = mapped_column(UnicodeText)
    donate_address: Mapped[str] = mapped_column(UnicodeText)

    # Port configuration
    port_start: Mapped[int] = mapped_column(Integer)
    metrics_port_start: Mapped[int] = mapped_column(Integer)

    # System state
    last_stopped_at: Mapped[int] = mapped_column(Integer)
    host: Mapped[str] = mapped_column(UnicodeText)
    crisis_bytes: Mapped[int] = mapped_column(Integer)

    # Runtime configuration
    environment: Mapped[Optional[str]] = mapped_column(UnicodeText)
    start_args: Mapped[Optional[str]] = mapped_column(UnicodeText)

    # NEW: Concurrency limits (Phase 5)
    max_concurrent_upgrades: Mapped[int] = mapped_column(Integer, default=1)
    max_concurrent_starts: Mapped[int] = mapped_column(Integer, default=2)
    max_concurrent_removals: Mapped[int] = mapped_column(Integer, default=1)

    # NEW: Node selection strategy (Phase 6)
    node_removal_strategy: Mapped[str] = mapped_column(
        UnicodeText, default="youngest"
    )

    # Relationships
    containers: Mapped[list["Container"]] = relationship(
        back_populates="machine", cascade="all, delete-orphan"
    )
    nodes: Mapped[list["Node"]] = relationship(
        back_populates="machine", cascade="all, delete-orphan"
    )

    def __init__(
        self,
        cpu_count,
        node_cap,
        cpu_less_than,
        cpu_remove,
        mem_less_than,
        mem_remove,
        hd_less_than,
        hd_remove,
        delay_start,
        delay_upgrade,
        delay_remove,
        node_storage,
        rewards_address,
        donate_address,
        max_load_average_allowed,
        desired_load_average,
        port_start,
        hdio_read_less_than,
        hdio_read_remove,
        hdio_write_less_than,
        hdio_write_remove,
        netio_read_less_than,
        netio_read_remove,
        netio_write_less_than,
        netio_write_remove,
        last_stopped_at,
        host,
        crisis_bytes,
        metrics_port_start,
        environment,
        start_args,
        max_concurrent_upgrades=1,
        max_concurrent_starts=2,
        max_concurrent_removals=1,
        node_removal_strategy="youngest",
    ):
        """Populate every mapped column explicitly.

        Only the Phase 5/6 fields (max_concurrent_*, node_removal_strategy)
        have defaults; all other columns must be supplied by the caller.
        """
        self.cpu_count = cpu_count
        self.node_cap = node_cap
        self.cpu_less_than = cpu_less_than
        self.cpu_remove = cpu_remove
        self.mem_less_than = mem_less_than
        self.mem_remove = mem_remove
        self.hd_less_than = hd_less_than
        self.hd_remove = hd_remove
        self.delay_start = delay_start
        self.delay_upgrade = delay_upgrade
        self.delay_remove = delay_remove
        self.node_storage = node_storage
        self.rewards_address = rewards_address
        self.donate_address = donate_address
        self.max_load_average_allowed = max_load_average_allowed
        self.desired_load_average = desired_load_average
        self.port_start = port_start
        self.hdio_read_less_than = hdio_read_less_than
        self.hdio_read_remove = hdio_read_remove
        self.hdio_write_less_than = hdio_write_less_than
        self.hdio_write_remove = hdio_write_remove
        self.netio_read_less_than = netio_read_less_than
        self.netio_read_remove = netio_read_remove
        self.netio_write_less_than = netio_write_less_than
        self.netio_write_remove = netio_write_remove
        self.last_stopped_at = last_stopped_at
        self.host = host
        self.crisis_bytes = crisis_bytes
        self.metrics_port_start = metrics_port_start
        self.environment = environment
        self.start_args = start_args
        self.max_concurrent_upgrades = max_concurrent_upgrades
        self.max_concurrent_starts = max_concurrent_starts
        self.max_concurrent_removals = max_concurrent_removals
        self.node_removal_strategy = node_removal_strategy

    def __repr__(self):
        """Constructor-style debug string.

        NOTE(review): the Phase 5/6 fields (max_concurrent_upgrades,
        max_concurrent_starts, max_concurrent_removals,
        node_removal_strategy) are not included here — confirm intentional.
        """
        return (
            f"Machine({self.cpu_count},{self.node_cap},{self.cpu_less_than},{self.cpu_remove}"
            + f",{self.mem_less_than},{self.mem_remove},{self.hd_less_than}"
            + f",{self.hd_remove},{self.delay_start},{self.delay_upgrade}"
            + f",{self.delay_remove}"
            + f',"{self.node_storage}","{self.rewards_address}","{self.donate_address}"'
            + f",{self.max_load_average_allowed},{self.desired_load_average}"
            + f",{self.port_start},{self.hdio_read_less_than},{self.hdio_read_remove}"
            + f",{self.hdio_write_less_than},{self.hdio_write_remove}"
            + f",{self.netio_read_less_than},{self.netio_read_remove}"
            + f",{self.netio_write_less_than},{self.netio_write_remove}"
            + f",{self.last_stopped_at},{self.host},{self.crisis_bytes}"
            + f",{self.metrics_port_start},{self.environment},{self.start_args})"
        )

    def __json__(self):
        """Dict used by ``json_fix`` to make this object JSON-serializable.

        NOTE(review): the f-string wrappers turn a ``None`` in the Optional
        columns (environment, start_args) into the string "None" rather than
        JSON null — confirm downstream consumers expect that.
        """
        return {
            "cpu_count": self.cpu_count,
            "node_cap": self.node_cap,
            "cpu_less_than": self.cpu_less_than,
            "cpu_remove": self.cpu_remove,
            "mem_less_than": self.mem_less_than,
            "mem_remove": self.mem_remove,
            "hd_less_than": self.hd_less_than,
            "hd_remove": self.hd_remove,
            "delay_start": self.delay_start,
            "delay_upgrade": self.delay_upgrade,
            "delay_remove": self.delay_remove,
            "node_storage": f"{self.node_storage}",
            "rewards_address": f"{self.rewards_address}",
            "donate_address": f"{self.donate_address}",
            "max_load_average_allowed": self.max_load_average_allowed,
            "desired_load_average": self.desired_load_average,
            "port_start": self.port_start,
            "hdio_read_less_than": self.hdio_read_less_than,
            "hdio_read_remove": self.hdio_read_remove,
            "hdio_write_less_than": self.hdio_write_less_than,
            "hdio_write_remove": self.hdio_write_remove,
            "netio_read_less_than": self.netio_read_less_than,
            "netio_read_remove": self.netio_read_remove,
            "netio_write_less_than": self.netio_write_less_than,
            "netio_write_remove": self.netio_write_remove,
            "last_stopped_at": self.last_stopped_at,
            "host": f"{self.host}",
            "crisis_bytes": self.crisis_bytes,
            "metrics_port_start": self.metrics_port_start,
            "environment": f"{self.environment}",
            "start_args": f"{self.start_args}",
            "max_concurrent_upgrades": self.max_concurrent_upgrades,
            "max_concurrent_starts": self.max_concurrent_starts,
            "max_concurrent_removals": self.max_concurrent_removals,
            "node_removal_strategy": f"{self.node_removal_strategy}",
        }
235
+
236
+
237
+ # NEW: Container table for Docker container management
238
class Container(Base):
    """Optional: Docker containers hosting nodes"""

    __tablename__ = "container"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)

    # Foreign key to machine
    machine_id: Mapped[int] = mapped_column(ForeignKey("machine.id"), default=1)

    # Docker container details
    container_id: Mapped[str] = mapped_column(Unicode(64), unique=True)
    name: Mapped[str] = mapped_column(UnicodeText)
    image: Mapped[str] = mapped_column(UnicodeText)
    status: Mapped[str] = mapped_column(Unicode(32))  # running, stopped, etc.
    created_at: Mapped[int] = mapped_column(Integer)

    # Relationships
    machine: Mapped["Machine"] = relationship(back_populates="containers")
    nodes: Mapped[list["Node"]] = relationship(
        back_populates="container", cascade="all, delete-orphan"
    )

    def __init__(
        self,
        container_id,
        name,
        image,
        status,
        created_at,
        machine_id=1,
    ):
        """Record one Docker container row.

        ``machine_id`` defaults to 1, the single local machine row.
        """
        self.machine_id = machine_id
        self.container_id = container_id
        self.name = name
        self.image = image
        self.status = status
        self.created_at = created_at

    def __repr__(self):
        """Constructor-style debug string (string fields quoted)."""
        return (
            f'Container({self.id},"{self.container_id}","{self.name}",'
            f'"{self.image}","{self.status}",{self.created_at})'
        )

    def __json__(self):
        """Dict used by ``json_fix`` for JSON serialization."""
        return dict(
            id=self.id,
            container_id=str(self.container_id),
            name=str(self.name),
            image=str(self.image),
            status=str(self.status),
            created_at=self.created_at,
            machine_id=self.machine_id,
        )
292
+
293
+
294
+ # Extend the Base class to create our Node info
295
class Node(Base):
    """Nodes on host OS or in containers"""

    __tablename__ = "node"
    # No schema in sqlite3
    # __table_args__ = {"schema": "colony"}
    id: Mapped[int] = mapped_column(Integer, primary_key=True)

    # Foreign key to machine
    machine_id: Mapped[int] = mapped_column(ForeignKey("machine.id"), default=1)

    # NEW: Optional container reference
    container_id: Mapped[Optional[int]] = mapped_column(
        ForeignKey("container.id"), nullable=True
    )

    # NEW: Process manager type
    manager_type: Mapped[str] = mapped_column(
        UnicodeText, default="systemd"
    )  # "systemd", "docker", "setsid", "antctl", "launchctl"

    # Maps to antnode-{nodename}
    node_name: Mapped[str] = mapped_column(Unicode(10))
    # service definition name
    service: Mapped[str] = mapped_column(UnicodeText)
    # User running node
    user: Mapped[str] = mapped_column(Unicode(24))
    # Full path to node binary
    binary: Mapped[str] = mapped_column(UnicodeText)
    # Last polled version of the binary
    version: Mapped[Optional[str]] = mapped_column(UnicodeText)
    # Root directory of the node
    root_dir: Mapped[str] = mapped_column(UnicodeText)
    # Node open port
    port: Mapped[int] = mapped_column(Integer)
    # Node metrics port
    metrics_port: Mapped[int] = mapped_column(Integer)
    # Network to use ( Live is evm-arbitrum-one )
    network: Mapped[str] = mapped_column(UnicodeText)
    # Reward address
    wallet: Mapped[Optional[str]] = mapped_column(Unicode(42), index=True)
    # Reported peer_id
    peer_id: Mapped[Optional[str]] = mapped_column(Unicode(52))
    # Node's last probed status
    status: Mapped[str] = mapped_column(Unicode(32), index=True)
    # Timestamp of last update
    timestamp: Mapped[int] = mapped_column(Integer, index=True)
    # Number of node records stored as reported by node
    records: Mapped[int] = mapped_column(Integer, index=True)
    # Node reported uptime
    uptime: Mapped[int] = mapped_column(Integer)
    # Number of shuns
    shunned: Mapped[int] = mapped_column(Integer)
    # Number of connected peers as reported by node
    connected_peers: Mapped[int] = mapped_column(Integer, default=0)
    # Timestamp of node first launch
    age: Mapped[int] = mapped_column(Integer)
    # Host ip for data
    host: Mapped[str] = mapped_column(UnicodeText)
    # node launch method
    method: Mapped[str] = mapped_column(UnicodeText)
    # node layout
    layout: Mapped[str] = mapped_column(UnicodeText)
    # node environment settings
    environment: Mapped[Optional[str]] = mapped_column(UnicodeText)

    # Relationships
    machine: Mapped["Machine"] = relationship(back_populates="nodes")
    container: Mapped[Optional["Container"]] = relationship(back_populates="nodes")

    def __init__(
        self,
        id,
        node_name,
        service,
        user,
        binary,
        version,
        root_dir,
        port,
        metrics_port,
        network,
        wallet,
        peer_id,
        status,
        timestamp,
        records,
        uptime,
        shunned,
        connected_peers=0,
        age=None,
        host=None,
        method=None,
        layout=None,
        environment=None,
        machine_id=1,
        container_id=None,
        manager_type="systemd",
    ):
        """Populate every mapped column explicitly.

        NOTE: the ``id`` parameter intentionally carries the primary key
        (it shadows the builtin, kept for caller compatibility).
        """
        self.id = id
        self.node_name = node_name
        self.service = service
        self.user = user
        self.binary = binary
        self.version = version
        self.root_dir = root_dir
        self.port = port
        self.metrics_port = metrics_port
        self.network = network
        self.wallet = wallet
        self.peer_id = peer_id
        self.status = status
        self.timestamp = timestamp
        self.records = records
        self.uptime = uptime
        self.shunned = shunned
        self.connected_peers = connected_peers
        self.age = age
        self.host = host
        self.method = method
        self.layout = layout
        self.environment = environment
        self.machine_id = machine_id
        self.container_id = container_id
        self.manager_type = manager_type

    def __repr__(self):
        """Constructor-style debug string.

        Fixed: the original f-string dropped the closing quote after
        ``{self.user}`` (``"{self.user},"{self.binary}"``), producing
        malformed output; ``method`` and ``layout`` are now quoted like the
        other string-valued fields.
        """
        return (
            f'Node({self.id},"{self.node_name}","{self.service}","{self.user}","{self.binary}"'
            + f',"{self.version}","{self.root_dir}",{self.port},{self.metrics_port}'
            + f',"{self.network}","{self.wallet}","{self.peer_id}","{self.status}",{self.timestamp}'
            + f',{self.records},{self.uptime},{self.shunned},{self.connected_peers},{self.age},"{self.host}"'
            + f',"{self.method}","{self.layout}","{self.environment}"'
            + f',{self.machine_id},{self.container_id},"{self.manager_type}")'
        )

    def __json__(self):
        """Dict used by ``json_fix`` to make this object JSON-serializable.

        NOTE(review): f-string wrappers stringify ``None`` in Optional
        columns (version, wallet, peer_id, environment, ...) as "None"
        rather than JSON null — confirm downstream consumers expect that.
        """
        return {
            "id": self.id,
            "node_name": f"{self.node_name}",
            "service": f"{self.service}",
            "user": f"{self.user}",
            "binary": f"{self.binary}",
            "version": f"{self.version}",
            "root_dir": f"{self.root_dir}",
            "port": self.port,
            "metrics_port": self.metrics_port,
            "network": f"{self.network}",
            "wallet": f"{self.wallet}",
            "peer_id": f"{self.peer_id}",
            "status": f"{self.status}",
            "timestamp": self.timestamp,
            "records": self.records,
            "uptime": self.uptime,
            "shunned": self.shunned,
            "connected_peers": self.connected_peers,
            "age": self.age,
            "host": f"{self.host}",
            "method": f"{self.method}",
            "layout": f"{self.layout}",
            "environment": f"{self.environment}",
            "machine_id": self.machine_id,
            "container_id": self.container_id,
            "manager_type": f"{self.manager_type}",
        }
@@ -0,0 +1,23 @@
1
+ """
2
+ Process managers for node lifecycle management.
3
+
4
+ Supports multiple backends: systemd, docker, setsid, antctl, launchctl
5
+ """
6
+
7
+ from wnm.process_managers.base import NodeProcess, ProcessManager
8
+ from wnm.process_managers.docker_manager import DockerManager
9
+ from wnm.process_managers.factory import get_default_manager_type, get_process_manager
10
+ from wnm.process_managers.launchd_manager import LaunchctlManager
11
+ from wnm.process_managers.setsid_manager import SetsidManager
12
+ from wnm.process_managers.systemd_manager import SystemdManager
13
+
14
# Explicit public API of wnm.process_managers (names re-exported from the
# submodules imported above).
# NOTE(review): base.py's docstring mentions an AntctlManager, which is not
# imported or exported here — confirm whether that is intentional.
__all__ = [
    "NodeProcess",
    "ProcessManager",
    "get_process_manager",
    "get_default_manager_type",
    "SystemdManager",
    "DockerManager",
    "SetsidManager",
    "LaunchctlManager",
]
@@ -0,0 +1,203 @@
1
+ """
2
+ Abstract base class for process managers.
3
+
4
+ Process managers handle node lifecycle operations across different
5
+ execution environments (systemd, docker, setsid, etc.)
6
+ """
7
+
8
+ from abc import ABC, abstractmethod
9
+ from dataclasses import dataclass
10
+ from typing import Optional
11
+
12
+ from wnm.firewall.factory import get_firewall_manager
13
+ from wnm.models import Node
14
+
15
+
16
@dataclass
class NodeProcess:
    """Represents the runtime state of a node process"""

    # Database id of the node this process belongs to
    node_id: int
    # OS process id; None when unknown or the process is not running
    pid: Optional[int] = None
    status: str = "UNKNOWN"  # RUNNING, STOPPED, UPGRADING, etc.
    container_id: Optional[str] = None  # For docker-managed nodes
24
+
25
+
26
class ProcessManager(ABC):
    """
    Abstract interface for node lifecycle management.

    Each implementation handles a specific process management backend:
    - SystemdManager: systemd services (Linux)
    - DockerManager: Docker containers
    - SetsidManager: Background processes via setsid
    - AntctlManager: Wrapper around antctl CLI
    - LaunchctlManager: macOS launchd services
    """

    def __init__(self, firewall_type: Optional[str] = None):
        """
        Initialize process manager with optional firewall manager.

        Args:
            firewall_type: Type of firewall to use ("ufw", "null", etc.)
                          If None, auto-detects best available option
        """
        self.firewall = get_firewall_manager(firewall_type)

    @abstractmethod
    def create_node(self, node: Node, binary_path: str) -> bool:
        """
        Create and start a new node.

        Args:
            node: Node database record with configuration
            binary_path: Path to the node binary to execute

        Returns:
            True if node was created successfully
        """
        pass

    @abstractmethod
    def start_node(self, node: Node) -> bool:
        """
        Start a stopped node.

        Args:
            node: Node database record

        Returns:
            True if node started successfully
        """
        pass

    @abstractmethod
    def stop_node(self, node: Node) -> bool:
        """
        Stop a running node.

        Args:
            node: Node database record

        Returns:
            True if node stopped successfully
        """
        pass

    @abstractmethod
    def restart_node(self, node: Node) -> bool:
        """
        Restart a node.

        Args:
            node: Node database record

        Returns:
            True if node restarted successfully
        """
        pass

    @abstractmethod
    def get_status(self, node: Node) -> NodeProcess:
        """
        Get current runtime status of a node.

        Args:
            node: Node database record

        Returns:
            NodeProcess with current status and PID
        """
        pass

    @abstractmethod
    def remove_node(self, node: Node) -> bool:
        """
        Stop and remove all traces of a node.

        This should:
        1. Stop the node process
        2. Remove service/container definitions
        3. Optionally clean up data directories (controlled by node.keep_data)

        Args:
            node: Node database record

        Returns:
            True if node was removed successfully
        """
        pass

    @abstractmethod
    def survey_nodes(self, machine_config) -> list:
        """
        Survey all nodes managed by this process manager.

        This is used during database initialization/rebuild to discover
        existing nodes by scanning the manager's configuration directory.
        Each manager handles its own path logic internally.

        The database is the source of truth for regular operations.
        This method is ONLY used for initialization and migration.

        Args:
            machine_config: Machine configuration object

        Returns:
            List of node dictionaries ready for database insertion
        """
        pass

    def enable_firewall_port(
        self, port: int, protocol: str = "udp", comment: Optional[str] = None
    ) -> bool:
        """
        Open firewall port for node communication.

        Uses the configured firewall manager to open the port.
        Subclasses can override for custom firewall behavior.

        Args:
            port: Port number to open
            protocol: Protocol type (udp/tcp)
            comment: Optional comment for the firewall rule

        Returns:
            True if port was opened successfully
        """
        return self.firewall.enable_port(port, protocol, comment)

    def disable_firewall_port(self, port: int, protocol: str = "udp") -> bool:
        """
        Close firewall port when node is removed.

        Uses the configured firewall manager to close the port.
        Subclasses can override for custom firewall behavior.

        Args:
            port: Port number to close
            protocol: Protocol type (udp/tcp)

        Returns:
            True if port was closed successfully
        """
        return self.firewall.disable_port(port, protocol)

    def teardown_cluster(self) -> bool:
        """
        Teardown the entire cluster using manager-specific commands.

        This is an optional method that managers can override to provide
        efficient bulk teardown operations. If not overridden, returns False
        to indicate that individual node removal should be used instead.

        Examples:
            - AntctlManager: Uses 'antctl reset' command
            - Other managers: Return False to use default individual removal

        Returns:
            True if cluster was torn down successfully using manager-specific method,
            False to indicate fallback to individual node removal
        """
        return False