network-ai 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/QUICKSTART.md +260 -0
- package/README.md +604 -0
- package/SKILL.md +568 -0
- package/dist/adapters/adapter-registry.d.ts +94 -0
- package/dist/adapters/adapter-registry.d.ts.map +1 -0
- package/dist/adapters/adapter-registry.js +355 -0
- package/dist/adapters/adapter-registry.js.map +1 -0
- package/dist/adapters/agno-adapter.d.ts +112 -0
- package/dist/adapters/agno-adapter.d.ts.map +1 -0
- package/dist/adapters/agno-adapter.js +140 -0
- package/dist/adapters/agno-adapter.js.map +1 -0
- package/dist/adapters/autogen-adapter.d.ts +67 -0
- package/dist/adapters/autogen-adapter.d.ts.map +1 -0
- package/dist/adapters/autogen-adapter.js +141 -0
- package/dist/adapters/autogen-adapter.js.map +1 -0
- package/dist/adapters/base-adapter.d.ts +51 -0
- package/dist/adapters/base-adapter.d.ts.map +1 -0
- package/dist/adapters/base-adapter.js +103 -0
- package/dist/adapters/base-adapter.js.map +1 -0
- package/dist/adapters/crewai-adapter.d.ts +72 -0
- package/dist/adapters/crewai-adapter.d.ts.map +1 -0
- package/dist/adapters/crewai-adapter.js +148 -0
- package/dist/adapters/crewai-adapter.js.map +1 -0
- package/dist/adapters/custom-adapter.d.ts +74 -0
- package/dist/adapters/custom-adapter.d.ts.map +1 -0
- package/dist/adapters/custom-adapter.js +142 -0
- package/dist/adapters/custom-adapter.js.map +1 -0
- package/dist/adapters/dspy-adapter.d.ts +70 -0
- package/dist/adapters/dspy-adapter.d.ts.map +1 -0
- package/dist/adapters/dspy-adapter.js +127 -0
- package/dist/adapters/dspy-adapter.js.map +1 -0
- package/dist/adapters/haystack-adapter.d.ts +83 -0
- package/dist/adapters/haystack-adapter.d.ts.map +1 -0
- package/dist/adapters/haystack-adapter.js +149 -0
- package/dist/adapters/haystack-adapter.js.map +1 -0
- package/dist/adapters/index.d.ts +47 -0
- package/dist/adapters/index.d.ts.map +1 -0
- package/dist/adapters/index.js +56 -0
- package/dist/adapters/index.js.map +1 -0
- package/dist/adapters/langchain-adapter.d.ts +51 -0
- package/dist/adapters/langchain-adapter.d.ts.map +1 -0
- package/dist/adapters/langchain-adapter.js +134 -0
- package/dist/adapters/langchain-adapter.js.map +1 -0
- package/dist/adapters/llamaindex-adapter.d.ts +89 -0
- package/dist/adapters/llamaindex-adapter.d.ts.map +1 -0
- package/dist/adapters/llamaindex-adapter.js +135 -0
- package/dist/adapters/llamaindex-adapter.js.map +1 -0
- package/dist/adapters/mcp-adapter.d.ts +90 -0
- package/dist/adapters/mcp-adapter.d.ts.map +1 -0
- package/dist/adapters/mcp-adapter.js +200 -0
- package/dist/adapters/mcp-adapter.js.map +1 -0
- package/dist/adapters/openai-assistants-adapter.d.ts +94 -0
- package/dist/adapters/openai-assistants-adapter.d.ts.map +1 -0
- package/dist/adapters/openai-assistants-adapter.js +130 -0
- package/dist/adapters/openai-assistants-adapter.js.map +1 -0
- package/dist/adapters/openclaw-adapter.d.ts +21 -0
- package/dist/adapters/openclaw-adapter.d.ts.map +1 -0
- package/dist/adapters/openclaw-adapter.js +140 -0
- package/dist/adapters/openclaw-adapter.js.map +1 -0
- package/dist/adapters/semantic-kernel-adapter.d.ts +73 -0
- package/dist/adapters/semantic-kernel-adapter.d.ts.map +1 -0
- package/dist/adapters/semantic-kernel-adapter.js +123 -0
- package/dist/adapters/semantic-kernel-adapter.js.map +1 -0
- package/dist/index.d.ts +379 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +1428 -0
- package/dist/index.js.map +1 -0
- package/dist/lib/blackboard-validator.d.ts +205 -0
- package/dist/lib/blackboard-validator.d.ts.map +1 -0
- package/dist/lib/blackboard-validator.js +756 -0
- package/dist/lib/blackboard-validator.js.map +1 -0
- package/dist/lib/locked-blackboard.d.ts +174 -0
- package/dist/lib/locked-blackboard.d.ts.map +1 -0
- package/dist/lib/locked-blackboard.js +654 -0
- package/dist/lib/locked-blackboard.js.map +1 -0
- package/dist/lib/swarm-utils.d.ts +136 -0
- package/dist/lib/swarm-utils.d.ts.map +1 -0
- package/dist/lib/swarm-utils.js +510 -0
- package/dist/lib/swarm-utils.js.map +1 -0
- package/dist/security.d.ts +269 -0
- package/dist/security.d.ts.map +1 -0
- package/dist/security.js +713 -0
- package/dist/security.js.map +1 -0
- package/package.json +84 -0
- package/scripts/blackboard.py +819 -0
- package/scripts/check_permission.py +331 -0
- package/scripts/revoke_token.py +243 -0
- package/scripts/swarm_guard.py +1140 -0
- package/scripts/validate_token.py +97 -0
- package/types/agent-adapter.d.ts +244 -0
- package/types/openclaw-core.d.ts +52 -0
|
@@ -0,0 +1,819 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Shared Blackboard - Agent Coordination State Manager (Atomic Commit Edition)
|
|
4
|
+
|
|
5
|
+
A markdown-based shared state system for multi-agent coordination.
|
|
6
|
+
Stores key-value pairs with optional TTL (time-to-live) expiration.
|
|
7
|
+
|
|
8
|
+
FEATURES:
|
|
9
|
+
- File Locking: Prevents race conditions in multi-agent environments
|
|
10
|
+
- Staging Area: propose → validate → commit workflow
|
|
11
|
+
- Atomic Commits: Changes are all-or-nothing
|
|
12
|
+
|
|
13
|
+
Usage:
|
|
14
|
+
python blackboard.py write KEY VALUE [--ttl SECONDS]
|
|
15
|
+
python blackboard.py read KEY
|
|
16
|
+
python blackboard.py delete KEY
|
|
17
|
+
python blackboard.py list
|
|
18
|
+
python blackboard.py snapshot
|
|
19
|
+
|
|
20
|
+
# Atomic commit workflow:
|
|
21
|
+
python blackboard.py propose CHANGE_ID KEY VALUE [--ttl SECONDS]
|
|
22
|
+
python blackboard.py validate CHANGE_ID
|
|
23
|
+
python blackboard.py commit CHANGE_ID
|
|
24
|
+
python blackboard.py abort CHANGE_ID
|
|
25
|
+
python blackboard.py list-pending
|
|
26
|
+
|
|
27
|
+
Examples:
|
|
28
|
+
python blackboard.py write "task:analysis" '{"status": "running"}'
|
|
29
|
+
python blackboard.py write "cache:data" '{"value": 123}' --ttl 3600
|
|
30
|
+
python blackboard.py read "task:analysis"
|
|
31
|
+
python blackboard.py list
|
|
32
|
+
|
|
33
|
+
# Safe multi-agent update:
|
|
34
|
+
python blackboard.py propose "chg_001" "order:123" '{"status": "approved"}'
|
|
35
|
+
python blackboard.py validate "chg_001" # Orchestrator checks for conflicts
|
|
36
|
+
python blackboard.py commit "chg_001" # Apply atomically
|
|
37
|
+
"""
|
|
38
|
+
|
|
39
|
+
import argparse
|
|
40
|
+
import json
|
|
41
|
+
import os
|
|
42
|
+
import re
|
|
43
|
+
import sys
|
|
44
|
+
import time
|
|
45
|
+
import hashlib
|
|
46
|
+
from datetime import datetime, timezone
|
|
47
|
+
from pathlib import Path
|
|
48
|
+
from typing import Any, Optional
|
|
49
|
+
from contextlib import contextmanager
|
|
50
|
+
|
|
51
|
+
# Try to import fcntl (Unix only), fall back to file-based locking on Windows.
# _fcntl stays None on platforms without the module; FileLock checks it at
# runtime to choose between flock() and a marker-file strategy.
_fcntl: Any = None
try:
    import fcntl as _fcntl_import
    _fcntl = _fcntl_import
except ImportError:
    pass

# Default blackboard location: a markdown file two levels above this script,
# plus a lock file and a staging directory under ./data.
BLACKBOARD_PATH = Path(__file__).parent.parent / "swarm-blackboard.md"
LOCK_PATH = Path(__file__).parent.parent / "data" / ".blackboard.lock"
PENDING_DIR = Path(__file__).parent.parent / "data" / "pending_changes"

# Lock timeout settings: how long acquire() keeps retrying in total, and how
# long it sleeps between attempts.
LOCK_TIMEOUT_SECONDS = 10
LOCK_RETRY_INTERVAL = 0.1
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
class FileLock:
    """
    Cross-platform file lock for preventing race conditions.
    Uses fcntl on Unix, fallback to lock file on Windows.
    """

    def __init__(self, lock_path: Path, timeout: float = LOCK_TIMEOUT_SECONDS):
        # Path of the lock file; parent directory is created eagerly so
        # acquire() can open the file without racing on mkdir.
        self.lock_path = lock_path
        self.timeout = timeout
        # Open handle kept for the lifetime of the lock (both strategies).
        self.lock_file: Optional[Any] = None
        # Marker file used only by the no-fcntl (Windows) fallback.
        self.lock_marker: Optional[Path] = None
        self.lock_path.parent.mkdir(parents=True, exist_ok=True)

    def acquire(self) -> bool:
        """Acquire the lock with timeout.

        Retries every LOCK_RETRY_INTERVAL seconds; returns True on success,
        False once self.timeout seconds have elapsed without acquiring.
        """
        start_time = time.time()

        while True:
            try:
                # NOTE(review): opening with 'w' truncates the lock file even
                # when another process holds the flock, so the holder info
                # below is best-effort diagnostics only — flock itself is
                # unaffected by the truncation.
                self.lock_file = open(self.lock_path, 'w')

                if _fcntl is not None:
                    # Unix/Linux/Mac - use fcntl.
                    # LOCK_NB makes this non-blocking: contention raises
                    # BlockingIOError, handled by the retry loop below.
                    _fcntl.flock(self.lock_file.fileno(), _fcntl.LOCK_EX | _fcntl.LOCK_NB)
                else:
                    # Windows fallback: use lock marker file
                    self.lock_marker = self.lock_path.with_suffix('.locked')
                    if self.lock_marker.exists():
                        # Check if stale (older than timeout)
                        age = time.time() - self.lock_marker.stat().st_mtime
                        if age < self.timeout:
                            self.lock_file.close()
                            raise BlockingIOError("Lock held by another process")
                        # Stale lock, remove it
                        self.lock_marker.unlink()
                    # NOTE(review): exists-check followed by write_text is not
                    # atomic; two processes can race through this window. It is
                    # a best-effort fallback where fcntl is unavailable.
                    self.lock_marker.write_text(str(time.time()))

                # Write lock holder info (diagnostics; never read back by code)
                self.lock_file.write(json.dumps({
                    "pid": os.getpid(),
                    "acquired_at": datetime.now(timezone.utc).isoformat()
                }))
                self.lock_file.flush()
                return True

            except (BlockingIOError, OSError):
                # Contention (or transient FS error): close our handle,
                # give up after the timeout, otherwise sleep and retry.
                if self.lock_file:
                    self.lock_file.close()
                    self.lock_file = None
                if time.time() - start_time > self.timeout:
                    return False
                time.sleep(LOCK_RETRY_INTERVAL)

    def release(self) -> None:
        """Release the lock. Safe to call repeatedly; errors are swallowed."""
        if self.lock_file:
            try:
                if _fcntl is not None:
                    _fcntl.flock(self.lock_file.fileno(), _fcntl.LOCK_UN)
                elif self.lock_marker and self.lock_marker.exists():
                    self.lock_marker.unlink()

                self.lock_file.close()
            except Exception:
                # Best-effort cleanup: never raise from release().
                pass
            finally:
                self.lock_file = None
                self.lock_marker = None
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
@contextmanager
def blackboard_lock(lock_path: Path = LOCK_PATH):
    """Serialize blackboard access: acquire the file lock, yield, then release.

    Raises TimeoutError if the lock cannot be acquired within the configured
    timeout (another agent is presumably holding it).
    """
    guard = FileLock(lock_path)
    acquired = guard.acquire()
    if not acquired:
        message = (
            f"Could not acquire blackboard lock within {LOCK_TIMEOUT_SECONDS}s. "
            "Another agent may be holding it. Retry later."
        )
        raise TimeoutError(message)
    try:
        yield
    finally:
        guard.release()
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
class SharedBlackboard:
    """Markdown-based shared state for agent coordination with atomic commits."""

    def __init__(self, path: Path = BLACKBOARD_PATH):
        # Markdown file backing the blackboard.
        self.path = path
        # In-memory mirror of the Knowledge Cache section: key -> entry dict.
        self.cache: dict[str, dict[str, Any]] = {}
        # Staging directory for the propose/validate/commit workflow.
        self.pending_dir = PENDING_DIR
        self.pending_dir.mkdir(parents=True, exist_ok=True)
        # Create the file on first use, then hydrate the cache from disk.
        self._initialize()
        self._load_from_disk()
|
|
164
|
+
|
|
165
|
+
def _initialize(self):
|
|
166
|
+
"""Create blackboard file if it doesn't exist."""
|
|
167
|
+
if not self.path.exists():
|
|
168
|
+
self.path.parent.mkdir(parents=True, exist_ok=True)
|
|
169
|
+
initial_content = f"""# Swarm Blackboard
|
|
170
|
+
Last Updated: {datetime.now(timezone.utc).isoformat()}
|
|
171
|
+
|
|
172
|
+
## Active Tasks
|
|
173
|
+
| TaskID | Agent | Status | Started | Description |
|
|
174
|
+
|--------|-------|--------|---------|-------------|
|
|
175
|
+
|
|
176
|
+
## Knowledge Cache
|
|
177
|
+
<!-- Cached results from agent operations -->
|
|
178
|
+
|
|
179
|
+
## Coordination Signals
|
|
180
|
+
<!-- Agent availability status -->
|
|
181
|
+
|
|
182
|
+
## Execution History
|
|
183
|
+
<!-- Chronological log of completed tasks -->
|
|
184
|
+
"""
|
|
185
|
+
self.path.write_text(initial_content, encoding="utf-8")
|
|
186
|
+
|
|
187
|
+
def _load_from_disk(self):
|
|
188
|
+
"""Load entries from the markdown blackboard."""
|
|
189
|
+
try:
|
|
190
|
+
content = self.path.read_text(encoding="utf-8")
|
|
191
|
+
|
|
192
|
+
# Parse Knowledge Cache section
|
|
193
|
+
cache_match = re.search(
|
|
194
|
+
r'## Knowledge Cache\n([\s\S]*?)(?=\n## |$)',
|
|
195
|
+
content
|
|
196
|
+
)
|
|
197
|
+
|
|
198
|
+
if cache_match:
|
|
199
|
+
cache_section = cache_match.group(1)
|
|
200
|
+
# Find all entries: ### key\n{json}
|
|
201
|
+
entries = re.findall(
|
|
202
|
+
r'### (\S+)\n([\s\S]*?)(?=\n### |$)',
|
|
203
|
+
cache_section
|
|
204
|
+
)
|
|
205
|
+
|
|
206
|
+
for key, value_str in entries:
|
|
207
|
+
try:
|
|
208
|
+
entry = json.loads(value_str.strip())
|
|
209
|
+
self.cache[key] = entry
|
|
210
|
+
except json.JSONDecodeError:
|
|
211
|
+
# Skip malformed entries
|
|
212
|
+
pass
|
|
213
|
+
except Exception as e:
|
|
214
|
+
print(f"Warning: Failed to load blackboard: {e}", file=sys.stderr)
|
|
215
|
+
|
|
216
|
+
def _persist_to_disk(self):
|
|
217
|
+
"""Save entries to the markdown blackboard."""
|
|
218
|
+
sections = [
|
|
219
|
+
"# Swarm Blackboard",
|
|
220
|
+
f"Last Updated: {datetime.now(timezone.utc).isoformat()}",
|
|
221
|
+
"",
|
|
222
|
+
"## Active Tasks",
|
|
223
|
+
"| TaskID | Agent | Status | Started | Description |",
|
|
224
|
+
"|--------|-------|--------|---------|-------------|",
|
|
225
|
+
"",
|
|
226
|
+
"## Knowledge Cache",
|
|
227
|
+
]
|
|
228
|
+
|
|
229
|
+
# Clean expired entries and write valid ones
|
|
230
|
+
for key, entry in list(self.cache.items()):
|
|
231
|
+
if self._is_expired(entry):
|
|
232
|
+
del self.cache[key]
|
|
233
|
+
continue
|
|
234
|
+
|
|
235
|
+
sections.append(f"### {key}")
|
|
236
|
+
sections.append(json.dumps(entry, indent=2))
|
|
237
|
+
sections.append("")
|
|
238
|
+
|
|
239
|
+
sections.extend([
|
|
240
|
+
"## Coordination Signals",
|
|
241
|
+
"",
|
|
242
|
+
"## Execution History",
|
|
243
|
+
])
|
|
244
|
+
|
|
245
|
+
self.path.write_text("\n".join(sections), encoding="utf-8")
|
|
246
|
+
|
|
247
|
+
def _is_expired(self, entry: dict[str, Any]) -> bool:
|
|
248
|
+
"""Check if an entry has expired based on TTL."""
|
|
249
|
+
ttl = entry.get("ttl")
|
|
250
|
+
if ttl is None:
|
|
251
|
+
return False
|
|
252
|
+
|
|
253
|
+
timestamp = entry.get("timestamp")
|
|
254
|
+
if not timestamp:
|
|
255
|
+
return False
|
|
256
|
+
|
|
257
|
+
try:
|
|
258
|
+
created = datetime.fromisoformat(str(timestamp).replace("Z", "+00:00"))
|
|
259
|
+
now = datetime.now(timezone.utc)
|
|
260
|
+
elapsed = (now - created).total_seconds()
|
|
261
|
+
return elapsed > ttl
|
|
262
|
+
except Exception:
|
|
263
|
+
return False
|
|
264
|
+
|
|
265
|
+
def read(self, key: str) -> Optional[dict[str, Any]]:
|
|
266
|
+
"""Read an entry from the blackboard."""
|
|
267
|
+
entry = self.cache.get(key)
|
|
268
|
+
if entry is None:
|
|
269
|
+
return None
|
|
270
|
+
|
|
271
|
+
if self._is_expired(entry):
|
|
272
|
+
del self.cache[key]
|
|
273
|
+
self._persist_to_disk()
|
|
274
|
+
return None
|
|
275
|
+
|
|
276
|
+
return entry
|
|
277
|
+
|
|
278
|
+
def write(self, key: str, value: Any, source_agent: str = "unknown",
|
|
279
|
+
ttl: Optional[int] = None) -> dict[str, Any]:
|
|
280
|
+
"""Write an entry to the blackboard (with file locking)."""
|
|
281
|
+
entry: dict[str, Any] = {
|
|
282
|
+
"key": key,
|
|
283
|
+
"value": value,
|
|
284
|
+
"source_agent": source_agent,
|
|
285
|
+
"timestamp": datetime.now(timezone.utc).isoformat(),
|
|
286
|
+
"ttl": ttl,
|
|
287
|
+
}
|
|
288
|
+
|
|
289
|
+
with blackboard_lock():
|
|
290
|
+
# Reload to get latest state
|
|
291
|
+
self._load_from_disk()
|
|
292
|
+
self.cache[key] = entry
|
|
293
|
+
self._persist_to_disk()
|
|
294
|
+
|
|
295
|
+
return entry
|
|
296
|
+
|
|
297
|
+
def delete(self, key: str) -> bool:
|
|
298
|
+
"""Delete an entry from the blackboard (with file locking)."""
|
|
299
|
+
with blackboard_lock():
|
|
300
|
+
self._load_from_disk()
|
|
301
|
+
if key in self.cache:
|
|
302
|
+
del self.cache[key]
|
|
303
|
+
self._persist_to_disk()
|
|
304
|
+
return True
|
|
305
|
+
return False
|
|
306
|
+
|
|
307
|
+
# ========================================================================
|
|
308
|
+
# ATOMIC COMMIT WORKFLOW: propose → validate → commit
|
|
309
|
+
# ========================================================================
|
|
310
|
+
|
|
311
|
+
def propose_change(self, change_id: str, key: str, value: Any,
|
|
312
|
+
source_agent: str = "unknown", ttl: Optional[int] = None,
|
|
313
|
+
operation: str = "write") -> dict[str, Any]:
|
|
314
|
+
"""
|
|
315
|
+
Stage a change without applying it (Step 1 of atomic commit).
|
|
316
|
+
|
|
317
|
+
The change is written to a .pending file and must be validated
|
|
318
|
+
and committed by the orchestrator before it takes effect.
|
|
319
|
+
"""
|
|
320
|
+
pending_file = self.pending_dir / f"{change_id}.pending.json"
|
|
321
|
+
|
|
322
|
+
# Check for duplicate change_id
|
|
323
|
+
if pending_file.exists():
|
|
324
|
+
return {
|
|
325
|
+
"success": False,
|
|
326
|
+
"error": f"Change ID '{change_id}' already exists. Use a unique ID."
|
|
327
|
+
}
|
|
328
|
+
|
|
329
|
+
# Get current value for conflict detection
|
|
330
|
+
current_entry = self.cache.get(key)
|
|
331
|
+
current_hash = None
|
|
332
|
+
if current_entry:
|
|
333
|
+
current_hash = hashlib.sha256(
|
|
334
|
+
json.dumps(current_entry, sort_keys=True).encode()
|
|
335
|
+
).hexdigest()[:16]
|
|
336
|
+
|
|
337
|
+
change_set: dict[str, Any] = {
|
|
338
|
+
"change_id": change_id,
|
|
339
|
+
"operation": operation, # "write" or "delete"
|
|
340
|
+
"key": key,
|
|
341
|
+
"value": value,
|
|
342
|
+
"source_agent": source_agent,
|
|
343
|
+
"ttl": ttl,
|
|
344
|
+
"proposed_at": datetime.now(timezone.utc).isoformat(),
|
|
345
|
+
"status": "pending",
|
|
346
|
+
"base_hash": current_hash, # For conflict detection
|
|
347
|
+
}
|
|
348
|
+
|
|
349
|
+
pending_file.write_text(json.dumps(change_set, indent=2))
|
|
350
|
+
|
|
351
|
+
return {
|
|
352
|
+
"success": True,
|
|
353
|
+
"change_id": change_id,
|
|
354
|
+
"status": "proposed",
|
|
355
|
+
"pending_file": str(pending_file),
|
|
356
|
+
"message": "Change staged. Run 'validate' then 'commit' to apply."
|
|
357
|
+
}
|
|
358
|
+
|
|
359
|
+
    def validate_change(self, change_id: str) -> dict[str, Any]:
        """
        Validate a pending change for conflicts (Step 2 of atomic commit).

        Checks:
        - Change exists
        - No conflicting changes to the same key
        - Base hash matches (data hasn't changed since proposal)
        """
        pending_file = self.pending_dir / f"{change_id}.pending.json"

        if not pending_file.exists():
            return {
                "valid": False,
                "error": f"Change '{change_id}' not found. Was it proposed?"
            }

        change_set = json.loads(pending_file.read_text())

        # Only freshly proposed changes may be validated (not ones already
        # validated, committed, or aborted).
        if change_set["status"] != "pending":
            return {
                "valid": False,
                "error": f"Change is in '{change_set['status']}' state, not 'pending'"
            }

        key = change_set["key"]
        base_hash = change_set["base_hash"]

        # Check for conflicts: has the key changed since we proposed?
        # Reload under the lock so the comparison uses the on-disk truth.
        with blackboard_lock():
            self._load_from_disk()
            current_entry = self.cache.get(key)

        current_hash = None
        if current_entry:
            current_hash = hashlib.sha256(
                json.dumps(current_entry, sort_keys=True).encode()
            ).hexdigest()[:16]

        if base_hash != current_hash:
            return {
                "valid": False,
                "conflict": True,
                "error": f"CONFLICT: Key '{key}' was modified since proposal. "
                         f"Expected hash {base_hash}, got {current_hash}. "
                         "Abort and re-propose with fresh data.",
                "current_value": current_entry
            }

        # Check for other pending changes to the same key
        conflicts: list[str] = []
        for other_file in self.pending_dir.glob("*.pending.json"):
            if other_file.name == pending_file.name:
                continue
            other_change = json.loads(other_file.read_text())
            if other_change["key"] == key and other_change["status"] == "pending":
                conflicts.append(other_change["change_id"])

        if conflicts:
            return {
                "valid": False,
                "conflict": True,
                "error": f"CONFLICT: Other pending changes affect key '{key}': {conflicts}. "
                         "Resolve conflicts before committing."
            }

        # Mark as validated.
        # NOTE(review): validation and commit are separate steps and the hash
        # is not re-checked at commit time, leaving a TOCTOU window for a
        # change landing in between — confirm this is acceptable for callers.
        change_set["status"] = "validated"
        change_set["validated_at"] = datetime.now(timezone.utc).isoformat()
        pending_file.write_text(json.dumps(change_set, indent=2))

        return {
            "valid": True,
            "change_id": change_id,
            "key": key,
            "status": "validated",
            "message": "No conflicts detected. Ready to commit."
        }
|
|
437
|
+
|
|
438
|
+
    def commit_change(self, change_id: str) -> dict[str, Any]:
        """
        Apply a validated change atomically (Step 3 of atomic commit).
        """
        pending_file = self.pending_dir / f"{change_id}.pending.json"

        if not pending_file.exists():
            return {
                "committed": False,
                "error": f"Change '{change_id}' not found."
            }

        change_set = json.loads(pending_file.read_text())

        # Enforce the propose -> validate -> commit state machine.
        if change_set["status"] != "validated":
            return {
                "committed": False,
                "error": f"Change must be validated first. Current status: {change_set['status']}"
            }

        # Apply the change atomically (reload, mutate, persist under the lock).
        with blackboard_lock():
            self._load_from_disk()

            if change_set["operation"] == "delete":
                if change_set["key"] in self.cache:
                    del self.cache[change_set["key"]]
            else:
                # Any non-"delete" operation is applied as a write; the entry
                # gets a fresh timestamp and records which change produced it.
                entry: dict[str, Any] = {
                    "key": change_set["key"],
                    "value": change_set["value"],
                    "source_agent": change_set["source_agent"],
                    "timestamp": datetime.now(timezone.utc).isoformat(),
                    "ttl": change_set["ttl"],
                    "committed_from": change_id
                }
                self.cache[change_set["key"]] = entry

            self._persist_to_disk()

        # Archive the committed change
        change_set["status"] = "committed"
        change_set["committed_at"] = datetime.now(timezone.utc).isoformat()

        archive_dir = self.pending_dir / "archive"
        archive_dir.mkdir(exist_ok=True)
        archive_file = archive_dir / f"{change_id}.committed.json"
        archive_file.write_text(json.dumps(change_set, indent=2))

        # Remove pending file
        pending_file.unlink()

        return {
            "committed": True,
            "change_id": change_id,
            "key": change_set["key"],
            "operation": change_set["operation"],
            "message": "Change committed atomically."
        }
|
|
497
|
+
|
|
498
|
+
def abort_change(self, change_id: str) -> dict[str, Any]:
|
|
499
|
+
"""Abort a pending change without applying it."""
|
|
500
|
+
pending_file = self.pending_dir / f"{change_id}.pending.json"
|
|
501
|
+
|
|
502
|
+
if not pending_file.exists():
|
|
503
|
+
return {
|
|
504
|
+
"aborted": False,
|
|
505
|
+
"error": f"Change '{change_id}' not found."
|
|
506
|
+
}
|
|
507
|
+
|
|
508
|
+
change_set = json.loads(pending_file.read_text())
|
|
509
|
+
change_set["status"] = "aborted"
|
|
510
|
+
change_set["aborted_at"] = datetime.now(timezone.utc).isoformat()
|
|
511
|
+
|
|
512
|
+
# Archive the aborted change
|
|
513
|
+
archive_dir = self.pending_dir / "archive"
|
|
514
|
+
archive_dir.mkdir(exist_ok=True)
|
|
515
|
+
archive_file = archive_dir / f"{change_id}.aborted.json"
|
|
516
|
+
archive_file.write_text(json.dumps(change_set, indent=2))
|
|
517
|
+
|
|
518
|
+
pending_file.unlink()
|
|
519
|
+
|
|
520
|
+
return {
|
|
521
|
+
"aborted": True,
|
|
522
|
+
"change_id": change_id,
|
|
523
|
+
"key": change_set["key"]
|
|
524
|
+
}
|
|
525
|
+
|
|
526
|
+
def list_pending_changes(self) -> list[dict[str, Any]]:
|
|
527
|
+
"""List all pending changes awaiting commit."""
|
|
528
|
+
pending: list[dict[str, Any]] = []
|
|
529
|
+
for pending_file in self.pending_dir.glob("*.pending.json"):
|
|
530
|
+
change_set = json.loads(pending_file.read_text())
|
|
531
|
+
pending.append({
|
|
532
|
+
"change_id": change_set["change_id"],
|
|
533
|
+
"key": change_set["key"],
|
|
534
|
+
"operation": change_set["operation"],
|
|
535
|
+
"source_agent": change_set["source_agent"],
|
|
536
|
+
"status": change_set["status"],
|
|
537
|
+
"proposed_at": change_set["proposed_at"]
|
|
538
|
+
})
|
|
539
|
+
return pending
|
|
540
|
+
|
|
541
|
+
def exists(self, key: str) -> bool:
|
|
542
|
+
"""Check if a key exists (and is not expired)."""
|
|
543
|
+
return self.read(key) is not None
|
|
544
|
+
|
|
545
|
+
def list_keys(self) -> list[str]:
|
|
546
|
+
"""List all valid (non-expired) keys."""
|
|
547
|
+
valid_keys: list[str] = []
|
|
548
|
+
for key in list(self.cache.keys()):
|
|
549
|
+
if self.read(key) is not None:
|
|
550
|
+
valid_keys.append(key)
|
|
551
|
+
return valid_keys
|
|
552
|
+
|
|
553
|
+
def get_snapshot(self) -> dict[str, dict[str, Any]]:
|
|
554
|
+
"""Get a snapshot of all valid entries."""
|
|
555
|
+
snapshot: dict[str, dict[str, Any]] = {}
|
|
556
|
+
for key in list(self.cache.keys()):
|
|
557
|
+
entry = self.read(key)
|
|
558
|
+
if entry is not None:
|
|
559
|
+
snapshot[key] = entry
|
|
560
|
+
return snapshot
|
|
561
|
+
|
|
562
|
+
|
|
563
|
+
def main():
    """Command-line entry point: parse arguments and dispatch to SharedBlackboard."""
    parser = argparse.ArgumentParser(
        description="Shared Blackboard - Agent Coordination State Manager (Atomic Commit Edition)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Commands:
  write KEY VALUE [--ttl SECONDS]   Write a value (with file locking)
  read KEY                          Read a value
  delete KEY                        Delete a key
  list                              List all keys
  snapshot                          Get full snapshot as JSON

Atomic Commit Workflow (for multi-agent safety):
  propose CHANGE_ID KEY VALUE       Stage a change (Step 1)
  validate CHANGE_ID                Check for conflicts (Step 2)
  commit CHANGE_ID                  Apply atomically (Step 3)
  abort CHANGE_ID                   Cancel a pending change
  list-pending                      Show all pending changes

Examples:
  %(prog)s write "task:analysis" '{"status": "running"}'
  %(prog)s write "cache:temp" '{"data": [1,2,3]}' --ttl 3600

  # Safe multi-agent update:
  %(prog)s propose "chg_001" "order:123" '{"status": "approved"}'
  %(prog)s validate "chg_001"
  %(prog)s commit "chg_001"
"""
    )

    parser.add_argument(
        "command",
        choices=["write", "read", "delete", "list", "snapshot",
                 "propose", "validate", "commit", "abort", "list-pending"],
        help="Command to execute"
    )
    # NOTE(review): only two positional slots exist, so for "propose" the
    # change ID rides in `key` and "KEY VALUE" must be passed as a single
    # quoted argument (it is split on the first space below). The unquoted
    # `propose CHANGE_ID KEY VALUE` form shown in the epilog would be
    # rejected by argparse as an extra positional — confirm intended usage.
    parser.add_argument(
        "key",
        nargs="?",
        help="Key name or Change ID (depending on command)"
    )
    parser.add_argument(
        "value",
        nargs="?",
        help="JSON value (required for write/propose)"
    )
    parser.add_argument(
        "--ttl",
        type=int,
        help="Time-to-live in seconds (for write/propose)"
    )
    parser.add_argument(
        "--agent",
        default="cli",
        help="Source agent ID (for write/propose)"
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Output as JSON"
    )
    parser.add_argument(
        "--path",
        type=Path,
        default=BLACKBOARD_PATH,
        help="Path to blackboard file"
    )

    args = parser.parse_args()
    bb = SharedBlackboard(args.path)

    try:
        if args.command == "write":
            if not args.key or not args.value:
                print("Error: write requires KEY and VALUE", file=sys.stderr)
                sys.exit(1)

            # Values that parse as JSON are stored structured; anything else
            # is stored as the raw string.
            try:
                value = json.loads(args.value)
            except json.JSONDecodeError:
                value = args.value

            entry = bb.write(args.key, value, args.agent, args.ttl)

            if args.json:
                print(json.dumps(entry, indent=2))
            else:
                print(f"✅ Written: {args.key} (with lock)")
                if args.ttl:
                    print(f"   TTL: {args.ttl} seconds")

        elif args.command == "read":
            if not args.key:
                print("Error: read requires KEY", file=sys.stderr)
                sys.exit(1)

            entry = bb.read(args.key)

            if entry is None:
                if args.json:
                    print("null")
                else:
                    print(f"❌ Key not found or expired: {args.key}")
                sys.exit(1)

            if args.json:
                print(json.dumps(entry, indent=2))
            else:
                print(f"📖 {args.key}:")
                print(f"   Value: {json.dumps(entry.get('value'))}")
                print(f"   Source: {entry.get('source_agent')}")
                print(f"   Timestamp: {entry.get('timestamp')}")
                if entry.get('ttl'):
                    print(f"   TTL: {entry['ttl']} seconds")

        elif args.command == "delete":
            if not args.key:
                print("Error: delete requires KEY", file=sys.stderr)
                sys.exit(1)

            if bb.delete(args.key):
                print(f"✅ Deleted: {args.key}")
            else:
                print(f"❌ Key not found: {args.key}")
                sys.exit(1)

        elif args.command == "list":
            keys = bb.list_keys()

            if args.json:
                print(json.dumps(keys, indent=2))
            else:
                if keys:
                    print(f"📋 Blackboard keys ({len(keys)}):")
                    for key in sorted(keys):
                        entry = bb.read(key)
                        ttl_info = f" [TTL: {entry['ttl']}s]" if entry and entry.get('ttl') else ""
                        print(f"  • {key}{ttl_info}")
                else:
                    print("📋 Blackboard is empty")

        elif args.command == "snapshot":
            snapshot = bb.get_snapshot()
            print(json.dumps(snapshot, indent=2))

        # === ATOMIC COMMIT COMMANDS ===

        elif args.command == "propose":
            if not args.key or not args.value:
                print("Error: propose requires CHANGE_ID and KEY VALUE", file=sys.stderr)
                print("Usage: propose CHANGE_ID KEY VALUE", file=sys.stderr)
                sys.exit(1)

            # Parse: propose CHANGE_ID KEY VALUE (key is actually change_id, value is "KEY VALUE")
            parts = args.value.split(" ", 1)
            if len(parts) < 2:
                print("Error: propose requires CHANGE_ID KEY VALUE", file=sys.stderr)
                sys.exit(1)

            change_id = args.key
            actual_key = parts[0]
            actual_value_str = parts[1] if len(parts) > 1 else "{}"

            # JSON payloads are stored structured; plain strings as-is.
            try:
                actual_value = json.loads(actual_value_str)
            except json.JSONDecodeError:
                actual_value = actual_value_str

            result = bb.propose_change(change_id, actual_key, actual_value, args.agent, args.ttl)

            if args.json:
                print(json.dumps(result, indent=2))
            else:
                if result["success"]:
                    print(f"📝 Change PROPOSED: {change_id}")
                    print(f"   Key: {actual_key}")
                    print(f"   Status: pending validation")
                    print(f"   Next: run 'validate {change_id}'")
                else:
                    print(f"❌ Proposal FAILED: {result['error']}")
                    sys.exit(1)

        elif args.command == "validate":
            if not args.key:
                print("Error: validate requires CHANGE_ID", file=sys.stderr)
                sys.exit(1)

            result = bb.validate_change(args.key)

            if args.json:
                print(json.dumps(result, indent=2))
            else:
                if result["valid"]:
                    print(f"✅ Change VALIDATED: {args.key}")
                    print(f"   Key: {result['key']}")
                    print(f"   No conflicts detected")
                    print(f"   Next: run 'commit {args.key}'")
                else:
                    print(f"❌ Validation FAILED: {result['error']}")
                    sys.exit(1)

        elif args.command == "commit":
            if not args.key:
                print("Error: commit requires CHANGE_ID", file=sys.stderr)
                sys.exit(1)

            result = bb.commit_change(args.key)

            if args.json:
                print(json.dumps(result, indent=2))
            else:
                if result["committed"]:
                    print(f"🎉 Change COMMITTED: {args.key}")
                    print(f"   Key: {result['key']}")
                    print(f"   Operation: {result['operation']}")
                else:
                    print(f"❌ Commit FAILED: {result['error']}")
                    sys.exit(1)

        elif args.command == "abort":
            if not args.key:
                print("Error: abort requires CHANGE_ID", file=sys.stderr)
                sys.exit(1)

            result = bb.abort_change(args.key)

            if args.json:
                print(json.dumps(result, indent=2))
            else:
                if result["aborted"]:
                    print(f"🚫 Change ABORTED: {args.key}")
                else:
                    print(f"❌ Abort FAILED: {result['error']}")
                    sys.exit(1)

        elif args.command == "list-pending":
            pending = bb.list_pending_changes()

            if args.json:
                print(json.dumps(pending, indent=2))
            else:
                if pending:
                    print(f"📋 Pending changes ({len(pending)}):")
                    for p in pending:
                        status_icon = "🟡" if p["status"] == "pending" else "🟢"
                        print(f"  {status_icon} {p['change_id']}: {p['operation']} '{p['key']}'")
                        print(f"     Agent: {p['source_agent']} | Status: {p['status']}")
                else:
                    print("📋 No pending changes")

    except TimeoutError as e:
        # Raised by blackboard_lock() when another agent holds the lock.
        print(f"🔒 LOCK TIMEOUT: {e}", file=sys.stderr)
        sys.exit(1)
|
|
816
|
+
|
|
817
|
+
|
|
818
|
+
# Script entry point: only run the CLI when executed directly, not on import.
if __name__ == "__main__":
    main()
|