amfs-core 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- amfs_core-0.1.0/.gitignore +16 -0
- amfs_core-0.1.0/PKG-INFO +7 -0
- amfs_core-0.1.0/pyproject.toml +16 -0
- amfs_core-0.1.0/src/amfs_core/__init__.py +109 -0
- amfs_core-0.1.0/src/amfs_core/abc.py +519 -0
- amfs_core-0.1.0/src/amfs_core/default_embedder.py +83 -0
- amfs_core-0.1.0/src/amfs_core/embedder.py +43 -0
- amfs_core-0.1.0/src/amfs_core/engine.py +324 -0
- amfs_core-0.1.0/src/amfs_core/exceptions.py +60 -0
- amfs_core-0.1.0/src/amfs_core/importance.py +48 -0
- amfs_core-0.1.0/src/amfs_core/lifecycle.py +89 -0
- amfs_core-0.1.0/src/amfs_core/lock.py +64 -0
- amfs_core-0.1.0/src/amfs_core/models.py +653 -0
- amfs_core-0.1.0/src/amfs_core/outcome.py +98 -0
- amfs_core-0.1.0/src/amfs_core/snapshot.py +61 -0
- amfs_core-0.1.0/src/amfs_core/tiering.py +97 -0
amfs_core-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# Package metadata for the amfs-core distribution.
[project]
name = "amfs-core"
version = "0.1.0"
description = "AMFS core models, engine, and adapter ABC"
requires-python = ">=3.11"
license = "Apache-2.0"
# Runtime dependencies: pydantic v2 only (v3 excluded until verified).
dependencies = [
    "pydantic>=2.0,<3",
]

# Build with hatchling (PEP 517 backend).
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

# src-layout: the wheel ships the package from src/amfs_core.
[tool.hatch.build.targets.wheel]
packages = ["src/amfs_core"]
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
"""AMFS Core — models, engine, and adapter ABC."""
|
|
2
|
+
|
|
3
|
+
from amfs_core.abc import AdapterABC, WatchHandle
|
|
4
|
+
from amfs_core.embedder import EmbedderABC, cosine_similarity
|
|
5
|
+
from amfs_core.engine import CausalTagger, CoWEngine, ReadTracker
|
|
6
|
+
from amfs_core.exceptions import (
|
|
7
|
+
AMFSError,
|
|
8
|
+
AdapterError,
|
|
9
|
+
EntryNotFoundError,
|
|
10
|
+
LockTimeoutError,
|
|
11
|
+
StaleWriteError,
|
|
12
|
+
VersionConflictError,
|
|
13
|
+
)
|
|
14
|
+
from amfs_core.lifecycle import LifecycleManager
|
|
15
|
+
from amfs_core.models import (
|
|
16
|
+
AMFSConfig,
|
|
17
|
+
Agent,
|
|
18
|
+
Branch,
|
|
19
|
+
BranchAccess,
|
|
20
|
+
BranchAccessPermission,
|
|
21
|
+
BranchStatus,
|
|
22
|
+
ConfidenceChange,
|
|
23
|
+
ConflictPolicy,
|
|
24
|
+
DecisionTrace,
|
|
25
|
+
DiffEntry,
|
|
26
|
+
Digest,
|
|
27
|
+
DigestType,
|
|
28
|
+
ErrorEvent,
|
|
29
|
+
Event,
|
|
30
|
+
EventType,
|
|
31
|
+
ExternalContext,
|
|
32
|
+
GraphEdge,
|
|
33
|
+
GraphNeighborQuery,
|
|
34
|
+
LayerConfig,
|
|
35
|
+
MemoryEntry,
|
|
36
|
+
MemoryStateDiff,
|
|
37
|
+
MemoryStats,
|
|
38
|
+
MergeConflict,
|
|
39
|
+
MergeResult,
|
|
40
|
+
MergeStrategy,
|
|
41
|
+
PRReview,
|
|
42
|
+
PRReviewStatus,
|
|
43
|
+
PullRequest,
|
|
44
|
+
PullRequestStatus,
|
|
45
|
+
OutcomeRecord,
|
|
46
|
+
OutcomeType,
|
|
47
|
+
Provenance,
|
|
48
|
+
QueryEvent,
|
|
49
|
+
SearchQuery,
|
|
50
|
+
SemanticQuery,
|
|
51
|
+
Tag,
|
|
52
|
+
TraceEntry,
|
|
53
|
+
)
|
|
54
|
+
from amfs_core.outcome import OutcomeBackPropagator
|
|
55
|
+
|
|
56
|
+
__all__ = [
|
|
57
|
+
"AMFSConfig",
|
|
58
|
+
"AMFSError",
|
|
59
|
+
"AdapterABC",
|
|
60
|
+
"AdapterError",
|
|
61
|
+
"Agent",
|
|
62
|
+
"Branch",
|
|
63
|
+
"BranchAccess",
|
|
64
|
+
"BranchAccessPermission",
|
|
65
|
+
"BranchStatus",
|
|
66
|
+
"CausalTagger",
|
|
67
|
+
"ConfidenceChange",
|
|
68
|
+
"ConflictPolicy",
|
|
69
|
+
"CoWEngine",
|
|
70
|
+
"DecisionTrace",
|
|
71
|
+
"DiffEntry",
|
|
72
|
+
"Digest",
|
|
73
|
+
"DigestType",
|
|
74
|
+
"EmbedderABC",
|
|
75
|
+
"EntryNotFoundError",
|
|
76
|
+
"ErrorEvent",
|
|
77
|
+
"Event",
|
|
78
|
+
"EventType",
|
|
79
|
+
"ExternalContext",
|
|
80
|
+
"GraphEdge",
|
|
81
|
+
"GraphNeighborQuery",
|
|
82
|
+
"LayerConfig",
|
|
83
|
+
"LifecycleManager",
|
|
84
|
+
"LockTimeoutError",
|
|
85
|
+
"MemoryEntry",
|
|
86
|
+
"MemoryStateDiff",
|
|
87
|
+
"MemoryStats",
|
|
88
|
+
"MergeConflict",
|
|
89
|
+
"MergeResult",
|
|
90
|
+
"MergeStrategy",
|
|
91
|
+
"PRReview",
|
|
92
|
+
"PRReviewStatus",
|
|
93
|
+
"PullRequest",
|
|
94
|
+
"PullRequestStatus",
|
|
95
|
+
"OutcomeBackPropagator",
|
|
96
|
+
"OutcomeRecord",
|
|
97
|
+
"OutcomeType",
|
|
98
|
+
"Provenance",
|
|
99
|
+
"QueryEvent",
|
|
100
|
+
"ReadTracker",
|
|
101
|
+
"SearchQuery",
|
|
102
|
+
"SemanticQuery",
|
|
103
|
+
"StaleWriteError",
|
|
104
|
+
"Tag",
|
|
105
|
+
"TraceEntry",
|
|
106
|
+
"VersionConflictError",
|
|
107
|
+
"WatchHandle",
|
|
108
|
+
"cosine_similarity",
|
|
109
|
+
]
|
|
@@ -0,0 +1,519 @@
|
|
|
1
|
+
"""Abstract base class for AMFS storage adapters."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from abc import ABC, abstractmethod
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
from typing import Callable
|
|
8
|
+
|
|
9
|
+
from amfs_core.embedder import EmbedderABC, cosine_similarity
|
|
10
|
+
from amfs_core.models import (
|
|
11
|
+
Agent,
|
|
12
|
+
Branch,
|
|
13
|
+
BranchAccess,
|
|
14
|
+
DecisionTrace,
|
|
15
|
+
DiffEntry,
|
|
16
|
+
Event,
|
|
17
|
+
GraphEdge,
|
|
18
|
+
GraphNeighborQuery,
|
|
19
|
+
MemoryEntry,
|
|
20
|
+
MemoryStats,
|
|
21
|
+
MergeResult,
|
|
22
|
+
MergeStrategy,
|
|
23
|
+
OutcomeRecord,
|
|
24
|
+
PRReview,
|
|
25
|
+
PullRequest,
|
|
26
|
+
SearchQuery,
|
|
27
|
+
SemanticQuery,
|
|
28
|
+
Tag,
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class WatchHandle:
    """Cancellation token returned by adapter.watch().

    Call :meth:`cancel` to stop receiving callbacks. Cancelling is
    idempotent: the underlying cancel function runs at most once.
    """

    def __init__(self, cancel_fn: Callable[[], None]) -> None:
        # Callback that tears down the underlying subscription.
        self._cancel_fn = cancel_fn
        self._cancelled = False

    def cancel(self) -> None:
        """Stop watching. Safe to call repeatedly; only the first call acts."""
        if self._cancelled:
            return
        self._cancel_fn()
        self._cancelled = True

    @property
    def cancelled(self) -> bool:
        """True once :meth:`cancel` has been invoked."""
        return self._cancelled
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class AdapterABC(ABC):
    """Interface that every AMFS storage adapter must implement.

    Seven operations define the contract:
    - read: fetch the current version of a key
    - write: persist a new version of a key (CoW)
    - list: enumerate entries under an entity path
    - search: query entries with rich filters (confidence, agent, recency)
    - stats: aggregate statistics about memory state
    - watch: observe writes to an entity path in real time
    - commit_outcome: record an outcome and back-propagate to entries

    Only read/write/list/watch/commit_outcome are abstract; every other
    method below ships a safe default (no-op, empty result, scan over
    ``list()``, or ``NotImplementedError`` for Pro-only features) so a
    minimal adapter stays small.
    """

    @abstractmethod
    def read(
        self,
        entity_path: str,
        key: str,
        *,
        min_confidence: float = 0.0,
    ) -> MemoryEntry | None:
        """Read the current version of a key, or None if not found.

        If the entry's confidence is below *min_confidence*, return None.
        """

    @abstractmethod
    def write(self, entry: MemoryEntry) -> MemoryEntry:
        """Persist a memory entry. Returns the written entry (with final version)."""

    @abstractmethod
    def list(
        self,
        entity_path: str | None = None,
        *,
        include_superseded: bool = False,
    ) -> list[MemoryEntry]:
        """List current entries, optionally filtered to an entity path.

        If *include_superseded* is True, also include older versions.
        """

    @abstractmethod
    def watch(
        self,
        entity_path: str,
        callback: Callable[[MemoryEntry], None],
    ) -> WatchHandle:
        """Watch for writes to any key under *entity_path*.

        Returns a WatchHandle that can be cancelled.
        """

    @abstractmethod
    def commit_outcome(self, record: OutcomeRecord) -> list[MemoryEntry]:
        """Record an outcome and back-propagate confidence to causal entries.

        Returns the list of entries whose confidence was updated.
        """

    def list_outcomes(
        self,
        *,
        entity_path: str | None = None,
        since: datetime | None = None,
        limit: int = 1000,
    ) -> list[OutcomeRecord]:
        """Return historical outcome records.

        Used by the Pro ML layer for training ranking models and calibrating
        confidence multipliers. Default implementation returns an empty list;
        adapters that persist outcomes (e.g. Postgres) should override.
        """
        return []

    def read_at_version(
        self,
        entity_path: str,
        key: str,
        version: int,
    ) -> MemoryEntry | None:
        """Read a specific historical version of an entry.

        Returns the entry at exactly *version*, even if it has been
        superseded. Default implementation scans via ``list()`` with
        ``include_superseded=True``; adapters with indexed storage
        (e.g. Postgres) should override for O(1) lookup.
        """
        all_versions = self.list(entity_path, include_superseded=True)
        for entry in all_versions:
            if entry.key == key and entry.version == version:
                return entry
        return None

    def get_trace(self, trace_id: str) -> DecisionTrace | None:
        """Return a single trace by ID, or None if not found.

        Default implementation scans ``list_traces()``; adapters with
        indexed storage (e.g. Postgres) should override for O(1) lookup.
        """
        # NOTE: the default scan is capped at 10_000 traces — a trace
        # beyond that window will not be found without an override.
        for t in self.list_traces(limit=10_000):
            if t.id == trace_id:
                return t
        return None

    def save_trace(self, trace: DecisionTrace) -> DecisionTrace:
        """Persist a decision trace. Default is a no-op; adapters with
        persistent storage (e.g. Postgres) should override."""
        return trace

    def list_traces(
        self,
        *,
        entity_path: str | None = None,
        agent_id: str | None = None,
        outcome_type: str | None = None,
        limit: int = 100,
    ) -> list[DecisionTrace]:
        """Return persisted decision traces. Default returns an empty list."""
        return []

    def search(self, query: SearchQuery) -> list[MemoryEntry]:
        """Search entries with rich filters. Default: filter over list().

        Adapters may override with optimised implementations (e.g. SQL WHERE).
        """
        entries = self.list(query.entity_path)
        results: list[MemoryEntry] = []
        for entry in entries:
            # Each filter is skipped when its query field is unset (None).
            if entry.confidence < query.min_confidence:
                continue
            if query.max_confidence is not None and entry.confidence > query.max_confidence:
                continue
            if query.agent_id is not None and entry.provenance.agent_id != query.agent_id:
                continue
            if query.since is not None and entry.provenance.written_at < query.since:
                continue
            if query.pattern_ref is not None and query.pattern_ref not in entry.provenance.pattern_refs:
                continue
            results.append(entry)

        # Unknown sort_by values fall through with list() order preserved.
        if query.sort_by == "confidence":
            results.sort(key=lambda e: e.confidence, reverse=True)
        elif query.sort_by == "recency":
            results.sort(key=lambda e: e.provenance.written_at, reverse=True)
        elif query.sort_by == "version":
            results.sort(key=lambda e: e.version, reverse=True)

        return results[: query.limit]

    def stats(self) -> MemoryStats:
        """Compute aggregate statistics. Default: iterate over list().

        Adapters may override with optimised implementations (e.g. SQL aggregates).
        """
        entries = self.list()
        if not entries:
            return MemoryStats()

        # Past the guard, entries is non-empty, so entries[0] and the
        # confidence average below are safe.
        agents: dict[str, int] = {}
        entities: dict[str, int] = {}
        confidences: list[float] = []
        outcome_linked = 0
        oldest = entries[0].provenance.written_at
        newest = entries[0].provenance.written_at

        for entry in entries:
            aid = entry.provenance.agent_id
            agents[aid] = agents.get(aid, 0) + 1
            entities[entry.entity_path] = entities.get(entry.entity_path, 0) + 1
            confidences.append(entry.confidence)
            if entry.outcome_count > 0:
                outcome_linked += 1
            if entry.provenance.written_at < oldest:
                oldest = entry.provenance.written_at
            if entry.provenance.written_at > newest:
                newest = entry.provenance.written_at

        return MemoryStats(
            total_entries=len(entries),
            total_entities=len(entities),
            total_agents=len(agents),
            agents=agents,
            entities=entities,
            confidence_avg=sum(confidences) / len(confidences),
            confidence_min=min(confidences),
            confidence_max=max(confidences),
            outcome_linked_count=outcome_linked,
            oldest_entry_at=oldest,
            newest_entry_at=newest,
        )

    # ── Recall tracking ─────────────────────────────────────────────────

    def increment_recall_count(
        self,
        entity_path: str,
        key: str,
        *,
        branch: str = "main",
    ) -> None:
        """Increment the recall_count of the current entry version in place.

        This is a mutable metadata update that does NOT create a new CoW
        version. Adapters that do not support in-place updates can safely
        leave the default no-op.
        """

    # ── Tiered memory ─────────────────────────────────────────────────

    def update_tiers(
        self,
        tier_assignments: dict[str, int],
        scores: dict[str, float],
        *,
        branch: str = "main",
    ) -> int:
        """Batch-update tier and priority_score for entries in place.

        *tier_assignments* maps ``entry_key`` -> tier (1/2/3).
        *scores* maps ``entry_key`` -> priority_score.
        Returns count of entries updated. Default no-op returns 0.
        """
        return 0

    # ── Agent registration ─────────────────────────────────────────────

    def ensure_agent(self, agent_id: str, namespace: str = "default") -> Agent:
        """Auto-register an agent on first write. Returns the Agent record.

        Default is a no-op returning a stub. Postgres adapter implements
        INSERT ... ON CONFLICT DO UPDATE.
        """
        return Agent(agent_id=agent_id, namespace=namespace)

    def get_agent(self, agent_id: str, namespace: str = "default") -> Agent | None:
        """Return a registered agent or None. Default: always None."""
        return None

    def list_agents(self, namespace: str = "default") -> list[Agent]:
        """Return all registered agents in a namespace. Default: empty list."""
        return []

    # ── Event log / timeline ───────────────────────────────────────────

    def log_event(self, event: Event) -> Event:
        """Persist a timeline event. Default is a no-op."""
        return event

    def list_events(
        self,
        agent_id: str,
        namespace: str = "default",
        *,
        branch: str | None = None,
        event_type: str | None = None,
        since: datetime | None = None,
        limit: int = 100,
    ) -> list[Event]:
        """Return events on an agent's timeline. Default returns empty list."""
        return []

    # ── Branch management (Pro) ───────────────────────────────────────

    def create_branch(self, branch: Branch) -> Branch:
        """Create a new memory branch. Default raises NotImplementedError."""
        raise NotImplementedError("Branching requires the Postgres adapter")

    def get_branch(self, name: str, namespace: str = "default") -> Branch | None:
        """Return a branch by name, or None. Default: always None."""
        return None

    def list_branches(
        self, namespace: str = "default", *, status: str | None = None
    ) -> list[Branch]:
        """List all branches in a namespace. Default: empty list."""
        return []

    def close_branch(self, name: str, namespace: str = "default") -> Branch:
        """Mark a branch as closed. Default raises NotImplementedError."""
        raise NotImplementedError("Branching requires the Postgres adapter")

    def diff_branch(self, name: str, namespace: str = "default") -> list[DiffEntry]:
        """Diff a branch against its parent. Default: empty list."""
        return []

    def merge_branch(
        self,
        name: str,
        namespace: str = "default",
        *,
        strategy: MergeStrategy = MergeStrategy.FAST_FORWARD,
        resolve_conflicts: dict[str, str] | None = None,
    ) -> MergeResult:
        """Merge a branch into its parent. Default raises NotImplementedError."""
        raise NotImplementedError("Branching requires the Postgres adapter")

    # ── Branch access control (Pro) ───────────────────────────────────

    def grant_branch_access(self, access: BranchAccess) -> BranchAccess:
        """Grant external access to a branch. Default raises NotImplementedError."""
        raise NotImplementedError("Branch access requires the Postgres adapter")

    def revoke_branch_access(
        self, branch_name: str, grantee_type: str, grantee_id: str,
        namespace: str = "default",
    ) -> None:
        """Revoke access from a branch. Default raises NotImplementedError."""
        raise NotImplementedError("Branch access requires the Postgres adapter")

    def list_branch_access(
        self, branch_name: str, namespace: str = "default"
    ) -> list[BranchAccess]:
        """List access grants for a branch. Default: empty list."""
        return []

    def check_branch_access(
        self, branch_name: str, api_key_id: str, namespace: str = "default"
    ) -> str | None:
        """Check if an API key has access to a branch. Returns permission or None."""
        return None

    # ── Tags / Snapshots (Pro) ────────────────────────────────────────

    def create_tag(self, tag: Tag) -> Tag:
        """Create a named point-in-time tag. Default raises NotImplementedError."""
        raise NotImplementedError("Tags require the Postgres adapter")

    def get_tag(self, name: str, namespace: str = "default") -> Tag | None:
        """Return a tag by name, or None. Default: always None."""
        return None

    def list_tags(
        self, namespace: str = "default", *, branch: str | None = None
    ) -> list[Tag]:
        """List tags, optionally filtered by branch. Default: empty list."""
        return []

    def delete_tag(self, name: str, namespace: str = "default") -> None:
        """Delete a tag. Default raises NotImplementedError."""
        raise NotImplementedError("Tags require the Postgres adapter")

    # ── Pull Requests (Pro) ───────────────────────────────────────────

    def create_pull_request(self, pr: PullRequest) -> PullRequest:
        """Create a pull request for branch merge review."""
        raise NotImplementedError("PRs require the Postgres adapter")

    def get_pull_request(self, pr_id: str, namespace: str = "default") -> PullRequest | None:
        """Return a pull request by ID, or None. Default: always None."""
        return None

    def list_pull_requests(
        self, namespace: str = "default", *, status: str | None = None
    ) -> list[PullRequest]:
        """List pull requests, optionally filtered by status. Default: empty list."""
        return []

    def update_pull_request_status(
        self, pr_id: str, status: str, *, by: str = "", namespace: str = "default"
    ) -> PullRequest:
        """Transition a PR to a new status. Default raises NotImplementedError."""
        raise NotImplementedError("PRs require the Postgres adapter")

    def add_pr_review(self, review: PRReview) -> PRReview:
        """Attach a review to a PR. Default raises NotImplementedError."""
        raise NotImplementedError("PRs require the Postgres adapter")

    def list_pr_reviews(self, pr_id: str, namespace: str = "default") -> list[PRReview]:
        """List reviews for a PR. Default: empty list."""
        return []

    # ── Rollback (Pro) ────────────────────────────────────────────────

    def rollback_to_timestamp(
        self,
        agent_id: str,
        branch: str,
        timestamp: datetime,
        namespace: str = "default",
    ) -> int:
        """Rollback memory to a point in time. Returns count of restored entries."""
        raise NotImplementedError("Rollback requires the Postgres adapter")

    # ── Fork (Pro) ────────────────────────────────────────────────────

    def fork_agent(
        self,
        source_agent_id: str,
        target_agent_id: str,
        *,
        namespace: str = "default",
        branch: str = "main",
    ) -> int:
        """Copy all entries from source_agent's branch into target_agent's main.

        Returns the number of entries copied.
        """
        raise NotImplementedError("Fork requires the Postgres adapter")

    # ── Knowledge graph ─────────────────────────────────────────────

    def upsert_graph_edge(
        self,
        edge: GraphEdge,
        *,
        namespace: str = "default",
        branch: str = "main",
    ) -> GraphEdge:
        """Insert or update a knowledge graph edge.

        On conflict (same source, relation, target within namespace+branch)
        bumps evidence_count, updates last_seen and takes max confidence.
        Default is a no-op returning the edge unchanged.
        """
        return edge

    def graph_neighbors(
        self,
        query: GraphNeighborQuery,
        *,
        namespace: str = "default",
        branch: str = "main",
    ) -> list[GraphEdge]:
        """Traverse the knowledge graph from an entity.

        Supports multi-hop via *query.depth*. Default returns empty list.
        """
        return []

    def list_graph_edges(
        self,
        *,
        entity: str | None = None,
        relation: str | None = None,
        min_confidence: float = 0.0,
        namespace: str = "default",
        branch: str = "main",
        limit: int = 500,
    ) -> list[GraphEdge]:
        """List graph edges with optional filters. Default returns empty list."""
        return []

    def graph_stats(
        self,
        *,
        namespace: str = "default",
        branch: str = "main",
    ) -> dict:
        """Return aggregate graph statistics. Default returns empty dict."""
        return {}

    def semantic_search(
        self, query: SemanticQuery, embedder: EmbedderABC
    ) -> list[tuple[MemoryEntry, float]]:
        """Search entries by semantic similarity. Default: brute-force cosine.

        Returns (entry, similarity) tuples sorted by similarity descending.
        Adapters may override with vector-index implementations (e.g. pgvector).
        """
        query_vec = embedder.embed(query.text)
        entries = self.list(query.entity_path)

        scored: list[tuple[MemoryEntry, float]] = []
        for entry in entries:
            if entry.confidence < query.min_confidence:
                continue
            # Entries without a stored embedding cannot be scored; skip them.
            if entry.embedding is None:
                continue
            sim = cosine_similarity(query_vec, entry.embedding)
            if sim >= query.min_similarity:
                scored.append((entry, sim))

        scored.sort(key=lambda x: x[1], reverse=True)
        return scored[: query.limit]