flowscript-agents 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flowscript_agents/__init__.py +21 -0
- flowscript_agents/crewai.py +409 -0
- flowscript_agents/google_adk.py +258 -0
- flowscript_agents/langgraph.py +280 -0
- flowscript_agents/memory.py +504 -0
- flowscript_agents/openai_agents.py +170 -0
- flowscript_agents-0.1.0.dist-info/METADATA +285 -0
- flowscript_agents-0.1.0.dist-info/RECORD +9 -0
- flowscript_agents-0.1.0.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
"""
|
|
2
|
+
FlowScript Agents — Decision intelligence memory for AI agent frameworks.
|
|
3
|
+
|
|
4
|
+
Drop-in memory provider for LangGraph, CrewAI, Google ADK, and OpenAI Agents SDK.
|
|
5
|
+
Replaces vector retrieval with queryable reasoning: why(), tensions(), blocked(),
|
|
6
|
+
alternatives(), whatIf().
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
from flowscript_agents import Memory
|
|
10
|
+
|
|
11
|
+
mem = Memory()
|
|
12
|
+
q = mem.question("Which database?")
|
|
13
|
+
mem.alternative(q, "Redis").decide(rationale="speed critical")
|
|
14
|
+
mem.alternative(q, "SQLite").block(reason="no concurrent writes")
|
|
15
|
+
print(mem.query.tensions())
|
|
16
|
+
"""
|
|
17
|
+
|
|
18
|
+
from .memory import Memory, NodeRef
|
|
19
|
+
|
|
20
|
+
__version__ = "0.1.0"
|
|
21
|
+
__all__ = ["Memory", "NodeRef"]
|
|
@@ -0,0 +1,409 @@
|
|
|
1
|
+
"""
|
|
2
|
+
FlowScript CrewAI Integration.
|
|
3
|
+
|
|
4
|
+
Implements CrewAI's StorageBackend protocol, making FlowScript memory
|
|
5
|
+
available as a drop-in storage backend for CrewAI agents.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
from flowscript_agents.crewai import FlowScriptStorage
|
|
9
|
+
|
|
10
|
+
storage = FlowScriptStorage("./agent-memory.json")
|
|
11
|
+
# Use with CrewAI Memory:
|
|
12
|
+
# memory = Memory(storage=storage, llm="...", embedder={...})
|
|
13
|
+
# crew = Crew(agents=[...], tasks=[...], memory=memory)
|
|
14
|
+
|
|
15
|
+
Note: CrewAI requires Python <3.14. This module is importable on 3.14+
|
|
16
|
+
but CrewAI itself won't install.
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import json
|
|
22
|
+
import uuid
|
|
23
|
+
from datetime import datetime, timezone
|
|
24
|
+
from typing import Any, Optional, Sequence
|
|
25
|
+
|
|
26
|
+
from .memory import Memory
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class FlowScriptStorage:
    """CrewAI StorageBackend backed by FlowScript reasoning memory.

    Implements the CrewAI StorageBackend protocol (duck-typed, no inheritance
    needed). Each MemoryRecord maps to a FlowScript node; record fields are
    mirrored into the node's ``ext`` dict under stable keys so the in-memory
    index can be rebuilt from a saved graph.

    Access FlowScript's semantic queries via the .memory property:
        storage.memory.query.tensions()
        storage.memory.query.blocked()
    """

    def __init__(self, file_path: str | None = None) -> None:
        """Create the backend; loads an existing graph when *file_path* is given."""
        if file_path:
            self._memory = Memory.load_or_create(file_path)
        else:
            self._memory = Memory()
        # Index: record_id → _RecordEntry (node_id + cached record fields)
        self._records: dict[str, _RecordEntry] = {}
        self._rebuild_index()

    @property
    def memory(self) -> Memory:
        """The underlying FlowScript Memory graph."""
        return self._memory

    def _rebuild_index(self) -> None:
        """Repopulate the record index from nodes tagged with a CrewAI record id."""
        for ref in self._memory.nodes:
            node = ref.node
            if node.ext and "crewai_record_id" in node.ext:
                rec_id = node.ext["crewai_record_id"]
                self._records[rec_id] = _RecordEntry(
                    node_id=node.id,
                    record_id=rec_id,
                    content=node.content,
                    scope=node.ext.get("scope", "/"),
                    categories=node.ext.get("categories", []),
                    metadata=node.ext.get("metadata", {}),
                    importance=node.ext.get("importance", 0.5),
                    created_at=node.ext.get("created_at", node.provenance.timestamp),
                    last_accessed=node.ext.get("last_accessed", node.provenance.timestamp),
                    embedding=node.ext.get("embedding"),
                    source=node.ext.get("source"),
                    private=node.ext.get("private", False),
                )

    # -- Required StorageBackend methods --

    def save(self, records: list[Any]) -> None:
        """Save MemoryRecord objects.

        Records are duck-typed: every field is read via getattr with a default,
        so any MemoryRecord-shaped object works.
        """
        for record in records:
            rec_id = getattr(record, "id", str(uuid.uuid4()))
            content = getattr(record, "content", str(record))
            scope = getattr(record, "scope", "/")
            categories = getattr(record, "categories", [])
            metadata = getattr(record, "metadata", {})
            importance = getattr(record, "importance", 0.5)
            created_at = getattr(record, "created_at", datetime.now(timezone.utc))
            last_accessed = getattr(record, "last_accessed", datetime.now(timezone.utc))
            embedding = getattr(record, "embedding", None)
            source = getattr(record, "source", None)
            private = getattr(record, "private", False)

            ref = self._memory.thought(content)
            node = ref.node
            node.ext = node.ext or {}
            node.ext.update({
                "crewai_record_id": rec_id,
                "scope": scope,
                "categories": categories,
                "metadata": metadata,
                "importance": importance,
                "created_at": _dt_to_str(created_at),
                "last_accessed": _dt_to_str(last_accessed),
                "embedding": embedding,
                "source": source,
                "private": private,
            })

            self._records[rec_id] = _RecordEntry(
                node_id=ref.id,
                record_id=rec_id,
                content=content,
                scope=scope,
                categories=categories,
                metadata=metadata,
                importance=importance,
                created_at=_dt_to_str(created_at),
                last_accessed=_dt_to_str(last_accessed),
                embedding=embedding,
                source=source,
                private=private,
            )

    def search(
        self,
        query_embedding: list[float],
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        metadata_filter: dict[str, Any] | None = None,
        limit: int = 10,
        min_score: float = 0.0,
    ) -> list[tuple[Any, float]]:
        """Search for records. Returns (MemoryRecord-like, score) tuples.

        Uses cosine similarity when embeddings are available,
        falls back to a neutral 0.5 score otherwise.
        """
        results: list[tuple[_RecordEntry, float]] = []

        for entry in self._records.values():
            # Scope filter
            if scope_prefix and not entry.scope.startswith(scope_prefix):
                continue

            # Category filter: match if ANY requested category is present
            if categories:
                if not any(c in entry.categories for c in categories):
                    continue

            # Metadata filter: ALL key/value pairs must match exactly
            if metadata_filter:
                if not all(
                    entry.metadata.get(k) == v for k, v in metadata_filter.items()
                ):
                    continue

            # Score: cosine similarity if embeddings exist
            score = 0.5  # default when either side lacks an embedding
            if query_embedding and entry.embedding:
                score = _cosine_similarity(query_embedding, entry.embedding)

            if score >= min_score:
                results.append((entry, score))

        results.sort(key=lambda x: -x[1])
        return [(e.to_dict(), s) for e, s in results[:limit]]

    def delete(
        self,
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        record_ids: list[str] | None = None,
        older_than: Any | None = None,
        metadata_filter: dict[str, Any] | None = None,
    ) -> int:
        """Delete matching records. Returns count deleted.

        When record_ids is provided, only those specific records are deleted
        (other filters are ignored — ID match is exact). When record_ids is
        None, records are filtered by scope, categories, metadata, and
        older_than.
        """
        to_delete: list[str] = []

        if record_ids is not None:
            # Exact ID match — delete specified records regardless of other filters
            for rec_id in record_ids:
                if rec_id in self._records:
                    to_delete.append(rec_id)
        else:
            # Filter-based deletion
            has_filters = scope_prefix or categories or metadata_filter or older_than
            # BUG FIX: older_than was previously accepted but never applied, so
            # delete(older_than=...) matched (and deleted) EVERY record. Timestamps
            # are stored as ISO-8601 strings via _dt_to_str, which compare
            # chronologically as strings for a common UTC offset — TODO confirm
            # node.provenance.timestamp uses the same format.
            cutoff = _dt_to_str(older_than) if older_than is not None else None
            for rec_id, entry in self._records.items():
                if not has_filters:
                    # No filters = match all
                    to_delete.append(rec_id)
                    continue

                match = True
                if scope_prefix and not entry.scope.startswith(scope_prefix):
                    match = False
                if categories and not any(c in entry.categories for c in categories):
                    match = False
                if metadata_filter and not all(
                    entry.metadata.get(k) == v for k, v in metadata_filter.items()
                ):
                    match = False
                if cutoff is not None and not entry.created_at < cutoff:
                    match = False
                if match:
                    to_delete.append(rec_id)

        for rec_id in to_delete:
            entry = self._records.pop(rec_id)
            self._memory.remove_node(entry.node_id)

        return len(to_delete)

    def update(self, record: Any) -> None:
        """Update an existing record. Syncs changes to the FlowScript node."""
        rec_id = getattr(record, "id", None)
        if rec_id and rec_id in self._records:
            entry = self._records[rec_id]
            new_content = getattr(record, "content", entry.content)

            # If content changed, replace the node (node content is immutable here)
            if new_content != entry.content:
                self._memory.remove_node(entry.node_id)
                ref = self._memory.thought(new_content)
                node = ref.node
                node.ext = node.ext or {}
                entry.node_id = ref.id
                entry.content = new_content
            else:
                node = self._memory.get_node(entry.node_id)
                if node:
                    node.ext = node.ext or {}

            entry.scope = getattr(record, "scope", entry.scope)
            entry.categories = getattr(record, "categories", entry.categories)
            entry.metadata = getattr(record, "metadata", entry.metadata)
            entry.importance = getattr(record, "importance", entry.importance)
            entry.embedding = getattr(record, "embedding", entry.embedding)
            entry.last_accessed = _dt_to_str(datetime.now(timezone.utc))

            # Sync ext data to node.
            # BUG FIX: created_at/source/private were previously omitted here, so a
            # content-changing update produced a fresh node missing those fields and
            # they were lost on the next _rebuild_index().
            if node and node.ext is not None:
                node.ext.update({
                    "crewai_record_id": rec_id,
                    "scope": entry.scope,
                    "categories": entry.categories,
                    "metadata": entry.metadata,
                    "importance": entry.importance,
                    "created_at": entry.created_at,
                    "last_accessed": entry.last_accessed,
                    "embedding": entry.embedding,
                    "source": entry.source,
                    "private": entry.private,
                })

    def get_record(self, record_id: str) -> Any | None:
        """Get a record by ID, as a MemoryRecord-shaped dict (None if absent)."""
        entry = self._records.get(record_id)
        if entry:
            return entry.to_dict()
        return None

    def list_records(
        self,
        scope_prefix: str | None = None,
        limit: int = 200,
        offset: int = 0,
    ) -> list[Any]:
        """List records, optionally filtered by scope, with offset/limit paging."""
        results = []
        for entry in self._records.values():
            if scope_prefix and not entry.scope.startswith(scope_prefix):
                continue
            results.append(entry.to_dict())
        return results[offset : offset + limit]

    def get_scope_info(self, scope: str) -> dict[str, Any]:
        """Get summary info (counts, categories, time bounds, children) for a scope."""
        records = [e for e in self._records.values() if e.scope.startswith(scope)]
        cats: set[str] = set()
        for r in records:
            cats.update(r.categories)

        return {
            "path": scope,
            "record_count": len(records),
            "categories": sorted(cats),
            "oldest_record": min((r.created_at for r in records), default=None),
            "newest_record": max((r.created_at for r in records), default=None),
            "child_scopes": sorted(set(
                r.scope for r in records if r.scope != scope and r.scope.startswith(scope)
            )),
        }

    def list_scopes(self, parent: str = "/") -> list[str]:
        """List child scopes under parent (prefix match, parent itself excluded)."""
        scopes: set[str] = set()
        for entry in self._records.values():
            if entry.scope.startswith(parent) and entry.scope != parent:
                scopes.add(entry.scope)
        return sorted(scopes)

    def list_categories(self, scope_prefix: str | None = None) -> dict[str, int]:
        """List categories with record counts, optionally scoped."""
        counts: dict[str, int] = {}
        for entry in self._records.values():
            if scope_prefix and not entry.scope.startswith(scope_prefix):
                continue
            for cat in entry.categories:
                counts[cat] = counts.get(cat, 0) + 1
        return counts

    def count(self, scope_prefix: str | None = None) -> int:
        """Count records, optionally restricted to a scope prefix."""
        if scope_prefix is None:
            return len(self._records)
        return sum(
            1 for e in self._records.values() if e.scope.startswith(scope_prefix)
        )

    def reset(self, scope_prefix: str | None = None) -> None:
        """Delete all records (or scoped subset). Removes nodes from the graph."""
        if scope_prefix is None:
            for entry in self._records.values():
                self._memory.remove_node(entry.node_id)
            self._records.clear()
        else:
            to_delete = [
                k for k, v in self._records.items()
                if v.scope.startswith(scope_prefix)
            ]
            for k in to_delete:
                entry = self._records.pop(k)
                self._memory.remove_node(entry.node_id)

    # -- Async variants (delegate to sync; storage is purely in-process) --

    async def asave(self, records: list[Any]) -> None:
        self.save(records)

    async def asearch(
        self,
        query_embedding: list[float],
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        metadata_filter: dict[str, Any] | None = None,
        limit: int = 10,
        min_score: float = 0.0,
    ) -> list[tuple[Any, float]]:
        return self.search(
            query_embedding, scope_prefix, categories, metadata_filter, limit, min_score
        )

    async def adelete(
        self,
        scope_prefix: str | None = None,
        categories: list[str] | None = None,
        record_ids: list[str] | None = None,
        older_than: Any | None = None,
        metadata_filter: dict[str, Any] | None = None,
    ) -> int:
        return self.delete(scope_prefix, categories, record_ids, older_than, metadata_filter)

    def save_to_disk(self) -> None:
        """Persist the FlowScript graph to disk."""
        self._memory.save()
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
class _RecordEntry:
    """Internal record storage: one CrewAI record mirrored onto a graph node."""

    __slots__ = (
        "node_id", "record_id", "content", "scope", "categories",
        "metadata", "importance", "created_at", "last_accessed",
        "embedding", "source", "private",
    )

    def __init__(self, **kwargs: Any) -> None:
        # Assign exactly the fields the caller provided (all slots expected).
        for name, value in kwargs.items():
            setattr(self, name, value)

    def to_dict(self) -> dict[str, Any]:
        """Serialize as a MemoryRecord-shaped dict; node_id stays internal."""
        payload: dict[str, Any] = {"id": self.record_id}
        # Slots after node_id/record_id are exactly the public record fields.
        for field in self.__slots__[2:]:
            payload[field] = getattr(self, field)
        return payload
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
def _dt_to_str(dt: Any) -> str:
    """Render a timestamp as a string; datetime values become ISO-8601."""
    return dt.isoformat() if isinstance(dt, datetime) else str(dt)
|
|
398
|
+
|
|
399
|
+
|
|
400
|
+
def _cosine_similarity(a: list[float], b: list[float]) -> float:
    """Cosine similarity of two vectors.

    Returns 0.0 for mismatched lengths or a zero-norm vector instead of raising.
    """
    if len(a) != len(b):
        return 0.0
    dot = sum(x * y for x, y in zip(a, b))
    norm_sq_a = sum(x * x for x in a)
    norm_sq_b = sum(y * y for y in b)
    if norm_sq_a == 0 or norm_sq_b == 0:
        return 0.0
    return dot / ((norm_sq_a ** 0.5) * (norm_sq_b ** 0.5))
|
|
@@ -0,0 +1,258 @@
|
|
|
1
|
+
"""
|
|
2
|
+
FlowScript Google ADK Integration.
|
|
3
|
+
|
|
4
|
+
Implements Google ADK's BaseMemoryService interface, making FlowScript memory
|
|
5
|
+
available as a memory service for ADK agents.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
from flowscript_agents.google_adk import FlowScriptMemoryService
|
|
9
|
+
|
|
10
|
+
memory_service = FlowScriptMemoryService("./agent-memory.json")
|
|
11
|
+
# Use with ADK Runner:
|
|
12
|
+
# runner = Runner(agent=agent, memory_service=memory_service, ...)
|
|
13
|
+
|
|
14
|
+
Note: Requires google-adk package: pip install flowscript-agents[google-adk]
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
from __future__ import annotations
|
|
18
|
+
|
|
19
|
+
import json
|
|
20
|
+
import logging
|
|
21
|
+
from dataclasses import asdict
|
|
22
|
+
from datetime import datetime, timezone
|
|
23
|
+
from typing import Any, Mapping, Optional, Sequence
|
|
24
|
+
|
|
25
|
+
from .memory import Memory
|
|
26
|
+
|
|
27
|
+
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class FlowScriptMemoryService:
    """Google ADK BaseMemoryService backed by FlowScript reasoning memory.

    Implements the two required ADK methods:
      - add_session_to_memory(session) — extracts reasoning from session events
      - search_memory(app_name, user_id, query) — searches FlowScript graph

    Access FlowScript queries via .memory property:
        service.memory.query.tensions()
        service.memory.query.blocked()
    """

    def __init__(self, file_path: str | None = None) -> None:
        """Create the service; a file_path enables load-on-start and save-on-write."""
        if file_path:
            self._memory = Memory.load_or_create(file_path)
        else:
            self._memory = Memory()
        self._file_path = file_path

    @property
    def memory(self) -> Memory:
        """The underlying FlowScript Memory graph."""
        return self._memory

    async def add_session_to_memory(self, session: Any) -> None:
        """Extract reasoning from an ADK session and add to memory.

        Iterates session events, extracts content from user and model turns,
        and adds them as FlowScript nodes with temporal relationships.
        """
        app_name = getattr(session, "app_name", "unknown")
        user_id = getattr(session, "user_id", "unknown")
        session_id = getattr(session, "id", "unknown")

        events = getattr(session, "events", []) or []
        self._ingest_events(events, app_name, user_id, session_id, None)

        if self._file_path:
            self._memory.save()

    async def search_memory(
        self,
        *,
        app_name: str,
        user_id: str,
        query: str,
    ) -> dict[str, Any]:
        """Search FlowScript memory for relevant content.

        Returns a dict matching ADK's SearchMemoryResponse shape:
            {"memories": [{"content": ..., "id": ..., "author": ..., "timestamp": ...}, ...]}
        """
        # Search by content match
        matches = self._memory.find_nodes(query)

        memories = []
        for ref in matches[:10]:  # limit results
            node = ref.node
            ext = node.ext or {}

            # Filter by app_name/user_id if stored (skip filter for nodes without ADK metadata)
            if ext.get("adk_app") and ext["adk_app"] != app_name:
                continue
            if ext.get("adk_user") and ext["adk_user"] != user_id:
                continue

            memories.append({
                "content": _make_content(node.content),
                "id": node.id,
                "author": ext.get("adk_author", "memory"),
                "timestamp": node.provenance.timestamp,
                "custom_metadata": {
                    "node_type": node.type.value,
                    "source": "flowscript",
                },
            })

        # Add FlowScript query insights when the query wording suggests them
        query_lower = query.lower()
        if any(kw in query_lower for kw in ["why", "reason", "cause"]):
            # Surface causal chain insights
            for ref in matches[:3]:
                try:
                    result = self._memory.query.why(ref.id)
                    chain = getattr(result, "causal_chain", [])
                    if chain:
                        chain_text = " → ".join(
                            getattr(n, "content", str(n)) for n in chain
                        )
                        memories.append({
                            "content": _make_content(f"Causal chain: {chain_text}"),
                            "id": f"why-{ref.id[:8]}",
                            "author": "flowscript-query",
                            "timestamp": datetime.now(timezone.utc).isoformat(),
                        })
                except Exception:
                    logger.debug("FlowScript query failed during search enrichment", exc_info=True)

        if any(kw in query_lower for kw in ["tension", "tradeoff", "trade-off"]):
            try:
                tensions = self._memory.query.tensions()
                if tensions.metadata.get("total_tensions", 0) > 0:
                    memories.append({
                        "content": _make_content(
                            f"Active tensions: {json.dumps(asdict(tensions), default=str)}"
                        ),
                        "id": "tensions-summary",
                        "author": "flowscript-query",
                        "timestamp": datetime.now(timezone.utc).isoformat(),
                    })
            except Exception:
                # BUG FIX: was a silent `pass` — inconsistent with the handler above
                # and it hid real query failures. Enrichment stays best-effort.
                logger.debug("FlowScript tensions query failed during search enrichment", exc_info=True)

        if any(kw in query_lower for kw in ["blocked", "stuck", "waiting"]):
            try:
                blocked = self._memory.query.blocked()
                if blocked.blockers:
                    memories.append({
                        "content": _make_content(
                            f"Blocked items: {json.dumps(asdict(blocked), default=str)}"
                        ),
                        "id": "blocked-summary",
                        "author": "flowscript-query",
                        "timestamp": datetime.now(timezone.utc).isoformat(),
                    })
            except Exception:
                # BUG FIX: same silent-swallow as above, now logged.
                logger.debug("FlowScript blocked query failed during search enrichment", exc_info=True)

        return {"memories": memories}

    # -- Optional methods --

    async def add_events_to_memory(
        self,
        *,
        app_name: str,
        user_id: str,
        events: Sequence[Any],
        session_id: str | None = None,
        custom_metadata: Mapping[str, object] | None = None,
    ) -> None:
        """Incremental event addition."""
        self._ingest_events(events, app_name, user_id, session_id or "unknown", custom_metadata)
        if self._file_path:
            self._memory.save()

    def _ingest_events(
        self,
        events: Sequence[Any],
        app_name: str,
        user_id: str,
        session_id: str,
        custom_metadata: Mapping[str, object] | None,
    ) -> None:
        """Shared ingestion loop: one node per textual event, chained temporally.

        Previously duplicated between add_session_to_memory and
        add_events_to_memory; factored out so the two stay in sync.
        """
        prev_ref = None
        for event in events:
            content = _extract_event_content(event)
            if not content:
                continue

            author = getattr(event, "author", "unknown")
            ref = self._memory.thought(content)

            # Store ADK metadata on the node so search_memory can filter by it
            node = ref.node
            node.ext = node.ext or {}
            node.ext.update({
                "adk_app": app_name,
                "adk_user": user_id,
                "adk_session": session_id,
                "adk_author": author,
            })
            if custom_metadata:
                node.ext["custom_metadata"] = dict(custom_metadata)

            # Create temporal chain between sequential events
            if prev_ref:
                prev_ref.then(ref)
            prev_ref = ref

    def save(self) -> None:
        """Persist the FlowScript graph to disk."""
        self._memory.save()
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
def _extract_event_content(event: Any) -> str | None:
    """Pull plain text out of an ADK event, or None when there is none."""
    content = getattr(event, "content", None)
    if content is None:
        return None

    # ADK Content objects carry a .parts list of text fragments
    parts = getattr(content, "parts", None)
    if parts:
        fragments = [t for t in (getattr(p, "text", None) for p in parts) if t]
        return " ".join(fragments) if fragments else None

    # Fallback: the content may itself be a plain string
    return content if isinstance(content, str) else None
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
def _make_content(text: str) -> dict[str, Any]:
|
|
250
|
+
"""Create ADK-compatible Content structure.
|
|
251
|
+
|
|
252
|
+
Returns a dict that can be used to construct google.genai.types.Content.
|
|
253
|
+
When google-adk is installed, callers can wrap this with types.Content().
|
|
254
|
+
"""
|
|
255
|
+
return {
|
|
256
|
+
"parts": [{"text": text}],
|
|
257
|
+
"role": "model",
|
|
258
|
+
}
|