@susu-eng/gralkor 26.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +32 -0
- package/README.md +429 -0
- package/config.yaml +16 -0
- package/dist/cli/bin.d.ts +3 -0
- package/dist/cli/bin.d.ts.map +1 -0
- package/dist/cli/bin.js +89 -0
- package/dist/cli/bin.js.map +1 -0
- package/dist/cli/commands/check.d.ts +2 -0
- package/dist/cli/commands/check.d.ts.map +1 -0
- package/dist/cli/commands/check.js +118 -0
- package/dist/cli/commands/check.js.map +1 -0
- package/dist/cli/commands/config.d.ts +3 -0
- package/dist/cli/commands/config.d.ts.map +1 -0
- package/dist/cli/commands/config.js +24 -0
- package/dist/cli/commands/config.js.map +1 -0
- package/dist/cli/commands/install.d.ts +8 -0
- package/dist/cli/commands/install.d.ts.map +1 -0
- package/dist/cli/commands/install.js +105 -0
- package/dist/cli/commands/install.js.map +1 -0
- package/dist/cli/commands/status.d.ts +2 -0
- package/dist/cli/commands/status.d.ts.map +1 -0
- package/dist/cli/commands/status.js +68 -0
- package/dist/cli/commands/status.js.map +1 -0
- package/dist/cli/lib/config.d.ts +7 -0
- package/dist/cli/lib/config.d.ts.map +1 -0
- package/dist/cli/lib/config.js +38 -0
- package/dist/cli/lib/config.js.map +1 -0
- package/dist/cli/lib/openclaw.d.ts +21 -0
- package/dist/cli/lib/openclaw.d.ts.map +1 -0
- package/dist/cli/lib/openclaw.js +93 -0
- package/dist/cli/lib/openclaw.js.map +1 -0
- package/dist/cli/lib/output.d.ts +9 -0
- package/dist/cli/lib/output.d.ts.map +1 -0
- package/dist/cli/lib/output.js +36 -0
- package/dist/cli/lib/output.js.map +1 -0
- package/dist/cli/lib/version.d.ts +9 -0
- package/dist/cli/lib/version.d.ts.map +1 -0
- package/dist/cli/lib/version.js +51 -0
- package/dist/cli/lib/version.js.map +1 -0
- package/dist/client.d.ts +72 -0
- package/dist/client.d.ts.map +1 -0
- package/dist/client.js +85 -0
- package/dist/client.js.map +1 -0
- package/dist/config.d.ts +69 -0
- package/dist/config.d.ts.map +1 -0
- package/dist/config.js +163 -0
- package/dist/config.js.map +1 -0
- package/dist/hooks.d.ts +131 -0
- package/dist/hooks.d.ts.map +1 -0
- package/dist/hooks.js +458 -0
- package/dist/hooks.js.map +1 -0
- package/dist/index.d.ts +88 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +184 -0
- package/dist/index.js.map +1 -0
- package/dist/native-memory.d.ts +67 -0
- package/dist/native-memory.d.ts.map +1 -0
- package/dist/native-memory.js +79 -0
- package/dist/native-memory.js.map +1 -0
- package/dist/register.d.ts +10 -0
- package/dist/register.d.ts.map +1 -0
- package/dist/register.js +150 -0
- package/dist/register.js.map +1 -0
- package/dist/server-manager.d.ts +19 -0
- package/dist/server-manager.d.ts.map +1 -0
- package/dist/server-manager.js +238 -0
- package/dist/server-manager.js.map +1 -0
- package/dist/tools.d.ts +32 -0
- package/dist/tools.d.ts.map +1 -0
- package/dist/tools.js +56 -0
- package/dist/tools.js.map +1 -0
- package/dist/types.d.ts +48 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +8 -0
- package/dist/types.js.map +1 -0
- package/docker-compose.yml +34 -0
- package/openclaw.plugin.json +99 -0
- package/package.json +65 -0
- package/server/Dockerfile +7 -0
- package/server/main.py +763 -0
- package/server/pyproject.toml +19 -0
- package/server/requirements.txt +5 -0
- package/server/uv.lock +1162 -0
package/server/main.py
ADDED
|
@@ -0,0 +1,763 @@
|
|
|
1
|
+
"""Thin FastAPI server wrapping graphiti-core for the Gralkor plugin."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import logging
|
|
7
|
+
import os
|
|
8
|
+
import time
|
|
9
|
+
from contextlib import asynccontextmanager
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
from datetime import datetime, timezone
|
|
12
|
+
from typing import Any, Literal
|
|
13
|
+
|
|
14
|
+
import yaml
|
|
15
|
+
from fastapi import FastAPI, Response
|
|
16
|
+
from fastapi.responses import JSONResponse
|
|
17
|
+
from pydantic import BaseModel, Field, create_model
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
from graphiti_core import Graphiti
|
|
22
|
+
from graphiti_core.driver.falkordb_driver import FalkorDriver
|
|
23
|
+
from graphiti_core.edges import EntityEdge
|
|
24
|
+
from graphiti_core.nodes import EpisodicNode, EpisodeType, Node
|
|
25
|
+
from graphiti_core.llm_client import LLMConfig
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# ── Config ────────────────────────────────────────────────────
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _load_config() -> dict:
    """Load the server's YAML config.

    The path comes from the CONFIG_PATH env var (default: /app/config.yaml).
    Returns {} when the file is missing or parses to an empty document.
    """
    cfg_path = os.getenv("CONFIG_PATH", "/app/config.yaml")
    if not os.path.exists(cfg_path):
        return {}
    with open(cfg_path) as fh:
        loaded = yaml.safe_load(fh)
    return loaded or {}
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def _build_llm_client(cfg: dict):
    """Instantiate the LLM client named by cfg["llm"]["provider"] (default: gemini).

    Provider SDK modules are imported lazily inside each branch so only the
    selected provider's package needs to be installed. Falls back to OpenAI
    for any unrecognized provider (azure_openai works via base_url env vars).
    """
    llm_section = cfg.get("llm", {})
    provider = llm_section.get("provider", "gemini")
    model = llm_section.get("model")
    client_config = LLMConfig(model=model) if model else None

    if provider == "anthropic":
        from graphiti_core.llm_client.anthropic_client import AnthropicClient

        return AnthropicClient(config=client_config)

    if provider == "gemini":
        from graphiti_core.llm_client.gemini_client import GeminiClient

        return GeminiClient(config=client_config)

    if provider == "groq":
        from graphiti_core.llm_client.groq_client import GroqClient

        return GroqClient(config=client_config)

    # Default: openai (also covers azure_openai with base_url set via env)
    from graphiti_core.llm_client import OpenAIClient

    return OpenAIClient(config=client_config)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def _build_embedder(cfg: dict):
    """Instantiate the embedder named by cfg["embedder"]["provider"] (default: gemini).

    Imports are deferred to the chosen branch so unused SDKs are never loaded.
    Any provider other than "gemini" falls back to OpenAI embeddings.
    """
    embedder_section = cfg.get("embedder", {})
    provider = embedder_section.get("provider", "gemini")
    model = embedder_section.get("model")

    if provider == "gemini":
        from graphiti_core.embedder.gemini import GeminiEmbedder, GeminiEmbedderConfig

        gem_cfg = GeminiEmbedderConfig(embedding_model=model) if model else GeminiEmbedderConfig()
        return GeminiEmbedder(gem_cfg)

    from graphiti_core.embedder import OpenAIEmbedder, OpenAIEmbedderConfig

    oai_cfg = OpenAIEmbedderConfig(embedding_model=model) if model else OpenAIEmbedderConfig()
    return OpenAIEmbedder(oai_cfg)
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
# Maps ontology attribute type names (as written in config.yaml) to the
# Python types used for Pydantic field construction in _build_type_defs.
_TYPE_MAP: dict[str, type] = {
    "string": str,
    "int": int,
    "float": float,
    "bool": bool,
    "datetime": datetime,
}
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def _build_type_defs(
    defs: dict[str, Any],
) -> dict[str, type[BaseModel]]:
    """Build Pydantic models from ontology type definitions.

    Each entry in *defs* maps a type name to a definition dict with optional
    "description" and "attributes" keys. An attribute value may be:
      - a string: a ``str`` field whose description is that string
      - a list: a ``Literal`` (enum) over the listed values
      - a dict with "enum": a ``Literal`` over attr_val["enum"]
      - a dict with "type": a primitive looked up in _TYPE_MAP

    Returns:
        Mapping of type name -> dynamically created Pydantic model.

    Raises:
        ValueError: if a dict attribute names an unknown or missing primitive
            type (previously a bare KeyError with no context).
    """
    models: dict[str, type[BaseModel]] = {}
    for name, defn in defs.items():
        fields: dict[str, Any] = {}
        for attr_name, attr_val in (defn.get("attributes") or {}).items():
            if isinstance(attr_val, str):
                # Shorthand form: the string is the field's description.
                fields[attr_name] = (str, Field(description=attr_val))
            elif isinstance(attr_val, list):
                lit_type = Literal[tuple(attr_val)]  # type: ignore[valid-type]
                fields[attr_name] = (lit_type, Field())
            elif isinstance(attr_val, dict):
                if "enum" in attr_val:
                    lit_type = Literal[tuple(attr_val["enum"])]  # type: ignore[valid-type]
                    fields[attr_name] = (lit_type, Field(description=attr_val.get("description", "")))
                else:
                    type_name = attr_val.get("type")
                    if type_name not in _TYPE_MAP:
                        # Fail loudly with the offending attribute and the valid choices,
                        # instead of a bare KeyError at startup.
                        raise ValueError(
                            f"Unknown attribute type {type_name!r} for {name}.{attr_name}; "
                            f"expected one of {sorted(_TYPE_MAP)}"
                        )
                    py_type = _TYPE_MAP[type_name]
                    fields[attr_name] = (py_type, Field(description=attr_val.get("description", "")))
        model = create_model(name, **fields)
        model.__doc__ = defn.get("description", "")
        models[name] = model
    return models
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def _build_ontology(
|
|
115
|
+
cfg: dict,
|
|
116
|
+
) -> tuple[
|
|
117
|
+
dict[str, type[BaseModel]] | None,
|
|
118
|
+
dict[str, type[BaseModel]] | None,
|
|
119
|
+
dict[tuple[str, str], list[str]] | None,
|
|
120
|
+
list[str] | None,
|
|
121
|
+
]:
|
|
122
|
+
"""Build ontology from config. Returns (entity_types, edge_types, edge_type_map, excluded)."""
|
|
123
|
+
raw = cfg.get("ontology")
|
|
124
|
+
if not raw:
|
|
125
|
+
return None, None, None, None
|
|
126
|
+
|
|
127
|
+
entity_defs = raw.get("entities") or {}
|
|
128
|
+
edge_defs = raw.get("edges") or {}
|
|
129
|
+
edge_map_raw = raw.get("edgeMap") or {}
|
|
130
|
+
excluded_raw = raw.get("excludedEntityTypes")
|
|
131
|
+
|
|
132
|
+
entity_types = _build_type_defs(entity_defs) if entity_defs else None
|
|
133
|
+
edge_types = _build_type_defs(edge_defs) if edge_defs else None
|
|
134
|
+
|
|
135
|
+
edge_type_map: dict[tuple[str, str], list[str]] | None = None
|
|
136
|
+
if edge_map_raw:
|
|
137
|
+
edge_type_map = {}
|
|
138
|
+
for key, values in edge_map_raw.items():
|
|
139
|
+
parts = key.split(",")
|
|
140
|
+
edge_type_map[(parts[0], parts[1])] = values
|
|
141
|
+
|
|
142
|
+
excluded = list(excluded_raw) if excluded_raw else None
|
|
143
|
+
|
|
144
|
+
if not entity_types and not edge_types and not edge_type_map and not excluded:
|
|
145
|
+
return None, None, None, None
|
|
146
|
+
|
|
147
|
+
return entity_types, edge_types, edge_type_map, excluded
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def _log_falkordblite_diagnostics(error: Exception) -> None:
    """Log diagnostic info when FalkorDBLite fails to start.

    Best-effort only: prints the platform, the bundled redis-server binary and
    FalkorDB module paths from redislite, and the output of
    `<binary> --version`, `file <path>` and `ldd <path>` where those external
    tools exist. Every failure in the diagnostics themselves is swallowed so
    this never masks the original startup error.
    """
    # Imported locally: only needed on this (rare) failure path.
    import platform
    import subprocess

    print(f"[gralkor] FalkorDBLite startup failed: {error}", flush=True)
    print(f"[gralkor] Platform: {platform.platform()}, arch: {platform.machine()}", flush=True)
    try:
        from redislite import __redis_executable__, __falkordb_module__

        for label, path in [("redis-server", __redis_executable__), ("FalkorDB module", __falkordb_module__)]:
            if not path:
                print(f"[gralkor] {label}: not found", flush=True)
                continue
            print(f"[gralkor] {label}: {path}", flush=True)
            # --version only makes sense for the redis-server executable, not the .so module.
            for cmd in [[path, "--version"] if "redis" in label else [], ["file", path], ["ldd", path]]:
                if not cmd:
                    continue
                try:
                    r = subprocess.run(cmd, capture_output=True, text=True, timeout=5)
                    # Some tools (e.g. ldd on failure) write to stderr instead of stdout.
                    out = r.stdout.strip() or r.stderr.strip()
                    if out:
                        for line in out.split("\n"):
                            print(f"[gralkor] {line}", flush=True)
                except FileNotFoundError:
                    # Host lacks `file`/`ldd`; skip silently.
                    pass
                except Exception:
                    # Diagnostics must never raise.
                    pass
    except Exception as diag_err:
        print(f"[gralkor] Diagnostic collection failed: {diag_err}", flush=True)
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
# ── Graphiti singleton ────────────────────────────────────────
|
|
183
|
+
|
|
184
|
+
# Process-wide singletons, populated once by lifespan() at startup.
graphiti: Graphiti | None = None
# Ontology settings parsed from config.yaml via _build_ontology(); passed
# into every graphiti.add_episode() call.
ontology_entity_types: dict[str, type[BaseModel]] | None = None
ontology_edge_types: dict[str, type[BaseModel]] | None = None
ontology_edge_type_map: dict[tuple[str, str], list[str]] | None = None
ontology_excluded: list[str] | None = None
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
@asynccontextmanager
async def lifespan(_app: FastAPI):
    """FastAPI lifespan: build the Graphiti singleton before serving, close it after.

    Startup order: load config -> pick a FalkorDB driver (external TCP if
    FALKORDB_URI is set, embedded FalkorDBLite otherwise) -> construct
    Graphiti -> ensure indices -> configure logging -> parse ontology.
    """
    global graphiti, ontology_entity_types, ontology_edge_types, ontology_edge_type_map, ontology_excluded
    cfg = _load_config()

    falkordb_uri = os.getenv("FALKORDB_URI")

    if falkordb_uri:
        # Legacy Docker mode: external FalkorDB via TCP
        stripped = falkordb_uri.split("://", 1)[-1]
        if ":" in stripped:
            # rsplit keeps IPv6-ish hosts with embedded colons intact.
            host, port_str = stripped.rsplit(":", 1)
            port = int(port_str)
        else:
            host, port = stripped, 6379
        driver = FalkorDriver(host=host, port=port)
    else:
        # Default: embedded FalkorDBLite (no Docker needed)
        logging.getLogger("redislite").setLevel(logging.DEBUG)

        from redislite.async_falkordb_client import AsyncFalkorDB

        data_dir = os.getenv("FALKORDB_DATA_DIR", "./data/falkordb")
        os.makedirs(data_dir, exist_ok=True)
        db_path = os.path.join(data_dir, "gralkor.db")
        try:
            db = AsyncFalkorDB(db_path)
        except Exception as e:
            # Dump platform/binary diagnostics before propagating the failure.
            _log_falkordblite_diagnostics(e)
            raise
        driver = FalkorDriver(falkor_db=db)

    graphiti = Graphiti(
        graph_driver=driver,
        llm_client=_build_llm_client(cfg),
        embedder=_build_embedder(cfg),
    )
    # Only build indices on first boot; skip if they already exist.
    existing = await graphiti.driver.execute_query("CALL db.indexes()")
    if existing and existing[0]:
        print(f"[gralkor] indices already exist ({len(existing[0])} found), skipping build", flush=True)
    else:
        print("[gralkor] building indices and constraints...", flush=True)
        t0_idx = time.monotonic()
        await graphiti.build_indices_and_constraints()
        idx_ms = (time.monotonic() - t0_idx) * 1000
        print(f"[gralkor] indices ready — {idx_ms:.0f}ms", flush=True)

    # Configure logging level: DEBUG in test mode for full data visibility
    log_level = logging.DEBUG if cfg.get("test") else logging.INFO
    logger.setLevel(log_level)
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter("%(message)s"))
        logger.addHandler(handler)

    ontology_entity_types, ontology_edge_types, ontology_edge_type_map, ontology_excluded = _build_ontology(cfg)
    if ontology_entity_types or ontology_edge_types:
        entity_names = list(ontology_entity_types or {})
        edge_names = list(ontology_edge_types or {})
        print(f"[gralkor] ontology: entities={entity_names} edges={edge_names}", flush=True)

    # Server runs while suspended here; cleanup happens on shutdown.
    yield
    await graphiti.close()
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
# Application instance; lifespan() wires up Graphiti before the first request.
app = FastAPI(title="Gralkor Graphiti Server", lifespan=lifespan)
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
# ── Rate-limit passthrough ───────────────────────────────────
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
def _find_rate_limit_error(exc: Exception) -> Exception | None:
|
|
264
|
+
"""Walk exception chain to find an upstream rate-limit error."""
|
|
265
|
+
current: Exception | None = exc
|
|
266
|
+
seen: set[int] = set()
|
|
267
|
+
while current is not None and id(current) not in seen:
|
|
268
|
+
seen.add(id(current))
|
|
269
|
+
# Match openai.RateLimitError, anthropic.RateLimitError, etc.
|
|
270
|
+
if type(current).__name__ == "RateLimitError" or (
|
|
271
|
+
hasattr(current, "status_code") and getattr(current, "status_code", None) == 429
|
|
272
|
+
):
|
|
273
|
+
return current
|
|
274
|
+
current = current.__cause__ or current.__context__
|
|
275
|
+
return None
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
@app.middleware("http")
async def rate_limit_middleware(request, call_next):
    """Translate upstream provider rate-limit errors into HTTP 429 responses."""
    try:
        response = await call_next(request)
    except Exception as err:
        rate_limit = _find_rate_limit_error(err)
        if rate_limit is None:
            raise
        # First line only, capped at 200 chars, to keep the payload compact.
        detail = str(rate_limit).split("\n")[0][:200]
        return JSONResponse(status_code=429, content={"detail": detail})
    return response
|
|
288
|
+
|
|
289
|
+
|
|
290
|
+
# ── Idempotency store ────────────────────────────────────────
|
|
291
|
+
|
|
292
|
+
# In-memory store: idempotency_key -> (serialized_episode, monotonic_expiry)
# Lost on restart — idempotency is only guaranteed within one server process.
_idempotency_store: dict[str, tuple[dict[str, Any], float]] = {}
_IDEMPOTENCY_TTL = 300  # 5 minutes
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def _idempotency_check(key: str) -> dict[str, Any] | None:
    """Return the cached episode for *key*, or None when absent or expired.

    An expired entry is evicted on access so the key can be reused.
    """
    cached = _idempotency_store.get(key)
    if cached is None:
        return None
    result, expiry = cached
    if time.monotonic() < expiry:
        return result
    # Stale entry: drop it and report a miss.
    del _idempotency_store[key]
    return None
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
def _idempotency_store_result(key: str, result: dict[str, Any]) -> None:
    """Cache *result* under *key* for _IDEMPOTENCY_TTL seconds.

    Cleanup is lazy: once the store holds more than 100 entries, every
    expired entry is purged in one pass.
    """
    _idempotency_store[key] = (result, time.monotonic() + _IDEMPOTENCY_TTL)
    if len(_idempotency_store) <= 100:
        return
    now = time.monotonic()
    stale = [k for k, (_, expiry) in _idempotency_store.items() if expiry <= now]
    for stale_key in stale:
        del _idempotency_store[stale_key]
|
|
317
|
+
|
|
318
|
+
|
|
319
|
+
# ── Request / response models ────────────────────────────────
|
|
320
|
+
|
|
321
|
+
|
|
322
|
+
class AddEpisodeRequest(BaseModel):
    """Payload for POST /episodes: a raw text episode to ingest into the graph."""
    name: str
    episode_body: str
    source_description: str
    group_id: str
    reference_time: str | None = None  # ISO-8601; server defaults to now (UTC)
    source: str | None = None  # EpisodeType name; server defaults to "message"
    idempotency_key: str  # required; retries within the TTL return the cached result
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
class ContentBlock(BaseModel):
    """A content block within a conversation message.

    Supported types:
    - "text": Natural language content (user input or assistant response).
    - "thinking": Internal reasoning trace from the assistant.
    - "tool_use": Serialized tool call (tool name + input).
    - "tool_result": Truncated tool output.
    The server groups thinking, tool_use, and tool_result blocks for
    behaviour distillation before ingestion.
    """
    type: str  # one of the block types documented above
    text: str  # the block's textual payload
|
|
345
|
+
|
|
346
|
+
|
|
347
|
+
class ConversationMessage(BaseModel):
    """A single message in a conversation transcript.

    role: "user" for human input, "assistant" for agent output.
    content: Ordered list of content blocks. A message may contain
        multiple blocks (e.g. thinking followed by text).
    """
    role: str  # "user" or "assistant"; other roles are ignored by _format_transcript
    content: list[ContentBlock]
|
|
356
|
+
|
|
357
|
+
|
|
358
|
+
class IngestMessagesRequest(BaseModel):
    """Ingest a structured conversation for knowledge graph extraction.

    The server formats the transcript, distills thinking blocks into
    behaviour summaries, and creates an episode in the knowledge graph.
    """
    name: str
    source_description: str
    group_id: str
    messages: list[ConversationMessage]
    reference_time: str | None = None  # ISO-8601; server defaults to now (UTC)
    idempotency_key: str  # retries within the TTL return the cached episode
|
|
370
|
+
|
|
371
|
+
|
|
372
|
+
class SearchRequest(BaseModel):
    """Payload for POST /search: query facts across one or more group graphs."""
    query: str
    group_ids: list[str]
    num_results: int = 10  # maximum facts returned
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
class GroupIdRequest(BaseModel):
    """Generic payload for endpoints that operate on a single group graph."""
    group_id: str
|
|
380
|
+
|
|
381
|
+
|
|
382
|
+
# ── Serializers ───────────────────────────────────────────────
|
|
383
|
+
|
|
384
|
+
|
|
385
|
+
def _ts(dt: datetime | None) -> str | None:
|
|
386
|
+
return dt.isoformat() if dt else None
|
|
387
|
+
|
|
388
|
+
|
|
389
|
+
def _serialize_fact(edge: EntityEdge) -> dict[str, Any]:
    """Flatten an EntityEdge into the JSON shape returned by the API."""
    serialized: dict[str, Any] = {
        "uuid": edge.uuid,
        "name": edge.name,
        "fact": edge.fact,
        "group_id": edge.group_id,
    }
    # Temporal fields become ISO-8601 strings (or None).
    for ts_field in ("valid_at", "invalid_at", "expired_at", "created_at"):
        serialized[ts_field] = _ts(getattr(edge, ts_field))
    return serialized
|
|
400
|
+
|
|
401
|
+
|
|
402
|
+
def _serialize_episode(ep: EpisodicNode) -> dict[str, Any]:
    """Flatten an EpisodicNode into the JSON shape returned by the API."""
    return dict(
        uuid=ep.uuid,
        name=ep.name,
        content=ep.content,
        source_description=ep.source_description,
        group_id=ep.group_id,
        created_at=_ts(ep.created_at),
    )
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
# ── Transcript formatting & thinking distillation ─────────────
|
|
414
|
+
|
|
415
|
+
# Module-level logger; level and handler are configured in lifespan().
logger = logging.getLogger(__name__)
|
|
416
|
+
|
|
417
|
+
# System prompt for _distill_one: compresses one turn's thinking/tool-use
# into a 1-3 sentence first-person summary before graph ingestion.
_DISTILL_SYSTEM_PROMPT = (
    "You are a distillery for agentic thought and action. Given an AI agent's internal "
    "thinking and tool usage from a conversation turn, capture the reasoning and actions "
    "the agent took and contextualise them within the dialog. Write one to three sentences "
    "— no filler, maximum distillation. Focus on reasoning, decisions, actions taken "
    "(including which tools were used and why), and outcomes. "
    "IMPORTANT: When the agent retrieves information from memory (memory_search results, "
    "knowledge graph facts, etc.), do NOT repeat or summarize the retrieved content. "
    "Instead, note that memory was consulted and focus on what the agent concluded, "
    "decided, or did as a result. The retrieved facts are already stored — re-stating "
    "them creates redundancy. Capture the thinking, not the remembering. "
    "Write in first person, past tense. Output only the distilled text, nothing else."
)
|
|
430
|
+
|
|
431
|
+
|
|
432
|
+
async def _distill_one(llm_client: Any, thinking: str) -> str:
    """Distill a single turn's behaviour (thinking + tool use) into a summary."""
    from graphiti_core.prompts.models import Message

    prompt = [
        Message(role="system", content=_DISTILL_SYSTEM_PROMPT),
        Message(role="user", content=thinking),
    ]
    response = await llm_client.generate_response(prompt, max_tokens=300)
    return response.get("content", "").strip()
|
|
442
|
+
|
|
443
|
+
|
|
444
|
+
async def _distill_thinking(llm_client: Any, thinking_blocks: list[str]) -> list[str]:
    """Distill behaviour blocks concurrently into summaries, one per turn.

    Returns a list parallel to *thinking_blocks*; blank inputs and failed
    distillations yield empty strings so callers never see an exception.
    """

    async def _distill_or_blank(block_text: str) -> str:
        if not block_text.strip():
            return ""
        try:
            return await _distill_one(llm_client, block_text)
        except Exception as err:
            logger.warning("Behaviour distillation failed: %s", err)
            return ""

    tasks = [_distill_or_blank(text) for text in thinking_blocks]
    return list(await asyncio.gather(*tasks))
|
|
460
|
+
|
|
461
|
+
|
|
462
|
+
async def _format_transcript(
    msgs: list[ConversationMessage],
    llm_client: Any | None,
) -> str:
    """Format structured messages into a transcript, distilling behaviour into summaries.

    Each turn is a user message followed by assistant responses until the next
    user message. Behaviour blocks (thinking, tool_use, tool_result) are distilled
    into a single (behaviour: ...) line injected before the turn's assistant text.
    When llm_client is None, behaviour blocks are silently dropped.
    """

    @dataclass
    class Turn:
        # One user message plus all assistant output until the next user message.
        user_lines: list[str] = field(default_factory=list)
        behaviour: list[str] = field(default_factory=list)
        assistant_lines: list[str] = field(default_factory=list)

    # Parse messages into turns. turns[0] absorbs any assistant messages that
    # precede the first user message.
    turns: list[Turn] = [Turn()]
    for msg in msgs:
        if msg.role == "user":
            turns.append(Turn())
            for block in msg.content:
                if block.type == "text":
                    turns[-1].user_lines.append(block.text)
        elif msg.role == "assistant":
            for block in msg.content:
                if block.type in ("thinking", "tool_use", "tool_result"):
                    turns[-1].behaviour.append(block.text)
                elif block.type == "text":
                    turns[-1].assistant_lines.append(block.text)

    # Distill behaviour blocks (only for turns that have them); blocks within
    # a turn are joined with "---" separators into one distillation input.
    to_distill = [(i, "\n---\n".join(t.behaviour)) for i, t in enumerate(turns) if t.behaviour]
    summaries: dict[int, str] = {}
    if to_distill and llm_client:
        texts = [text for _, text in to_distill]
        sizes = [len(text) for text in texts]
        logger.info("[gralkor] behaviour distillation — groups:%d sizes:%s totalChars:%d", len(texts), sizes, sum(sizes))
        logger.debug("[gralkor] behaviour pre-distill:\n%s", "\n===\n".join(texts))
        results = await _distill_thinking(llm_client, texts)
        # Empty strings mean the distillation failed for that turn; skip them.
        for (i, _), result in zip(to_distill, results):
            if result:
                summaries[i] = result
        logger.info("[gralkor] behaviour distilled — %d/%d succeeded", len(summaries), len(texts))
        logger.debug("[gralkor] behaviour post-distill: %s", summaries)

    # Format transcript: user lines, then distilled behaviour, then assistant text.
    lines: list[str] = []
    for i, turn in enumerate(turns):
        for text in turn.user_lines:
            lines.append(f"User: {text}")
        if i in summaries:
            lines.append(f"Assistant: (behaviour: {summaries[i]})")
        for text in turn.assistant_lines:
            lines.append(f"Assistant: {text}")

    return "\n".join(lines)
|
|
520
|
+
|
|
521
|
+
|
|
522
|
+
# ── Endpoints ─────────────────────────────────────────────────
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
@app.get("/health")
async def health():
    """Health probe: always 200; includes graph connectivity and counts when available."""
    result: dict = {"status": "ok"}

    if graphiti is not None:
        try:
            # Cheap liveness + size check against the current named graph.
            node_result = await graphiti.driver.execute_query(
                "MATCH (n) RETURN count(n) AS node_count"
            )
            edge_result = await graphiti.driver.execute_query(
                "MATCH ()-[r]->() RETURN count(r) AS edge_count"
            )
            result["graph"] = {
                "connected": True,
                "node_count": node_result[0][0]["node_count"] if node_result and node_result[0] else 0,
                "edge_count": edge_result[0][0]["edge_count"] if edge_result and edge_result[0] else 0,
            }
        except Exception as e:
            # Report the failure instead of turning the probe into a 500.
            result["graph"] = {"connected": False, "error": str(e)}
    else:
        result["graph"] = {"connected": False, "error": "graphiti not initialized"}

    data_dir = os.getenv("FALKORDB_DATA_DIR", "")
    if data_dir:
        result["data_dir"] = data_dir

    return result
|
|
552
|
+
|
|
553
|
+
|
|
554
|
+
@app.post("/episodes")
async def add_episode(req: AddEpisodeRequest):
    """Ingest a raw text episode; idempotent per req.idempotency_key within the TTL."""
    # Replay protection: a repeated key returns the previously stored episode.
    cached = _idempotency_check(req.idempotency_key)
    if cached is not None:
        logger.info("[gralkor] add-episode idempotent hit — key:%s uuid:%s",
                    req.idempotency_key, cached.get("uuid"))
        return cached

    logger.info("[gralkor] add-episode — group:%s name:%s bodyChars:%d source:%s",
                req.group_id, req.name, len(req.episode_body), req.source or "message")
    logger.debug("[gralkor] add-episode body:\n%s", req.episode_body)
    # Missing reference_time defaults to "now" in UTC.
    ref_time = (
        datetime.fromisoformat(req.reference_time)
        if req.reference_time
        else datetime.now(timezone.utc)
    )
    episode_type = EpisodeType(req.source) if req.source else EpisodeType.message
    t0 = time.monotonic()
    result = await graphiti.add_episode(
        name=req.name,
        episode_body=req.episode_body,
        source_description=req.source_description,
        group_id=req.group_id,
        reference_time=ref_time,
        source=episode_type,
        # Ontology settings are module globals populated in lifespan().
        entity_types=ontology_entity_types,
        edge_types=ontology_edge_types,
        edge_type_map=ontology_edge_type_map,
        excluded_entity_types=ontology_excluded,
    )
    duration_ms = (time.monotonic() - t0) * 1000
    episode = result.episode
    logger.info("[gralkor] episode added — uuid:%s duration:%.0fms", episode.uuid, duration_ms)
    logger.debug("[gralkor] episode result: %s", _serialize_episode(episode))
    serialized = _serialize_episode(episode)
    # Cache only after a successful ingest so failures can be retried.
    _idempotency_store_result(req.idempotency_key, serialized)
    return serialized
|
|
591
|
+
|
|
592
|
+
|
|
593
|
+
@app.post("/ingest-messages")
async def ingest_messages(req: IngestMessagesRequest):
    """Format a structured conversation into a transcript episode and ingest it.

    Idempotent per req.idempotency_key within the TTL window.
    """
    cached = _idempotency_check(req.idempotency_key)
    if cached is not None:
        logger.info("[gralkor] ingest-messages idempotent hit — key:%s uuid:%s",
                    req.idempotency_key, cached.get("uuid"))
        return cached

    logger.info("[gralkor] ingest-messages — group:%s messages:%d", req.group_id, len(req.messages))
    # Missing reference_time defaults to "now" in UTC.
    ref_time = (
        datetime.fromisoformat(req.reference_time)
        if req.reference_time
        else datetime.now(timezone.utc)
    )
    # Reuse graphiti's own LLM client for behaviour distillation.
    llm = graphiti.llm_client if graphiti else None
    episode_body = await _format_transcript(req.messages, llm)

    logger.info("[gralkor] episode body — chars:%d lines:%d", len(episode_body), episode_body.count('\n') + 1)
    logger.debug("[gralkor] episode body:\n%s", episode_body)

    t0 = time.monotonic()
    result = await graphiti.add_episode(
        name=req.name,
        episode_body=episode_body,
        source_description=req.source_description,
        group_id=req.group_id,
        reference_time=ref_time,
        source=EpisodeType.message,
        # Ontology settings are module globals populated in lifespan().
        entity_types=ontology_entity_types,
        edge_types=ontology_edge_types,
        edge_type_map=ontology_edge_type_map,
        excluded_entity_types=ontology_excluded,
    )
    duration_ms = (time.monotonic() - t0) * 1000
    episode = result.episode
    logger.info("[gralkor] episode added — uuid:%s duration:%.0fms", episode.uuid, duration_ms)
    logger.debug("[gralkor] episode result: %s", _serialize_episode(episode))
    serialized = _serialize_episode(episode)
    # Cache only after a successful ingest so failures can be retried.
    _idempotency_store_result(req.idempotency_key, serialized)
    return serialized
|
|
633
|
+
|
|
634
|
+
|
|
635
|
+
@app.get("/episodes")
async def get_episodes(group_id: str, limit: int = 10):
    """Return the most recent *limit* episodes for a group, relative to now."""
    # Make sure reads hit the group's named graph (see _ensure_driver_graph).
    _ensure_driver_graph([group_id])
    recent = await graphiti.retrieve_episodes(
        reference_time=datetime.now(timezone.utc),
        last_n=limit,
        group_ids=[group_id],
    )
    return [_serialize_episode(episode) for episode in recent]
|
|
644
|
+
|
|
645
|
+
|
|
646
|
+
@app.delete("/episodes/{uuid}")
async def delete_episode(uuid: str):
    """Remove an episode by UUID; responds 204 with no body on success."""
    await graphiti.remove_episode(uuid)
    return Response(status_code=204)
|
|
650
|
+
|
|
651
|
+
|
|
652
|
+
def _sanitize_query(query: str) -> str:
|
|
653
|
+
"""Strip backticks that cause RediSearch syntax errors.
|
|
654
|
+
|
|
655
|
+
graphiti-core's _SEPARATOR_MAP handles most special characters
|
|
656
|
+
but misses backticks. We strip them at the API boundary.
|
|
657
|
+
"""
|
|
658
|
+
return query.replace("`", " ")
|
|
659
|
+
|
|
660
|
+
|
|
661
|
+
def _ensure_driver_graph(group_ids: list[str] | None) -> None:
    """Route graphiti's driver to the correct FalkorDB named graph.

    graphiti-core's add_episode() clones the driver when group_id differs from
    the current database (graphiti.py:887-889), but search() does not. On fresh
    boot the driver targets 'default_db' — an empty graph — so searches return
    nothing until the first add_episode switches it. This helper applies the
    same routing for read paths.

    Args:
        group_ids: Group ids the caller is about to read; only the first one
            selects the graph. No-op when empty or None.
    """
    if not group_ids:
        return
    target = group_ids[0]
    # NOTE(review): reads the driver's private _database attribute — there is
    # no public accessor visible here; confirm against the driver API.
    if target != graphiti.driver._database:
        graphiti.driver = graphiti.driver.clone(database=target)
        graphiti.clients.driver = graphiti.driver
        # Log through the module logger (with the service's "[gralkor]"
        # prefix) like every other code path, instead of a raw print() that
        # bypasses the configured log stream.
        logger.info("[gralkor] driver graph routed: %s", target)
|
|
677
|
+
|
|
678
|
+
|
|
679
|
+
def _prioritize_facts(
|
|
680
|
+
edges: list[EntityEdge], limit: int, reserved_ratio: float = 0.7,
|
|
681
|
+
) -> list[EntityEdge]:
|
|
682
|
+
"""Reserve slots for valid facts, fill the rest by relevance.
|
|
683
|
+
|
|
684
|
+
First ~70% of slots are reserved for valid facts (no invalid_at).
|
|
685
|
+
Remaining slots are filled from whatever Graphiti ranked highest
|
|
686
|
+
among the leftovers — valid or not — preserving relevance scoring.
|
|
687
|
+
"""
|
|
688
|
+
reserved_count = max(1, round(limit * reserved_ratio))
|
|
689
|
+
|
|
690
|
+
reserved: list[EntityEdge] = []
|
|
691
|
+
rest: list[EntityEdge] = []
|
|
692
|
+
for e in edges:
|
|
693
|
+
if len(reserved) < reserved_count and e.invalid_at is None:
|
|
694
|
+
reserved.append(e)
|
|
695
|
+
else:
|
|
696
|
+
rest.append(e)
|
|
697
|
+
|
|
698
|
+
remainder_count = limit - len(reserved)
|
|
699
|
+
return reserved + rest[:remainder_count]
|
|
700
|
+
|
|
701
|
+
|
|
702
|
+
@app.post("/search")
async def search(req: SearchRequest):
    """Run a graph search for req.query and return prioritized, serialized facts."""
    logger.info("[gralkor] search — query:%d chars group_ids:%s num_results:%d",
                len(req.query), req.group_ids, req.num_results)
    # graphiti.add_episode() clones the driver to target the correct FalkorDB
    # named graph (database=group_id), but graphiti.search() just uses whatever
    # graph the driver currently points at. Before the first add_episode that
    # is 'default_db' — an empty graph — so every search would return nothing.
    # Apply the same routing here for the read path.
    _ensure_driver_graph(req.group_ids)
    # Fetch twice the requested count so that expired facts deprioritized
    # below still leave enough valid candidates to fill the response.
    fetch_limit = req.num_results * 2
    started = time.monotonic()
    try:
        edges = await graphiti.search(
            query=_sanitize_query(req.query),
            group_ids=req.group_ids,
            num_results=fetch_limit,
        )
    except Exception as exc:
        elapsed_ms = (time.monotonic() - started) * 1000
        logger.error("[gralkor] search failed — %.0fms: %s", elapsed_ms, exc)
        raise
    elapsed_ms = (time.monotonic() - started) * 1000
    prioritized = _prioritize_facts(edges, req.num_results)
    valid_count = sum(1 for edge in prioritized if edge.invalid_at is None)
    facts = [_serialize_fact(edge) for edge in prioritized]
    logger.info("[gralkor] search result — %d facts (%d valid, %d non-valid) from %d fetched %.0fms",
                len(prioritized), valid_count, len(prioritized) - valid_count, len(edges), elapsed_ms)
    logger.debug("[gralkor] search facts: %s", facts)
    return {"facts": facts}
|
|
733
|
+
|
|
734
|
+
|
|
735
|
+
@app.delete("/edges/{uuid}")
async def delete_edge(uuid: str):
    """Look up the entity edge *uuid* and delete it; respond 204 No Content."""
    edge = await EntityEdge.get_by_uuid(graphiti.driver, uuid)
    await edge.delete(graphiti.driver)
    return Response(status_code=204)
|
|
741
|
+
|
|
742
|
+
|
|
743
|
+
@app.post("/clear")
async def clear_graph(req: GroupIdRequest):
    """Delete every node belonging to req.group_id from its named graph."""
    _ensure_driver_graph([req.group_id])
    await Node.delete_by_group_id(graphiti.driver, req.group_id)
    return {"deleted": True}
|
|
749
|
+
|
|
750
|
+
|
|
751
|
+
@app.post("/build-indices")
async def build_indices():
    """Create the graph indices and constraints graphiti-core requires."""
    await graphiti.build_indices_and_constraints()
    return {"status": "ok"}
|
|
755
|
+
|
|
756
|
+
|
|
757
|
+
@app.post("/build-communities")
async def build_communities(req: GroupIdRequest):
    """Rebuild community nodes and edges for a group; return their counts."""
    _ensure_driver_graph([req.group_id])
    communities, community_edges = await graphiti.build_communities(
        group_ids=[req.group_id],
    )
    return {"communities": len(communities), "edges": len(community_edges)}
|