@contextableai/openclaw-memory-rebac 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,34 @@
1
/**
 * Parallel Multi-Group Search + Merge/Re-rank
 *
 * Backend-agnostic: delegates per-group search to MemoryBackend.searchGroup().
 * Issues parallel calls (one per authorized group_id), merges results,
 * deduplicates by UUID, and re-ranks by score then recency.
 */
import type { MemoryBackend, SearchResult } from "./backend.js";
export type { SearchResult };
/** Options accepted by {@link searchAuthorizedMemories}. */
export type SearchOptions = {
    /** Free-text query forwarded verbatim to each per-group search. */
    query: string;
    /** Authorized group_ids to fan out across; an empty list yields no results. */
    groupIds: string[];
    /** Maximum number of merged results to return (also forwarded per group). */
    limit?: number;
    /** Optional session identifier forwarded to the backend. */
    sessionId?: string;
};
/**
 * Search across multiple authorized group_ids in parallel.
 * Merges and deduplicates results, returning up to `limit` items sorted by
 * score (desc) then recency (desc). Failed per-group searches are skipped.
 */
export declare function searchAuthorizedMemories(backend: MemoryBackend, options: SearchOptions): Promise<SearchResult[]>;
/**
 * Format search results into a text block for injecting into agent context.
 * Returns an empty string when there are no results.
 */
export declare function formatResultsForContext(results: SearchResult[]): string;
/**
 * Format results with long-term and session sections separated.
 * Session group_ids start with "session-".
 */
export declare function formatDualResults(longTermResults: SearchResult[], sessionResults: SearchResult[]): string;
/**
 * Deduplicate session results against long-term results (by UUID).
 */
export declare function deduplicateSessionResults(longTermResults: SearchResult[], sessionResults: SearchResult[]): SearchResult[];
package/dist/search.js ADDED
@@ -0,0 +1,98 @@
1
+ /**
2
+ * Parallel Multi-Group Search + Merge/Re-rank
3
+ *
4
+ * Backend-agnostic: delegates per-group search to MemoryBackend.searchGroup().
5
+ * Issues parallel calls (one per authorized group_id), merges results,
6
+ * deduplicates by UUID, and re-ranks by score then recency.
7
+ */
8
+ // ============================================================================
9
+ // Search
10
+ // ============================================================================
11
+ /**
12
+ * Search across multiple authorized group_ids in parallel.
13
+ * Merges and deduplicates results, returning up to `limit` items sorted by
14
+ * score (desc) then recency (desc).
15
+ */
16
+ export async function searchAuthorizedMemories(backend, options) {
17
+ const { query, groupIds, limit = 10, sessionId } = options;
18
+ if (groupIds.length === 0) {
19
+ return [];
20
+ }
21
+ // Fan out parallel searches across all authorized groups
22
+ const promises = groupIds.map((groupId) => backend.searchGroup({ query, groupId, limit, sessionId }));
23
+ const resultSets = await Promise.allSettled(promises);
24
+ // Collect all successful results — silently skip failed group searches
25
+ const allResults = [];
26
+ for (const result of resultSets) {
27
+ if (result.status === "fulfilled") {
28
+ allResults.push(...result.value);
29
+ }
30
+ }
31
+ // Deduplicate by UUID
32
+ const seen = new Set();
33
+ const deduped = allResults.filter((r) => {
34
+ if (seen.has(r.uuid))
35
+ return false;
36
+ seen.add(r.uuid);
37
+ return true;
38
+ });
39
+ // Sort: score descending (when available), then recency descending
40
+ deduped.sort((a, b) => {
41
+ if (a.score !== undefined && b.score !== undefined && a.score !== b.score) {
42
+ return b.score - a.score;
43
+ }
44
+ const dateA = new Date(a.created_at).getTime();
45
+ const dateB = new Date(b.created_at).getTime();
46
+ return dateB - dateA;
47
+ });
48
+ return deduped.slice(0, limit);
49
+ }
50
+ // ============================================================================
51
+ // Format for agent context
52
+ // ============================================================================
53
/**
 * Format search results into a text block for injecting into agent context.
 * Returns an empty string when there are no results.
 */
export function formatResultsForContext(results) {
    if (!results.length) {
        return "";
    }
    const lines = [];
    for (let position = 0; position < results.length; position++) {
        lines.push(formatResultLine(results[position], position + 1));
    }
    return lines.join("\n");
}
61
/**
 * Format results with long-term and session sections separated.
 * Session group_ids start with "session-".
 *
 * Long-term lines come first; a "Session memories:" divider is inserted only
 * when both sections are non-empty. Numbering runs continuously across both.
 */
export function formatDualResults(longTermResults, sessionResults) {
    const lines = [];
    let counter = 0;
    for (const result of longTermResults) {
        counter += 1;
        lines.push(formatResultLine(result, counter));
    }
    if (sessionResults.length > 0) {
        if (longTermResults.length > 0) {
            lines.push("Session memories:");
        }
        for (const result of sessionResults) {
            counter += 1;
            lines.push(formatResultLine(result, counter));
        }
    }
    return lines.join("\n");
}
80
/**
 * Format a single search result line with type-prefixed UUID.
 * e.g. "[fact:da8650cb-...] Eric's birthday is Dec 17th (Eric -[HAS_BIRTHDAY]→ Dec 17th)"
 * Any unrecognized result type falls back to the "completion" label.
 */
function formatResultLine(r, idx) {
    const labelByType = new Map([
        ["node", "entity"],
        ["fact", "fact"],
        ["chunk", "chunk"],
        ["summary", "summary"],
    ]);
    const typeLabel = labelByType.get(r.type) ?? "completion";
    return `${idx}. [${typeLabel}:${r.uuid}] ${r.summary} (${r.context})`;
}
92
/**
 * Deduplicate session results against long-term results (by UUID).
 * Returns only the session results whose UUIDs are absent from long-term.
 */
export function deduplicateSessionResults(longTermResults, sessionResults) {
    const knownUuids = new Set();
    for (const result of longTermResults) {
        knownUuids.add(result.uuid);
    }
    return sessionResults.filter((candidate) => !knownUuids.has(candidate.uuid));
}
@@ -0,0 +1,80 @@
1
/**
 * SpiceDB Client Wrapper
 *
 * Wraps @authzed/authzed-node for authorization operations:
 * WriteSchema, WriteRelationships, DeleteRelationships, BulkImportRelationships,
 * LookupResources, CheckPermission.
 */
export type SpiceDbConfig = {
    /** gRPC endpoint of the SpiceDB instance. */
    endpoint: string;
    /** Pre-shared key / token used to authenticate with SpiceDB. */
    token: string;
    /** When true, connects with plaintext (no-TLS) credentials — dev/local use. */
    insecure: boolean;
};
/** One (resource, relation, subject) relationship tuple. */
export type RelationshipTuple = {
    resourceType: string;
    resourceId: string;
    relation: string;
    subjectType: string;
    subjectId: string;
};
/**
 * Read-consistency selector for check/lookup/read operations:
 * - "full": fully consistent (always fresh)
 * - "at_least_as_fresh": at least as fresh as the supplied ZedToken
 * - "minimize_latency": fastest; default when no mode is given
 */
export type ConsistencyMode = {
    mode: "full";
} | {
    mode: "at_least_as_fresh";
    token: string;
} | {
    mode: "minimize_latency";
};
export declare class SpiceDbClient {
    private client;
    private promises;
    constructor(config: SpiceDbConfig);
    /** Write (replace) the SpiceDB schema. */
    writeSchema(schema: string): Promise<void>;
    /** Read the current SpiceDB schema text. */
    readSchema(): Promise<string>;
    /** Upsert tuples (TOUCH semantics); resolves to the write's ZedToken, if any. */
    writeRelationships(tuples: RelationshipTuple[]): Promise<string | undefined>;
    /** Delete the given tuples individually. */
    deleteRelationships(tuples: RelationshipTuple[]): Promise<void>;
    /** Bulk-delete relationships matching a resource filter; resolves to the deletion's ZedToken, if any. */
    deleteRelationshipsByFilter(params: {
        resourceType: string;
        resourceId: string;
        relation?: string;
    }): Promise<string | undefined>;
    private toRelationship;
    /**
     * Bulk import relationships using the streaming ImportBulkRelationships RPC.
     * More efficient than individual writeRelationships calls for large batches.
     * Falls back to batched writeRelationships if the streaming RPC is unavailable.
     */
    bulkImportRelationships(tuples: RelationshipTuple[], batchSize?: number): Promise<number>;
    private bulkImportViaStream;
    private bulkImportViaWrite;
    /**
     * Read relationships matching a filter. Returns all tuples that match the
     * specified resource type, optional resource ID, optional relation, and
     * optional subject filter. Used by the cleanup command to find which
     * Graphiti episodes have SpiceDB authorization relationships.
     */
    readRelationships(params: {
        resourceType: string;
        resourceId?: string;
        relation?: string;
        subjectType?: string;
        subjectId?: string;
        consistency?: ConsistencyMode;
    }): Promise<RelationshipTuple[]>;
    private buildConsistency;
    /** True iff the subject has `permission` on the given resource. */
    checkPermission(params: {
        resourceType: string;
        resourceId: string;
        permission: string;
        subjectType: string;
        subjectId: string;
        consistency?: ConsistencyMode;
    }): Promise<boolean>;
    /** IDs of all `resourceType` resources on which the subject has `permission`. */
    lookupResources(params: {
        resourceType: string;
        permission: string;
        subjectType: string;
        subjectId: string;
        consistency?: ConsistencyMode;
    }): Promise<string[]>;
}
@@ -0,0 +1,267 @@
1
/**
 * SpiceDB Client Wrapper
 *
 * Wraps @authzed/authzed-node for authorization operations:
 * WriteSchema, WriteRelationships, DeleteRelationships, BulkImportRelationships,
 * LookupResources, CheckPermission.
 */
import { v1 } from "@authzed/authzed-node";
// ============================================================================
// Client
// ============================================================================
export class SpiceDbClient {
    // Raw @authzed/authzed-node client instance.
    client;
    // Promise-returning facade exposed by the client; all methods below use it.
    promises;
    constructor(config) {
        if (config.insecure) {
            // Plaintext (no-TLS) credentials — intended for local/dev instances.
            this.client = v1.NewClient(config.token, config.endpoint, v1.ClientSecurity.INSECURE_PLAINTEXT_CREDENTIALS);
        }
        else {
            this.client = v1.NewClient(config.token, config.endpoint);
        }
        this.promises = this.client.promises;
    }
    // --------------------------------------------------------------------------
    // Schema
    // --------------------------------------------------------------------------
    /** Write (replace) the SpiceDB schema. */
    async writeSchema(schema) {
        const request = v1.WriteSchemaRequest.create({ schema });
        await this.promises.writeSchema(request);
    }
    /** Read the current SpiceDB schema text. */
    async readSchema() {
        const request = v1.ReadSchemaRequest.create({});
        const response = await this.promises.readSchema(request);
        return response.schemaText;
    }
    // --------------------------------------------------------------------------
    // Relationships
    // --------------------------------------------------------------------------
    /**
     * Upsert relationship tuples using TOUCH semantics (idempotent: existing
     * tuples stay, missing ones are created).
     * Returns the write's ZedToken, if the server provided one.
     */
    async writeRelationships(tuples) {
        const updates = tuples.map((t) => v1.RelationshipUpdate.create({
            operation: v1.RelationshipUpdate_Operation.TOUCH,
            relationship: v1.Relationship.create({
                resource: v1.ObjectReference.create({
                    objectType: t.resourceType,
                    objectId: t.resourceId,
                }),
                relation: t.relation,
                subject: v1.SubjectReference.create({
                    object: v1.ObjectReference.create({
                        objectType: t.subjectType,
                        objectId: t.subjectId,
                    }),
                }),
            }),
        }));
        const request = v1.WriteRelationshipsRequest.create({ updates });
        const response = await this.promises.writeRelationships(request);
        return response.writtenAt?.token;
    }
    /**
     * Delete the given tuples via WriteRelationships with DELETE-operation
     * updates (one per tuple).
     */
    async deleteRelationships(tuples) {
        const updates = tuples.map((t) => v1.RelationshipUpdate.create({
            operation: v1.RelationshipUpdate_Operation.DELETE,
            relationship: v1.Relationship.create({
                resource: v1.ObjectReference.create({
                    objectType: t.resourceType,
                    objectId: t.resourceId,
                }),
                relation: t.relation,
                subject: v1.SubjectReference.create({
                    object: v1.ObjectReference.create({
                        objectType: t.subjectType,
                        objectId: t.subjectId,
                    }),
                }),
            }),
        }));
        const request = v1.WriteRelationshipsRequest.create({ updates });
        await this.promises.writeRelationships(request);
    }
    /**
     * Bulk-delete all relationships matching a resource filter (resource type +
     * ID, optionally narrowed to one relation).
     * Returns the deletion's ZedToken, if provided.
     */
    async deleteRelationshipsByFilter(params) {
        const request = v1.DeleteRelationshipsRequest.create({
            relationshipFilter: v1.RelationshipFilter.create({
                resourceType: params.resourceType,
                optionalResourceId: params.resourceId,
                // Only set optionalRelation when a relation filter was requested.
                ...(params.relation ? { optionalRelation: params.relation } : {}),
            }),
        });
        const response = await this.promises.deleteRelationships(request);
        return response.deletedAt?.token;
    }
    // --------------------------------------------------------------------------
    // Bulk Import
    // --------------------------------------------------------------------------
    /** Convert a plain RelationshipTuple into the protobuf Relationship message. */
    toRelationship(t) {
        return v1.Relationship.create({
            resource: v1.ObjectReference.create({
                objectType: t.resourceType,
                objectId: t.resourceId,
            }),
            relation: t.relation,
            subject: v1.SubjectReference.create({
                object: v1.ObjectReference.create({
                    objectType: t.subjectType,
                    objectId: t.subjectId,
                }),
            }),
        });
    }
    /**
     * Bulk import relationships using the streaming ImportBulkRelationships RPC.
     * More efficient than individual writeRelationships calls for large batches.
     * Falls back to batched writeRelationships if the streaming RPC is unavailable.
     */
    async bulkImportRelationships(tuples, batchSize = 1000) {
        if (tuples.length === 0)
            return 0;
        // Try streaming bulk import first (uses CREATE semantics — rejects duplicates)
        if (typeof this.promises.bulkImportRelationships === "function") {
            try {
                return await this.bulkImportViaStream(tuples, batchSize);
            }
            catch (err) {
                // ALREADY_EXISTS means some relationships exist (e.g. partial previous run).
                // Fall through to batched writeRelationships which uses TOUCH (idempotent).
                const msg = err instanceof Error ? err.message : String(err);
                if (msg.includes("ALREADY_EXISTS")) {
                    return this.bulkImportViaWrite(tuples, batchSize);
                }
                throw err;
            }
        }
        // Fallback: batched writeRelationships (uses TOUCH — idempotent)
        return this.bulkImportViaWrite(tuples, batchSize);
    }
    /**
     * Stream all tuples over ImportBulkRelationships in chunks of `batchSize`.
     * Resolves with the count the server reports as loaded, falling back to
     * tuples.length when the response carries no numLoaded.
     */
    bulkImportViaStream(tuples, batchSize) {
        return new Promise((resolve, reject) => {
            const stream = this.promises.bulkImportRelationships((err, response) => {
                if (err)
                    reject(err);
                else
                    resolve(Number(response?.numLoaded ?? tuples.length));
            });
            // The stream may also emit "error" after the completion callback;
            // extra settle calls on an already-settled Promise are no-ops.
            stream.on("error", (err) => {
                reject(err);
            });
            for (let i = 0; i < tuples.length; i += batchSize) {
                const chunk = tuples.slice(i, i + batchSize);
                stream.write(v1.BulkImportRelationshipsRequest.create({
                    relationships: chunk.map((t) => this.toRelationship(t)),
                }));
            }
            stream.end();
        });
    }
    /** Sequentially write tuples in `batchSize` chunks; returns total written. */
    async bulkImportViaWrite(tuples, batchSize) {
        let total = 0;
        for (let i = 0; i < tuples.length; i += batchSize) {
            const chunk = tuples.slice(i, i + batchSize);
            await this.writeRelationships(chunk);
            total += chunk.length;
        }
        return total;
    }
    // --------------------------------------------------------------------------
    // Read Relationships
    // --------------------------------------------------------------------------
    /**
     * Read relationships matching a filter. Returns all tuples that match the
     * specified resource type, optional resource ID, optional relation, and
     * optional subject filter. Used by the cleanup command to find which
     * Graphiti episodes have SpiceDB authorization relationships.
     */
    async readRelationships(params) {
        const filterFields = {
            resourceType: params.resourceType,
        };
        if (params.resourceId) {
            filterFields.optionalResourceId = params.resourceId;
        }
        if (params.relation) {
            filterFields.optionalRelation = params.relation;
        }
        if (params.subjectType) {
            const subjectFilter = {
                subjectType: params.subjectType,
            };
            if (params.subjectId) {
                subjectFilter.optionalSubjectId = params.subjectId;
            }
            filterFields.optionalSubjectFilter = v1.SubjectFilter.create(subjectFilter);
        }
        const request = v1.ReadRelationshipsRequest.create({
            relationshipFilter: v1.RelationshipFilter.create(filterFields),
            consistency: this.buildConsistency(params.consistency),
        });
        const results = await this.promises.readRelationships(request);
        const tuples = [];
        for (const r of results) {
            const rel = r.relationship;
            // Skip malformed entries missing a resource or subject object.
            if (!rel?.resource || !rel.subject?.object)
                continue;
            tuples.push({
                resourceType: rel.resource.objectType,
                resourceId: rel.resource.objectId,
                relation: rel.relation,
                subjectType: rel.subject.object.objectType,
                subjectId: rel.subject.object.objectId,
            });
        }
        return tuples;
    }
    // --------------------------------------------------------------------------
    // Permissions
    // --------------------------------------------------------------------------
    /**
     * Map the public ConsistencyMode onto the protobuf Consistency message:
     * undefined / "minimize_latency" → minimizeLatency; "at_least_as_fresh" →
     * atLeastAsFresh with the caller's ZedToken; anything else ("full") →
     * fullyConsistent.
     */
    buildConsistency(mode) {
        if (!mode || mode.mode === "minimize_latency") {
            return v1.Consistency.create({
                requirement: { oneofKind: "minimizeLatency", minimizeLatency: true },
            });
        }
        if (mode.mode === "at_least_as_fresh") {
            return v1.Consistency.create({
                requirement: {
                    oneofKind: "atLeastAsFresh",
                    atLeastAsFresh: v1.ZedToken.create({ token: mode.token }),
                },
            });
        }
        return v1.Consistency.create({
            requirement: { oneofKind: "fullyConsistent", fullyConsistent: true },
        });
    }
    /** Check whether the subject has `permission` on one resource. */
    async checkPermission(params) {
        const request = v1.CheckPermissionRequest.create({
            resource: v1.ObjectReference.create({
                objectType: params.resourceType,
                objectId: params.resourceId,
            }),
            permission: params.permission,
            subject: v1.SubjectReference.create({
                object: v1.ObjectReference.create({
                    objectType: params.subjectType,
                    objectId: params.subjectId,
                }),
            }),
            consistency: this.buildConsistency(params.consistency),
        });
        const response = await this.promises.checkPermission(request);
        // Only HAS_PERMISSION counts as authorized; conditional/unknown → false.
        return (response.permissionship ===
            v1.CheckPermissionResponse_Permissionship.HAS_PERMISSION);
    }
    /** List IDs of all resources of a type on which the subject has `permission`. */
    async lookupResources(params) {
        const request = v1.LookupResourcesRequest.create({
            resourceObjectType: params.resourceType,
            permission: params.permission,
            subject: v1.SubjectReference.create({
                object: v1.ObjectReference.create({
                    objectType: params.subjectType,
                    objectId: params.subjectId,
                }),
            }),
            consistency: this.buildConsistency(params.consistency),
        });
        const results = await this.promises.lookupResources(request);
        return results.map((r) => r.resourceObjectId);
    }
}
@@ -0,0 +1,50 @@
1
+ # ============================================================================
2
+ # Graphiti FastAPI REST server configuration
3
+ # ============================================================================
4
+ # Each AI component (LLM, embedder, reranker) can independently point to a
5
+ # different service. If EMBEDDING_BASE_URL is not set, it defaults to
6
+ # LLM_BASE_URL (or the OpenAI API if LLM_BASE_URL is also unset).
7
+
8
+ # ============================================================================
9
+ # LLM (entity extraction)
10
+ # ============================================================================
11
+ # Default: OpenAI API. Set LLM_BASE_URL to use a different provider:
12
+ # Ollama: http://host.docker.internal:11434/v1
13
+ # vLLM: http://your-vllm-server:8000/v1
14
+ # OpenAI: (leave LLM_BASE_URL empty to use default)
15
+ LLM_BASE_URL=http://host.docker.internal:11434/v1
16
+ LLM_MODEL=qwen2.5:14b
17
+ LLM_API_KEY=not-needed
18
+
19
+ # ============================================================================
20
+ # Embedder
21
+ # ============================================================================
22
+ # Default: OpenAI text-embedding-3-small.
23
+ # If EMBEDDING_BASE_URL is empty, uses LLM_BASE_URL (or OpenAI default).
24
+ EMBEDDING_MODEL=nomic-embed-text
25
+ # EMBEDDING_BASE_URL=
26
+ # EMBEDDING_API_KEY=
27
+ EMBEDDING_DIM=768
28
+
29
+ # ============================================================================
30
+ # Reranker / cross-encoder
31
+ # ============================================================================
32
+ # Default: "bge" — runs BAAI/bge-reranker-v2-m3 locally via sentence-transformers.
33
+ # No API key or external service needed. Fast, accurate, free.
34
+ #
35
+ # Set to "openai" to use a remote LLM-based reranker instead.
36
+ RERANKER_PROVIDER=bge
37
+ # RERANKER_MODEL= # Only used when RERANKER_PROVIDER=openai
38
+ # RERANKER_BASE_URL= # Only used when RERANKER_PROVIDER=openai
39
+ # RERANKER_API_KEY= # Only used when RERANKER_PROVIDER=openai
40
+
41
+ # ============================================================================
42
+ # Neo4j
43
+ # ============================================================================
44
+ # NEO4J_USER=neo4j
45
+ # NEO4J_PASSWORD=graphiti_pw
46
+
47
+ # ============================================================================
48
+ # Server
49
+ # ============================================================================
50
+ # GRAPHITI_PORT=8000
@@ -48,8 +48,8 @@ services:
48
48
  context: .
49
49
  dockerfile: Dockerfile
50
50
  restart: unless-stopped
51
- expose:
52
- - "8000"
51
+ ports:
52
+ - "8000:8000"
53
53
  environment:
54
54
  # -- Graph database (Neo4j) --
55
55
  NEO4J_URI: bolt://neo4j:7687
@@ -98,6 +98,31 @@ def _create_reranker(settings: ExtendedSettings, llm_client):
98
98
  return BGERerankerClient()
99
99
 
100
100
 
101
class JsonSafeLLMClient(OpenAIGenericClient):
    """Wrapper that ensures 'json' appears in messages when response_format is json_object.

    Groq (and some other providers) require the word 'json' in the messages
    when response_format={"type": "json_object"} is used. Graphiti's internal
    prompts don't always include it, causing 400 errors.
    """

    async def _generate_response(self, messages, response_model=None, **kwargs):
        """Inject a 'Respond in JSON format.' instruction when no message mentions JSON,
        then delegate to OpenAIGenericClient._generate_response.
        """
        # Check if any message already contains the word 'json' (case-insensitive).
        # Guard against a message whose content is None/empty — calling .lower()
        # on None would raise AttributeError here.
        has_json_mention = any('json' in (m.content or '').lower() for m in messages)
        if not has_json_mention:
            # Inject into the first system message, or prepend one
            injected = False
            for m in messages:
                if m.role == 'system':
                    # Same None-guard: '+=' on a None content would raise.
                    m.content = (m.content or '') + '\nRespond in JSON format.'
                    injected = True
                    break
            if not injected:
                # Import here (as in the original) rather than at module top level.
                from graphiti_core.prompts.models import Message
                messages.insert(0, Message(role='system', content='Respond in JSON format.'))
        return await super()._generate_response(messages, response_model, **kwargs)
124
+
125
+
101
126
  def create_graphiti(settings: ExtendedSettings) -> OpenClawGraphiti:
102
127
  """Create an OpenClawGraphiti instance with per-component client configuration."""
103
128
 
@@ -109,7 +134,7 @@ def create_graphiti(settings: ExtendedSettings) -> OpenClawGraphiti:
109
134
  if settings.model_name:
110
135
  llm_config.model = settings.model_name
111
136
  llm_config.small_model = settings.model_name
112
- llm_client = OpenAIGenericClient(config=llm_config)
137
+ llm_client = JsonSafeLLMClient(config=llm_config)
113
138
 
114
139
  # -- Embedder --
115
140
  embedder_api_key = settings.embedding_api_key or settings.openai_api_key
@@ -86,6 +86,26 @@ def patch():
86
86
 
87
87
  app.dependency_overrides[original_get_graphiti] = patched_get_graphiti
88
88
 
89
+ # -- Endpoint: entity edges extracted from a specific episode --
90
+ # graphiti-core stores an `episodes` list on each RELATES_TO relationship
91
+ # tracking which episodes contributed to that fact. This endpoint exposes
92
+ # those UUIDs so the plugin can write per-fact SpiceDB relationships.
93
+ @app.get("/episodes/{episode_uuid}/edges")
94
+ async def get_episode_edges(episode_uuid: str):
95
+ """Return entity edge UUIDs that reference a specific episode."""
96
+ query = (
97
+ "MATCH ()-[r:RELATES_TO]-() "
98
+ "WHERE $episode_uuid IN r.episodes "
99
+ "RETURN DISTINCT r.uuid AS uuid"
100
+ )
101
+ # Use the raw Neo4j async driver directly to avoid differences
102
+ # in the Graphiti Neo4jDriver.execute_query() wrapper across versions.
103
+ raw_driver = singleton_client.driver.client
104
+ records, _, _ = await raw_driver.execute_query(
105
+ query, parameters_={"episode_uuid": episode_uuid}
106
+ )
107
+ return [{"uuid": r["uuid"]} for r in records]
108
+
89
109
  # -- Fix upstream AsyncWorker crash-on-error bug --
90
110
  # The worker loop only catches CancelledError; any other exception from
91
111
  # add_episode() kills the worker silently and no more jobs are processed.
@@ -117,12 +137,27 @@ def patch():
117
137
  bulk_mod = importlib.import_module("graphiti_core.utils.bulk_utils")
118
138
  original_bulk_add = bulk_mod.add_nodes_and_edges_bulk
119
139
 
120
- def _sanitize_attributes(attrs):
121
- """Flatten non-primitive attribute values to JSON strings for Neo4j."""
140
+ # Reserved keys that must not be overwritten by LLM-extracted attributes.
141
+ # See: https://github.com/contextablemark/openclaw-memory-rebac/issues/6
142
+ RESERVED_EDGE_KEYS = {
143
+ 'uuid', 'source_node_uuid', 'target_node_uuid', 'name',
144
+ 'fact', 'fact_embedding', 'group_id', 'episodes',
145
+ 'created_at', 'expired_at', 'valid_at', 'invalid_at',
146
+ }
147
+ RESERVED_NODE_KEYS = {
148
+ 'uuid', 'name', 'name_embedding', 'group_id', 'summary',
149
+ 'created_at', 'labels',
150
+ }
151
+
152
+ def _sanitize_attributes(attrs, reserved_keys):
153
+ """Flatten non-primitive values and strip reserved keys to prevent clobber."""
122
154
  if not attrs:
123
155
  return attrs
124
156
  sanitized = {}
125
157
  for k, v in attrs.items():
158
+ if k in reserved_keys:
159
+ logger.debug("Stripped reserved key %r from attributes", k)
160
+ continue
126
161
  if isinstance(v, (dict, list, set, tuple)):
127
162
  sanitized[k] = json.dumps(v, default=str)
128
163
  else:
@@ -133,10 +168,32 @@ def patch():
133
168
  entity_nodes, entity_edges, embedder):
134
169
  for node in entity_nodes:
135
170
  if node.attributes:
136
- node.attributes = _sanitize_attributes(node.attributes)
171
+ node.attributes = _sanitize_attributes(node.attributes, RESERVED_NODE_KEYS)
137
172
  for edge in entity_edges:
173
+ # DIAGNOSTIC: log clobber attempts BEFORE stripping (so we can
174
+ # verify the fix is catching them). Keep until confirmed in prod.
175
+ if edge.attributes and 'fact_embedding' in edge.attributes:
176
+ logger.warning(
177
+ "DIAG attributes_clobber: edge=%s has 'fact_embedding' in attributes! "
178
+ "value_type=%s (will be stripped)", edge.uuid,
179
+ type(edge.attributes.get('fact_embedding')),
180
+ )
138
181
  if edge.attributes:
139
- edge.attributes = _sanitize_attributes(edge.attributes)
182
+ edge.attributes = _sanitize_attributes(edge.attributes, RESERVED_EDGE_KEYS)
183
+ # DIAGNOSTIC: log edges with missing/invalid embeddings
184
+ emb = edge.fact_embedding
185
+ emb_ok = isinstance(emb, list) and len(emb) > 0 and all(isinstance(x, (int, float)) for x in emb[:5])
186
+ if not emb_ok:
187
+ logger.warning(
188
+ "DIAG bad_embedding: edge=%s name=%r type=%s len=%s "
189
+ "sample=%r fact=%r attrs_keys=%s src=%s tgt=%s",
190
+ edge.uuid, edge.name,
191
+ type(emb).__name__, len(emb) if isinstance(emb, (list, tuple)) else 'N/A',
192
+ emb[:3] if isinstance(emb, list) else emb,
193
+ edge.fact[:200] if edge.fact else None,
194
+ list((edge.attributes or {}).keys()),
195
+ edge.source_node_uuid, edge.target_node_uuid,
196
+ )
140
197
  return await original_bulk_add(
141
198
  driver, episodic_nodes, episodic_edges,
142
199
  entity_nodes, entity_edges, embedder,