@contextableai/openclaw-memory-rebac 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/authorization.d.ts +57 -0
- package/dist/authorization.js +133 -0
- package/dist/backend.d.ts +135 -0
- package/dist/backend.js +11 -0
- package/dist/backends/graphiti.d.ts +72 -0
- package/dist/backends/graphiti.js +222 -0
- package/dist/backends/registry.d.ts +14 -0
- package/dist/backends/registry.js +12 -0
- package/dist/cli.d.ts +23 -0
- package/dist/cli.js +446 -0
- package/dist/config.d.ts +34 -0
- package/dist/config.js +97 -0
- package/dist/index.d.ts +25 -0
- package/dist/index.js +638 -0
- package/dist/plugin.defaults.json +12 -0
- package/dist/search.d.ts +34 -0
- package/dist/search.js +98 -0
- package/dist/spicedb.d.ts +80 -0
- package/dist/spicedb.js +256 -0
- package/docker/graphiti/.env +50 -0
- package/docker/graphiti/docker-compose.yml +2 -2
- package/docker/graphiti/graphiti_overlay.py +26 -1
- package/docker/graphiti/startup.py +58 -4
- package/docker/spicedb/.env +14 -0
- package/package.json +8 -11
- package/authorization.ts +0 -191
- package/backend.ts +0 -176
- package/backends/backends.json +0 -3
- package/backends/graphiti.test.ts +0 -292
- package/backends/graphiti.ts +0 -345
- package/backends/registry.ts +0 -36
- package/cli.ts +0 -418
- package/config.ts +0 -141
- package/index.ts +0 -711
- package/search.ts +0 -139
- package/spicedb.ts +0 -355
- /package/{backends → dist/backends}/graphiti.defaults.json +0 -0
package/dist/search.d.ts
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
 * Parallel Multi-Group Search + Merge/Re-rank
 *
 * Backend-agnostic: delegates per-group search to MemoryBackend.searchGroup().
 * Issues parallel calls (one per authorized group_id), merges results,
 * deduplicates by UUID, and re-ranks by score then recency.
 */
import type { MemoryBackend, SearchResult } from "./backend.js";
export type { SearchResult };
/** Options accepted by {@link searchAuthorizedMemories}. */
export type SearchOptions = {
    /** Natural-language query forwarded to every per-group search. */
    query: string;
    /** Authorized group_ids to fan the search out across in parallel. */
    groupIds: string[];
    /** Maximum number of merged results to return (the implementation defaults to 10). */
    limit?: number;
    /** Optional session identifier forwarded unchanged to the backend. */
    sessionId?: string;
};
/**
 * Search across multiple authorized group_ids in parallel.
 * Merges and deduplicates results, returning up to `limit` items sorted by
 * score (desc) then recency (desc). Failed per-group searches are skipped
 * rather than failing the whole call.
 */
export declare function searchAuthorizedMemories(backend: MemoryBackend, options: SearchOptions): Promise<SearchResult[]>;
/**
 * Format search results into a text block for injecting into agent context.
 * Returns the empty string when there are no results.
 */
export declare function formatResultsForContext(results: SearchResult[]): string;
/**
 * Format results with long-term and session sections separated.
 * Session group_ids start with "session-".
 */
export declare function formatDualResults(longTermResults: SearchResult[], sessionResults: SearchResult[]): string;
/**
 * Deduplicate session results against long-term results (by UUID).
 * Long-term results win; only session results with unseen UUIDs are kept.
 */
export declare function deduplicateSessionResults(longTermResults: SearchResult[], sessionResults: SearchResult[]): SearchResult[];
|
package/dist/search.js
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Parallel Multi-Group Search + Merge/Re-rank
|
|
3
|
+
*
|
|
4
|
+
* Backend-agnostic: delegates per-group search to MemoryBackend.searchGroup().
|
|
5
|
+
* Issues parallel calls (one per authorized group_id), merges results,
|
|
6
|
+
* deduplicates by UUID, and re-ranks by score then recency.
|
|
7
|
+
*/
|
|
8
|
+
// ============================================================================
|
|
9
|
+
// Search
|
|
10
|
+
// ============================================================================
|
|
11
|
+
/**
|
|
12
|
+
* Search across multiple authorized group_ids in parallel.
|
|
13
|
+
* Merges and deduplicates results, returning up to `limit` items sorted by
|
|
14
|
+
* score (desc) then recency (desc).
|
|
15
|
+
*/
|
|
16
|
+
export async function searchAuthorizedMemories(backend, options) {
|
|
17
|
+
const { query, groupIds, limit = 10, sessionId } = options;
|
|
18
|
+
if (groupIds.length === 0) {
|
|
19
|
+
return [];
|
|
20
|
+
}
|
|
21
|
+
// Fan out parallel searches across all authorized groups
|
|
22
|
+
const promises = groupIds.map((groupId) => backend.searchGroup({ query, groupId, limit, sessionId }));
|
|
23
|
+
const resultSets = await Promise.allSettled(promises);
|
|
24
|
+
// Collect all successful results — silently skip failed group searches
|
|
25
|
+
const allResults = [];
|
|
26
|
+
for (const result of resultSets) {
|
|
27
|
+
if (result.status === "fulfilled") {
|
|
28
|
+
allResults.push(...result.value);
|
|
29
|
+
}
|
|
30
|
+
}
|
|
31
|
+
// Deduplicate by UUID
|
|
32
|
+
const seen = new Set();
|
|
33
|
+
const deduped = allResults.filter((r) => {
|
|
34
|
+
if (seen.has(r.uuid))
|
|
35
|
+
return false;
|
|
36
|
+
seen.add(r.uuid);
|
|
37
|
+
return true;
|
|
38
|
+
});
|
|
39
|
+
// Sort: score descending (when available), then recency descending
|
|
40
|
+
deduped.sort((a, b) => {
|
|
41
|
+
if (a.score !== undefined && b.score !== undefined && a.score !== b.score) {
|
|
42
|
+
return b.score - a.score;
|
|
43
|
+
}
|
|
44
|
+
const dateA = new Date(a.created_at).getTime();
|
|
45
|
+
const dateB = new Date(b.created_at).getTime();
|
|
46
|
+
return dateB - dateA;
|
|
47
|
+
});
|
|
48
|
+
return deduped.slice(0, limit);
|
|
49
|
+
}
|
|
50
|
+
// ============================================================================
|
|
51
|
+
// Format for agent context
|
|
52
|
+
// ============================================================================
|
|
53
|
+
/**
|
|
54
|
+
* Format search results into a text block for injecting into agent context.
|
|
55
|
+
*/
|
|
56
|
+
export function formatResultsForContext(results) {
|
|
57
|
+
if (results.length === 0)
|
|
58
|
+
return "";
|
|
59
|
+
return results.map((r, i) => formatResultLine(r, i + 1)).join("\n");
|
|
60
|
+
}
|
|
61
|
+
/**
|
|
62
|
+
* Format results with long-term and session sections separated.
|
|
63
|
+
* Session group_ids start with "session-".
|
|
64
|
+
*/
|
|
65
|
+
export function formatDualResults(longTermResults, sessionResults) {
|
|
66
|
+
const parts = [];
|
|
67
|
+
let idx = 1;
|
|
68
|
+
for (const r of longTermResults) {
|
|
69
|
+
parts.push(formatResultLine(r, idx++));
|
|
70
|
+
}
|
|
71
|
+
if (sessionResults.length > 0) {
|
|
72
|
+
if (longTermResults.length > 0)
|
|
73
|
+
parts.push("Session memories:");
|
|
74
|
+
for (const r of sessionResults) {
|
|
75
|
+
parts.push(formatResultLine(r, idx++));
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
return parts.join("\n");
|
|
79
|
+
}
|
|
80
|
+
/**
|
|
81
|
+
* Format a single search result line with type-prefixed UUID.
|
|
82
|
+
* e.g. "[fact:da8650cb-...] Eric's birthday is Dec 17th (Eric -[HAS_BIRTHDAY]→ Dec 17th)"
|
|
83
|
+
*/
|
|
84
|
+
function formatResultLine(r, idx) {
|
|
85
|
+
const typeLabel = r.type === "node" ? "entity" :
|
|
86
|
+
r.type === "fact" ? "fact" :
|
|
87
|
+
r.type === "chunk" ? "chunk" :
|
|
88
|
+
r.type === "summary" ? "summary" :
|
|
89
|
+
"completion";
|
|
90
|
+
return `${idx}. [${typeLabel}:${r.uuid}] ${r.summary} (${r.context})`;
|
|
91
|
+
}
|
|
92
|
+
/**
|
|
93
|
+
* Deduplicate session results against long-term results (by UUID).
|
|
94
|
+
*/
|
|
95
|
+
export function deduplicateSessionResults(longTermResults, sessionResults) {
|
|
96
|
+
const longTermIds = new Set(longTermResults.map((r) => r.uuid));
|
|
97
|
+
return sessionResults.filter((r) => !longTermIds.has(r.uuid));
|
|
98
|
+
}
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
/**
 * SpiceDB Client Wrapper
 *
 * Wraps @authzed/authzed-node for authorization operations:
 * WriteSchema, WriteRelationships, DeleteRelationships, BulkImportRelationships,
 * LookupResources, CheckPermission.
 */
/** Connection settings for the SpiceDB gRPC endpoint. */
export type SpiceDbConfig = {
    /** SpiceDB gRPC endpoint address. */
    endpoint: string;
    /** Preshared key presented as the gRPC auth token. */
    token: string;
    /** When true, connects with plaintext (no-TLS) credentials — local development only. */
    insecure: boolean;
};
/** One relationship tuple: resource -[relation]-> subject. */
export type RelationshipTuple = {
    resourceType: string;
    resourceId: string;
    relation: string;
    subjectType: string;
    subjectId: string;
};
/**
 * Read-consistency selector for check/lookup/read calls:
 * - "full": fully consistent reads
 * - "at_least_as_fresh": at least as fresh as the supplied ZedToken
 * - "minimize_latency": fastest, possibly stale (the implementation default when omitted)
 */
export type ConsistencyMode = {
    mode: "full";
} | {
    mode: "at_least_as_fresh";
    token: string;
} | {
    mode: "minimize_latency";
};
export declare class SpiceDbClient {
    private client;
    private promises;
    constructor(config: SpiceDbConfig);
    /** Write (replace) the SpiceDB schema. */
    writeSchema(schema: string): Promise<void>;
    /** Read back the current schema text. */
    readSchema(): Promise<string>;
    /** Upsert (TOUCH) tuples; resolves to the written-at ZedToken when available. */
    writeRelationships(tuples: RelationshipTuple[]): Promise<string | undefined>;
    /** Delete the given tuples. */
    deleteRelationships(tuples: RelationshipTuple[]): Promise<void>;
    /** Delete all relationships on a resource (optionally one relation); resolves to the deleted-at ZedToken. */
    deleteRelationshipsByFilter(params: {
        resourceType: string;
        resourceId: string;
        relation?: string;
    }): Promise<string | undefined>;
    private toRelationship;
    /**
     * Bulk import relationships using the streaming ImportBulkRelationships RPC.
     * More efficient than individual writeRelationships calls for large batches.
     * Falls back to batched writeRelationships if the streaming RPC is unavailable.
     * batchSize defaults to 1000 in the implementation.
     */
    bulkImportRelationships(tuples: RelationshipTuple[], batchSize?: number): Promise<number>;
    private bulkImportViaStream;
    private bulkImportViaWrite;
    /**
     * Read relationships matching a filter. Returns all tuples that match the
     * specified resource type, optional resource ID, optional relation, and
     * optional subject filter. Used by the cleanup command to find which
     * Graphiti episodes have SpiceDB authorization relationships.
     */
    readRelationships(params: {
        resourceType: string;
        resourceId?: string;
        relation?: string;
        subjectType?: string;
        subjectId?: string;
        consistency?: ConsistencyMode;
    }): Promise<RelationshipTuple[]>;
    private buildConsistency;
    /** Check whether the subject holds `permission` on the given resource. */
    checkPermission(params: {
        resourceType: string;
        resourceId: string;
        permission: string;
        subjectType: string;
        subjectId: string;
        consistency?: ConsistencyMode;
    }): Promise<boolean>;
    /** Resolve all resource IDs of `resourceType` on which the subject holds `permission`. */
    lookupResources(params: {
        resourceType: string;
        permission: string;
        subjectType: string;
        subjectId: string;
        consistency?: ConsistencyMode;
    }): Promise<string[]>;
}
|
package/dist/spicedb.js
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SpiceDB Client Wrapper
|
|
3
|
+
*
|
|
4
|
+
* Wraps @authzed/authzed-node for authorization operations:
|
|
5
|
+
* WriteSchema, WriteRelationships, DeleteRelationships, BulkImportRelationships,
|
|
6
|
+
* LookupResources, CheckPermission.
|
|
7
|
+
*/
|
|
8
|
+
import { v1 } from "@authzed/authzed-node";
|
|
9
|
+
// ============================================================================
|
|
10
|
+
// Client
|
|
11
|
+
// ============================================================================
|
|
12
|
+
/**
 * SpiceDB gRPC client wrapper. Construction of the v1.Relationship proto is
 * centralized in toRelationship(); writeRelationships and deleteRelationships
 * previously duplicated that construction inline.
 */
export class SpiceDbClient {
    client;
    promises;
    /**
     * Create a client. When `config.insecure` is set, connects using
     * plaintext (no-TLS) credentials — intended for local development.
     */
    constructor(config) {
        if (config.insecure) {
            this.client = v1.NewClient(config.token, config.endpoint, v1.ClientSecurity.INSECURE_PLAINTEXT_CREDENTIALS);
        }
        else {
            this.client = v1.NewClient(config.token, config.endpoint);
        }
        this.promises = this.client.promises;
    }
    // --------------------------------------------------------------------------
    // Schema
    // --------------------------------------------------------------------------
    /** Write (replace) the SpiceDB schema. */
    async writeSchema(schema) {
        const request = v1.WriteSchemaRequest.create({ schema });
        await this.promises.writeSchema(request);
    }
    /** Read back the current schema text. */
    async readSchema() {
        const request = v1.ReadSchemaRequest.create({});
        const response = await this.promises.readSchema(request);
        return response.schemaText;
    }
    // --------------------------------------------------------------------------
    // Relationships
    // --------------------------------------------------------------------------
    /**
     * Upsert (TOUCH) the given tuples.
     * @returns the written-at ZedToken when the server provides one.
     */
    async writeRelationships(tuples) {
        // Reuse toRelationship() instead of duplicating the proto construction.
        const updates = tuples.map((t) => v1.RelationshipUpdate.create({
            operation: v1.RelationshipUpdate_Operation.TOUCH,
            relationship: this.toRelationship(t),
        }));
        const request = v1.WriteRelationshipsRequest.create({ updates });
        const response = await this.promises.writeRelationships(request);
        return response.writtenAt?.token;
    }
    /** Delete the given tuples (DELETE updates via the WriteRelationships RPC). */
    async deleteRelationships(tuples) {
        const updates = tuples.map((t) => v1.RelationshipUpdate.create({
            operation: v1.RelationshipUpdate_Operation.DELETE,
            relationship: this.toRelationship(t),
        }));
        const request = v1.WriteRelationshipsRequest.create({ updates });
        await this.promises.writeRelationships(request);
    }
    /**
     * Delete every relationship on a resource, optionally limited to one relation.
     * @returns the deleted-at ZedToken when the server provides one.
     */
    async deleteRelationshipsByFilter(params) {
        const request = v1.DeleteRelationshipsRequest.create({
            relationshipFilter: v1.RelationshipFilter.create({
                resourceType: params.resourceType,
                optionalResourceId: params.resourceId,
                ...(params.relation ? { optionalRelation: params.relation } : {}),
            }),
        });
        const response = await this.promises.deleteRelationships(request);
        return response.deletedAt?.token;
    }
    // --------------------------------------------------------------------------
    // Bulk Import
    // --------------------------------------------------------------------------
    /** Convert a plain tuple into a v1.Relationship proto message. */
    toRelationship(t) {
        return v1.Relationship.create({
            resource: v1.ObjectReference.create({
                objectType: t.resourceType,
                objectId: t.resourceId,
            }),
            relation: t.relation,
            subject: v1.SubjectReference.create({
                object: v1.ObjectReference.create({
                    objectType: t.subjectType,
                    objectId: t.subjectId,
                }),
            }),
        });
    }
    /**
     * Bulk import relationships using the streaming ImportBulkRelationships RPC.
     * More efficient than individual writeRelationships calls for large batches.
     * Falls back to batched writeRelationships if the streaming RPC is unavailable.
     * @returns the number of relationships loaded.
     */
    async bulkImportRelationships(tuples, batchSize = 1000) {
        if (tuples.length === 0)
            return 0;
        // Try streaming bulk import first
        if (typeof this.promises.bulkImportRelationships === "function") {
            return this.bulkImportViaStream(tuples, batchSize);
        }
        // Fallback: batched writeRelationships
        return this.bulkImportViaWrite(tuples, batchSize);
    }
    /** Stream tuples in batchSize-sized chunks over the bulk-import RPC. */
    bulkImportViaStream(tuples, batchSize) {
        return new Promise((resolve, reject) => {
            const stream = this.promises.bulkImportRelationships((err, response) => {
                if (err)
                    reject(err);
                else
                    resolve(Number(response?.numLoaded ?? tuples.length));
            });
            stream.on("error", (err) => {
                reject(err);
            });
            for (let i = 0; i < tuples.length; i += batchSize) {
                const chunk = tuples.slice(i, i + batchSize);
                stream.write(v1.BulkImportRelationshipsRequest.create({
                    relationships: chunk.map((t) => this.toRelationship(t)),
                }));
            }
            stream.end();
        });
    }
    /** Fallback path: sequential batched WriteRelationships calls. */
    async bulkImportViaWrite(tuples, batchSize) {
        let total = 0;
        for (let i = 0; i < tuples.length; i += batchSize) {
            const chunk = tuples.slice(i, i + batchSize);
            await this.writeRelationships(chunk);
            total += chunk.length;
        }
        return total;
    }
    // --------------------------------------------------------------------------
    // Read Relationships
    // --------------------------------------------------------------------------
    /**
     * Read relationships matching a filter. Returns all tuples that match the
     * specified resource type, optional resource ID, optional relation, and
     * optional subject filter. Used by the cleanup command to find which
     * Graphiti episodes have SpiceDB authorization relationships.
     */
    async readRelationships(params) {
        const filterFields = {
            resourceType: params.resourceType,
        };
        if (params.resourceId) {
            filterFields.optionalResourceId = params.resourceId;
        }
        if (params.relation) {
            filterFields.optionalRelation = params.relation;
        }
        if (params.subjectType) {
            const subjectFilter = {
                subjectType: params.subjectType,
            };
            if (params.subjectId) {
                subjectFilter.optionalSubjectId = params.subjectId;
            }
            filterFields.optionalSubjectFilter = v1.SubjectFilter.create(subjectFilter);
        }
        const request = v1.ReadRelationshipsRequest.create({
            relationshipFilter: v1.RelationshipFilter.create(filterFields),
            consistency: this.buildConsistency(params.consistency),
        });
        const results = await this.promises.readRelationships(request);
        const tuples = [];
        for (const r of results) {
            const rel = r.relationship;
            // Skip malformed rows missing resource or subject references.
            if (!rel?.resource || !rel.subject?.object)
                continue;
            tuples.push({
                resourceType: rel.resource.objectType,
                resourceId: rel.resource.objectId,
                relation: rel.relation,
                subjectType: rel.subject.object.objectType,
                subjectId: rel.subject.object.objectId,
            });
        }
        return tuples;
    }
    // --------------------------------------------------------------------------
    // Permissions
    // --------------------------------------------------------------------------
    /** Map a ConsistencyMode to the v1.Consistency proto; defaults to minimize_latency. */
    buildConsistency(mode) {
        if (!mode || mode.mode === "minimize_latency") {
            return v1.Consistency.create({
                requirement: { oneofKind: "minimizeLatency", minimizeLatency: true },
            });
        }
        if (mode.mode === "at_least_as_fresh") {
            return v1.Consistency.create({
                requirement: {
                    oneofKind: "atLeastAsFresh",
                    atLeastAsFresh: v1.ZedToken.create({ token: mode.token }),
                },
            });
        }
        // mode.mode === "full"
        return v1.Consistency.create({
            requirement: { oneofKind: "fullyConsistent", fullyConsistent: true },
        });
    }
    /** Check whether the subject holds `permission` on the given resource. */
    async checkPermission(params) {
        const request = v1.CheckPermissionRequest.create({
            resource: v1.ObjectReference.create({
                objectType: params.resourceType,
                objectId: params.resourceId,
            }),
            permission: params.permission,
            subject: v1.SubjectReference.create({
                object: v1.ObjectReference.create({
                    objectType: params.subjectType,
                    objectId: params.subjectId,
                }),
            }),
            consistency: this.buildConsistency(params.consistency),
        });
        const response = await this.promises.checkPermission(request);
        return (response.permissionship ===
            v1.CheckPermissionResponse_Permissionship.HAS_PERMISSION);
    }
    /** Resolve all resource IDs of `resourceType` on which the subject holds `permission`. */
    async lookupResources(params) {
        const request = v1.LookupResourcesRequest.create({
            resourceObjectType: params.resourceType,
            permission: params.permission,
            subject: v1.SubjectReference.create({
                object: v1.ObjectReference.create({
                    objectType: params.subjectType,
                    objectId: params.subjectId,
                }),
            }),
            consistency: this.buildConsistency(params.consistency),
        });
        const results = await this.promises.lookupResources(request);
        return results.map((r) => r.resourceObjectId);
    }
}
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
# ============================================================================
|
|
2
|
+
# Graphiti FastAPI REST server configuration
|
|
3
|
+
# ============================================================================
|
|
4
|
+
# Each AI component (LLM, embedder, reranker) can independently point to a
|
|
5
|
+
# different service. If EMBEDDING_BASE_URL is not set, it defaults to
|
|
6
|
+
# LLM_BASE_URL (or the OpenAI API if LLM_BASE_URL is also unset).
|
|
7
|
+
|
|
8
|
+
# ============================================================================
|
|
9
|
+
# LLM (entity extraction)
|
|
10
|
+
# ============================================================================
|
|
11
|
+
# Default: OpenAI API. Set LLM_BASE_URL to use a different provider:
|
|
12
|
+
# Ollama: http://host.docker.internal:11434/v1
|
|
13
|
+
# vLLM: http://your-vllm-server:8000/v1
|
|
14
|
+
# OpenAI: (leave LLM_BASE_URL empty to use default)
|
|
15
|
+
LLM_BASE_URL=http://100.123.48.104:11434/v1
|
|
16
|
+
LLM_MODEL=qwen2.5:14b
|
|
17
|
+
LLM_API_KEY=not-needed
|
|
18
|
+
|
|
19
|
+
# ============================================================================
|
|
20
|
+
# Embedder
|
|
21
|
+
# ============================================================================
|
|
22
|
+
# Default: OpenAI text-embedding-3-small.
|
|
23
|
+
# If EMBEDDING_BASE_URL is empty, uses LLM_BASE_URL (or OpenAI default).
|
|
24
|
+
EMBEDDING_MODEL=nomic-embed-text
|
|
25
|
+
# EMBEDDING_BASE_URL=
|
|
26
|
+
# EMBEDDING_API_KEY=
|
|
27
|
+
EMBEDDING_DIM=768
|
|
28
|
+
|
|
29
|
+
# ============================================================================
|
|
30
|
+
# Reranker / cross-encoder
|
|
31
|
+
# ============================================================================
|
|
32
|
+
# Default: "bge" — runs BAAI/bge-reranker-v2-m3 locally via sentence-transformers.
|
|
33
|
+
# No API key or external service needed. Fast, accurate, free.
|
|
34
|
+
#
|
|
35
|
+
# Set to "openai" to use a remote LLM-based reranker instead.
|
|
36
|
+
RERANKER_PROVIDER=bge
|
|
37
|
+
# RERANKER_MODEL= # Only used when RERANKER_PROVIDER=openai
|
|
38
|
+
# RERANKER_BASE_URL= # Only used when RERANKER_PROVIDER=openai
|
|
39
|
+
# RERANKER_API_KEY= # Only used when RERANKER_PROVIDER=openai
|
|
40
|
+
|
|
41
|
+
# ============================================================================
|
|
42
|
+
# Neo4j
|
|
43
|
+
# ============================================================================
|
|
44
|
+
# NEO4J_USER=neo4j
|
|
45
|
+
# NEO4J_PASSWORD=graphiti_pw
|
|
46
|
+
|
|
47
|
+
# ============================================================================
|
|
48
|
+
# Server
|
|
49
|
+
# ============================================================================
|
|
50
|
+
# GRAPHITI_PORT=8000
|
|
@@ -98,6 +98,31 @@ def _create_reranker(settings: ExtendedSettings, llm_client):
|
|
|
98
98
|
return BGERerankerClient()
|
|
99
99
|
|
|
100
100
|
|
|
101
|
+
class JsonSafeLLMClient(OpenAIGenericClient):
    """Wrapper that ensures 'json' appears in messages when response_format is json_object.

    Groq (and some other providers) require the word 'json' in the messages
    when response_format={"type": "json_object"} is used. Graphiti's internal
    prompts don't always include it, causing 400 errors.
    """

    async def _generate_response(self, messages, response_model=None, **kwargs):
        # Check if any message already contains the word 'json' (case-insensitive).
        # NOTE(review): assumes m.content is always a str — TODO confirm this holds
        # for every message shape the base client accepts.
        has_json_mention = any('json' in m.content.lower() for m in messages)
        if not has_json_mention:
            # Inject into the first system message, or prepend one.
            # Mutates the caller's message list in place.
            injected = False
            for m in messages:
                if m.role == 'system':
                    m.content += '\nRespond in JSON format.'
                    injected = True
                    break
            if not injected:
                # Local import keeps graphiti_core off this module's import path
                # until the fallback is actually needed.
                from graphiti_core.prompts.models import Message
                messages.insert(0, Message(role='system', content='Respond in JSON format.'))
        return await super()._generate_response(messages, response_model, **kwargs)
|
|
124
|
+
|
|
125
|
+
|
|
101
126
|
def create_graphiti(settings: ExtendedSettings) -> OpenClawGraphiti:
|
|
102
127
|
"""Create an OpenClawGraphiti instance with per-component client configuration."""
|
|
103
128
|
|
|
@@ -109,7 +134,7 @@ def create_graphiti(settings: ExtendedSettings) -> OpenClawGraphiti:
|
|
|
109
134
|
if settings.model_name:
|
|
110
135
|
llm_config.model = settings.model_name
|
|
111
136
|
llm_config.small_model = settings.model_name
|
|
112
|
-
llm_client =
|
|
137
|
+
llm_client = JsonSafeLLMClient(config=llm_config)
|
|
113
138
|
|
|
114
139
|
# -- Embedder --
|
|
115
140
|
embedder_api_key = settings.embedding_api_key or settings.openai_api_key
|
|
@@ -86,6 +86,23 @@ def patch():
|
|
|
86
86
|
|
|
87
87
|
app.dependency_overrides[original_get_graphiti] = patched_get_graphiti
|
|
88
88
|
|
|
89
|
+
# -- Endpoint: entity edges extracted from a specific episode --
|
|
90
|
+
# graphiti-core stores an `episodes` list on each RELATES_TO relationship
|
|
91
|
+
# tracking which episodes contributed to that fact. This endpoint exposes
|
|
92
|
+
# those UUIDs so the plugin can write per-fact SpiceDB relationships.
|
|
93
|
+
@app.get("/episodes/{episode_uuid}/edges")
|
|
94
|
+
async def get_episode_edges(episode_uuid: str):
|
|
95
|
+
"""Return entity edge UUIDs that reference a specific episode."""
|
|
96
|
+
query = (
|
|
97
|
+
"MATCH ()-[r:RELATES_TO]-() "
|
|
98
|
+
"WHERE $episode_uuid IN r.episodes "
|
|
99
|
+
"RETURN DISTINCT r.uuid AS uuid"
|
|
100
|
+
)
|
|
101
|
+
records, _, _ = await singleton_client.driver.execute_query(
|
|
102
|
+
query, {"episode_uuid": episode_uuid}
|
|
103
|
+
)
|
|
104
|
+
return [{"uuid": r["uuid"]} for r in records]
|
|
105
|
+
|
|
89
106
|
# -- Fix upstream AsyncWorker crash-on-error bug --
|
|
90
107
|
# The worker loop only catches CancelledError; any other exception from
|
|
91
108
|
# add_episode() kills the worker silently and no more jobs are processed.
|
|
@@ -117,12 +134,27 @@ def patch():
|
|
|
117
134
|
bulk_mod = importlib.import_module("graphiti_core.utils.bulk_utils")
|
|
118
135
|
original_bulk_add = bulk_mod.add_nodes_and_edges_bulk
|
|
119
136
|
|
|
120
|
-
|
|
121
|
-
|
|
137
|
+
# Reserved keys that must not be overwritten by LLM-extracted attributes.
|
|
138
|
+
# See: https://github.com/contextablemark/openclaw-memory-rebac/issues/6
|
|
139
|
+
RESERVED_EDGE_KEYS = {
|
|
140
|
+
'uuid', 'source_node_uuid', 'target_node_uuid', 'name',
|
|
141
|
+
'fact', 'fact_embedding', 'group_id', 'episodes',
|
|
142
|
+
'created_at', 'expired_at', 'valid_at', 'invalid_at',
|
|
143
|
+
}
|
|
144
|
+
RESERVED_NODE_KEYS = {
|
|
145
|
+
'uuid', 'name', 'name_embedding', 'group_id', 'summary',
|
|
146
|
+
'created_at', 'labels',
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
def _sanitize_attributes(attrs, reserved_keys):
|
|
150
|
+
"""Flatten non-primitive values and strip reserved keys to prevent clobber."""
|
|
122
151
|
if not attrs:
|
|
123
152
|
return attrs
|
|
124
153
|
sanitized = {}
|
|
125
154
|
for k, v in attrs.items():
|
|
155
|
+
if k in reserved_keys:
|
|
156
|
+
logger.debug("Stripped reserved key %r from attributes", k)
|
|
157
|
+
continue
|
|
126
158
|
if isinstance(v, (dict, list, set, tuple)):
|
|
127
159
|
sanitized[k] = json.dumps(v, default=str)
|
|
128
160
|
else:
|
|
@@ -133,10 +165,32 @@ def patch():
|
|
|
133
165
|
entity_nodes, entity_edges, embedder):
|
|
134
166
|
for node in entity_nodes:
|
|
135
167
|
if node.attributes:
|
|
136
|
-
node.attributes = _sanitize_attributes(node.attributes)
|
|
168
|
+
node.attributes = _sanitize_attributes(node.attributes, RESERVED_NODE_KEYS)
|
|
137
169
|
for edge in entity_edges:
|
|
170
|
+
# DIAGNOSTIC: log clobber attempts BEFORE stripping (so we can
|
|
171
|
+
# verify the fix is catching them). Keep until confirmed in prod.
|
|
172
|
+
if edge.attributes and 'fact_embedding' in edge.attributes:
|
|
173
|
+
logger.warning(
|
|
174
|
+
"DIAG attributes_clobber: edge=%s has 'fact_embedding' in attributes! "
|
|
175
|
+
"value_type=%s (will be stripped)", edge.uuid,
|
|
176
|
+
type(edge.attributes.get('fact_embedding')),
|
|
177
|
+
)
|
|
138
178
|
if edge.attributes:
|
|
139
|
-
edge.attributes = _sanitize_attributes(edge.attributes)
|
|
179
|
+
edge.attributes = _sanitize_attributes(edge.attributes, RESERVED_EDGE_KEYS)
|
|
180
|
+
# DIAGNOSTIC: log edges with missing/invalid embeddings
|
|
181
|
+
emb = edge.fact_embedding
|
|
182
|
+
emb_ok = isinstance(emb, list) and len(emb) > 0 and all(isinstance(x, (int, float)) for x in emb[:5])
|
|
183
|
+
if not emb_ok:
|
|
184
|
+
logger.warning(
|
|
185
|
+
"DIAG bad_embedding: edge=%s name=%r type=%s len=%s "
|
|
186
|
+
"sample=%r fact=%r attrs_keys=%s src=%s tgt=%s",
|
|
187
|
+
edge.uuid, edge.name,
|
|
188
|
+
type(emb).__name__, len(emb) if isinstance(emb, (list, tuple)) else 'N/A',
|
|
189
|
+
emb[:3] if isinstance(emb, list) else emb,
|
|
190
|
+
edge.fact[:200] if edge.fact else None,
|
|
191
|
+
list((edge.attributes or {}).keys()),
|
|
192
|
+
edge.source_node_uuid, edge.target_node_uuid,
|
|
193
|
+
)
|
|
140
194
|
return await original_bulk_add(
|
|
141
195
|
driver, episodic_nodes, episodic_edges,
|
|
142
196
|
entity_nodes, entity_edges, embedder,
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
# SpiceDB configuration
|
|
2
|
+
|
|
3
|
+
# Preshared key for gRPC authentication
|
|
4
|
+
SPICEDB_PRESHARED_KEY=dev_token
|
|
5
|
+
|
|
6
|
+
# PostgreSQL password for SpiceDB backing store
|
|
7
|
+
# SPICEDB_POSTGRES_PASSWORD=spicedb
|
|
8
|
+
|
|
9
|
+
# Disable TLS for local development only. In production, set this to "false"
# (or leave it unset) so gRPC connections are made over TLS.
|
|
10
|
+
# SPICEDB_GRPC_NO_TLS=true
|
|
11
|
+
|
|
12
|
+
# Port overrides (defaults: gRPC 50051, HTTP 8080)
|
|
13
|
+
# SPICEDB_GRPC_PORT=50051
|
|
14
|
+
# SPICEDB_HTTP_PORT=8080
|