@memorylayerai/vercel-ai 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +57 -0
- package/dist/index.cjs +313 -0
- package/dist/index.d.cts +554 -0
- package/dist/index.d.ts +554 -0
- package/dist/index.js +280 -0
- package/package.json +48 -0
- package/src/graph.ts +250 -0
- package/src/index.ts +20 -0
- package/src/provider.ts +154 -0
- package/src/tools.ts +133 -0
- package/tests/graph.test.ts +316 -0
- package/tsconfig.json +27 -0
- package/vitest.config.ts +13 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
1
|
+
// src/provider.ts
|
|
2
|
+
function createMemoryLayerProvider(config) {
|
|
3
|
+
return {
|
|
4
|
+
specificationVersion: "v1",
|
|
5
|
+
provider: "memorylayer",
|
|
6
|
+
modelId: config.defaultModel || "default",
|
|
7
|
+
/**
|
|
8
|
+
* Generate a completion (non-streaming)
|
|
9
|
+
*/
|
|
10
|
+
async doGenerate(options) {
|
|
11
|
+
const messages = options.prompt.map((msg) => ({
|
|
12
|
+
role: msg.role,
|
|
13
|
+
content: msg.content
|
|
14
|
+
}));
|
|
15
|
+
const response = await config.client.router.complete({
|
|
16
|
+
messages,
|
|
17
|
+
projectId: config.projectId,
|
|
18
|
+
model: options.model || config.defaultModel,
|
|
19
|
+
temperature: options.temperature,
|
|
20
|
+
maxTokens: options.maxTokens
|
|
21
|
+
});
|
|
22
|
+
return {
|
|
23
|
+
text: response.choices[0].message.content,
|
|
24
|
+
finishReason: response.choices[0].finishReason,
|
|
25
|
+
usage: {
|
|
26
|
+
promptTokens: response.usage.promptTokens,
|
|
27
|
+
completionTokens: response.usage.completionTokens
|
|
28
|
+
},
|
|
29
|
+
rawCall: {
|
|
30
|
+
rawPrompt: messages,
|
|
31
|
+
rawSettings: {
|
|
32
|
+
model: options.model || config.defaultModel,
|
|
33
|
+
temperature: options.temperature,
|
|
34
|
+
maxTokens: options.maxTokens
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
};
|
|
38
|
+
},
|
|
39
|
+
/**
|
|
40
|
+
* Generate a streaming completion
|
|
41
|
+
*/
|
|
42
|
+
async doStream(options) {
|
|
43
|
+
const messages = options.prompt.map((msg) => ({
|
|
44
|
+
role: msg.role,
|
|
45
|
+
content: msg.content
|
|
46
|
+
}));
|
|
47
|
+
const stream = config.client.router.stream({
|
|
48
|
+
messages,
|
|
49
|
+
projectId: config.projectId,
|
|
50
|
+
model: options.model || config.defaultModel,
|
|
51
|
+
temperature: options.temperature,
|
|
52
|
+
maxTokens: options.maxTokens,
|
|
53
|
+
stream: true
|
|
54
|
+
});
|
|
55
|
+
const convertedStream = convertToVercelStream(stream);
|
|
56
|
+
return {
|
|
57
|
+
stream: convertedStream,
|
|
58
|
+
rawCall: {
|
|
59
|
+
rawPrompt: messages,
|
|
60
|
+
rawSettings: {
|
|
61
|
+
model: options.model || config.defaultModel,
|
|
62
|
+
temperature: options.temperature,
|
|
63
|
+
maxTokens: options.maxTokens
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
};
|
|
67
|
+
}
|
|
68
|
+
};
|
|
69
|
+
}
|
|
70
|
+
async function* convertToVercelStream(stream) {
|
|
71
|
+
try {
|
|
72
|
+
for await (const chunk of stream) {
|
|
73
|
+
yield {
|
|
74
|
+
type: "text-delta",
|
|
75
|
+
textDelta: chunk.choices[0]?.delta?.content || ""
|
|
76
|
+
};
|
|
77
|
+
if (chunk.choices[0]?.finishReason) {
|
|
78
|
+
yield {
|
|
79
|
+
type: "finish",
|
|
80
|
+
finishReason: chunk.choices[0].finishReason
|
|
81
|
+
};
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
} catch (error) {
|
|
85
|
+
throw {
|
|
86
|
+
name: "AI_APICallError",
|
|
87
|
+
message: error instanceof Error ? error.message : "Unknown error",
|
|
88
|
+
cause: error
|
|
89
|
+
};
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
// src/tools.ts
|
|
94
|
+
import { z } from "zod";
|
|
95
|
+
/**
 * Build a tool definition that stores a memory in MemoryLayer.
 *
 * The tool accepts free-form content plus optional metadata and reports
 * success/failure in its result object instead of throwing, so the model can
 * react to storage errors.
 */
function memoryTool(config) {
  const parameters = z.object({
    content: z.string().describe("The content to remember"),
    metadata: z.record(z.any()).optional().describe("Optional metadata to associate with the memory")
  });

  const execute = async ({ content, metadata }) => {
    try {
      const memory = await config.client.memories.add({
        content,
        metadata,
        projectId: config.projectId
      });
      return {
        success: true,
        memoryId: memory.id,
        message: `Memory stored successfully with ID: ${memory.id}`
      };
    } catch (error) {
      return {
        success: false,
        error: error instanceof Error ? error.message : "Unknown error"
      };
    }
  };

  return {
    description: "Add a memory to the MemoryLayer system for long-term storage",
    parameters,
    execute
  };
}
|
|
123
|
+
/**
 * Build a tool definition that searches MemoryLayer memories.
 *
 * Results are flattened to { content, score, metadata, id }; errors are
 * reported via `success: false` in the result rather than thrown.
 */
function searchTool(config) {
  // Flatten a search hit into the plain shape returned to the model.
  const formatResult = (r) => ({
    content: r.memory.content,
    score: r.score,
    metadata: r.memory.metadata,
    id: r.memory.id
  });

  return {
    description: "Search memories in the MemoryLayer system to retrieve relevant information",
    parameters: z.object({
      query: z.string().describe("The search query to find relevant memories"),
      limit: z.number().optional().describe("Maximum number of results to return (default: 10)"),
      threshold: z.number().optional().describe("Minimum relevance score threshold (0-1)")
    }),
    execute: async ({ query, limit, threshold }) => {
      try {
        const response = await config.client.search.search({
          query,
          projectId: config.projectId,
          limit,
          threshold
        });
        return {
          success: true,
          results: response.results.map(formatResult),
          total: response.total
        };
      } catch (error) {
        return {
          success: false,
          error: error instanceof Error ? error.message : "Unknown error"
        };
      }
    }
  };
}
|
|
158
|
+
|
|
159
|
+
// src/graph.ts
|
|
160
|
+
import { tool } from "ai";
|
|
161
|
+
import { z as z2 } from "zod";
|
|
162
|
+
/**
 * Tool: fetch memory graph nodes and edges, with optional type and
 * creation-date filters (compiled from src/graph.ts).
 */
function graphTool(config) {
  // Projections from SDK graph objects to the plain shapes returned to the model.
  const nodeView = (n) => ({
    id: n.id,
    type: n.type,
    label: n.label,
    status: n.data.status,
    createdAt: n.data.createdAt
  });
  const edgeView = (e) => ({
    id: e.id,
    source: e.source,
    target: e.target,
    type: e.type,
    label: e.label
  });

  return tool({
    description: "Fetch memory graph data showing nodes (memories, documents, entities) and their relationships",
    parameters: z2.object({
      limit: z2.number().optional().describe("Maximum number of nodes to return (default: 100)"),
      nodeTypes: z2.array(z2.enum(["memory", "document", "entity"])).optional().describe("Filter by node types"),
      relationshipTypes: z2.array(z2.enum(["extends", "updates", "derives", "similarity"])).optional().describe("Filter by relationship types"),
      startDate: z2.string().optional().describe("Filter nodes created after this date (ISO 8601)"),
      endDate: z2.string().optional().describe("Filter nodes created before this date (ISO 8601)")
    }),
    execute: async ({ limit, nodeTypes, relationshipTypes, startDate, endDate }) => {
      const graphData = await config.client.graph.getGraph({
        spaceId: config.spaceId,
        limit,
        nodeTypes,
        relationshipTypes,
        startDate,
        endDate
      });
      return {
        nodes: graphData.nodes.map(nodeView),
        edges: graphData.edges.map(edgeView),
        metadata: graphData.metadata
      };
    }
  });
}
|
|
201
|
+
/**
 * Tool: fetch full details for one graph node, plus summaries of its
 * neighbours and connecting edges (compiled from src/graph.ts).
 */
function nodeDetailsTool(config) {
  const nodeView = (n) => ({ id: n.id, type: n.type, label: n.label });
  const edgeView = (e) => ({
    id: e.id,
    source: e.source,
    target: e.target,
    type: e.type,
    label: e.label
  });

  return tool({
    description: "Get detailed information about a specific node in the memory graph",
    parameters: z2.object({
      nodeId: z2.string().describe("The ID of the node to fetch details for")
    }),
    execute: async ({ nodeId }) => {
      const details = await config.client.graph.getNodeDetails({ nodeId });
      const { node } = details;
      return {
        node: {
          id: node.id,
          type: node.type,
          label: node.label,
          content: node.data.content,
          status: node.data.status,
          createdAt: node.data.createdAt,
          expiresAt: node.data.expiresAt,
          metadata: node.data.metadata
        },
        connectedNodes: details.connectedNodes.map(nodeView),
        edges: details.edges.map(edgeView)
      };
    }
  });
}
|
|
236
|
+
/**
 * Tool: fetch edges for one graph node (optionally filtered by edge type)
 * together with summaries of the nodes they connect to (compiled from
 * src/graph.ts).
 */
function nodeEdgesTool(config) {
  const edgeView = (e) => ({
    id: e.id,
    source: e.source,
    target: e.target,
    type: e.type,
    label: e.label
  });
  const nodeView = (n) => ({ id: n.id, type: n.type, label: n.label });

  return tool({
    description: "Get edges and connected nodes for a specific node in the memory graph",
    parameters: z2.object({
      nodeId: z2.string().describe("The ID of the node to fetch edges for"),
      edgeTypes: z2.array(z2.enum(["extends", "updates", "derives", "similarity"])).optional().describe("Filter by edge types")
    }),
    execute: async ({ nodeId, edgeTypes }) => {
      const result = await config.client.graph.getNodeEdges({ nodeId, edgeTypes });
      return {
        edges: result.edges.map(edgeView),
        connectedNodes: result.connectedNodes.map(nodeView)
      };
    }
  });
}
|
|
265
|
+
/**
 * Convenience bundle of all graph tools, keyed by the names the model will
 * call them by.
 */
function createGraphTools(config) {
  const getGraph = graphTool(config);
  const getNodeDetails = nodeDetailsTool(config);
  const getNodeEdges = nodeEdgesTool(config);
  return { getGraph, getNodeDetails, getNodeEdges };
}
|
|
272
|
+
export {
|
|
273
|
+
createGraphTools,
|
|
274
|
+
createMemoryLayerProvider,
|
|
275
|
+
graphTool,
|
|
276
|
+
memoryTool,
|
|
277
|
+
nodeDetailsTool,
|
|
278
|
+
nodeEdgesTool,
|
|
279
|
+
searchTool
|
|
280
|
+
};
|
package/package.json
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@memorylayerai/vercel-ai",
|
|
3
|
+
"version": "0.2.0",
|
|
4
|
+
"description": "Vercel AI SDK integration for MemoryLayer",
|
|
5
|
+
"main": "dist/index.js",
|
|
6
|
+
"module": "dist/index.mjs",
|
|
7
|
+
"types": "dist/index.d.ts",
|
|
8
|
+
"type": "module",
|
|
9
|
+
"exports": {
|
|
10
|
+
".": {
|
|
11
|
+
"import": "./dist/index.mjs",
|
|
12
|
+
"require": "./dist/index.js",
|
|
13
|
+
"types": "./dist/index.d.ts"
|
|
14
|
+
}
|
|
15
|
+
},
|
|
16
|
+
"scripts": {
|
|
17
|
+
"build": "tsup src/index.ts --format cjs,esm --dts",
|
|
18
|
+
"test": "vitest",
|
|
19
|
+
"test:watch": "vitest --watch",
|
|
20
|
+
"lint": "eslint src --ext .ts",
|
|
21
|
+
"typecheck": "tsc --noEmit"
|
|
22
|
+
},
|
|
23
|
+
"keywords": [
|
|
24
|
+
"memorylayer",
|
|
25
|
+
"vercel",
|
|
26
|
+
"ai-sdk",
|
|
27
|
+
"memory",
|
|
28
|
+
"ai"
|
|
29
|
+
],
|
|
30
|
+
"author": "MemoryLayer",
|
|
31
|
+
"license": "MIT",
|
|
32
|
+
"peerDependencies": {
|
|
33
|
+
"@memorylayerai/sdk": "^0.3.1",
|
|
34
|
+
"ai": "^3.0.0",
|
|
35
|
+
"zod": "^3.22.4"
|
|
36
|
+
},
|
|
37
|
+
"devDependencies": {
|
|
38
|
+
"@types/node": "^20.11.30",
|
|
39
|
+
"ai": "^3.4.33",
|
|
40
|
+
"fast-check": "^3.17.1",
|
|
41
|
+
"tsup": "^8.0.2",
|
|
42
|
+
"typescript": "^5.4.3",
|
|
43
|
+
"vitest": "^1.4.0"
|
|
44
|
+
},
|
|
45
|
+
"engines": {
|
|
46
|
+
"node": ">=16.0.0"
|
|
47
|
+
}
|
|
48
|
+
}
|
package/src/graph.ts
ADDED
|
@@ -0,0 +1,250 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Graph visualization helpers for Vercel AI SDK integration
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import type { MemoryLayerClient, GraphNode, GraphEdge } from '@memorylayerai/sdk';
|
|
6
|
+
import { tool } from 'ai';
|
|
7
|
+
import { z } from 'zod';
|
|
8
|
+
|
|
9
|
+
/**
 * Configuration for graph tools
 *
 * Shared by {@link graphTool}, {@link nodeDetailsTool}, {@link nodeEdgesTool}
 * and {@link createGraphTools}.
 */
export interface GraphToolConfig {
  /** MemoryLayer client instance used to issue graph API calls */
  client: MemoryLayerClient;
  /** Space/project ID for graph context (e.g. 'proj_abc123') */
  spaceId: string;
}
|
|
18
|
+
|
|
19
|
+
/**
|
|
20
|
+
* Create a tool for fetching graph data
|
|
21
|
+
*
|
|
22
|
+
* This tool allows AI models to fetch and visualize memory graph data.
|
|
23
|
+
*
|
|
24
|
+
* @param config - Graph tool configuration
|
|
25
|
+
* @returns Vercel AI SDK compatible tool
|
|
26
|
+
*
|
|
27
|
+
* @example
|
|
28
|
+
* ```typescript
|
|
29
|
+
* import { graphTool } from '@memorylayer/vercel-ai';
|
|
30
|
+
* import { MemoryLayerClient } from '@memorylayer/sdk';
|
|
31
|
+
* import { generateText } from 'ai';
|
|
32
|
+
*
|
|
33
|
+
* const client = new MemoryLayerClient({
|
|
34
|
+
* apiKey: process.env.MEMORYLAYER_API_KEY!,
|
|
35
|
+
* });
|
|
36
|
+
*
|
|
37
|
+
* const { text } = await generateText({
|
|
38
|
+
* model: openai('gpt-4'),
|
|
39
|
+
* prompt: 'Show me the memory graph',
|
|
40
|
+
* tools: {
|
|
41
|
+
* getGraph: graphTool({ client, spaceId: 'proj_abc123' }),
|
|
42
|
+
* },
|
|
43
|
+
* });
|
|
44
|
+
* ```
|
|
45
|
+
*
|
|
46
|
+
* Requirements: 6.1
|
|
47
|
+
*/
|
|
48
|
+
export function graphTool(config: GraphToolConfig) {
|
|
49
|
+
return tool({
|
|
50
|
+
description: 'Fetch memory graph data showing nodes (memories, documents, entities) and their relationships',
|
|
51
|
+
parameters: z.object({
|
|
52
|
+
limit: z.number().optional().describe('Maximum number of nodes to return (default: 100)'),
|
|
53
|
+
nodeTypes: z.array(z.enum(['memory', 'document', 'entity'])).optional().describe('Filter by node types'),
|
|
54
|
+
relationshipTypes: z.array(z.enum(['extends', 'updates', 'derives', 'similarity'])).optional().describe('Filter by relationship types'),
|
|
55
|
+
startDate: z.string().optional().describe('Filter nodes created after this date (ISO 8601)'),
|
|
56
|
+
endDate: z.string().optional().describe('Filter nodes created before this date (ISO 8601)'),
|
|
57
|
+
}),
|
|
58
|
+
execute: async ({ limit, nodeTypes, relationshipTypes, startDate, endDate }) => {
|
|
59
|
+
const graphData = await config.client.graph.getGraph({
|
|
60
|
+
spaceId: config.spaceId,
|
|
61
|
+
limit,
|
|
62
|
+
nodeTypes,
|
|
63
|
+
relationshipTypes,
|
|
64
|
+
startDate,
|
|
65
|
+
endDate,
|
|
66
|
+
});
|
|
67
|
+
|
|
68
|
+
return {
|
|
69
|
+
nodes: graphData.nodes.map((n: GraphNode) => ({
|
|
70
|
+
id: n.id,
|
|
71
|
+
type: n.type,
|
|
72
|
+
label: n.label,
|
|
73
|
+
status: n.data.status,
|
|
74
|
+
createdAt: n.data.createdAt,
|
|
75
|
+
})),
|
|
76
|
+
edges: graphData.edges.map((e: GraphEdge) => ({
|
|
77
|
+
id: e.id,
|
|
78
|
+
source: e.source,
|
|
79
|
+
target: e.target,
|
|
80
|
+
type: e.type,
|
|
81
|
+
label: e.label,
|
|
82
|
+
})),
|
|
83
|
+
metadata: graphData.metadata,
|
|
84
|
+
};
|
|
85
|
+
},
|
|
86
|
+
});
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
/**
|
|
90
|
+
* Create a tool for fetching node details
|
|
91
|
+
*
|
|
92
|
+
* This tool allows AI models to get detailed information about a specific node.
|
|
93
|
+
*
|
|
94
|
+
* @param config - Graph tool configuration
|
|
95
|
+
* @returns Vercel AI SDK compatible tool
|
|
96
|
+
*
|
|
97
|
+
* @example
|
|
98
|
+
* ```typescript
|
|
99
|
+
* import { nodeDetailsTool } from '@memorylayer/vercel-ai';
|
|
100
|
+
* import { MemoryLayerClient } from '@memorylayer/sdk';
|
|
101
|
+
* import { generateText } from 'ai';
|
|
102
|
+
*
|
|
103
|
+
* const client = new MemoryLayerClient({
|
|
104
|
+
* apiKey: process.env.MEMORYLAYER_API_KEY!,
|
|
105
|
+
* });
|
|
106
|
+
*
|
|
107
|
+
* const { text } = await generateText({
|
|
108
|
+
* model: openai('gpt-4'),
|
|
109
|
+
* prompt: 'Tell me about memory node-123',
|
|
110
|
+
* tools: {
|
|
111
|
+
* getNodeDetails: nodeDetailsTool({ client, spaceId: 'proj_abc123' }),
|
|
112
|
+
* },
|
|
113
|
+
* });
|
|
114
|
+
* ```
|
|
115
|
+
*
|
|
116
|
+
* Requirements: 6.2
|
|
117
|
+
*/
|
|
118
|
+
export function nodeDetailsTool(config: GraphToolConfig) {
|
|
119
|
+
return tool({
|
|
120
|
+
description: 'Get detailed information about a specific node in the memory graph',
|
|
121
|
+
parameters: z.object({
|
|
122
|
+
nodeId: z.string().describe('The ID of the node to fetch details for'),
|
|
123
|
+
}),
|
|
124
|
+
execute: async ({ nodeId }) => {
|
|
125
|
+
const details = await config.client.graph.getNodeDetails({ nodeId });
|
|
126
|
+
|
|
127
|
+
return {
|
|
128
|
+
node: {
|
|
129
|
+
id: details.node.id,
|
|
130
|
+
type: details.node.type,
|
|
131
|
+
label: details.node.label,
|
|
132
|
+
content: details.node.data.content,
|
|
133
|
+
status: details.node.data.status,
|
|
134
|
+
createdAt: details.node.data.createdAt,
|
|
135
|
+
expiresAt: details.node.data.expiresAt,
|
|
136
|
+
metadata: details.node.data.metadata,
|
|
137
|
+
},
|
|
138
|
+
connectedNodes: details.connectedNodes.map((n: GraphNode) => ({
|
|
139
|
+
id: n.id,
|
|
140
|
+
type: n.type,
|
|
141
|
+
label: n.label,
|
|
142
|
+
})),
|
|
143
|
+
edges: details.edges.map((e: GraphEdge) => ({
|
|
144
|
+
id: e.id,
|
|
145
|
+
source: e.source,
|
|
146
|
+
target: e.target,
|
|
147
|
+
type: e.type,
|
|
148
|
+
label: e.label,
|
|
149
|
+
})),
|
|
150
|
+
};
|
|
151
|
+
},
|
|
152
|
+
});
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
/**
|
|
156
|
+
* Create a tool for fetching node edges
|
|
157
|
+
*
|
|
158
|
+
* This tool allows AI models to get edges connected to a specific node.
|
|
159
|
+
*
|
|
160
|
+
* @param config - Graph tool configuration
|
|
161
|
+
* @returns Vercel AI SDK compatible tool
|
|
162
|
+
*
|
|
163
|
+
* @example
|
|
164
|
+
* ```typescript
|
|
165
|
+
* import { nodeEdgesTool } from '@memorylayer/vercel-ai';
|
|
166
|
+
* import { MemoryLayerClient } from '@memorylayer/sdk';
|
|
167
|
+
* import { generateText } from 'ai';
|
|
168
|
+
*
|
|
169
|
+
* const client = new MemoryLayerClient({
|
|
170
|
+
* apiKey: process.env.MEMORYLAYER_API_KEY!,
|
|
171
|
+
* });
|
|
172
|
+
*
|
|
173
|
+
* const { text } = await generateText({
|
|
174
|
+
* model: openai('gpt-4'),
|
|
175
|
+
* prompt: 'What is connected to memory node-123?',
|
|
176
|
+
* tools: {
|
|
177
|
+
* getNodeEdges: nodeEdgesTool({ client, spaceId: 'proj_abc123' }),
|
|
178
|
+
* },
|
|
179
|
+
* });
|
|
180
|
+
* ```
|
|
181
|
+
*
|
|
182
|
+
* Requirements: 6.3
|
|
183
|
+
*/
|
|
184
|
+
export function nodeEdgesTool(config: GraphToolConfig) {
|
|
185
|
+
return tool({
|
|
186
|
+
description: 'Get edges and connected nodes for a specific node in the memory graph',
|
|
187
|
+
parameters: z.object({
|
|
188
|
+
nodeId: z.string().describe('The ID of the node to fetch edges for'),
|
|
189
|
+
edgeTypes: z.array(z.enum(['extends', 'updates', 'derives', 'similarity'])).optional().describe('Filter by edge types'),
|
|
190
|
+
}),
|
|
191
|
+
execute: async ({ nodeId, edgeTypes }) => {
|
|
192
|
+
const result = await config.client.graph.getNodeEdges({
|
|
193
|
+
nodeId,
|
|
194
|
+
edgeTypes,
|
|
195
|
+
});
|
|
196
|
+
|
|
197
|
+
return {
|
|
198
|
+
edges: result.edges.map((e: GraphEdge) => ({
|
|
199
|
+
id: e.id,
|
|
200
|
+
source: e.source,
|
|
201
|
+
target: e.target,
|
|
202
|
+
type: e.type,
|
|
203
|
+
label: e.label,
|
|
204
|
+
})),
|
|
205
|
+
connectedNodes: result.connectedNodes.map((n: GraphNode) => ({
|
|
206
|
+
id: n.id,
|
|
207
|
+
type: n.type,
|
|
208
|
+
label: n.label,
|
|
209
|
+
})),
|
|
210
|
+
};
|
|
211
|
+
},
|
|
212
|
+
});
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
/**
|
|
216
|
+
* Create all graph tools at once
|
|
217
|
+
*
|
|
218
|
+
* Convenience function to create all graph-related tools.
|
|
219
|
+
*
|
|
220
|
+
* @param config - Graph tool configuration
|
|
221
|
+
* @returns Object with all graph tools
|
|
222
|
+
*
|
|
223
|
+
* @example
|
|
224
|
+
* ```typescript
|
|
225
|
+
* import { createGraphTools } from '@memorylayer/vercel-ai';
|
|
226
|
+
* import { MemoryLayerClient } from '@memorylayer/sdk';
|
|
227
|
+
* import { generateText } from 'ai';
|
|
228
|
+
*
|
|
229
|
+
* const client = new MemoryLayerClient({
|
|
230
|
+
* apiKey: process.env.MEMORYLAYER_API_KEY!,
|
|
231
|
+
* });
|
|
232
|
+
*
|
|
233
|
+
* const graphTools = createGraphTools({ client, spaceId: 'proj_abc123' });
|
|
234
|
+
*
|
|
235
|
+
* const { text } = await generateText({
|
|
236
|
+
* model: openai('gpt-4'),
|
|
237
|
+
* prompt: 'Analyze my memory graph',
|
|
238
|
+
* tools: graphTools,
|
|
239
|
+
* });
|
|
240
|
+
* ```
|
|
241
|
+
*
|
|
242
|
+
* Requirements: 6.1, 6.2, 6.3
|
|
243
|
+
*/
|
|
244
|
+
export function createGraphTools(config: GraphToolConfig) {
|
|
245
|
+
return {
|
|
246
|
+
getGraph: graphTool(config),
|
|
247
|
+
getNodeDetails: nodeDetailsTool(config),
|
|
248
|
+
getNodeEdges: nodeEdgesTool(config),
|
|
249
|
+
};
|
|
250
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* MemoryLayer Vercel AI SDK Integration
|
|
3
|
+
*
|
|
4
|
+
* Provides provider adapter and tool helpers for integrating MemoryLayer
|
|
5
|
+
* with the Vercel AI SDK.
|
|
6
|
+
*
|
|
7
|
+
* @packageDocumentation
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
// Provider adapter
|
|
11
|
+
export { createMemoryLayerProvider } from './provider.js';
|
|
12
|
+
export type { MemoryLayerProviderConfig } from './provider.js';
|
|
13
|
+
|
|
14
|
+
// Tool helpers
|
|
15
|
+
export { memoryTool, searchTool } from './tools.js';
|
|
16
|
+
export type { MemoryToolConfig } from './tools.js';
|
|
17
|
+
|
|
18
|
+
// Graph tools
|
|
19
|
+
export { graphTool, nodeDetailsTool, nodeEdgesTool, createGraphTools } from './graph.js';
|
|
20
|
+
export type { GraphToolConfig } from './graph.js';
|