@memvid/sdk 2.0.113
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +190 -0
- package/README.md +244 -0
- package/dist/__tests__/basic.test.d.ts +1 -0
- package/dist/__tests__/basic.test.js +41 -0
- package/dist/adapters/autogen.d.ts +23 -0
- package/dist/adapters/autogen.js +163 -0
- package/dist/adapters/basic.d.ts +1 -0
- package/dist/adapters/basic.js +11 -0
- package/dist/adapters/crewai.d.ts +23 -0
- package/dist/adapters/crewai.js +160 -0
- package/dist/adapters/google_adk.d.ts +25 -0
- package/dist/adapters/google_adk.js +158 -0
- package/dist/adapters/haystack.d.ts +1 -0
- package/dist/adapters/haystack.js +11 -0
- package/dist/adapters/langchain.d.ts +28 -0
- package/dist/adapters/langchain.js +156 -0
- package/dist/adapters/langgraph.d.ts +1 -0
- package/dist/adapters/langgraph.js +11 -0
- package/dist/adapters/llamaindex.d.ts +33 -0
- package/dist/adapters/llamaindex.js +195 -0
- package/dist/adapters/mcp.d.ts +1 -0
- package/dist/adapters/mcp.js +11 -0
- package/dist/adapters/openai.d.ts +26 -0
- package/dist/adapters/openai.js +169 -0
- package/dist/adapters/semantic_kernel.d.ts +1 -0
- package/dist/adapters/semantic_kernel.js +11 -0
- package/dist/adapters/vercel_ai.d.ts +27 -0
- package/dist/adapters/vercel_ai.js +148 -0
- package/dist/clip.d.ts +182 -0
- package/dist/clip.js +371 -0
- package/dist/embeddings.d.ts +156 -0
- package/dist/embeddings.js +289 -0
- package/dist/entities.d.ts +251 -0
- package/dist/entities.js +489 -0
- package/dist/error.d.ts +91 -0
- package/dist/error.js +203 -0
- package/dist/index.d.ts +53 -0
- package/dist/index.js +458 -0
- package/dist/noop.d.ts +2 -0
- package/dist/noop.js +55 -0
- package/dist/registry.d.ts +5 -0
- package/dist/registry.js +53 -0
- package/dist/types.d.ts +275 -0
- package/dist/types.js +2 -0
- package/index.node +0 -0
- package/package.json +81 -0

package/dist/adapters/llamaindex.js
@@ -0,0 +1,195 @@
+"use strict";
+/**
+ * LlamaIndex adapter exposing Memvid helpers as FunctionTool and QueryEngineTool.
+ *
+ * LlamaIndex.TS uses FunctionTool for wrapping functions and QueryEngineTool
+ * for wrapping query engines. This adapter provides:
+ * - memvid_put: Store documents in memory
+ * - memvid_find: Search for relevant documents
+ * - memvid_ask: Query with RAG-style answer synthesis
+ * - asQueryEngine(): Factory for creating a LlamaIndex QueryEngine
+ *
+ * Usage:
+ *   import { use } from "memvid-sdk";
+ *
+ *   const mem = await use("llamaindex", "knowledge.mv2");
+ *
+ *   // Access tools for LlamaIndex agents
+ *   const tools = mem.tools; // Array of FunctionTool objects
+ *
+ *   // Use with LlamaIndex agent
+ *   import { OpenAIAgent } from "llamaindex";
+ *
+ *   const agent = new OpenAIAgent({
+ *     tools: mem.tools,
+ *   });
+ *   const response = await agent.chat({ message: "What do you know about X?" });
+ *
+ *   // Or use as a QueryEngine
+ *   const queryEngine = mem.asQueryEngine();
+ *   const result = await queryEngine.query({ query: "Summarize the key points" });
+ *
+ * Note: Requires llamaindex package to be installed.
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+const noop_1 = require("../noop");
+const registry_1 = require("../registry");
+const KIND = "llamaindex";
+// Dynamic import helper to avoid TypeScript module resolution issues
+async function tryImportLlamaIndex() {
+    try {
+        // Use require for better compatibility with npm module resolution
+        // eslint-disable-next-line @typescript-eslint/no-var-requires
+        const llamaindexModule = require("llamaindex");
+        return {
+            FunctionTool: llamaindexModule.FunctionTool,
+        };
+    }
+    catch {
+        return null;
+    }
+}
+(0, registry_1.register)(KIND, async (core, _apiKey) => {
+    // Try to import LlamaIndex
+    const imports = await tryImportLlamaIndex();
+    if (!imports) {
+        return {
+            tools: (0, noop_1.createNoOp)("llamaindex adapter unavailable; install 'llamaindex' to enable", "memvid.adapters.llamaindex.tools"),
+            functions: [],
+            nodes: (0, noop_1.createNoOp)("llamaindex adapter nodes unavailable", "memvid.adapters.llamaindex.nodes"),
+            asQueryEngine: null,
+        };
+    }
+    const { FunctionTool } = imports;
+    // Define memvid_put function
+    async function memvidPut(params) {
+        const frameId = await core.put({
+            title: params.title,
+            label: params.label,
+            text: params.text,
+            metadata: params.metadata ?? {},
+            enable_embedding: true,
+            auto_tag: true,
+            extract_dates: true,
+        });
+        return `Document stored with frame_id: ${frameId}`;
+    }
+    // Define memvid_find function
+    async function memvidFind(params) {
+        const response = (await core.find(params.query, {
+            k: params.top_k ?? 5,
+        }));
+        const hits = response.hits ?? [];
+        if (hits.length === 0) {
+            return `No results found for query: '${params.query}'`;
+        }
+        const results = [];
+        for (let i = 0; i < hits.length; i++) {
+            const hit = hits[i];
+            const title = hit.title ?? "Untitled";
+            const snippet = (hit.text ?? hit.snippet ?? "").slice(0, 200);
+            const score = hit.score ?? 0;
+            results.push(`${i + 1}. [${title}] (score: ${score.toFixed(2)}): ${snippet}...`);
+        }
+        return `Found ${hits.length} results:\n${results.join("\n")}`;
+    }
+    // Define memvid_ask function
+    async function memvidAsk(params) {
+        const response = (await core.ask(params.question, {
+            mode: params.mode ?? "auto",
+        }));
+        const answer = response.answer ?? "No answer generated";
+        const sources = response.sources ?? [];
+        let result = `Answer: ${answer}`;
+        if (sources.length > 0) {
+            const sourceTitles = sources.slice(0, 3).map((s) => s.title ?? "Unknown");
+            result += `\n\nSources: ${sourceTitles.join(", ")}`;
+        }
+        return result;
+    }
+    // JSON schemas for LlamaIndex FunctionTool
+    const putSchema = {
+        type: "object",
+        properties: {
+            title: { type: "string", description: "Title of the document" },
+            label: { type: "string", description: "Category or label for the document" },
+            text: { type: "string", description: "Text content to store" },
+            metadata: {
+                type: "object",
+                description: "Optional key-value metadata",
+                additionalProperties: true,
+            },
+        },
+        required: ["title", "label", "text"],
+    };
+    const findSchema = {
+        type: "object",
+        properties: {
+            query: { type: "string", description: "Search query string" },
+            top_k: { type: "number", description: "Number of results to return (default: 5)" },
+        },
+        required: ["query"],
+    };
+    const askSchema = {
+        type: "object",
+        properties: {
+            question: { type: "string", description: "Question to answer" },
+            mode: {
+                type: "string",
+                enum: ["auto", "lex", "sem"],
+                description: "Search mode: 'auto' (hybrid), 'lex' (keyword), or 'sem' (semantic)",
+            },
+        },
+        required: ["question"],
+    };
+    // Create FunctionTool instances
+    const tools = [
+        FunctionTool.from(memvidPut, {
+            name: "memvid_put",
+            description: "Store a document in Memvid memory. Use this to save information " +
+                "that should be retrievable later.",
+            parameters: putSchema,
+        }),
+        FunctionTool.from(memvidFind, {
+            name: "memvid_find",
+            description: "Search Memvid memory for documents matching a query. " +
+                "Returns the most relevant documents with snippets.",
+            parameters: findSchema,
+        }),
+        FunctionTool.from(memvidAsk, {
+            name: "memvid_ask",
+            description: "Ask a question and get an answer synthesized from Memvid memory " +
+                "using retrieval-augmented generation.",
+            parameters: askSchema,
+        }),
+    ];
+    // Query engine factory
+    const asQueryEngine = () => {
+        // Return a simple query engine-like object
+        // Note: For full LlamaIndex QueryEngine, users should use the native LlamaIndex integration
+        return {
+            async query(params) {
+                const queryStr = typeof params === "string" ? params : params.query;
+                const response = (await core.ask(queryStr));
+                const answer = response.answer ?? "No answer generated";
+                const sources = response.sources ?? [];
+                // Build source text
+                let sourceText = "";
+                if (sources.length > 0) {
+                    const sourceTitles = sources.slice(0, 3).map((s) => s.title ?? "Unknown");
+                    sourceText = `\n\nSources: ${sourceTitles.join(", ")}`;
+                }
+                return {
+                    response: answer + sourceText,
+                    sourceNodes: sources,
+                };
+            },
+        };
+    };
+    return {
+        tools,
+        functions: [memvidPut, memvidFind, memvidAsk],
+        nodes: (0, noop_1.createNoOp)("llamaindex adapter nodes not provided", "memvid.adapters.llamaindex.nodes"),
+        asQueryEngine,
+    };
+});
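
A minimal consumption sketch based on the adapter above, assuming the `use()` entry point shown in its doc comment; it relies only on the `{ response, sourceNodes }` shape that the adapter's `asQueryEngine().query()` returns, so it is a usage hint rather than a definitive API:

```ts
import { use } from "memvid-sdk";

// Open a memory file with the LlamaIndex adapter registered above.
const mem = await use("llamaindex", "knowledge.mv2");

// The factory returns a query-engine-like object, not a full LlamaIndex QueryEngine.
const engine = mem.asQueryEngine();
const result = await engine.query({ query: "Summarize the key points" });

console.log(result.response);    // answer text, with a "\n\nSources: ..." suffix when sources exist
console.log(result.sourceNodes); // raw sources array returned by core.ask()
```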

package/dist/adapters/mcp.d.ts
@@ -0,0 +1 @@
+export {};

package/dist/adapters/mcp.js
@@ -0,0 +1,11 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const noop_1 = require("../noop");
+const registry_1 = require("../registry");
+const KIND = "mcp";
+(0, registry_1.register)(KIND, async () => ({
+    tools: (0, noop_1.createNoOp)("mcp adapter unavailable; provide your MCP client configuration", "memvid.adapters.mcp.tools"),
+    functions: [],
+    nodes: (0, noop_1.createNoOp)("mcp adapter nodes unavailable", "memvid.adapters.mcp.nodes"),
+    asQueryEngine: null,
+}));

package/dist/adapters/openai.d.ts
@@ -0,0 +1,26 @@
+/**
+ * OpenAI adapter exposing Memvid core methods as OpenAI function calling tools.
+ *
+ * This adapter provides tools formatted for OpenAI's function calling API.
+ * The tools can be used directly with the OpenAI Chat Completions API.
+ *
+ * Usage:
+ *   import { use } from "memvid-sdk";
+ *
+ *   const mem = await use("openai", "knowledge.mv2");
+ *
+ *   // Access tools for OpenAI function calling
+ *   const tools = mem.tools; // Array of OpenAI tool definitions
+ *
+ *   // Use with OpenAI API
+ *   import OpenAI from "openai";
+ *
+ *   const client = new OpenAI();
+ *   const response = await client.chat.completions.create({
+ *     model: "gpt-4o-mini",
+ *     messages: [...],
+ *     tools: mem.tools,
+ *     tool_choice: "auto",
+ *   });
+ */
+export {};

package/dist/adapters/openai.js
@@ -0,0 +1,169 @@
+"use strict";
+/**
+ * OpenAI adapter exposing Memvid core methods as OpenAI function calling tools.
+ *
+ * This adapter provides tools formatted for OpenAI's function calling API.
+ * The tools can be used directly with the OpenAI Chat Completions API.
+ *
+ * Usage:
+ *   import { use } from "memvid-sdk";
+ *
+ *   const mem = await use("openai", "knowledge.mv2");
+ *
+ *   // Access tools for OpenAI function calling
+ *   const tools = mem.tools; // Array of OpenAI tool definitions
+ *
+ *   // Use with OpenAI API
+ *   import OpenAI from "openai";
+ *
+ *   const client = new OpenAI();
+ *   const response = await client.chat.completions.create({
+ *     model: "gpt-4o-mini",
+ *     messages: [...],
+ *     tools: mem.tools,
+ *     tool_choice: "auto",
+ *   });
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+const noop_1 = require("../noop");
+const registry_1 = require("../registry");
+const KIND = "openai";
+(0, registry_1.register)(KIND, async (core, apiKey) => {
+    // Set up API key if provided
+    const key = apiKey?.openai ?? apiKey?.default;
+    if (key && !process.env.OPENAI_API_KEY) {
+        process.env.OPENAI_API_KEY = key;
+    }
+    // Define OpenAI-format tool definitions
+    const tools = [
+        {
+            type: "function",
+            function: {
+                name: "memvid_put",
+                description: "Store a document in Memvid memory for later retrieval. " +
+                    "Use this to save information that should be searchable later.",
+                parameters: {
+                    type: "object",
+                    properties: {
+                        title: {
+                            type: "string",
+                            description: "Title of the document",
+                        },
+                        label: {
+                            type: "string",
+                            description: "Category or label for the document",
+                        },
+                        text: {
+                            type: "string",
+                            description: "Text content to store",
+                        },
+                        metadata: {
+                            type: "object",
+                            description: "Optional key-value metadata",
+                            additionalProperties: true,
+                        },
+                    },
+                    required: ["title", "label", "text"],
+                },
+            },
+        },
+        {
+            type: "function",
+            function: {
+                name: "memvid_find",
+                description: "Search Memvid memory for documents matching a query. " +
+                    "Returns the most relevant documents with snippets.",
+                parameters: {
+                    type: "object",
+                    properties: {
+                        query: {
+                            type: "string",
+                            description: "Search query string",
+                        },
+                        top_k: {
+                            type: "number",
+                            description: "Number of results to return (default: 5)",
+                        },
+                    },
+                    required: ["query"],
+                },
+            },
+        },
+        {
+            type: "function",
+            function: {
+                name: "memvid_ask",
+                description: "Ask a question and get an answer synthesized from Memvid memory " +
+                    "using retrieval-augmented generation.",
+                parameters: {
+                    type: "object",
+                    properties: {
+                        question: {
+                            type: "string",
+                            description: "Question to answer",
+                        },
+                        mode: {
+                            type: "string",
+                            enum: ["auto", "lex", "sem"],
+                            description: "Search mode: 'auto' (hybrid), 'lex' (keyword), or 'sem' (semantic)",
+                        },
+                    },
+                    required: ["question"],
+                },
+            },
+        },
+    ];
+    // Create executor functions for each tool
+    const executors = {
+        memvid_put: async (args) => {
+            const frameId = await core.put({
+                title: args.title,
+                label: args.label,
+                text: args.text,
+                metadata: args.metadata ?? {},
+                enable_embedding: true,
+                auto_tag: true,
+                extract_dates: true,
+            });
+            return `Document stored with frame_id: ${frameId}`;
+        },
+        memvid_find: async (args) => {
+            const query = args.query;
+            const topK = args.top_k ?? 5;
+            const response = (await core.find(query, { k: topK }));
+            const hits = response.hits ?? [];
+            if (hits.length === 0) {
+                return `No results found for query: '${query}'`;
+            }
+            const results = [];
+            for (let i = 0; i < hits.length; i++) {
+                const hit = hits[i];
+                const title = hit.title ?? "Untitled";
+                const snippet = (hit.text ?? hit.snippet ?? "").slice(0, 200);
+                const score = hit.score ?? 0;
+                results.push(`${i + 1}. [${title}] (score: ${score.toFixed(2)}): ${snippet}...`);
+            }
+            return `Found ${hits.length} results:\n${results.join("\n")}`;
+        },
+        memvid_ask: async (args) => {
+            const question = args.question;
+            const mode = args.mode ?? "auto";
+            const response = (await core.ask(question, { mode }));
+            const answer = response.answer ?? "No answer generated";
+            const sources = response.sources ?? [];
+            let result = `Answer: ${answer}`;
+            if (sources.length > 0) {
+                const sourceTitles = sources.slice(0, 3).map((s) => s.title ?? "Unknown");
+                result += `\n\nSources: ${sourceTitles.join(", ")}`;
+            }
+            return result;
+        },
+    };
+    // Return both tools and executors
+    return {
+        tools,
+        functions: executors, // Executors keyed by tool name
+        nodes: (0, noop_1.createNoOp)("openai adapter does not provide nodes", "memvid.adapters.openai.nodes"),
+        asQueryEngine: null,
+    };
+});
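
A hedged sketch of the tool-call round trip this adapter enables: `mem.tools` is passed to the Chat Completions API and `mem.functions` (the executors keyed by tool name in the return value above) resolves each requested call. The OpenAI client usage below follows the standard Node SDK; the file name, model, and prompt are illustrative only:

```ts
import OpenAI from "openai";
import { use } from "memvid-sdk";

const mem = await use("openai", "knowledge.mv2");
const client = new OpenAI();

const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
  { role: "user", content: "What do we know about project X?" },
];

// First pass: let the model decide whether to call a Memvid tool.
const first = await client.chat.completions.create({
  model: "gpt-4o-mini",
  messages,
  tools: mem.tools,
  tool_choice: "auto",
});

const assistantMsg = first.choices[0].message;
messages.push(assistantMsg);

// Run each requested tool through the matching executor and feed the result back.
for (const call of assistantMsg.tool_calls ?? []) {
  if (call.type !== "function") continue;
  const executor = mem.functions[call.function.name];
  const output = await executor(JSON.parse(call.function.arguments));
  messages.push({ role: "tool", tool_call_id: call.id, content: output });
}

// Second pass: the model answers using the tool results.
const final = await client.chat.completions.create({
  model: "gpt-4o-mini",
  messages,
  tools: mem.tools,
});
console.log(final.choices[0].message.content);
```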

package/dist/adapters/semantic_kernel.d.ts
@@ -0,0 +1 @@
+export {};

package/dist/adapters/semantic_kernel.js
@@ -0,0 +1,11 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const noop_1 = require("../noop");
+const registry_1 = require("../registry");
+const KIND = "semantic-kernel";
+(0, registry_1.register)(KIND, async () => ({
+    tools: (0, noop_1.createNoOp)("semantic-kernel adapter unavailable; install 'semantic-kernel' to enable", "memvid.adapters.semantic-kernel.tools"),
+    functions: [],
+    nodes: (0, noop_1.createNoOp)("semantic-kernel adapter nodes unavailable", "memvid.adapters.semantic-kernel.nodes"),
+    asQueryEngine: null,
+}));

package/dist/adapters/vercel_ai.d.ts
@@ -0,0 +1,27 @@
+/**
+ * Vercel AI SDK adapter exposing Memvid core methods as Vercel AI tools.
+ *
+ * This adapter provides tools formatted for Vercel AI SDK's tool calling API.
+ * The tools can be used directly with generateText, streamText, and other AI SDK functions.
+ *
+ * Usage:
+ *   import { use } from "memvid-sdk";
+ *
+ *   const mem = await use("vercel-ai", "knowledge.mv2");
+ *
+ *   // Access tools for Vercel AI SDK
+ *   const tools = mem.tools; // Object with tool definitions
+ *
+ *   // Use with Vercel AI SDK
+ *   import { generateText } from "ai";
+ *   import { openai } from "@ai-sdk/openai";
+ *
+ *   const result = await generateText({
+ *     model: openai("gpt-4o-mini"),
+ *     tools: mem.tools,
+ *     prompt: "Search for information about...",
+ *   });
+ *
+ * Note: Requires zod to be installed for schema definitions.
+ */
+export {};

package/dist/adapters/vercel_ai.js
@@ -0,0 +1,148 @@
+"use strict";
+/**
+ * Vercel AI SDK adapter exposing Memvid core methods as Vercel AI tools.
+ *
+ * This adapter provides tools formatted for Vercel AI SDK's tool calling API.
+ * The tools can be used directly with generateText, streamText, and other AI SDK functions.
+ *
+ * Usage:
+ *   import { use } from "memvid-sdk";
+ *
+ *   const mem = await use("vercel-ai", "knowledge.mv2");
+ *
+ *   // Access tools for Vercel AI SDK
+ *   const tools = mem.tools; // Object with tool definitions
+ *
+ *   // Use with Vercel AI SDK
+ *   import { generateText } from "ai";
+ *   import { openai } from "@ai-sdk/openai";
+ *
+ *   const result = await generateText({
+ *     model: openai("gpt-4o-mini"),
+ *     tools: mem.tools,
+ *     prompt: "Search for information about...",
+ *   });
+ *
+ * Note: Requires zod to be installed for schema definitions.
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+const noop_1 = require("../noop");
+const registry_1 = require("../registry");
+const KIND = "vercel-ai";
+// Dynamic import helper for AI SDK tool helper
+function tryImportAI() {
+    try {
+        // eslint-disable-next-line @typescript-eslint/no-var-requires
+        const aiModule = require("ai");
+        return { tool: aiModule.tool, jsonSchema: aiModule.jsonSchema };
+    }
+    catch {
+        return null;
+    }
+}
+(0, registry_1.register)(KIND, async (core, _apiKey) => {
+    const imports = tryImportAI();
+    if (!imports) {
+        return {
+            tools: (0, noop_1.createNoOp)("vercel-ai adapter requires 'ai' package; install it to enable", "memvid.adapters.vercel-ai.tools"),
+            functions: [],
+            nodes: (0, noop_1.createNoOp)("vercel-ai adapter does not provide nodes", "memvid.adapters.vercel-ai.nodes"),
+            asQueryEngine: null,
+        };
+    }
+    const { tool, jsonSchema } = imports;
+    // Use JSON schemas directly to avoid Zod version conflicts
+    const putSchema = jsonSchema({
+        type: "object",
+        properties: {
+            title: { type: "string", description: "Title of the document" },
+            label: { type: "string", description: "Category or label for the document" },
+            text: { type: "string", description: "Text content to store" },
+            metadata: { type: "object", description: "Optional key-value metadata" },
+        },
+        required: ["title", "label", "text"],
+    });
+    const findSchema = jsonSchema({
+        type: "object",
+        properties: {
+            query: { type: "string", description: "Search query string" },
+            top_k: { type: "number", description: "Number of results to return (default: 5)" },
+        },
+        required: ["query"],
+    });
+    const askSchema = jsonSchema({
+        type: "object",
+        properties: {
+            question: { type: "string", description: "Question to answer" },
+            mode: { type: "string", enum: ["auto", "lex", "sem"], description: "Search mode: 'auto' (hybrid), 'lex' (keyword), or 'sem' (semantic)" },
+        },
+        required: ["question"],
+    });
+    // Vercel AI SDK v5 uses the tool() helper with inputSchema
+    const tools = {
+        memvid_put: tool({
+            description: "Store a document in Memvid memory for later retrieval. " +
+                "Use this to save information that should be searchable later.",
+            inputSchema: putSchema,
+            execute: async (args) => {
+                const frameId = await core.put({
+                    title: args.title,
+                    label: args.label,
+                    text: args.text,
+                    metadata: args.metadata ?? {},
+                    enable_embedding: true,
+                    auto_tag: true,
+                    extract_dates: true,
+                });
+                return `Document stored with frame_id: ${frameId}`;
+            },
+        }),
+        memvid_find: tool({
+            description: "Search Memvid memory for documents matching a query. " +
+                "Returns the most relevant documents with snippets.",
+            inputSchema: findSchema,
+            execute: async (args) => {
+                const query = args.query;
+                const topK = args.top_k ?? 5;
+                const response = (await core.find(query, { k: topK }));
+                const hits = response.hits ?? [];
+                if (hits.length === 0) {
+                    return `No results found for query: '${query}'`;
+                }
+                const results = [];
+                for (let i = 0; i < hits.length; i++) {
+                    const hit = hits[i];
+                    const title = hit.title ?? "Untitled";
+                    const snippet = (hit.text ?? hit.snippet ?? "").slice(0, 200);
+                    const score = hit.score ?? 0;
+                    results.push(`${i + 1}. [${title}] (score: ${score.toFixed(2)}): ${snippet}...`);
+                }
+                return `Found ${hits.length} results:\n${results.join("\n")}`;
+            },
+        }),
+        memvid_ask: tool({
+            description: "Ask a question and get an answer synthesized from Memvid memory " +
+                "using retrieval-augmented generation.",
+            inputSchema: askSchema,
+            execute: async (args) => {
+                const question = args.question;
+                const mode = args.mode ?? "auto";
+                const response = (await core.ask(question, { mode }));
+                const answer = response.answer ?? "No answer generated";
+                const sources = response.sources ?? [];
+                let result = `Answer: ${answer}`;
+                if (sources.length > 0) {
+                    const sourceTitles = sources.slice(0, 3).map((s) => s.title ?? "Unknown");
+                    result += `\n\nSources: ${sourceTitles.join(", ")}`;
+                }
+                return result;
+            },
+        }),
+    };
+    return {
+        tools,
+        functions: [], // Raw functions not needed for Vercel AI
+        nodes: (0, noop_1.createNoOp)("vercel-ai adapter does not provide nodes", "memvid.adapters.vercel-ai.nodes"),
+        asQueryEngine: null,
+    };
+});
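
The adapter's doc comment already shows the intended wiring; restated below as a runnable sketch. Because each tool defines `execute()`, the AI SDK invokes memvid_put/find/ask itself, so no manual dispatch loop is needed; depending on your `ai` package version a multi-step option (e.g. `maxSteps` or `stopWhen`) may be required for the model to read tool results before answering. File name, model, and prompt are illustrative:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";
import { use } from "memvid-sdk";

const mem = await use("vercel-ai", "knowledge.mv2");

// Tools are passed as an object; the SDK runs execute() for any tool the model calls.
const result = await generateText({
  model: openai("gpt-4o-mini"),
  tools: mem.tools,
  prompt: "Find what we stored about the onboarding checklist and summarize it.",
});

console.log(result.text);
```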