@kognitivedev/vercel-ai-provider 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +22 -0
- package/dist/index.js +162 -0
- package/package.json +22 -0
- package/src/index.ts +221 -0
- package/tsconfig.json +12 -0
package/dist/index.d.ts
ADDED
@@ -0,0 +1,22 @@
+import { LanguageModelV2 } from "@ai-sdk/provider";
+export interface CognitiveLayerConfig {
+    appId: string;
+    defaultAgentId?: string;
+    baseUrl?: string;
+    apiKey?: string;
+    /**
+     * Delay in milliseconds before triggering memory processing after a response.
+     * Set to 0 to disable automatic processing.
+     * Default: 500ms
+     */
+    processDelayMs?: number;
+}
+export type CLModelWrapper = (modelId: string, settings?: {
+    userId?: string;
+    agentId?: string;
+    sessionId?: string;
+}) => LanguageModelV2;
+export declare function createCognitiveLayer(config: {
+    provider: any;
+    clConfig: CognitiveLayerConfig;
+}): CLModelWrapper;
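
The declarations above are the package's entire public surface: one factory that wraps an existing AI SDK provider and returns a model constructor. A minimal consumer sketch follows (not part of the package; the @ai-sdk/openai provider, model ID, and user/session IDs are illustrative assumptions):

import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";
import { createCognitiveLayer } from "@kognitivedev/vercel-ai-provider";

// Wrap any AI SDK provider once; clConfig points the middleware at the
// cognitive API and names the app the memories belong to.
const cl = createCognitiveLayer({
  provider: openai,
  clConfig: {
    appId: "my-app",                  // required
    baseUrl: "http://localhost:3001", // the implementation's default
    processDelayMs: 500,              // delay before the processing trigger
  },
});

// userId enables memory injection; sessionId is additionally required
// before anything is logged or processed (see the runtime warning below).
const { text } = await generateText({
  model: cl("gpt-4o-mini", { userId: "u_123", sessionId: "s_456" }),
  prompt: "Pick up where we left off.",
});

One discrepancy worth flagging: the doc comment says processDelayMs: 0 disables automatic processing, but the implementation below runs the processing trigger immediately when the delay is 0 rather than skipping it.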
package/dist/index.js
ADDED
@@ -0,0 +1,162 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.createCognitiveLayer = createCognitiveLayer;
+const ai_1 = require("ai");
+function createCognitiveLayer(config) {
+    const { provider, clConfig } = config;
+    const baseUrl = clConfig.baseUrl || "http://localhost:3001";
+    // Default to 500ms delay to allow DB writes to settle
+    const processDelay = clConfig.processDelayMs !== undefined ? clConfig.processDelayMs : 500;
+    const logConversation = async (payload) => {
+        try {
+            await fetch(`${baseUrl}/api/cognitive/log`, {
+                method: "POST",
+                headers: { "Content-Type": "application/json" },
+                body: JSON.stringify(Object.assign(Object.assign({}, payload), { type: "conversation", timestamp: new Date().toISOString() })),
+            });
+        }
+        catch (e) {
+            console.error("CognitiveLayer: Log failed", e);
+        }
+    };
+    const triggerProcessing = (userId, agentId, sessionId) => {
+        const run = () => {
+            fetch(`${baseUrl}/api/cognitive/process`, {
+                method: "POST",
+                headers: { "Content-Type": "application/json" },
+                body: JSON.stringify({ userId, agentId, sessionId }),
+            }).catch(e => console.error("CognitiveLayer: Process trigger failed", e));
+        };
+        if (processDelay > 0) {
+            setTimeout(run, processDelay);
+        }
+        else {
+            run();
+        }
+    };
+    const withMemorySystemPrompt = (params, incomingMessages, memoryPrompt) => {
+        var _a;
+        const nextParams = Object.assign({}, params);
+        // 1) If caller provided a top-level system prompt, overwrite it.
+        if (nextParams.system) {
+            nextParams.system = memoryPrompt;
+            return { nextParams, messages: incomingMessages, mode: "overwrite-param" };
+        }
+        // 2) If first message is system, replace its content.
+        if (incomingMessages.length > 0 && ((_a = incomingMessages[0]) === null || _a === void 0 ? void 0 : _a.role) === "system") {
+            const updated = [...incomingMessages];
+            updated[0] = Object.assign(Object.assign({}, updated[0]), { content: memoryPrompt });
+            return { nextParams, messages: updated, mode: "overwrite-first-system" };
+        }
+        // 3) Otherwise prepend a system message.
+        const updated = [{ role: "system", content: memoryPrompt }, ...incomingMessages];
+        return { nextParams, messages: updated, mode: "prepend-system" };
+    };
+    return (modelId, settings) => {
+        const model = provider(modelId);
+        const userId = settings === null || settings === void 0 ? void 0 : settings.userId;
+        const agentId = (settings === null || settings === void 0 ? void 0 : settings.agentId) || clConfig.defaultAgentId || "default";
+        const sessionId = settings === null || settings === void 0 ? void 0 : settings.sessionId;
+        const sessionMissing = !!userId && !sessionId;
+        if (sessionMissing) {
+            console.warn("CognitiveLayer: sessionId is required to log and process memories; skipping logging until provided.");
+        }
+        return (0, ai_1.wrapLanguageModel)({
+            model,
+            middleware: {
+                async transformParams({ params }) {
+                    if (!userId)
+                        return params;
+                    let systemPromptToAdd = "";
+                    const incomingMessages = Array.isArray(params.prompt)
+                        ? params.prompt
+                        : [];
+                    try {
+                        const url = `${baseUrl}/api/cognitive/snapshot?userId=${userId}&agentId=${agentId}&appId=${clConfig.appId}`;
+                        const res = await fetch(url);
+                        if (res.ok) {
+                            const data = await res.json();
+                            const systemBlock = data.systemBlock || "";
+                            const userContextBlock = data.userContextBlock || "";
+                            const agentHeuristics = systemBlock || "None";
+                            const userContext = userContextBlock || "None";
+                            systemPromptToAdd = systemBlock !== "" || userContextBlock !== "" ? `
+<MemoryContext>
+Use the following memory to stay consistent. Prefer UserContext facts for answers; AgentHeuristics guide style, safety, and priorities.
+${agentHeuristics}
+${userContext}
+</MemoryContext>
+`.trim() : "";
+                            console.log("CL: snapshot fetched", {
+                                userId,
+                                agentId,
+                                sessionId,
+                                systemLen: systemBlock.length,
+                                userLen: userContextBlock.length,
+                                hasSystem: !!systemBlock,
+                                hasUserContext: !!userContextBlock,
+                            });
+                        }
+                        else {
+                            console.warn("CognitiveLayer: snapshot fetch failed status", res.status);
+                        }
+                    }
+                    catch (e) {
+                        console.warn("CognitiveLayer: Failed to fetch snapshot", e);
+                    }
+                    if (!systemPromptToAdd) {
+                        return Object.assign(Object.assign({}, params), { messages: incomingMessages });
+                    }
+                    const { nextParams, messages: messagesWithMemory } = withMemorySystemPrompt(params, incomingMessages, systemPromptToAdd);
+                    console.log("CL: injecting memory system prompt", {
+                        systemPromptToAdd
+                    });
+                    console.log("CL: messagesWithMemory", messagesWithMemory);
+                    console.log("CL: nextParams", nextParams);
+                    return Object.assign(Object.assign({}, nextParams), { prompt: messagesWithMemory });
+                },
+                async wrapGenerate({ doGenerate, params }) {
+                    var _a;
+                    const result = await doGenerate();
+                    if (userId && sessionId) {
+                        const messagesInput = params.messages || params.prompt || [];
+                        const resultMessages = (_a = result === null || result === void 0 ? void 0 : result.response) === null || _a === void 0 ? void 0 : _a.messages;
+                        const assistantMessage = (result === null || result === void 0 ? void 0 : result.text)
+                            ? [{ role: "assistant", content: [{ type: "text", text: result.text }] }]
+                            : [];
+                        const finalMessages = Array.isArray(resultMessages) && resultMessages.length > 0
+                            ? resultMessages
+                            : [...messagesInput, ...assistantMessage];
+                        logConversation({
+                            userId,
+                            agentId,
+                            sessionId,
+                            messages: finalMessages,
+                            modelId,
+                        }).then(() => triggerProcessing(userId, agentId, sessionId));
+                    }
+                    return result;
+                },
+                async wrapStream({ doStream, params }) {
+                    var _a;
+                    const result = await doStream();
+                    if (userId && sessionId) {
+                        const messagesInput = params.messages || params.prompt || [];
+                        const resultMessages = (_a = result === null || result === void 0 ? void 0 : result.response) === null || _a === void 0 ? void 0 : _a.messages;
+                        const finalMessages = Array.isArray(resultMessages) && resultMessages.length > 0
+                            ? resultMessages
+                            : messagesInput;
+                        logConversation({
+                            userId,
+                            agentId,
+                            sessionId,
+                            messages: finalMessages,
+                            modelId,
+                        }).then(() => triggerProcessing(userId, agentId, sessionId));
+                    }
+                    return result;
+                }
+            }
+        });
+    };
+}
package/package.json
ADDED
@@ -0,0 +1,22 @@
+{
+  "name": "@kognitivedev/vercel-ai-provider",
+  "version": "0.1.0",
+  "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "publishConfig": {
+    "access": "public"
+  },
+  "scripts": {
+    "build": "tsc",
+    "dev": "tsc -w",
+    "prepublishOnly": "npm run build"
+  },
+  "peerDependencies": {
+    "ai": "^4.0.0 || ^5.0.0"
+  },
+  "devDependencies": {
+    "typescript": "^5.0.0",
+    "ai": "latest",
+    "@types/node": "^20.0.0"
+  }
+}
package/src/index.ts
ADDED
@@ -0,0 +1,221 @@
+import { wrapLanguageModel } from "ai";
+import { LanguageModelV2 } from "@ai-sdk/provider";
+
+export interface CognitiveLayerConfig {
+  appId: string;
+  defaultAgentId?: string;
+  baseUrl?: string;
+  apiKey?: string;
+  /**
+   * Delay in milliseconds before triggering memory processing after a response.
+   * Set to 0 to disable automatic processing.
+   * Default: 500ms
+   */
+  processDelayMs?: number;
+}
+
+export type CLModelWrapper = (
+  modelId: string,
+  settings?: { userId?: string; agentId?: string; sessionId?: string }
+) => LanguageModelV2;
+
+export function createCognitiveLayer(config: {
+  provider: any;
+  clConfig: CognitiveLayerConfig;
+}): CLModelWrapper {
+  const { provider, clConfig } = config;
+  const baseUrl = clConfig.baseUrl || "http://localhost:3001";
+  // Default to 500ms delay to allow DB writes to settle
+  const processDelay = clConfig.processDelayMs !== undefined ? clConfig.processDelayMs : 500;
+
+  const logConversation = async (payload: {
+    userId: string;
+    agentId: string;
+    sessionId: string;
+    messages: any[];
+    memorySystemPrompt?: string;
+    modelId?: string;
+  }) => {
+    try {
+      await fetch(`${baseUrl}/api/cognitive/log`, {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify({
+          ...payload,
+          type: "conversation",
+          timestamp: new Date().toISOString(),
+        }),
+      });
+    } catch (e) {
+      console.error("CognitiveLayer: Log failed", e);
+    }
+  };
+
+  const triggerProcessing = (userId: string, agentId: string, sessionId: string) => {
+    const run = () => {
+      fetch(`${baseUrl}/api/cognitive/process`, {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify({ userId, agentId, sessionId }),
+      }).catch(e => console.error("CognitiveLayer: Process trigger failed", e));
+    };
+
+    if (processDelay > 0) {
+      setTimeout(run, processDelay);
+    } else {
+      run();
+    }
+  };
+
+  const withMemorySystemPrompt = (
+    params: any,
+    incomingMessages: any[],
+    memoryPrompt: string
+  ): { nextParams: any; messages: any[]; mode: "overwrite-param" | "overwrite-first-system" | "prepend-system" } => {
+    const nextParams = { ...params };
+
+    // 1) If caller provided a top-level system prompt, overwrite it.
+    if (nextParams.system) {
+      nextParams.system = memoryPrompt;
+      return { nextParams, messages: incomingMessages, mode: "overwrite-param" };
+    }
+
+    // 2) If first message is system, replace its content.
+    if (incomingMessages.length > 0 && incomingMessages[0]?.role === "system") {
+      const updated = [...incomingMessages];
+      updated[0] = { ...updated[0], content: memoryPrompt };
+      return { nextParams, messages: updated, mode: "overwrite-first-system" };
+    }
+
+    // 3) Otherwise prepend a system message.
+    const updated = [{ role: "system", content: memoryPrompt }, ...incomingMessages];
+    return { nextParams, messages: updated, mode: "prepend-system" };
+  };
+
+  return (modelId: string, settings?: { userId?: string; agentId?: string; sessionId?: string }) => {
+    const model = provider(modelId) as LanguageModelV2;
+    const userId = settings?.userId;
+    const agentId = settings?.agentId || clConfig.defaultAgentId || "default";
+    const sessionId = settings?.sessionId;
+    const sessionMissing = !!userId && !sessionId;
+
+    if (sessionMissing) {
+      console.warn("CognitiveLayer: sessionId is required to log and process memories; skipping logging until provided.");
+    }
+
+    return wrapLanguageModel({
+      model,
+      middleware: {
+        async transformParams({ params }) {
+          if (!userId) return params;
+
+          let systemPromptToAdd = "";
+          const incomingMessages = Array.isArray((params as any).prompt)
+            ? (params as any).prompt
+            : [];
+
+
+          try {
+            const url = `${baseUrl}/api/cognitive/snapshot?userId=${userId}&agentId=${agentId}&appId=${clConfig.appId}`;
+            const res = await fetch(url);
+            if (res.ok) {
+              const data = await res.json();
+              const systemBlock = data.systemBlock || "";
+              const userContextBlock = data.userContextBlock || "";
+              const agentHeuristics = systemBlock || "None";
+              const userContext = userContextBlock || "None";
+              systemPromptToAdd = systemBlock !== "" || userContextBlock !== "" ? `
+<MemoryContext>
+Use the following memory to stay consistent. Prefer UserContext facts for answers; AgentHeuristics guide style, safety, and priorities.
+${agentHeuristics}
+${userContext}
+</MemoryContext>
+`.trim() : "";
+
+              console.log("CL: snapshot fetched", {
+                userId,
+                agentId,
+                sessionId,
+                systemLen: systemBlock.length,
+                userLen: userContextBlock.length,
+                hasSystem: !!systemBlock,
+                hasUserContext: !!userContextBlock,
+              });
+            } else {
+              console.warn("CognitiveLayer: snapshot fetch failed status", res.status);
+            }
+          } catch (e) {
+            console.warn("CognitiveLayer: Failed to fetch snapshot", e);
+          }
+
+          if (!systemPromptToAdd) {
+            return { ...params, messages: incomingMessages };
+          }
+
+          const { nextParams, messages: messagesWithMemory } = withMemorySystemPrompt(
+            params,
+            incomingMessages,
+            systemPromptToAdd
+          );
+
+
+
+          console.log("CL: injecting memory system prompt", {
+            systemPromptToAdd
+          });
+
+          console.log("CL: messagesWithMemory", messagesWithMemory);
+          console.log("CL: nextParams", nextParams);
+
+          return { ...nextParams, prompt: messagesWithMemory };
+        },
+
+        async wrapGenerate({ doGenerate, params }) {
+          const result = await doGenerate();
+
+          if (userId && sessionId) {
+            const messagesInput = (params as any).messages || (params as any).prompt || [];
+            const resultMessages = (result as any)?.response?.messages;
+            const assistantMessage = (result as any)?.text
+              ? [{ role: "assistant", content: [{ type: "text", text: (result as any).text }] }]
+              : [];
+            const finalMessages = Array.isArray(resultMessages) && resultMessages.length > 0
+              ? resultMessages
+              : [...messagesInput, ...assistantMessage];
+
+            logConversation({
+              userId,
+              agentId,
+              sessionId,
+              messages: finalMessages,
+              modelId,
+            }).then(() => triggerProcessing(userId, agentId, sessionId));
+          }
+
+          return result;
+        },
+        async wrapStream({ doStream, params }) {
+          const result = await doStream();
+
+          if (userId && sessionId) {
+            const messagesInput = (params as any).messages || (params as any).prompt || [];
+            const resultMessages = (result as any)?.response?.messages;
+            const finalMessages = Array.isArray(resultMessages) && resultMessages.length > 0
+              ? resultMessages
+              : messagesInput;
+
+            logConversation({
+              userId,
+              agentId,
+              sessionId,
+              messages: finalMessages,
+              modelId,
+            }).then(() => triggerProcessing(userId, agentId, sessionId));
+          }
+
+          return result;
+        }
+      }
+    });
+  };
+}
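
The middleware assumes a backend exposing three routes: GET /api/cognitive/snapshot (returning JSON with systemBlock and userContextBlock strings), POST /api/cognitive/log, and POST /api/cognitive/process. None of the requests carry the configured apiKey, which is declared but never read. The package ships no server; a hypothetical Express stub satisfying that contract (every handler body is a placeholder assumption, not part of the package) could look like:

import express from "express";

const app = express();
app.use(express.json());

// Snapshot the middleware splices into the system prompt before each call.
app.get("/api/cognitive/snapshot", (req, res) => {
  const { userId, agentId, appId } = req.query;
  res.json({
    systemBlock: "",      // agent heuristics for (appId, agentId) -- placeholder
    userContextBlock: "", // durable facts about userId -- placeholder
  });
});

// Receives { userId, agentId, sessionId, messages, modelId, type, timestamp }.
app.post("/api/cognitive/log", (req, res) => {
  // Persist req.body for later processing -- placeholder.
  res.status(204).end();
});

// Fired roughly processDelayMs after each response to distill logs into memories.
app.post("/api/cognitive/process", (req, res) => {
  const { userId, agentId, sessionId } = req.body;
  // Kick off asynchronous memory extraction -- placeholder.
  res.status(202).end();
});

app.listen(3001); // matches the middleware's default baseUrl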