@kognitivedev/vercel-ai-provider 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,297 @@
1
+ # @kognitivedev/vercel-ai-provider
2
+
3
+ Vercel AI SDK provider wrapper that integrates the Kognitive memory layer into your AI applications. Automatically injects memory context and logs conversations for memory processing.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ npm install @kognitivedev/vercel-ai-provider
9
+ ```
10
+
11
+ ### Peer Dependencies
12
+
13
+ This package requires the Vercel AI SDK:
14
+
15
+ ```bash
16
+ npm install ai
17
+ ```
18
+
19
+ ## Quick Start
20
+
21
+ ```typescript
22
+ import { createCognitiveLayer } from "@kognitivedev/vercel-ai-provider";
23
+ import { openai } from "@ai-sdk/openai";
24
+ import { generateText } from "ai";
25
+
26
+ // 1. Create the cognitive layer
27
+ const clModel = createCognitiveLayer({
28
+ provider: openai,
29
+ clConfig: {
30
+ appId: "my-app",
31
+ defaultAgentId: "assistant",
32
+ baseUrl: "http://localhost:3001"
33
+ }
34
+ });
35
+
36
+ // 2. Use it with Vercel AI SDK
37
+ const { text } = await generateText({
38
+ model: clModel("gpt-4o", {
39
+ userId: "user-123",
40
+ sessionId: "session-abc"
41
+ }),
42
+ prompt: "What's my favorite color?"
43
+ });
44
+ ```
45
+
46
+ ## Configuration
47
+
48
+ ### `CognitiveLayerConfig`
49
+
50
+ | Option | Type | Required | Default | Description |
51
+ |--------|------|----------|---------|-------------|
52
+ | `appId` | `string` | ✓ | - | Unique identifier for your application |
53
+ | `defaultAgentId` | `string` | - | `"default"` | Default agent ID when not specified per-request |
54
+ | `baseUrl` | `string` | - | `"http://localhost:3001"` | Kognitive backend API URL |
55
+ | `apiKey` | `string` | - | - | API key for authentication (if required) |
56
+ | `processDelayMs` | `number` | - | `500` | Delay before triggering memory processing (set to 0 to trigger processing immediately, with no delay) |
57
+
58
+ ## API Reference
59
+
60
+ ### `createCognitiveLayer(config)`
61
+
62
+ Creates a model wrapper function that adds memory capabilities to any Vercel AI SDK provider.
63
+
64
+ **Parameters:**
65
+
66
+ ```typescript
67
+ createCognitiveLayer({
68
+ provider: any, // Vercel AI SDK provider (e.g., openai, anthropic)
69
+ clConfig: CognitiveLayerConfig
70
+ }): CLModelWrapper
71
+ ```
72
+
73
+ **Returns:** `CLModelWrapper` - A function to wrap models with memory capabilities.
74
+
75
+ ---
76
+
77
+ ### `CLModelWrapper`
78
+
79
+ The function returned by `createCognitiveLayer`.
80
+
81
+ ```typescript
82
+ type CLModelWrapper = (
83
+ modelId: string,
84
+ settings?: {
85
+ userId?: string;
86
+ agentId?: string;
87
+ sessionId?: string;
88
+ },
89
+ providerOptions?: Record<string, unknown>
90
+ ) => LanguageModelV2;
91
+ ```
92
+
93
+ **Parameters:**
94
+
95
+ | Parameter | Type | Required | Description |
96
+ |-----------|------|----------|-------------|
97
+ | `modelId` | `string` | ✓ | Model identifier (e.g., `"gpt-4o"`, `"claude-3-opus"`) |
98
+ | `settings.userId` | `string` | - | User identifier (required for memory features) |
99
+ | `settings.agentId` | `string` | - | Override default agent ID |
100
+ | `settings.sessionId` | `string` | - | Session identifier (required for logging) |
101
+ | `providerOptions` | `Record<string, unknown>` | - | Provider-specific options passed directly to the underlying provider |
102
+
103
+ ## Usage Examples
104
+
105
+ ### With OpenAI
106
+
107
+ ```typescript
108
+ import { createCognitiveLayer } from "@kognitivedev/vercel-ai-provider";
109
+ import { openai } from "@ai-sdk/openai";
110
+ import { generateText } from "ai";
111
+
112
+ const clModel = createCognitiveLayer({
113
+ provider: openai,
114
+ clConfig: {
115
+ appId: "my-app",
116
+ baseUrl: "https://api.kognitive.dev"
117
+ }
118
+ });
119
+
120
+ const { text } = await generateText({
121
+ model: clModel("gpt-4o", {
122
+ userId: "user-123",
123
+ sessionId: "session-abc"
124
+ }),
125
+ prompt: "Remember that my favorite color is blue"
126
+ });
127
+ ```
128
+
129
+ ### With OpenRouter (Provider Options)
130
+
131
+ Pass provider-specific options as the third parameter:
132
+
133
+ ```typescript
134
+ import { createCognitiveLayer } from "@kognitivedev/vercel-ai-provider";
135
+ import { createOpenRouter } from "@openrouter/ai-sdk-provider";
136
+ import { generateText } from "ai";
137
+
138
+ const openrouter = createOpenRouter({
139
+ apiKey: process.env.OPENROUTER_API_KEY
140
+ });
141
+
142
+ const clModel = createCognitiveLayer({
143
+ provider: openrouter.chat,
144
+ clConfig: {
145
+ appId: "my-app",
146
+ baseUrl: "https://api.kognitive.dev"
147
+ }
148
+ });
149
+
150
+ // Pass provider-specific options as the third parameter
151
+ const { text } = await generateText({
152
+ model: clModel("moonshotai/kimi-k2-0905", {
153
+ userId: "user-123",
154
+ sessionId: "session-abc"
155
+ }, {
156
+ provider: {
157
+ only: ["openai"]
158
+ }
159
+ }),
160
+ prompt: "What's the weather like?"
161
+ });
162
+ ```
163
+
164
+ ### With Anthropic
165
+
166
+ ```typescript
167
+ import { createCognitiveLayer } from "@kognitivedev/vercel-ai-provider";
168
+ import { anthropic } from "@ai-sdk/anthropic";
169
+ import { streamText } from "ai";
170
+
171
+ const clModel = createCognitiveLayer({
172
+ provider: anthropic,
173
+ clConfig: {
174
+ appId: "my-app",
175
+ defaultAgentId: "claude-assistant"
176
+ }
177
+ });
178
+
179
+ const result = await streamText({
180
+ model: clModel("claude-3-5-sonnet-latest", {
181
+ userId: "user-456",
182
+ sessionId: "chat-xyz"
183
+ }),
184
+ prompt: "What did I tell you about my favorite color?"
185
+ });
186
+
187
+ for await (const chunk of result.textStream) {
188
+ process.stdout.write(chunk);
189
+ }
190
+ ```
191
+
192
+ ### With System Prompts
193
+
194
+ The provider automatically injects memory context into your system prompts:
195
+
196
+ ```typescript
197
+ const { text } = await generateText({
198
+ model: clModel("gpt-4o", {
199
+ userId: "user-123",
200
+ sessionId: "session-abc"
201
+ }),
202
+ system: "You are a helpful assistant.",
203
+ prompt: "What do you know about me?"
204
+ });
205
+
206
+ // Memory context is automatically appended to system prompt
207
+ ```
208
+
209
+ ### Without Memory (Anonymous Users)
210
+
211
+ Skip memory features by omitting `userId`:
212
+
213
+ ```typescript
214
+ const { text } = await generateText({
215
+ model: clModel("gpt-4o"),
216
+ prompt: "General question without memory"
217
+ });
218
+ ```
219
+
220
+ ## How It Works
221
+
222
+ ### Memory Injection Flow
223
+
224
+ 1. **Request Interception**: When a request is made, the middleware fetches the user's memory snapshot
225
+ 2. **Context Injection**: Memory context is injected into the system prompt as `<MemoryContext>` block
226
+ 3. **Response Processing**: After the response, the conversation is logged
227
+ 4. **Background Processing**: Memory extraction and management runs asynchronously
228
+
229
+ ### Memory Context Format
230
+
231
+ The injected memory context follows this structure:
232
+
233
+ ```xml
234
+ <MemoryContext>
235
+ Use the following memory to stay consistent. Prefer UserContext facts for answers; AgentHeuristics guide style, safety, and priorities.
236
+ <AgentHeuristics>
237
+ - User prefers concise responses
238
+ - Always greet user by name
239
+ </AgentHeuristics>
240
+ <UserContext>
241
+ <Facts>
242
+ - User's name is John
243
+ - Favorite color is blue
244
+ </Facts>
245
+ <State>
246
+ - Currently working on a project
247
+ </State>
248
+ </UserContext>
249
+ </MemoryContext>
250
+ ```
251
+
252
+ ## Backend API Integration
253
+
254
+ The provider communicates with your Kognitive backend via these endpoints:
255
+
256
+ | Endpoint | Method | Description |
257
+ |----------|--------|-------------|
258
+ | `/api/cognitive/snapshot` | GET | Fetches user's memory snapshot |
259
+ | `/api/cognitive/log` | POST | Logs conversation for processing |
260
+ | `/api/cognitive/process` | POST | Triggers memory extraction/management |
261
+
262
+ ### Query Parameters for Snapshot
263
+
264
+ ```
265
+ GET /api/cognitive/snapshot?userId={userId}&agentId={agentId}&appId={appId}
266
+ ```
267
+
268
+ ## Troubleshooting
269
+
270
+ ### Memory not being injected
271
+
272
+ 1. Ensure `userId` and `sessionId` are provided
273
+ 2. Check that the backend is running at the configured `baseUrl`
274
+ 3. Verify the snapshot endpoint returns data
275
+
276
+ ### Console warnings
277
+
278
+ ```
279
+ CognitiveLayer: sessionId is required to log and process memories; skipping logging until provided.
280
+ ```
281
+
282
+ This warning appears when `userId` is provided but `sessionId` is missing. Add `sessionId` to enable logging.
283
+
284
+ ### Processing delay
285
+
286
+ The default 500ms delay before triggering memory processing allows database writes to settle. Adjust with `processDelayMs`:
287
+
288
+ ```typescript
289
+ clConfig: {
290
+ processDelayMs: 1000 // 1 second delay
291
+ // processDelayMs: 0 // Immediate processing
292
+ }
293
+ ```
294
+
295
+ ## License
296
+
297
+ MIT
package/dist/index.d.ts CHANGED
@@ -15,7 +15,7 @@ export type CLModelWrapper = (modelId: string, settings?: {
15
15
  userId?: string;
16
16
  agentId?: string;
17
17
  sessionId?: string;
18
- }) => LanguageModelV2;
18
+ }, providerOptions?: Record<string, unknown>) => LanguageModelV2;
19
19
  export declare function createCognitiveLayer(config: {
20
20
  provider: any;
21
21
  clConfig: CognitiveLayerConfig;
package/dist/index.js CHANGED
@@ -52,8 +52,11 @@ function createCognitiveLayer(config) {
52
52
  const updated = [{ role: "system", content: memoryPrompt }, ...incomingMessages];
53
53
  return { nextParams, messages: updated, mode: "prepend-system" };
54
54
  };
55
- return (modelId, settings) => {
56
- const model = provider(modelId);
55
+ return (modelId, settings, providerOptions) => {
56
+ // Pass provider options through to the underlying provider
57
+ const model = (providerOptions
58
+ ? provider(modelId, providerOptions)
59
+ : provider(modelId));
57
60
  const userId = settings === null || settings === void 0 ? void 0 : settings.userId;
58
61
  const agentId = (settings === null || settings === void 0 ? void 0 : settings.agentId) || clConfig.defaultAgentId || "default";
59
62
  const sessionId = settings === null || settings === void 0 ? void 0 : settings.sessionId;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@kognitivedev/vercel-ai-provider",
3
- "version": "0.1.0",
3
+ "version": "0.1.2",
4
4
  "main": "dist/index.js",
5
5
  "types": "dist/index.d.ts",
6
6
  "publishConfig": {
package/src/index.ts CHANGED
@@ -16,7 +16,8 @@ export interface CognitiveLayerConfig {
16
16
 
17
17
  export type CLModelWrapper = (
18
18
  modelId: string,
19
- settings?: { userId?: string; agentId?: string; sessionId?: string }
19
+ settings?: { userId?: string; agentId?: string; sessionId?: string },
20
+ providerOptions?: Record<string, unknown>
20
21
  ) => LanguageModelV2;
21
22
 
22
23
  export function createCognitiveLayer(config: {
@@ -92,8 +93,17 @@ export function createCognitiveLayer(config: {
92
93
  return { nextParams, messages: updated, mode: "prepend-system" };
93
94
  };
94
95
 
95
- return (modelId: string, settings?: { userId?: string; agentId?: string; sessionId?: string }) => {
96
- const model = provider(modelId) as LanguageModelV2;
96
+ return (
97
+ modelId: string,
98
+ settings?: { userId?: string; agentId?: string; sessionId?: string },
99
+ providerOptions?: Record<string, unknown>
100
+ ) => {
101
+ // Pass provider options through to the underlying provider
102
+ const model = (
103
+ providerOptions
104
+ ? provider(modelId, providerOptions)
105
+ : provider(modelId)
106
+ ) as LanguageModelV2;
97
107
  const userId = settings?.userId;
98
108
  const agentId = settings?.agentId || clConfig.defaultAgentId || "default";
99
109
  const sessionId = settings?.sessionId;
@@ -158,7 +168,7 @@ ${userContext}
158
168
  systemPromptToAdd
159
169
  );
160
170
 
161
-
171
+
162
172
 
163
173
  console.log("CL: injecting memory system prompt", {
164
174
  systemPromptToAdd
@@ -169,7 +179,7 @@ ${userContext}
169
179
 
170
180
  return { ...nextParams, prompt: messagesWithMemory };
171
181
  },
172
-
182
+
173
183
  async wrapGenerate({ doGenerate, params }) {
174
184
  const result = await doGenerate();
175
185