@kognitivedev/vercel-ai-provider 0.1.5 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,7 +1,24 @@
  "use strict";
+ var __rest = (this && this.__rest) || function (s, e) {
+     var t = {};
+     for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
+         t[p] = s[p];
+     if (s != null && typeof Object.getOwnPropertySymbols === "function")
+         for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
+             if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
+                 t[p[i]] = s[p[i]];
+         }
+     return t;
+ };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.createCognitiveLayer = createCognitiveLayer;
  const ai_1 = require("ai");
+ function isValidId(value) {
+     if (value == null || typeof value !== "string")
+         return false;
+     const trimmed = value.trim();
+     return trimmed !== "" && trimmed !== "null" && trimmed !== "undefined";
+ }
  const LOG_LEVEL_PRIORITY = {
      none: 0,
      error: 1,
@@ -37,10 +54,22 @@ function createLogger(logLevel) {
          },
      };
  }
+ const PROMPT_CACHE_TTL_MS = 60000; // 1 minute
+ /**
+  * Interpolate {{variable}} placeholders in a template string.
+  * Unmatched variables are left as-is.
+  */
+ function interpolateTemplate(content, variables) {
+     return content.replace(/\{\{(\w+)\}\}/g, (_, key) => { var _a; return (_a = variables[key]) !== null && _a !== void 0 ? _a : `{{${key}}}`; });
+ }
  // Session-scoped snapshot cache: sessionKey → formatted memory block
  const sessionSnapshots = new Map();
  // Regex to detect if memory has already been injected
  const MEMORY_TAG_REGEX = /<MemoryContext>/i;
+ // Symbol-keyed property to track session settings on model objects
+ const SESSION_KEY = Symbol.for("cl:session");
+ // Session key → prompt metadata (populated by cl.streamText/cl.generateText, read by middleware)
+ const sessionPromptMetadata = new Map();
  /**
   * Check if any system message already contains a <MemoryContext> block.
   */
@@ -65,11 +94,43 @@ function createCognitiveLayer(config) {
      // Default to 'info' log level
      const logLevel = clConfig.logLevel || 'info';
      const logger = createLogger(logLevel);
+     const authHeaders = {
+         "Content-Type": "application/json",
+         "Authorization": `Bearer ${clConfig.apiKey}`,
+     };
+     // Prompt cache: slug → CachedPrompt
+     const promptCache = new Map();
+     const resolvePrompt = async (slug) => {
+         const cached = promptCache.get(slug);
+         if (cached && Date.now() - cached.fetchedAt < PROMPT_CACHE_TTL_MS) {
+             logger.debug("Using cached prompt", { slug, version: cached.version });
+             return cached;
+         }
+         const res = await fetch(`${baseUrl}/api/cognitive/prompt?slug=${encodeURIComponent(slug)}`, {
+             headers: { "Authorization": `Bearer ${clConfig.apiKey}` },
+         });
+         if (!res.ok) {
+             const body = await res.text();
+             throw new Error(`Failed to resolve prompt "${slug}": ${res.status} ${body}`);
+         }
+         const data = await res.json();
+         const entry = {
+             promptId: data.promptId,
+             slug: data.slug,
+             version: data.version,
+             content: data.content,
+             fetchedAt: Date.now(),
+             gatewaySlug: data.gatewaySlug,
+         };
+         promptCache.set(slug, entry);
+         logger.info("Prompt resolved", { slug, version: entry.version });
+         return entry;
+     };
      const logConversation = async (payload) => {
          try {
              await fetch(`${baseUrl}/api/cognitive/log`, {
                  method: "POST",
-                 headers: { "Content-Type": "application/json" },
+                 headers: authHeaders,
                  body: JSON.stringify(Object.assign(Object.assign({}, payload), { type: "conversation", timestamp: new Date().toISOString() })),
              });
          }
@@ -77,12 +138,12 @@ function createCognitiveLayer(config) {
              logger.error("Log failed", e);
          }
      };
-     const triggerProcessing = (userId, agentId, sessionId) => {
+     const triggerProcessing = (userId, projectId, sessionId) => {
          const run = () => {
              fetch(`${baseUrl}/api/cognitive/process`, {
                  method: "POST",
-                 headers: { "Content-Type": "application/json" },
-                 body: JSON.stringify({ userId, agentId, sessionId }),
+                 headers: authHeaders,
+                 body: JSON.stringify({ userId, sessionId }),
              }).catch(e => logger.error("Process trigger failed", e));
          };
          if (processDelay > 0) {
@@ -95,157 +156,298 @@ function createCognitiveLayer(config) {
      const withMemorySystemPrompt = (params, incomingMessages, memoryPrompt) => {
          var _a;
          const nextParams = Object.assign({}, params);
-         // 1) If caller provided a top-level system prompt, overwrite it.
-         if (nextParams.system) {
-             nextParams.system = memoryPrompt;
-             return { nextParams, messages: incomingMessages, mode: "overwrite-param" };
-         }
-         // 2) If first message is system, replace its content.
+         // 1) If first message is system, append memory to its content (without mutating original).
          if (incomingMessages.length > 0 && ((_a = incomingMessages[0]) === null || _a === void 0 ? void 0 : _a.role) === "system") {
-             const updated = [...incomingMessages];
-             let systemMessage = updated[0];
-             if (typeof systemMessage.content === "string")
-                 systemMessage.content = systemMessage + "\n\n" + memoryPrompt;
-             updated[0] = Object.assign(Object.assign({}, updated[0]), systemMessage);
+             const original = incomingMessages[0];
+             const updatedContent = typeof original.content === "string"
+                 ? original.content + "\n\n" + memoryPrompt
+                 : memoryPrompt;
+             const updated = [Object.assign(Object.assign({}, original), { content: updatedContent }), ...incomingMessages.slice(1)];
              return { nextParams, messages: updated, mode: "overwrite-first-system" };
          }
-         // 3) Otherwise prepend a system message.
+         // 2) Otherwise prepend a system message.
          const updated = [{ role: "system", content: memoryPrompt }, ...incomingMessages];
          return { nextParams, messages: updated, mode: "prepend-system" };
      };
-     return (modelId, settings, providerOptions) => {
-         // Pass provider options through to the underlying provider
-         const model = (providerOptions
-             ? provider(modelId, providerOptions)
-             : provider(modelId));
-         const userId = settings === null || settings === void 0 ? void 0 : settings.userId;
-         const agentId = (settings === null || settings === void 0 ? void 0 : settings.agentId) || clConfig.defaultAgentId || "default";
-         const sessionId = settings === null || settings === void 0 ? void 0 : settings.sessionId;
-         const sessionMissing = !!userId && !sessionId;
-         if (sessionMissing) {
-             logger.warn("sessionId is required to log and process memories; skipping logging until provided.");
-         }
-         return (0, ai_1.wrapLanguageModel)({
-             model,
-             middleware: {
-                 async transformParams({ params }) {
-                     if (!userId)
-                         return params;
-                     const incomingMessages = Array.isArray(params.prompt)
-                         ? params.prompt
-                         : [];
-                     // 1) Check if memory is already injected in messages
-                     if (hasExistingMemoryInjection(incomingMessages)) {
-                         logger.debug("Memory already injected, skipping");
-                         return params;
-                     }
-                     // 2) Check session cache
-                     const sessionKey = `${userId}:${agentId}:${sessionId || "default"}`;
-                     let systemPromptToAdd = sessionSnapshots.get(sessionKey);
-                     // 3) Fetch snapshot only if not cached
-                     if (systemPromptToAdd === undefined) {
-                         try {
-                             const url = `${baseUrl}/api/cognitive/snapshot?userId=${userId}&agentId=${agentId}&appId=${clConfig.appId}`;
-                             const res = await fetch(url);
-                             if (res.ok) {
-                                 const data = await res.json();
-                                 const systemBlock = data.systemBlock || "";
-                                 const userContextBlock = data.userContextBlock || "";
-                                 systemPromptToAdd =
-                                     systemBlock !== "" || userContextBlock !== ""
-                                         ? `
+     const buildMiddleware = (userId, projectId, sessionId, modelId) => ({
+         specificationVersion: 'v3',
+         async transformParams({ params }) {
+             if (!isValidId(userId))
+                 return params;
+             const incomingMessages = Array.isArray(params.prompt)
+                 ? params.prompt
+                 : [];
+             // 1) Check if memory is already injected in messages
+             if (hasExistingMemoryInjection(incomingMessages)) {
+                 logger.debug("Memory already injected, skipping");
+                 return params;
+             }
+             // 2) Check session cache
+             const sessionKey = `${userId}:${projectId}:${sessionId || "default"}`;
+             let systemPromptToAdd = sessionSnapshots.get(sessionKey);
+             // 3) Fetch snapshot only if not cached
+             if (systemPromptToAdd === undefined) {
+                 try {
+                     const url = `${baseUrl}/api/cognitive/snapshot?userId=${userId}`;
+                     const res = await fetch(url, {
+                         headers: { "Authorization": `Bearer ${clConfig.apiKey}` },
+                     });
+                     if (res.ok) {
+                         const data = await res.json();
+                         const systemBlock = data.systemBlock || "";
+                         const userContextBlock = data.userContextBlock || "";
+                         systemPromptToAdd =
+                             systemBlock !== "" || userContextBlock !== ""
+                                 ? `
  <MemoryContext>
  Use the following memory to stay consistent. Prefer UserContext facts for answers; AgentHeuristics guide style, safety, and priorities.
  ${systemBlock || "None"}
  ${userContextBlock || "None"}
  </MemoryContext>
-                                         `.trim()
-                                         : "";
-                                 // Cache the snapshot for this session
-                                 sessionSnapshots.set(sessionKey, systemPromptToAdd);
-                                 logger.info("Snapshot fetched and cached", {
-                                     userId,
-                                     agentId,
-                                     sessionId,
-                                     sessionKey,
-                                     systemLen: systemBlock.length,
-                                     userLen: userContextBlock.length,
-                                 });
-                                 // At debug level, log the full snapshot data
-                                 logger.debug("Full snapshot data", {
-                                     systemBlock,
-                                     userContextBlock,
-                                     rawData: data,
-                                 });
-                             }
-                             else {
-                                 logger.warn("Snapshot fetch failed", { status: res.status });
-                                 systemPromptToAdd = "";
-                                 sessionSnapshots.set(sessionKey, systemPromptToAdd);
-                             }
-                         }
-                         catch (e) {
-                             logger.warn("Failed to fetch snapshot", e);
-                             systemPromptToAdd = "";
-                             sessionSnapshots.set(sessionKey, systemPromptToAdd);
-                         }
-                     }
-                     else {
-                         logger.debug("Using cached snapshot for session", { sessionKey });
-                     }
-                     if (!systemPromptToAdd) {
-                         return Object.assign(Object.assign({}, params), { messages: incomingMessages });
-                     }
-                     const { nextParams, messages: messagesWithMemory } = withMemorySystemPrompt(params, incomingMessages, systemPromptToAdd);
-                     logger.info("Injecting memory system prompt", {
-                         sessionKey,
-                         promptLength: systemPromptToAdd.length,
-                     });
-                     logger.debug("Injected prompt content", { systemPromptToAdd });
-                     return Object.assign(Object.assign({}, nextParams), { prompt: messagesWithMemory });
-                 },
-                 async wrapGenerate({ doGenerate, params }) {
-                     var _a;
-                     const result = await doGenerate();
-                     if (userId && sessionId) {
-                         const messagesInput = params.messages || params.prompt || [];
-                         const resultMessages = (_a = result === null || result === void 0 ? void 0 : result.response) === null || _a === void 0 ? void 0 : _a.messages;
-                         const assistantMessage = (result === null || result === void 0 ? void 0 : result.text)
-                             ? [{ role: "assistant", content: [{ type: "text", text: result.text }] }]
-                             : [];
-                         const finalMessages = Array.isArray(resultMessages) && resultMessages.length > 0
-                             ? resultMessages
-                             : [...messagesInput, ...assistantMessage];
-                         logConversation({
+                                 `.trim()
+                                 : "";
+                         // Cache the snapshot for this session
+                         sessionSnapshots.set(sessionKey, systemPromptToAdd);
+                         logger.info("Snapshot fetched and cached", {
                              userId,
-                             agentId,
+                             projectId,
                              sessionId,
-                             messages: finalMessages,
-                             modelId,
-                         }).then(() => triggerProcessing(userId, agentId, sessionId));
+                             sessionKey,
+                             systemLen: systemBlock.length,
+                             userLen: userContextBlock.length,
+                         });
+                         // At debug level, log the full snapshot data
+                         logger.debug("Full snapshot data", {
+                             systemBlock,
+                             userContextBlock,
+                             rawData: data,
+                         });
                      }
-                     return result;
-                 },
-                 async wrapStream({ doStream, params }) {
-                     var _a;
-                     const result = await doStream();
-                     if (userId && sessionId) {
-                         const messagesInput = params.messages || params.prompt || [];
-                         const resultMessages = (_a = result === null || result === void 0 ? void 0 : result.response) === null || _a === void 0 ? void 0 : _a.messages;
-                         const finalMessages = Array.isArray(resultMessages) && resultMessages.length > 0
-                             ? resultMessages
-                             : messagesInput;
-                         logConversation({
-                             userId,
-                             agentId,
-                             sessionId,
-                             messages: finalMessages,
-                             modelId,
-                         }).then(() => triggerProcessing(userId, agentId, sessionId));
+                     else {
+                         logger.warn("Snapshot fetch failed", { status: res.status });
+                         systemPromptToAdd = "";
+                         sessionSnapshots.set(sessionKey, systemPromptToAdd);
                      }
-                     return result;
                  }
+                 catch (e) {
+                     logger.warn("Failed to fetch snapshot", e);
+                     systemPromptToAdd = "";
+                     sessionSnapshots.set(sessionKey, systemPromptToAdd);
+                 }
+             }
+             else {
+                 logger.debug("Using cached snapshot for session", { sessionKey });
+             }
+             if (!systemPromptToAdd) {
+                 return params;
+             }
+             const { nextParams, messages: messagesWithMemory } = withMemorySystemPrompt(params, incomingMessages, systemPromptToAdd);
+             logger.info("Injecting memory system prompt", {
+                 sessionKey,
+                 promptLength: systemPromptToAdd.length,
+             });
+             logger.debug("Injected prompt content", { systemPromptToAdd });
+             return Object.assign(Object.assign({}, nextParams), { prompt: messagesWithMemory });
+         },
+         async wrapGenerate({ doGenerate, params }) {
+             var _a, _b;
+             let result;
+             try {
+                 result = await doGenerate();
+             }
+             catch (err) {
+                 logger.error("doGenerate failed", err);
+                 logger.error("doGenerate params.prompt", JSON.stringify((_a = params.prompt) === null || _a === void 0 ? void 0 : _a.map((m) => ({ role: m.role, contentType: typeof m.content, contentLength: Array.isArray(m.content) ? m.content.length : undefined })), null, 2));
+                 throw err;
+             }
+             if (isValidId(userId) && isValidId(sessionId)) {
+                 const sessionKey = `${userId}:${projectId}:${sessionId}`;
+                 const promptMeta = sessionPromptMetadata.get(sessionKey);
+                 const messagesInput = params.messages || params.prompt || [];
+                 const resultMessages = (_b = result === null || result === void 0 ? void 0 : result.response) === null || _b === void 0 ? void 0 : _b.messages;
+                 const assistantMessage = (result === null || result === void 0 ? void 0 : result.text)
+                     ? [{ role: "assistant", content: [{ type: "text", text: result.text }] }]
+                     : [];
+                 const finalMessages = Array.isArray(resultMessages) && resultMessages.length > 0
+                     ? resultMessages
+                     : [...messagesInput, ...assistantMessage];
+                 logConversation(Object.assign({ userId,
+                     projectId,
+                     sessionId, messages: finalMessages, modelId, usage: result.usage }, (promptMeta && {
+                     promptSlug: promptMeta.promptSlug,
+                     promptVersion: promptMeta.promptVersion,
+                     promptId: promptMeta.promptId,
+                 }))).then(() => triggerProcessing(userId, projectId, sessionId));
+             }
+             return result;
+         },
+         async wrapStream({ doStream, params }) {
+             var _a;
+             let result;
+             try {
+                 logger.debug("Starting doStream with params", JSON.stringify(params, null, 2));
+                 result = await doStream();
+             }
+             catch (err) {
+                 console.log(err.cause);
+                 console.log(err.stack);
+                 logger.error("doStream failed", err);
+                 throw err;
              }
+             if (isValidId(userId) && isValidId(sessionId)) {
+                 const sessionKey = `${userId}:${projectId}:${sessionId}`;
+                 const promptMeta = sessionPromptMetadata.get(sessionKey);
+                 const messagesInput = params.messages || params.prompt || [];
+                 const resultMessages = (_a = result === null || result === void 0 ? void 0 : result.response) === null || _a === void 0 ? void 0 : _a.messages;
+                 const finalMessages = Array.isArray(resultMessages) && resultMessages.length > 0
+                     ? resultMessages
+                     : messagesInput;
+                 let streamUsage;
+                 let accumulatedText = '';
+                 const originalStream = result.stream;
+                 const transformStream = new TransformStream({
+                     transform(chunk, controller) {
+                         if (chunk.type === 'text-delta') {
+                             accumulatedText += chunk.delta;
+                         }
+                         if (chunk.type === 'finish' && chunk.usage) {
+                             streamUsage = chunk.usage;
+                         }
+                         controller.enqueue(chunk);
+                     },
+                     flush() {
+                         const allMessages = accumulatedText
+                             ? [...finalMessages, { role: "assistant", content: [{ type: "text", text: accumulatedText }] }]
+                             : finalMessages;
+                         logConversation(Object.assign({ userId,
+                             projectId,
+                             sessionId, messages: allMessages, modelId, usage: streamUsage }, (promptMeta && {
+                             promptSlug: promptMeta.promptSlug,
+                             promptVersion: promptMeta.promptVersion,
+                             promptId: promptMeta.promptId,
+                         }))).then(() => triggerProcessing(userId, projectId, sessionId));
+                     }
+                 });
+                 result.stream = originalStream.pipeThrough(transformStream);
+             }
+             return result;
+         }
+     });
+     const resolveModel = (originalModel, gatewaySlug) => {
+         if (!gatewaySlug || !clConfig.providerFactory) {
+             if (gatewaySlug && !clConfig.providerFactory) {
+                 logger.warn("Gateway config found but no providerFactory provided");
+             }
+             return originalModel;
+         }
+         try {
+             const gatewayURL = `${baseUrl}/api/cognitive/gateway/${gatewaySlug}`;
+             const modelId = originalModel.modelId || 'default';
+             const rawModel = clConfig.providerFactory(gatewayURL, clConfig.apiKey)(modelId);
+             const session = originalModel[SESSION_KEY];
+             if (!session)
+                 return rawModel;
+             const wrapped = (0, ai_1.wrapLanguageModel)({
+                 model: rawModel,
+                 middleware: buildMiddleware(session.userId, session.projectId, session.sessionId, modelId),
+             });
+             wrapped[SESSION_KEY] = session;
+             return wrapped;
+         }
+         catch (err) {
+             logger.error("Failed to create gateway model, falling back to original", err);
+             return originalModel;
+         }
+     };
+     const clWrapper = (modelId, settings, providerOptions) => {
+         // Pass provider options through to the underlying provider
+         const model = (providerOptions
+             ? provider(modelId, providerOptions)
+             : provider(modelId));
+         const userId = settings === null || settings === void 0 ? void 0 : settings.userId;
+         const projectId = (settings === null || settings === void 0 ? void 0 : settings.projectId) || clConfig.projectId || "default";
+         const sessionId = settings === null || settings === void 0 ? void 0 : settings.sessionId;
+         const sessionMissing = isValidId(userId) && !isValidId(sessionId);
+         if (sessionMissing) {
+             logger.warn("sessionId is required to log and process memories; skipping logging until provided.");
+         }
+         const wrappedModel = (0, ai_1.wrapLanguageModel)({
+             model: model,
+             middleware: buildMiddleware(userId, projectId, sessionId, modelId),
+         });
+         // Track session settings on the model for use in cl.streamText/cl.generateText
+         if (isValidId(userId) && isValidId(sessionId)) {
+             wrappedModel[SESSION_KEY] = { userId, projectId, sessionId };
+         }
+         return wrappedModel;
+     };
+     const clStreamText = async (options) => {
+         const { prompt: promptConfig } = options, rest = __rest(options, ["prompt"]);
+         // Resolve and interpolate prompt
+         const resolved = await resolvePrompt(promptConfig.slug);
+         const system = promptConfig.variables
+             ? interpolateTemplate(resolved.content, promptConfig.variables)
+             : resolved.content;
+         // Store prompt metadata for the session (read by middleware during logging)
+         const session = options.model[SESSION_KEY];
+         if (session) {
+             const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
+             sessionPromptMetadata.set(sessionKey, {
+                 promptSlug: resolved.slug,
+                 promptVersion: resolved.version,
+                 promptId: resolved.promptId,
+             });
+         }
+         logger.info("cl.streamText called", {
+             slug: promptConfig.slug,
+             version: resolved.version,
+             systemLength: system.length,
+         });
+         const model = resolveModel(options.model, resolved.gatewaySlug);
+         return (0, ai_1.streamText)(Object.assign(Object.assign({}, rest), { model, system }));
+     };
+     const clGenerateText = async (options) => {
+         const { prompt: promptConfig } = options, rest = __rest(options, ["prompt"]);
+         // Resolve and interpolate prompt
+         const resolved = await resolvePrompt(promptConfig.slug);
+         const system = promptConfig.variables
+             ? interpolateTemplate(resolved.content, promptConfig.variables)
+             : resolved.content;
+         // Store prompt metadata for the session (read by middleware during logging)
+         const session = options.model[SESSION_KEY];
+         if (session) {
+             const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
+             sessionPromptMetadata.set(sessionKey, {
+                 promptSlug: resolved.slug,
+                 promptVersion: resolved.version,
+                 promptId: resolved.promptId,
+             });
+         }
+         logger.info("cl.generateText called", {
+             slug: promptConfig.slug,
+             version: resolved.version,
+             systemLength: system.length,
          });
+         const model = resolveModel(options.model, resolved.gatewaySlug);
+         return (0, ai_1.generateText)(Object.assign(Object.assign({}, rest), { model, system }));
      };
+     // Return the model wrapper function with streamText/generateText attached
+     return Object.assign(clWrapper, {
+         streamText: clStreamText,
+         generateText: clGenerateText,
+         resolvePrompt,
+         logConversation,
+         triggerProcessing,
+         clearPromptCache: () => promptCache.clear(),
+         clearSessionCache: (sessionKey) => {
+             if (sessionKey) {
+                 sessionSnapshots.delete(sessionKey);
+                 sessionPromptMetadata.delete(sessionKey);
+             }
+             else {
+                 sessionSnapshots.clear();
+                 sessionPromptMetadata.clear();
+             }
+         },
+     });
  }
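
Taken together, the index.js changes turn the factory's return value from a bare model wrapper into a callable wrapper with streamText, generateText, prompt resolution, and cache helpers attached. A minimal usage sketch follows; the `provider` config field name, the @ai-sdk/openai import, and the model/slug/env names are illustrative assumptions, while `apiKey`, `projectId`, the `userId`/`sessionId` settings, and `prompt: { slug, variables }` are taken from the diff above.

    // Minimal usage sketch of the 0.1.7 surface (assumptions noted inline).
    const { createOpenAI } = require("@ai-sdk/openai"); // example provider, not confirmed by this diff
    const { createCognitiveLayer } = require("@kognitivedev/vercel-ai-provider");

    const cl = createCognitiveLayer({
        provider: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }), // assumed field name
        apiKey: process.env.KOGNITIVE_API_KEY, // now sent as a Bearer token (new in 0.1.7)
        projectId: "my-project",               // replaces 0.1.5's agentId/defaultAgentId
    });

    // The wrapper is still callable as in 0.1.5: it returns a model wrapped with
    // the memory-injection middleware. Both ids must be non-empty strings
    // (per isValidId) for logging and background processing to run.
    const model = cl("gpt-4o", { userId: "user-123", sessionId: "session-456" });

    // New in 0.1.7: streamText/generateText resolve a server-side prompt by slug,
    // interpolate {{variables}} into it, and pass the result as the system prompt.
    async function main() {
        const { text } = await cl.generateText({
            model,
            prompt: { slug: "support-agent", variables: { tone: "friendly" } },
            messages: [{ role: "user", content: "Hello!" }],
        });
        console.log(text);
    }

    main().catch(console.error);
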
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@kognitivedev/vercel-ai-provider",
-   "version": "0.1.5",
+   "version": "0.1.7",
    "main": "dist/index.js",
    "types": "dist/index.d.ts",
    "publishConfig": {
@@ -9,14 +9,16 @@
    "scripts": {
      "build": "tsc",
      "dev": "tsc -w",
+     "test": "vitest run",
      "prepublishOnly": "npm run build"
    },
    "peerDependencies": {
-     "ai": "^4.0.0 || ^5.0.0"
+     "ai": "^5.0.0 || ^6.0.0"
    },
    "devDependencies": {
      "typescript": "^5.0.0",
-     "ai": "latest",
-     "@types/node": "^20.0.0"
+     "ai": "^6.0.0",
+     "@types/node": "^20.0.0",
+     "vitest": "^3.0.0"
    }
  }