@kognitivedev/vercel-ai-provider 0.2.22 → 0.2.26

This diff shows the published contents of the two package versions as they appear in their public registries, and is provided for informational purposes only.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@kognitivedev/vercel-ai-provider",
-  "version": "0.2.22",
+  "version": "0.2.26",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "publishConfig": {
@@ -8,13 +8,15 @@
   },
   "scripts": {
     "build": "tsc",
-    "dev": "tsc -w",
+    "dev": "tsc -w --noCheck",
     "test": "vitest run",
     "prepublishOnly": "npm run build"
   },
   "dependencies": {
-    "@kognitivedev/prompthub": "^0.2.22",
-    "@kognitivedev/shared": "^0.2.22"
+    "@kognitivedev/memory": "^0.2.26",
+    "@kognitivedev/prompthub": "^0.2.26",
+    "@kognitivedev/shared": "^0.2.26",
+    "@kognitivedev/tools": "^0.2.26"
   },
   "peerDependencies": {
     "ai": "^5.0.0 || ^6.0.0"
@@ -1,6 +1,7 @@
 import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
 import type { LanguageModel } from "ai";
 import { MockLanguageModelV3, convertArrayToReadableStream } from "ai/test";
+import { ModerationError } from "@kognitivedev/shared";
 
 const promptHubMocks = vi.hoisted(() => ({
   resolvePrompt: vi.fn(),
@@ -22,7 +23,11 @@ vi.mock("ai", async (importOriginal) => {
 });
 
 import { streamText as aiStreamText, generateText as aiGenerateText } from "ai";
-import { createCognitiveLayer } from "../index";
+import {
+  createCognitiveLayer,
+  toGeneratedMessageEventStream,
+  toMessageEventStream,
+} from "../index";
 
 function minimalFinishUsage() {
   return {
@@ -73,6 +78,12 @@ function setupSnapshotAndLogFetch() {
     if (urlStr.includes("/api/cognitive/log")) {
       return new Response(JSON.stringify({ ok: true }), { status: 201 });
     }
+    if (urlStr.includes("/api/cognitive/agent-run")) {
+      return new Response(JSON.stringify({ ok: true, runDbId: "run-db-1" }), { status: 200 });
+    }
+    if (urlStr.includes("/api/cognitive/trace-events")) {
+      return new Response(JSON.stringify({ ok: true, traceDbId: "trace-db-1" }), { status: 200 });
+    }
     if (urlStr.includes("/api/cognitive/process")) {
       return new Response(JSON.stringify({ ok: true }), { status: 200 });
     }
@@ -156,6 +167,61 @@ describe("createCognitiveLayer extras", () => {
     cl.clearSessionCache();
   });
 
+  it("supports direct system usage without a managed prompt", async () => {
+    setupSnapshotAndLogFetch();
+
+    const cl = makeLayer();
+    const model = cl("mock-model", {
+      userId: "user-1",
+      projectId: "project-1",
+      sessionId: "session-1",
+    });
+
+    await cl.streamText({
+      model,
+      system: "Direct system prompt",
+      messages: [{ role: "user", content: "Hi" }],
+    } as any);
+
+    expect(aiStreamText).toHaveBeenCalledWith(
+      expect.objectContaining({
+        system: "Direct system prompt",
+      }),
+    );
+    expect(promptHubMocks.resolvePrompt).not.toHaveBeenCalled();
+  });
+
+  it("optionally auto-injects the topic memory tool into cl.streamText", async () => {
+    setupSnapshotAndLogFetch();
+
+    const cl = makeLayer();
+    const model = cl("mock-model", {
+      userId: "user-1",
+      projectId: "project-1",
+      sessionId: "session-1",
+    });
+
+    await cl.streamText({
+      model,
+      system: "Direct system prompt",
+      messages: [{ role: "user", content: "What do you know about Evital?" }],
+      tools: {
+        weather: { description: "Weather tool" },
+      },
+      kognitive: { autoTopicMemoryTool: true },
+    } as any);
+
+    expect(aiStreamText).toHaveBeenCalledWith(expect.objectContaining({
+      tools: expect.objectContaining({
+        weather: expect.any(Object),
+        "get-topic-memory": expect.objectContaining({
+          description: expect.stringContaining("Retrieve topic-scoped historical memory"),
+          execute: expect.any(Function),
+        }),
+      }),
+    }));
+  });
+
   it("uses a gateway model when providerFactory is available", async () => {
     setupSnapshotAndLogFetch();
     promptHubMocks.resolvePrompt.mockResolvedValue({
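
The auto-injection test above pins down the merge behavior: caller-supplied tools pass through untouched, and a "get-topic-memory" tool with its own execute function is added next to them. A hedged application-side sketch, using only the option and tool names the test asserts (cl and model as in the sketch after the package.json hunks):

import type { LanguageModel } from "ai";

declare const cl: any;              // layer from createCognitiveLayer
declare const model: LanguageModel; // handle from cl("...", { userId, projectId, sessionId })

// Opt in per call; "get-topic-memory" is merged into the tools map so the
// model can retrieve topic-scoped historical memory alongside your own tools.
await cl.streamText({
  model,
  system: "Direct system prompt",
  messages: [{ role: "user", content: "What do you know about Evital?" }],
  tools: {
    weather: { description: "Weather tool" }, // stands in for any app tool
  },
  kognitive: { autoTopicMemoryTool: true },
} as any);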
@@ -194,6 +260,70 @@ describe("createCognitiveLayer extras", () => {
     );
   });
 
+  it("converts streamText fullStream into message events", async () => {
+    const stream = toMessageEventStream({
+      fullStream: (async function* () {
+        yield { type: "tool-call", toolCallId: "tool-1", toolName: "get-topic-memory", input: { entity: "Evital" } };
+        yield { type: "tool-result", toolCallId: "tool-1", toolName: "get-topic-memory", result: { summary: "Known details" } };
+        yield { type: "text-delta", delta: "Known details" };
+      })(),
+    });
+
+    const reader = stream.getReader();
+    const chunks: Array<{ event: string; data: unknown }> = [];
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+      chunks.push(value);
+    }
+
+    expect(chunks).toEqual([
+      {
+        event: "messages",
+        data: { type: "tool-call", id: "tool-1", name: "get-topic-memory", input: { entity: "Evital" } },
+      },
+      {
+        event: "messages",
+        data: { type: "tool-result", id: "tool-1", name: "get-topic-memory", result: { summary: "Known details" } },
+      },
+      {
+        event: "messages",
+        data: { token: "Known details" },
+      },
+    ]);
+  });
+
+  it("converts generateText results into message events", async () => {
+    const stream = toGeneratedMessageEventStream({
+      text: "Known details",
+      toolCalls: [{ toolCallId: "tool-1", toolName: "get-topic-memory", input: { entity: "Evital" } }],
+      toolResults: [{ toolCallId: "tool-1", toolName: "get-topic-memory", result: { summary: "Known details" } }],
+    });
+
+    const reader = stream.getReader();
+    const chunks: Array<{ event: string; data: unknown }> = [];
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+      chunks.push(value);
+    }
+
+    expect(chunks).toEqual([
+      {
+        event: "messages",
+        data: { type: "tool-call", id: "tool-1", name: "get-topic-memory", input: { entity: "Evital" } },
+      },
+      {
+        event: "messages",
+        data: { type: "tool-result", id: "tool-1", name: "get-topic-memory", result: { summary: "Known details" } },
+      },
+      {
+        event: "messages",
+        data: { token: "Known details" },
+      },
+    ]);
+  });
+
   it("passes prompt tag and stores tag/ab metadata in logging payload", async () => {
     const fetchSpy = setupSnapshotAndLogFetch();
 
@@ -324,4 +454,31 @@ describe("createCognitiveLayer extras", () => {
     );
     expect(vi.mocked(aiGenerateText).mock.calls[0][0]).not.toHaveProperty("system");
   });
+
+  it("rethrows moderation errors from prompt resolution", async () => {
+    setupSnapshotAndLogFetch();
+    promptHubMocks.resolvePrompt.mockRejectedValue(new ModerationError({
+      error: "USER_RESTRICTED",
+      code: "user_restricted",
+      kind: "restriction",
+      surface: "prompt_hub",
+      reasonCode: "policy_violation",
+      message: "Prompt blocked.",
+      retryable: false,
+      expiresAt: null,
+    }));
+
+    const cl = makeLayer();
+    const model = cl("mock-model", {
+      userId: "user-1",
+      projectId: "project-1",
+      sessionId: "session-1",
+    });
+
+    await expect(cl.generateText({
+      model,
+      messages: [{ role: "user", content: "Hi" }],
+      prompt: { slug: "blocked" },
+    } as any)).rejects.toBeInstanceOf(ModerationError);
+  });
 });
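
The two conversion tests fix the shape of the new event-stream helpers: every chunk is { event: "messages", data }, where data is a tool-call part ({ type, id, name, input }), a tool-result part ({ type, id, name, result }), or a text token ({ token }). Because the helpers return a web ReadableStream, they compose directly with pipeThrough and Response bodies; a hedged server-side sketch built only on that asserted chunk shape (the SSE framing itself is an assumption, not part of the package API):

import { streamText, type LanguageModel } from "ai";
import { toMessageEventStream } from "@kognitivedev/vercel-ai-provider";

// Illustrative SSE endpoint: forwards the "messages" events to the client.
export function handleChat(model: LanguageModel, messages: any[]): Response {
  const result = streamText({ model, messages });
  const encoder = new TextEncoder();
  const body = toMessageEventStream({ fullStream: result.fullStream }).pipeThrough(
    new TransformStream<{ event: string; data: unknown }, Uint8Array>({
      transform(chunk, controller) {
        // One SSE frame per message event.
        controller.enqueue(
          encoder.encode(`event: ${chunk.event}\ndata: ${JSON.stringify(chunk.data)}\n\n`),
        );
      },
    }),
  );
  return new Response(body, { headers: { "content-type": "text/event-stream" } });
}

The ModerationError test in the last hunk likewise establishes that moderation failures during prompt resolution surface as typed errors rather than generic rejections, so callers can branch on err instanceof ModerationError. The hunks that follow belong to the wrapStream logging suite.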
@@ -5,9 +5,13 @@ import { MockLanguageModelV3, convertArrayToReadableStream } from "ai/test";
 
 describe("wrapStream logging", () => {
   let fetchCalls: { url: string; body: any }[];
+  let traceEventCalls: { url: string; body: any }[];
+  let agentRunCalls: { url: string; body: any }[];
 
   beforeEach(() => {
     fetchCalls = [];
+    traceEventCalls = [];
+    agentRunCalls = [];
 
     vi.stubGlobal(
       "fetch",
@@ -27,6 +31,18 @@ describe("wrapStream logging", () => {
       return new Response(JSON.stringify({ ok: true }), { status: 200 });
     }
 
+    if (urlStr.includes("/api/cognitive/agent-run")) {
+      const body = JSON.parse(init?.body as string);
+      agentRunCalls.push({ url: urlStr, body });
+      return new Response(JSON.stringify({ ok: true, runDbId: "run-db-1" }), { status: 200 });
+    }
+
+    if (urlStr.includes("/api/cognitive/trace-events")) {
+      const body = JSON.parse(init?.body as string);
+      traceEventCalls.push({ url: urlStr, body });
+      return new Response(JSON.stringify({ ok: true, traceDbId: "trace-db-1" }), { status: 200 });
+    }
+
     if (urlStr.includes("/api/cognitive/process")) {
       return new Response(JSON.stringify({ ok: true }), { status: 200 });
     }
@@ -106,7 +122,7 @@ describe("wrapStream logging", () => {
 
     const messages = logCall!.body.messages;
 
-    // Assistant message should contain text + tool-call parts
+    // Assistant message should contain the full replay-safe turn.
     const assistantMsg = messages.find((m: any) => m.role === "assistant");
     expect(assistantMsg).toBeDefined();
     expect(assistantMsg.content).toEqual([
@@ -117,12 +133,6 @@
         toolName: "get_weather",
         input: '{"city":"London"}',
       },
-    ]);
-
-    // Tool results should be in a separate tool message
-    const toolMsg = messages.find((m: any) => m.role === "tool");
-    expect(toolMsg).toBeDefined();
-    expect(toolMsg.content).toEqual([
       {
         type: "tool-result",
         toolCallId: "call-1",
@@ -130,6 +140,7 @@
         result: { temperature: 15, unit: "celsius" },
       },
     ]);
+    expect(messages.some((m: any) => m.role === "tool")).toBe(false);
 
     // Spans should include the tool call with populated previews
     const spans = logCall!.body.spans;
@@ -138,6 +149,19 @@
     expect(toolSpan.toolName).toBe("get_weather");
     expect(toolSpan.inputPreview).toContain("London");
    expect(toolSpan.outputPreview).toContain("15");
+    expect(traceEventCalls.some((call) => call.body.start)).toBe(true);
+    expect(traceEventCalls.some((call) =>
+      Array.isArray(call.body.events) &&
+      call.body.events.some((event: any) => event.eventType === "tool.started"))
+    ).toBe(true);
+    expect(traceEventCalls.some((call) =>
+      Array.isArray(call.body.events) &&
+      call.body.events.some((event: any) => event.eventType === "tool.completed"))
+    ).toBe(true);
+    expect(agentRunCalls).toHaveLength(2);
+    expect(agentRunCalls[0].body.status).toBe("running");
+    expect(agentRunCalls[1].body.status).toBe("running");
+    expect(agentRunCalls[0].body.runId).toBe(agentRunCalls[1].body.runId);
   });
 
   it("should include tool definitions in logged conversation when tools are present", async () => {
@@ -272,8 +296,126 @@ describe("wrapStream logging", () => {
     const messages = logCall!.body.messages;
     const assistantMsg = messages.find((m: any) => m.role === "assistant");
     expect(assistantMsg).toBeDefined();
-    expect(assistantMsg.content).toEqual([
-      { type: "text", text: "Hello world" },
-    ]);
+    expect(assistantMsg.content).toBe("Hello world");
+    expect(traceEventCalls.some((call) =>
+      Array.isArray(call.body.events) &&
+      call.body.events.some((event: any) => event.eventType === "assistant.progress"))
+    ).toBe(true);
+    expect(traceEventCalls.some((call) => call.body.finish?.state === "completed")).toBe(true);
+    expect(agentRunCalls).toHaveLength(2);
+    expect(agentRunCalls[0].body.runId).toBe(agentRunCalls[1].body.runId);
+    expect(logCall!.body.turnIndex).toBe(0);
+    expect(typeof logCall!.body.turnId).toBe("string");
+  });
+
+  it("reuses one session run but increments turn metadata per turn", async () => {
+    const makeModel = () =>
+      new MockLanguageModelV3({
+        doStream: async () => ({
+          stream: convertArrayToReadableStream([
+            { type: "text-start" as const, id: "t1" },
+            { type: "text-delta" as const, id: "t1", delta: "Hello" },
+            { type: "text-end" as const, id: "t1" },
+            {
+              type: "finish" as const,
+              finishReason: { unified: "stop" as const, raw: undefined },
+              usage: {
+                inputTokens: { total: 10, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },
+                outputTokens: { total: 5, text: undefined, reasoning: undefined },
+              },
+            },
+          ] satisfies import("@ai-sdk/provider").LanguageModelV3StreamPart[]),
+        }),
+      });
+
+    const cl = createCognitiveLayer({
+      provider: () => makeModel(),
+      clConfig: {
+        apiKey: "test-api-key",
+        appId: "test-app",
+        projectId: "test-project",
+        processDelayMs: 0,
+        logLevel: "none",
+      },
+    });
+
+    const model = cl("mock-model", {
+      userId: "user-1",
+      projectId: "project-1",
+      sessionId: "session-1",
+    });
+
+    await streamText({ model, messages: [{ role: "user", content: "first" }] }).text;
+    await streamText({ model, messages: [{ role: "user", content: "second" }] }).text;
+    await new Promise((r) => setTimeout(r, 100));
+
+    const logCalls = fetchCalls.filter((call) => call.url.includes("/api/cognitive/log"));
+    const runCalls = agentRunCalls.filter((call) => call.url.includes("/api/cognitive/agent-run"));
+
+    expect(logCalls).toHaveLength(2);
+    expect(logCalls.map((call) => call.body.turnIndex)).toEqual([0, 1]);
+    expect(new Set(logCalls.map((call) => call.body.turnId)).size).toBe(2);
+    expect(new Set(runCalls.map((call) => call.body.runId)).size).toBe(1);
+  });
+
+  it("forwards automatic thread title config from providerOptions to the log payload", async () => {
+    const mockModel = new MockLanguageModelV3({
+      doStream: async () => ({
+        stream: convertArrayToReadableStream([
+          { type: "text-start" as const, id: "t1" },
+          { type: "text-delta" as const, id: "t1", delta: "Knowledge base ready" },
+          { type: "text-end" as const, id: "t1" },
+          {
+            type: "finish" as const,
+            finishReason: { unified: "stop" as const, raw: undefined },
+            usage: {
+              inputTokens: { total: 12, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },
+              outputTokens: { total: 4, text: undefined, reasoning: undefined },
+            },
+          },
+        ] satisfies import("@ai-sdk/provider").LanguageModelV3StreamPart[]),
+      }),
+    });
+
+    const cl = createCognitiveLayer({
+      provider: () => mockModel,
+      clConfig: {
+        apiKey: "test-api-key",
+        appId: "test-app",
+        projectId: "test-project",
+        processDelayMs: 0,
+        logLevel: "none",
+      },
+    });
+
+    const model = cl("mock-model", {
+      userId: "user-1",
+      projectId: "project-1",
+      sessionId: "session-1",
+    });
+
+    await streamText({
+      model,
+      messages: [{ role: "user", content: "Search the knowledge base for onboarding docs" }],
+      providerOptions: {
+        kognitive: {
+          automaticTitle: {
+            trigger: "first-message",
+            strategy: "first-message",
+            maxLength: 60,
+          },
+        },
+      },
+    }).text;
+
+    await new Promise((r) => setTimeout(r, 100));
+
+    const logCall = fetchCalls.find((call) => call.url.includes("/api/cognitive/log"));
+    expect(logCall).toBeDefined();
+    expect(logCall!.body.automaticTitle).toEqual({
+      trigger: "first-message",
+      strategy: "first-message",
+      maxLength: 60,
+    });
   });
 });
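
Read together, these logging tests pin down the new telemetry contract: one agent run per session (a stable runId with "running" status updates), batched trace-events carrying a start marker, tool.started / tool.completed / assistant.progress entries, and a finish state, tool results folded into the assistant message instead of a separate "tool" role, and per-turn turnIndex / turnId fields on the log payload. The only new application-facing surface in this suite is providerOptions.kognitive; a short sketch using exactly the automatic-title options asserted above:

import { streamText, type LanguageModel } from "ai";

declare const model: LanguageModel; // handle from cl("...", { userId, projectId, sessionId })

// The automaticTitle block is forwarded verbatim to the /api/cognitive/log payload.
const result = streamText({
  model,
  messages: [{ role: "user", content: "Search the knowledge base for onboarding docs" }],
  providerOptions: {
    kognitive: {
      automaticTitle: {
        trigger: "first-message",
        strategy: "first-message",
        maxLength: 60,
      },
    },
  },
});
await result.text;

Note that the tests wait roughly 100 ms before inspecting fetch calls, which suggests the logging side effects are flushed asynchronously after the stream completes.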