@kognitivedev/vercel-ai-provider 0.2.1 → 0.2.3

This diff shows the published contents of package versions released to one of the supported public registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in those registries.
package/CHANGELOG.md ADDED
@@ -0,0 +1,11 @@
+ # @kognitivedev/vercel-ai-provider
+
+ ## 0.2.3
+
+ ### Patch Changes
+
+ - fix: resolve workspace dependencies for external consumers
+
+ - Updated dependencies []:
+ - @kognitivedev/prompthub@0.1.1
+ - @kognitivedev/shared@0.2.3
@@ -116,6 +116,68 @@ const test_1 = require("ai/test");
  (0, vitest_1.expect)(toolSpan.inputPreview).toContain("London");
  (0, vitest_1.expect)(toolSpan.outputPreview).toContain("15");
  });
+ (0, vitest_1.it)("should include tool definitions in logged conversation when tools are present", async () => {
+ const mockModel = new test_1.MockLanguageModelV3({
+ doStream: async () => ({
+ stream: (0, test_1.convertArrayToReadableStream)([
+ { type: "text-start", id: "t1" },
+ { type: "text-delta", id: "t1", delta: "Sure" },
+ { type: "text-end", id: "t1" },
+ {
+ type: "finish",
+ finishReason: {
+ unified: "stop",
+ raw: undefined,
+ },
+ usage: {
+ inputTokens: { total: 10, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },
+ outputTokens: { total: 5, text: undefined, reasoning: undefined },
+ },
+ },
+ ]),
+ }),
+ });
+ const mockProvider = () => mockModel;
+ const cl = (0, index_1.createCognitiveLayer)({
+ provider: mockProvider,
+ clConfig: {
+ apiKey: "test-api-key",
+ appId: "test-app",
+ projectId: "test-project",
+ processDelayMs: 0,
+ logLevel: "none",
+ },
+ });
+ const model = cl("mock-model", {
+ userId: "user-1",
+ projectId: "project-1",
+ sessionId: "session-1",
+ });
+ const result = (0, ai_1.streamText)({
+ model,
+ messages: [{ role: "user", content: "What's the weather?" }],
+ tools: {
+ get_weather: {
+ description: "Get the current weather for a location",
+ parameters: {
+ type: "object",
+ properties: {
+ city: { type: "string", description: "City name" },
+ },
+ required: ["city"],
+ },
+ },
+ },
+ });
+ await result.text;
+ await new Promise((r) => setTimeout(r, 100));
+ const logCall = fetchCalls.find((c) => c.url.includes("/api/cognitive/log"));
+ (0, vitest_1.expect)(logCall).toBeDefined();
+ (0, vitest_1.expect)(logCall.body.tools).toBeDefined();
+ (0, vitest_1.expect)(logCall.body.tools).toEqual(vitest_1.expect.arrayContaining([
+ vitest_1.expect.objectContaining({ name: "get_weather" }),
+ ]));
+ });
  (0, vitest_1.it)("should include assistant message in logged conversation after streaming", async () => {
  const mockModel = new test_1.MockLanguageModelV3({
  doStream: async () => ({
package/dist/index.d.ts CHANGED
@@ -64,6 +64,12 @@ export interface LogConversationPayload {
  startedAt?: string;
  endedAt?: string;
  durationMs?: number;
+ tools?: Array<{
+ name: string;
+ description?: string;
+ parameters?: Record<string, unknown>;
+ }>;
+ agentRunId?: string;
  metadata?: Record<string, unknown>;
  spans?: Array<{
  spanKey: string;
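
For orientation, a minimal sketch (not part of the package) of a payload using the two optional fields this release adds, assuming LogConversationPayload is exported from the package root as dist/index.d.ts declares it; all values are illustrative:

    import type { LogConversationPayload } from "@kognitivedev/vercel-ai-provider";

    // Illustrative values only; the field shapes come from the declaration above.
    const payload: Partial<LogConversationPayload> = {
      tools: [
        {
          name: "get_weather",
          description: "Get the current weather for a location",
          parameters: { type: "object", properties: { city: { type: "string" } }, required: ["city"] },
        },
      ],
      agentRunId: "run-123", // hypothetical identifier linking this conversation to an agent run
    };
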
package/dist/index.js CHANGED
@@ -15,6 +15,7 @@ exports.renderTemplate = void 0;
  exports.createCognitiveLayer = createCognitiveLayer;
  const ai_1 = require("ai");
  const crypto_1 = require("crypto");
+ const prompthub_1 = require("@kognitivedev/prompthub");
  var template_1 = require("./template");
  Object.defineProperty(exports, "renderTemplate", { enumerable: true, get: function () { return template_1.renderTemplate; } });
  const template_2 = require("./template");
@@ -71,7 +72,6 @@ function createLogger(logLevel) {
  },
  };
  }
- const PROMPT_CACHE_TTL_MS = 60000; // 1 minute
  function getContentText(content) {
  if (typeof content === "string")
  return content;
@@ -158,6 +158,21 @@ function buildTraceSpansFromMessages(messages) {
  }
  return spans;
  }
+ function extractToolDefinitions(params) {
+ const tools = params.tools;
+ if (!Array.isArray(tools) || tools.length === 0)
+ return undefined;
+ const defs = [];
+ for (const tool of tools) {
+ if (tool.type === 'function') {
+ defs.push(Object.assign(Object.assign({ name: tool.name }, (tool.description && { description: tool.description })), (tool.inputSchema && { parameters: tool.inputSchema })));
+ }
+ else if (tool.type === 'provider' || tool.type === 'provider-defined') {
+ defs.push(Object.assign({ name: tool.name }, (tool.args && { parameters: tool.args })));
+ }
+ }
+ return defs.length > 0 ? defs : undefined;
+ }
  // Session-scoped snapshot cache: sessionKey → formatted memory block
  const sessionSnapshots = new Map();
  // Regex to detect if memory has already been injected
@@ -194,48 +209,20 @@ function createCognitiveLayer(config) {
  "Content-Type": "application/json",
  "Authorization": `Bearer ${clConfig.apiKey}`,
  };
- // Prompt cache: slug → CachedPrompt
- const promptCache = new Map();
+ const promptClient = (0, prompthub_1.createPromptHubClient)({
+ baseUrl,
+ apiKey: clConfig.apiKey,
+ logger,
+ });
  const resolvePrompt = async (slug, userId) => {
  var _a;
- const cacheKey = userId ? `${slug}:${userId}` : slug;
- const cached = promptCache.get(cacheKey);
- if (cached && Date.now() - cached.fetchedAt < PROMPT_CACHE_TTL_MS) {
- logger.debug("Using cached prompt", { slug, version: cached.version });
- return cached;
- }
- const url = new URL(`${baseUrl}/api/cognitive/prompt`);
- url.searchParams.set("slug", slug);
- if (userId)
- url.searchParams.set("userId", userId);
  logger.debug("Resolving prompt from backend", {
  slug,
  userId,
- url: url.toString(),
  baseUrl,
  apiKeyHint: maskSecret(clConfig.apiKey),
  });
- const res = await fetch(url.toString(), {
- headers: { "Authorization": `Bearer ${clConfig.apiKey}` },
- });
- logger.debug("Prompt resolve response received", {
- slug,
- userId,
- status: res.status,
- ok: res.ok,
- contentType: res.headers.get("content-type"),
- });
- if (!res.ok) {
- const body = await res.text();
- logger.debug("Prompt resolve response body preview", {
- slug,
- userId,
- status: res.status,
- bodyPreview: previewText(body),
- });
- throw new Error(`Failed to resolve prompt "${slug}": ${res.status} ${body}`);
- }
- const data = await res.json();
+ const data = await promptClient.resolvePrompt({ slug, userId });
  const entry = {
  promptId: data.promptId,
  slug: data.slug,
@@ -244,7 +231,6 @@ function createCognitiveLayer(config) {
  fetchedAt: Date.now(),
  gatewaySlug: data.gatewaySlug,
  };
- promptCache.set(cacheKey, entry);
  logger.debug("Prompt resolved payload", {
  slug,
  resolvedSlug: entry.slug,
@@ -404,7 +390,7 @@ ${userContextBlock || "None"}
  return Object.assign(Object.assign({}, nextParams), { prompt: messagesWithMemory });
  },
  async wrapGenerate({ doGenerate, params }) {
- var _a;
+ var _a, _b, _c;
  const startedAt = new Date();
  let result;
  try {
@@ -419,6 +405,7 @@ ${userContextBlock || "None"}
  const endedAt = new Date();
  const sessionKey = `${userId}:${projectId}:${sessionId}`;
  const promptMeta = sessionPromptMetadata.get(sessionKey);
+ const agentRunId = (_c = (_b = params === null || params === void 0 ? void 0 : params.providerMetadata) === null || _b === void 0 ? void 0 : _b.kognitive) === null || _c === void 0 ? void 0 : _c.agentRunId;
  const messagesInput = params.prompt || params.messages || [];
  // Build assistant message from result.content (V2/V3 GenerateResult)
  const resultContent = Array.isArray(result === null || result === void 0 ? void 0 : result.content) ? result.content : [];
@@ -450,13 +437,14 @@ ${userContextBlock || "None"}
  const finalMessages = [...messagesInput, ...assistantMessage];
  const { requestPreview, responsePreview } = buildTracePreviews(finalMessages);
  const spans = buildTraceSpansFromMessages(finalMessages);
- logConversation(Object.assign(Object.assign({ userId,
+ const toolDefs = extractToolDefinitions(params);
+ logConversation(Object.assign(Object.assign(Object.assign(Object.assign({ userId,
  projectId,
  sessionId, messages: finalMessages, modelId, usage: result.usage }, (promptMeta && {
  promptSlug: promptMeta.promptSlug,
  promptVersion: promptMeta.promptVersion,
  promptId: promptMeta.promptId,
- })), { traceId: (0, crypto_1.randomUUID)(), requestPreview,
+ })), (toolDefs && { tools: toolDefs })), (agentRunId && { agentRunId })), { traceId: (0, crypto_1.randomUUID)(), requestPreview,
  responsePreview, state: "completed", startedAt: startedAt.toISOString(), endedAt: endedAt.toISOString(), durationMs: endedAt.getTime() - startedAt.getTime(), metadata: {
  appId: clConfig.appId,
  }, spans })).then(() => triggerProcessing(userId, projectId, sessionId));
@@ -464,7 +452,7 @@ ${userContextBlock || "None"}
  return result;
  },
  async wrapStream({ doStream, params }) {
- var _a;
+ var _a, _b, _c;
  const startedAt = new Date();
  const traceId = (0, crypto_1.randomUUID)();
  let result;
@@ -481,8 +469,9 @@ ${userContextBlock || "None"}
  if (isValidId(userId) && isValidId(sessionId)) {
  const sessionKey = `${userId}:${projectId}:${sessionId}`;
  const promptMeta = sessionPromptMetadata.get(sessionKey);
+ const agentRunId = (_b = (_a = params === null || params === void 0 ? void 0 : params.providerMetadata) === null || _a === void 0 ? void 0 : _a.kognitive) === null || _b === void 0 ? void 0 : _b.agentRunId;
  const messagesInput = params.prompt || params.messages || [];
- const resultMessages = (_a = result === null || result === void 0 ? void 0 : result.response) === null || _a === void 0 ? void 0 : _a.messages;
+ const resultMessages = (_c = result === null || result === void 0 ? void 0 : result.response) === null || _c === void 0 ? void 0 : _c.messages;
  const finalMessages = Array.isArray(resultMessages) && resultMessages.length > 0
  ? resultMessages
  : messagesInput;
@@ -554,18 +543,21 @@ ${userContextBlock || "None"}
  }
  const { requestPreview, responsePreview } = buildTracePreviews(allMessages);
  const spans = buildTraceSpansFromMessages(allMessages);
- await logConversation(Object.assign(Object.assign({ userId,
+ const toolDefs = extractToolDefinitions(params);
+ // Fire-and-forget: do not await so the stream closes immediately,
+ // allowing the AI SDK's multi-step continuation logic to proceed.
+ logConversation(Object.assign(Object.assign(Object.assign(Object.assign({ userId,
  projectId,
  sessionId, messages: allMessages, modelId, usage: streamUsage }, (promptMeta && {
  promptSlug: promptMeta.promptSlug,
  promptVersion: promptMeta.promptVersion,
  promptId: promptMeta.promptId,
- })), { traceId,
+ })), (toolDefs && { tools: toolDefs })), (agentRunId && { agentRunId })), { traceId,
  requestPreview,
  responsePreview, state: "completed", startedAt: startedAt.toISOString(), endedAt: endedAt.toISOString(), durationMs: endedAt.getTime() - startedAt.getTime(), metadata: {
  appId: clConfig.appId,
- }, spans }));
- triggerProcessing(userId, projectId, sessionId);
+ }, spans })).then(() => triggerProcessing(userId, projectId, sessionId))
+ .catch((e) => logger.error("Stream log failed", e));
  }
  });
  result.stream = originalStream.pipeThrough(transformStream);
@@ -708,7 +700,7 @@ ${userContextBlock || "None"}
  resolvePrompt,
  logConversation,
  triggerProcessing,
- clearPromptCache: () => promptCache.clear(),
+ clearPromptCache: () => promptClient.clearPromptCache(),
  clearSessionCache: (sessionKey) => {
  if (sessionKey) {
  sessionSnapshots.delete(sessionKey);
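
To illustrate the new extractToolDefinitions helper above (a sketch derived from that code, not a verbatim excerpt): a "function" tool contributes its name, description, and inputSchema (as parameters), while a provider-defined tool contributes its args as parameters.

    // Simplified call params as the middleware sees them (function-tool case).
    const params = {
      tools: [
        {
          type: "function",
          name: "get_weather",
          description: "Get the current weather for a location",
          inputSchema: { type: "object", properties: { city: { type: "string" } }, required: ["city"] },
        },
      ],
    };

    // What extractToolDefinitions(params) would report under `tools` in the logged payload:
    const expected = [
      {
        name: "get_weather",
        description: "Get the current weather for a location",
        parameters: { type: "object", properties: { city: { type: "string" } }, required: ["city"] },
      },
    ];
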
@@ -1,2 +1 @@
- export type TemplateVariables = Record<string, string | boolean>;
- export declare function renderTemplate(template: string, variables: TemplateVariables): string;
+ export { renderTemplate, type TemplateVariables } from "@kognitivedev/shared";
package/dist/template.js CHANGED
@@ -1,13 +1,5 @@
  "use strict";
- var __importDefault = (this && this.__importDefault) || function (mod) {
- return (mod && mod.__esModule) ? mod : { "default": mod };
- };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.renderTemplate = renderTemplate;
- // Use the pre-built dist to avoid `require.extensions` warning in webpack/Next.js
- // eslint-disable-next-line @typescript-eslint/no-require-imports
- const handlebars_1 = __importDefault(require("handlebars/dist/cjs/handlebars"));
- function renderTemplate(template, variables) {
- const compiled = handlebars_1.default.compile(template, { noEscape: true });
- return compiled(variables);
- }
+ exports.renderTemplate = void 0;
+ var shared_1 = require("@kognitivedev/shared");
+ Object.defineProperty(exports, "renderTemplate", { enumerable: true, get: function () { return shared_1.renderTemplate; } });
package/package.json CHANGED
@@ -1,27 +1,42 @@
  {
- "name": "@kognitivedev/vercel-ai-provider",
- "version": "0.2.1",
- "main": "dist/index.js",
- "types": "dist/index.d.ts",
- "publishConfig": {
- "access": "public"
- },
- "scripts": {
- "build": "tsc",
- "dev": "tsc -w",
- "test": "vitest run",
- "prepublishOnly": "npm run build"
- },
- "dependencies": {
- "handlebars": "^4.7.8"
- },
- "peerDependencies": {
- "ai": "^5.0.0 || ^6.0.0"
- },
- "devDependencies": {
- "typescript": "^5.0.0",
- "ai": "^6.0.0",
- "@types/node": "^20.0.0",
- "vitest": "^3.0.0"
- }
- }
+ "name": "@kognitivedev/vercel-ai-provider",
+ "version": "0.2.3",
+ "main": "dist/index.js",
+ "types": "dist/index.d.ts",
+ "publishConfig": {
+ "access": "public"
+ },
+ "scripts": {
+ "build": "tsc",
+ "dev": "tsc -w",
+ "test": "vitest run",
+ "prepublishOnly": "npm run build"
+ },
+ "dependencies": {
+ "@kognitivedev/prompthub": "workspace:*",
+ "@kognitivedev/shared": "workspace:*"
+ },
+ "peerDependencies": {
+ "ai": "^5.0.0 || ^6.0.0"
+ },
+ "devDependencies": {
+ "typescript": "^5.0.0",
+ "ai": "^6.0.0",
+ "@types/node": "^20.0.0",
+ "vitest": "^3.0.0"
+ },
+ "description": "Vercel AI SDK middleware for memory injection and prompt management",
+ "keywords": [
+ "kognitive",
+ "vercel-ai-sdk",
+ "memory",
+ "middleware"
+ ],
+ "license": "MIT",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/kognitivedev/kognitive",
+ "directory": "packages/provider"
+ },
+ "homepage": "https://kognitive.dev"
+ }
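
The workspace:* entries above use the package manager's workspace protocol; tools such as pnpm and Changesets normally rewrite them to concrete version ranges when the tarball is packed (the changelog above notes the concrete versions @kognitivedev/prompthub@0.1.1 and @kognitivedev/shared@0.2.3). A minimal, hypothetical consumer sketch against the published entry points ("main": "dist/index.js", "types": "dist/index.d.ts"); the config fields mirror the package's own tests and any additional required options are an assumption:

    import { createCognitiveLayer } from "@kognitivedev/vercel-ai-provider";

    // Hypothetical consumer setup; the provider-factory shape is assumed from the tests.
    declare const myProvider: (modelId: string) => unknown;

    const cl = createCognitiveLayer({
      provider: myProvider,
      clConfig: {
        apiKey: "my-api-key",
        appId: "my-app",
        projectId: "my-project",
      },
    });

    // Wrap a model with per-session identifiers, as the tests do.
    const model = cl("my-model", { userId: "user-1", projectId: "project-1", sessionId: "session-1" });
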
@@ -140,6 +140,77 @@ describe("wrapStream logging", () => {
  expect(toolSpan.outputPreview).toContain("15");
  });

+ it("should include tool definitions in logged conversation when tools are present", async () => {
+ const mockModel = new MockLanguageModelV3({
+ doStream: async () => ({
+ stream: convertArrayToReadableStream([
+ { type: "text-start" as const, id: "t1" },
+ { type: "text-delta" as const, id: "t1", delta: "Sure" },
+ { type: "text-end" as const, id: "t1" },
+ {
+ type: "finish" as const,
+ finishReason: {
+ unified: "stop" as const,
+ raw: undefined,
+ },
+ usage: {
+ inputTokens: { total: 10, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },
+ outputTokens: { total: 5, text: undefined, reasoning: undefined },
+ },
+ },
+ ] satisfies import("@ai-sdk/provider").LanguageModelV3StreamPart[]),
+ }),
+ });
+
+ const mockProvider = () => mockModel;
+
+ const cl = createCognitiveLayer({
+ provider: mockProvider,
+ clConfig: {
+ apiKey: "test-api-key",
+ appId: "test-app",
+ projectId: "test-project",
+ processDelayMs: 0,
+ logLevel: "none",
+ },
+ });
+
+ const model = cl("mock-model", {
+ userId: "user-1",
+ projectId: "project-1",
+ sessionId: "session-1",
+ });
+
+ const result = streamText({
+ model,
+ messages: [{ role: "user", content: "What's the weather?" }],
+ tools: {
+ get_weather: {
+ description: "Get the current weather for a location",
+ parameters: {
+ type: "object",
+ properties: {
+ city: { type: "string", description: "City name" },
+ },
+ required: ["city"],
+ },
+ },
+ } as any,
+ });
+
+ await result.text;
+ await new Promise((r) => setTimeout(r, 100));
+
+ const logCall = fetchCalls.find((c) => c.url.includes("/api/cognitive/log"));
+ expect(logCall).toBeDefined();
+ expect(logCall!.body.tools).toBeDefined();
+ expect(logCall!.body.tools).toEqual(
+ expect.arrayContaining([
+ expect.objectContaining({ name: "get_weather" }),
+ ])
+ );
+ });
+
  it("should include assistant message in logged conversation after streaming", async () => {
  const mockModel = new MockLanguageModelV3({
  doStream: async () => ({
package/src/index.ts CHANGED
@@ -5,6 +5,7 @@ import {
  type LanguageModel,
  } from "ai";
  import { randomUUID } from "crypto";
+ import { createPromptHubClient } from "@kognitivedev/prompthub";
  export { renderTemplate, type TemplateVariables } from "./template";
  import { renderTemplate } from "./template";

@@ -133,6 +134,8 @@ export interface LogConversationPayload {
  startedAt?: string;
  endedAt?: string;
  durationMs?: number;
+ tools?: Array<{ name: string; description?: string; parameters?: Record<string, unknown> }>;
+ agentRunId?: string;
  metadata?: Record<string, unknown>;
  spans?: Array<{
  spanKey: string;
@@ -169,8 +172,6 @@ export interface CachedPrompt {
  gatewaySlug?: string;
  }

- const PROMPT_CACHE_TTL_MS = 60_000; // 1 minute
-
  function getContentText(content: any): string {
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return "";
@@ -276,6 +277,28 @@ function buildTraceSpansFromMessages(messages: any[]): Array<{
  return spans;
  }

+ function extractToolDefinitions(params: any): Array<{ name: string; description?: string; parameters?: Record<string, unknown> }> | undefined {
+ const tools = (params as any).tools;
+ if (!Array.isArray(tools) || tools.length === 0) return undefined;
+
+ const defs: Array<{ name: string; description?: string; parameters?: Record<string, unknown> }> = [];
+ for (const tool of tools) {
+ if (tool.type === 'function') {
+ defs.push({
+ name: tool.name,
+ ...(tool.description && { description: tool.description }),
+ ...(tool.inputSchema && { parameters: tool.inputSchema as Record<string, unknown> }),
+ });
+ } else if (tool.type === 'provider' || tool.type === 'provider-defined') {
+ defs.push({
+ name: tool.name,
+ ...(tool.args && { parameters: tool.args as Record<string, unknown> }),
+ });
+ }
+ }
+ return defs.length > 0 ? defs : undefined;
+ }
+
  // Session-scoped snapshot cache: sessionKey → formatted memory block
  const sessionSnapshots = new Map<string, string>();

@@ -323,51 +346,21 @@ export function createCognitiveLayer(config: {
  "Authorization": `Bearer ${clConfig.apiKey}`,
  };

- // Prompt cache: slug → CachedPrompt
- const promptCache = new Map<string, CachedPrompt>();
+ const promptClient = createPromptHubClient({
+ baseUrl,
+ apiKey: clConfig.apiKey,
+ logger,
+ });

  const resolvePrompt = async (slug: string, userId?: string): Promise<CachedPrompt> => {
- const cacheKey = userId ? `${slug}:${userId}` : slug;
- const cached = promptCache.get(cacheKey);
- if (cached && Date.now() - cached.fetchedAt < PROMPT_CACHE_TTL_MS) {
- logger.debug("Using cached prompt", { slug, version: cached.version });
- return cached;
- }
-
- const url = new URL(`${baseUrl}/api/cognitive/prompt`);
- url.searchParams.set("slug", slug);
- if (userId) url.searchParams.set("userId", userId);
-
  logger.debug("Resolving prompt from backend", {
  slug,
  userId,
- url: url.toString(),
  baseUrl,
  apiKeyHint: maskSecret(clConfig.apiKey),
  });

- const res = await fetch(url.toString(), {
- headers: { "Authorization": `Bearer ${clConfig.apiKey}` },
- });
- logger.debug("Prompt resolve response received", {
- slug,
- userId,
- status: res.status,
- ok: res.ok,
- contentType: res.headers.get("content-type"),
- });
- if (!res.ok) {
- const body = await res.text();
- logger.debug("Prompt resolve response body preview", {
- slug,
- userId,
- status: res.status,
- bodyPreview: previewText(body),
- });
- throw new Error(`Failed to resolve prompt "${slug}": ${res.status} ${body}`);
- }
-
- const data = await res.json();
+ const data = await promptClient.resolvePrompt({ slug, userId });
  const entry: CachedPrompt = {
  promptId: data.promptId,
  slug: data.slug,
@@ -376,7 +369,6 @@ export function createCognitiveLayer(config: {
  fetchedAt: Date.now(),
  gatewaySlug: data.gatewaySlug,
  };
- promptCache.set(cacheKey, entry);
  logger.debug("Prompt resolved payload", {
  slug,
  resolvedSlug: entry.slug,
@@ -574,6 +566,7 @@ ${userContextBlock || "None"}
  const endedAt = new Date();
  const sessionKey = `${userId}:${projectId}:${sessionId}`;
  const promptMeta = sessionPromptMetadata.get(sessionKey);
+ const agentRunId = (params as any)?.providerMetadata?.kognitive?.agentRunId as string | undefined;

  const messagesInput = (params as any).prompt || (params as any).messages || [];

@@ -605,6 +598,7 @@ ${userContextBlock || "None"}
  const finalMessages = [...messagesInput, ...assistantMessage];
  const { requestPreview, responsePreview } = buildTracePreviews(finalMessages);
  const spans = buildTraceSpansFromMessages(finalMessages);
+ const toolDefs = extractToolDefinitions(params);

  logConversation({
  userId,
@@ -618,6 +612,8 @@ ${userContextBlock || "None"}
  promptVersion: promptMeta.promptVersion,
  promptId: promptMeta.promptId,
  }),
+ ...(toolDefs && { tools: toolDefs }),
+ ...(agentRunId && { agentRunId }),
  traceId: randomUUID(),
  requestPreview,
  responsePreview,
@@ -652,6 +648,7 @@ ${userContextBlock || "None"}
  if (isValidId(userId) && isValidId(sessionId)) {
  const sessionKey = `${userId}:${projectId}:${sessionId}`;
  const promptMeta = sessionPromptMetadata.get(sessionKey);
+ const agentRunId = (params as any)?.providerMetadata?.kognitive?.agentRunId as string | undefined;

  const messagesInput = (params as any).prompt || (params as any).messages || [];
  const resultMessages = (result as any)?.response?.messages;
@@ -730,8 +727,11 @@ ${userContextBlock || "None"}

  const { requestPreview, responsePreview } = buildTracePreviews(allMessages);
  const spans = buildTraceSpansFromMessages(allMessages);
+ const toolDefs = extractToolDefinitions(params);

- await logConversation({
+ // Fire-and-forget: do not await so the stream closes immediately,
+ // allowing the AI SDK's multi-step continuation logic to proceed.
+ logConversation({
  userId,
  projectId,
  sessionId,
@@ -743,6 +743,8 @@ ${userContextBlock || "None"}
  promptVersion: promptMeta.promptVersion,
  promptId: promptMeta.promptId,
  }),
+ ...(toolDefs && { tools: toolDefs }),
+ ...(agentRunId && { agentRunId }),
  traceId,
  requestPreview,
  responsePreview,
@@ -754,8 +756,8 @@ ${userContextBlock || "None"}
  appId: clConfig.appId,
  },
  spans,
- });
- triggerProcessing(userId, projectId, sessionId);
+ }).then(() => triggerProcessing(userId, projectId, sessionId))
+ .catch((e) => logger.error("Stream log failed", e));
  }
  });

@@ -924,7 +926,7 @@ ${userContextBlock || "None"}
  resolvePrompt,
  logConversation,
  triggerProcessing,
- clearPromptCache: () => promptCache.clear(),
+ clearPromptCache: () => promptClient.clearPromptCache(),
  clearSessionCache: (sessionKey?: string) => {
  if (sessionKey) {
  sessionSnapshots.delete(sessionKey);
package/src/template.ts CHANGED
@@ -1,10 +1 @@
- // Use the pre-built dist to avoid `require.extensions` warning in webpack/Next.js
- // eslint-disable-next-line @typescript-eslint/no-require-imports
- import Handlebars from "handlebars/dist/cjs/handlebars";
-
- export type TemplateVariables = Record<string, string | boolean>;
-
- export function renderTemplate(template: string, variables: TemplateVariables): string {
- const compiled = Handlebars.compile(template, { noEscape: true });
- return compiled(variables);
- }
+ export { renderTemplate, type TemplateVariables } from "@kognitivedev/shared";
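
For reference, a small usage sketch of the re-exported helper, assuming the @kognitivedev/shared implementation keeps the Handlebars-style behavior of the code removed above (templates compiled with noEscape, variables typed as Record<string, string | boolean>):

    import { renderTemplate, type TemplateVariables } from "@kognitivedev/vercel-ai-provider";

    const vars: TemplateVariables = { name: "Ada", showDetails: false };
    // With the previous Handlebars-based implementation this produced "Hello Ada".
    const greeting = renderTemplate("Hello {{name}}", vars);
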
package/tsconfig.json CHANGED
@@ -2,9 +2,11 @@
  "extends": "../../tsconfig.json",
  "compilerOptions": {
  "module": "commonjs",
+ "rootDir": "src",
  "outDir": "dist",
  "declaration": true,
- "noEmit": false
+ "noEmit": false,
+ "incremental": false
  },
  "include": [
  "src"