@kognitivedev/vercel-ai-provider 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -55,7 +55,7 @@ const { text } = await generateText({
 | `apiKey` | `string` | - | - | API key for authentication (if required) |
 | `processDelayMs` | `number` | - | `500` | Delay before triggering memory processing (set to 0 to disable) |
 | `logLevel` | `LogLevel` | - | `'info'` | Controls console logging verbosity |
-| `providerFactory` | `(baseURL: string) => (modelId: string) => LanguageModel` | - | - | Factory for creating a provider that routes through a gateway URL |
+| `providerFactory` | `(baseURL: string, apiKey: string) => (modelId: string) => LanguageModel` | - | - | Factory for creating a provider that routes through a gateway URL |
 
 ### `LogLevel`
 
@@ -317,8 +317,8 @@ const cl = createCognitiveLayer({
   clConfig: {
     appId: "my-app",
     baseUrl: "https://api.kognitive.dev",
-    providerFactory: (baseURL) => {
-      const gatewayProvider = createOpenAI({ baseURL });
+    providerFactory: (baseURL, apiKey) => {
+      const gatewayProvider = createOpenAI({ baseURL, apiKey });
       return (modelId) => gatewayProvider(modelId);
     }
   }
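
The README change above widens `providerFactory` so the configured API key is handed to the factory; an existing one-argument factory keeps compiling but simply ignores the key. Below is a minimal sketch of the updated configuration, assuming `createOpenAI` comes from `@ai-sdk/openai` (it accepts `baseURL` and `apiKey`); the `appId`, `apiKey`, and `baseUrl` values are placeholders, not required settings.

```ts
// Sketch only: placeholder values throughout; the env var name is illustrative.
import { createOpenAI } from "@ai-sdk/openai";
import { createCognitiveLayer } from "@kognitivedev/vercel-ai-provider";

const cl = createCognitiveLayer({
  clConfig: {
    appId: "my-app",
    apiKey: process.env.KOGNITIVE_API_KEY!,
    baseUrl: "https://api.kognitive.dev",
    // 0.1.8 passes the configured apiKey as the second argument so the
    // provider client built for the gateway URL can authenticate itself.
    providerFactory: (baseURL, apiKey) => {
      const gatewayProvider = createOpenAI({ baseURL, apiKey });
      return (modelId) => gatewayProvider(modelId);
    },
  },
});
```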
package/dist/index.d.ts CHANGED
@@ -27,7 +27,7 @@ export interface CognitiveLayerConfig {
     /**
      * Factory for creating a provider that routes through a gateway URL.
      */
-    providerFactory?: (baseURL: string) => (modelId: string) => LanguageModel;
+    providerFactory?: (baseURL: string, apiKey: string) => (modelId: string) => LanguageModel;
 }
 export type CLModelWrapper = (modelId: string, settings?: {
     userId?: string;
@@ -59,7 +59,7 @@ export interface LogConversationPayload {
 export type CognitiveLayer = CLModelWrapper & {
     streamText: (options: CLStreamTextOptions) => Promise<ReturnType<typeof aiStreamText>>;
     generateText: (options: CLGenerateTextOptions) => ReturnType<typeof aiGenerateText>;
-    resolvePrompt: (slug: string) => Promise<CachedPrompt>;
+    resolvePrompt: (slug: string, userId?: string) => Promise<CachedPrompt>;
     logConversation: (payload: LogConversationPayload) => Promise<void>;
     triggerProcessing: (userId: string, projectId: string, sessionId: string) => void;
     clearPromptCache: () => void;
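
The new optional `userId` on `resolvePrompt` is appended as a `userId` query parameter and folded into the cache key (see the `dist/index.js` hunks below), so per-user prompt variants are cached independently of the anonymous one. A hedged usage sketch; the slug and user id are placeholders and `cl` is the object returned by `createCognitiveLayer`:

```ts
// Placeholder slug and id; per-user resolution caches under "onboarding-guide:user-42",
// the anonymous call caches under the bare slug, matching 0.1.6 behavior.
const personalized = await cl.resolvePrompt("onboarding-guide", "user-42");
const generic = await cl.resolvePrompt("onboarding-guide");

console.log(personalized.version, personalized.content.length);
```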
package/dist/index.js CHANGED
@@ -13,6 +13,12 @@ var __rest = (this && this.__rest) || function (s, e) {
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.createCognitiveLayer = createCognitiveLayer;
 const ai_1 = require("ai");
+function isValidId(value) {
+    if (value == null || typeof value !== "string")
+        return false;
+    const trimmed = value.trim();
+    return trimmed !== "" && trimmed !== "null" && trimmed !== "undefined";
+}
 const LOG_LEVEL_PRIORITY = {
     none: 0,
     error: 1,
@@ -94,13 +100,18 @@ function createCognitiveLayer(config) {
     };
     // Prompt cache: slug → CachedPrompt
     const promptCache = new Map();
-    const resolvePrompt = async (slug) => {
-        const cached = promptCache.get(slug);
+    const resolvePrompt = async (slug, userId) => {
+        const cacheKey = userId ? `${slug}:${userId}` : slug;
+        const cached = promptCache.get(cacheKey);
         if (cached && Date.now() - cached.fetchedAt < PROMPT_CACHE_TTL_MS) {
             logger.debug("Using cached prompt", { slug, version: cached.version });
             return cached;
         }
-        const res = await fetch(`${baseUrl}/api/cognitive/prompt?slug=${encodeURIComponent(slug)}`, {
+        const url = new URL(`${baseUrl}/api/cognitive/prompt`);
+        url.searchParams.set("slug", slug);
+        if (userId)
+            url.searchParams.set("userId", userId);
+        const res = await fetch(url.toString(), {
             headers: { "Authorization": `Bearer ${clConfig.apiKey}` },
         });
         if (!res.ok) {
@@ -116,7 +127,7 @@ function createCognitiveLayer(config) {
             fetchedAt: Date.now(),
             gatewaySlug: data.gatewaySlug,
         };
-        promptCache.set(slug, entry);
+        promptCache.set(cacheKey, entry);
         logger.info("Prompt resolved", { slug, version: entry.version });
         return entry;
     };
@@ -166,7 +177,7 @@ function createCognitiveLayer(config) {
     const buildMiddleware = (userId, projectId, sessionId, modelId) => ({
         specificationVersion: 'v3',
         async transformParams({ params }) {
-            if (!userId)
+            if (!isValidId(userId))
                 return params;
             const incomingMessages = Array.isArray(params.prompt)
                 ? params.prompt
@@ -254,7 +265,7 @@ ${userContextBlock || "None"}
                 logger.error("doGenerate params.prompt", JSON.stringify((_a = params.prompt) === null || _a === void 0 ? void 0 : _a.map((m) => ({ role: m.role, contentType: typeof m.content, contentLength: Array.isArray(m.content) ? m.content.length : undefined })), null, 2));
                 throw err;
             }
-            if (userId && sessionId) {
+            if (isValidId(userId) && isValidId(sessionId)) {
                 const sessionKey = `${userId}:${projectId}:${sessionId}`;
                 const promptMeta = sessionPromptMetadata.get(sessionKey);
                 const messagesInput = params.messages || params.prompt || [];
@@ -288,7 +299,7 @@ ${userContextBlock || "None"}
                 logger.error("doStream failed", err);
                 throw err;
             }
-            if (userId && sessionId) {
+            if (isValidId(userId) && isValidId(sessionId)) {
                 const sessionKey = `${userId}:${projectId}:${sessionId}`;
                 const promptMeta = sessionPromptMetadata.get(sessionKey);
                 const messagesInput = params.messages || params.prompt || [];
@@ -337,7 +348,7 @@ ${userContextBlock || "None"}
         try {
             const gatewayURL = `${baseUrl}/api/cognitive/gateway/${gatewaySlug}`;
             const modelId = originalModel.modelId || 'default';
-            const rawModel = clConfig.providerFactory(gatewayURL)(modelId);
+            const rawModel = clConfig.providerFactory(gatewayURL, clConfig.apiKey)(modelId);
             const session = originalModel[SESSION_KEY];
             if (!session)
                 return rawModel;
@@ -361,7 +372,7 @@ ${userContextBlock || "None"}
         const userId = settings === null || settings === void 0 ? void 0 : settings.userId;
         const projectId = (settings === null || settings === void 0 ? void 0 : settings.projectId) || clConfig.projectId || "default";
         const sessionId = settings === null || settings === void 0 ? void 0 : settings.sessionId;
-        const sessionMissing = !!userId && !sessionId;
+        const sessionMissing = isValidId(userId) && !isValidId(sessionId);
         if (sessionMissing) {
             logger.warn("sessionId is required to log and process memories; skipping logging until provided.");
         }
@@ -370,60 +381,90 @@ ${userContextBlock || "None"}
             middleware: buildMiddleware(userId, projectId, sessionId, modelId),
         });
         // Track session settings on the model for use in cl.streamText/cl.generateText
-        if (userId && sessionId) {
-            wrappedModel[SESSION_KEY] = { userId, projectId, sessionId };
+        // Always store if userId is valid — sessionId may be missing but userId is still
+        // needed for prompt resolution (e.g. A/B test assignment)
+        if (isValidId(userId)) {
+            wrappedModel[SESSION_KEY] = { userId, projectId, sessionId: isValidId(sessionId) ? sessionId : undefined };
         }
         return wrappedModel;
     };
     const clStreamText = async (options) => {
         const { prompt: promptConfig } = options, rest = __rest(options, ["prompt"]);
-        // Resolve and interpolate prompt
-        const resolved = await resolvePrompt(promptConfig.slug);
-        const system = promptConfig.variables
-            ? interpolateTemplate(resolved.content, promptConfig.variables)
-            : resolved.content;
-        // Store prompt metadata for the session (read by middleware during logging)
         const session = options.model[SESSION_KEY];
-        if (session) {
-            const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
-            sessionPromptMetadata.set(sessionKey, {
-                promptSlug: resolved.slug,
-                promptVersion: resolved.version,
-                promptId: resolved.promptId,
+        // Resolve and interpolate prompt (graceful fallback on failure)
+        let resolved = null;
+        try {
+            resolved = await resolvePrompt(promptConfig.slug, session === null || session === void 0 ? void 0 : session.userId);
+        }
+        catch (err) {
+            logger.warn(`Failed to resolve prompt "${promptConfig.slug}", streaming without system prompt.`, err);
+        }
+        let system;
+        if (resolved) {
+            system = promptConfig.variables
+                ? interpolateTemplate(resolved.content, promptConfig.variables)
+                : resolved.content;
+            // Store prompt metadata for the session (read by middleware during logging)
+            if (session === null || session === void 0 ? void 0 : session.sessionId) {
+                const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
+                sessionPromptMetadata.set(sessionKey, {
+                    promptSlug: resolved.slug,
+                    promptVersion: resolved.version,
+                    promptId: resolved.promptId,
+                });
+            }
+            logger.info("cl.streamText called", {
+                slug: promptConfig.slug,
+                version: resolved.version,
+                systemLength: system.length,
             });
         }
-        logger.info("cl.streamText called", {
-            slug: promptConfig.slug,
-            version: resolved.version,
-            systemLength: system.length,
-        });
-        const model = resolveModel(options.model, resolved.gatewaySlug);
-        return (0, ai_1.streamText)(Object.assign(Object.assign({}, rest), { model, system }));
+        else {
+            logger.info("cl.streamText called without resolved prompt", {
+                slug: promptConfig.slug,
+            });
+        }
+        const model = resolveModel(options.model, resolved === null || resolved === void 0 ? void 0 : resolved.gatewaySlug);
+        return (0, ai_1.streamText)(Object.assign(Object.assign(Object.assign({}, rest), { model }), (system && { system })));
     };
     const clGenerateText = async (options) => {
         const { prompt: promptConfig } = options, rest = __rest(options, ["prompt"]);
-        // Resolve and interpolate prompt
-        const resolved = await resolvePrompt(promptConfig.slug);
-        const system = promptConfig.variables
-            ? interpolateTemplate(resolved.content, promptConfig.variables)
-            : resolved.content;
-        // Store prompt metadata for the session (read by middleware during logging)
         const session = options.model[SESSION_KEY];
-        if (session) {
-            const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
-            sessionPromptMetadata.set(sessionKey, {
-                promptSlug: resolved.slug,
-                promptVersion: resolved.version,
-                promptId: resolved.promptId,
+        // Resolve and interpolate prompt (graceful fallback on failure)
+        let resolved = null;
+        try {
+            resolved = await resolvePrompt(promptConfig.slug, session === null || session === void 0 ? void 0 : session.userId);
+        }
+        catch (err) {
+            logger.warn(`Failed to resolve prompt "${promptConfig.slug}", generating without system prompt.`, err);
+        }
+        let system;
+        if (resolved) {
+            system = promptConfig.variables
+                ? interpolateTemplate(resolved.content, promptConfig.variables)
+                : resolved.content;
+            // Store prompt metadata for the session (read by middleware during logging)
+            if (session === null || session === void 0 ? void 0 : session.sessionId) {
+                const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
                sessionPromptMetadata.set(sessionKey, {
+                    promptSlug: resolved.slug,
+                    promptVersion: resolved.version,
+                    promptId: resolved.promptId,
+                });
+            }
+            logger.info("cl.generateText called", {
+                slug: promptConfig.slug,
+                version: resolved.version,
+                systemLength: system.length,
             });
         }
-        logger.info("cl.generateText called", {
-            slug: promptConfig.slug,
-            version: resolved.version,
-            systemLength: system.length,
-        });
-        const model = resolveModel(options.model, resolved.gatewaySlug);
-        return (0, ai_1.generateText)(Object.assign(Object.assign({}, rest), { model, system }));
+        else {
+            logger.info("cl.generateText called without resolved prompt", {
+                slug: promptConfig.slug,
+            });
+        }
+        const model = resolveModel(options.model, resolved === null || resolved === void 0 ? void 0 : resolved.gatewaySlug);
+        return (0, ai_1.generateText)(Object.assign(Object.assign(Object.assign({}, rest), { model }), (system && { system })));
     };
     // Return the model wrapper function with streamText/generateText attached
     return Object.assign(clWrapper, {
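
Taken together, these hunks change what happens when only a `userId` is supplied: the session metadata is still attached to the wrapped model (so prompt resolution can be personalized, e.g. for A/B assignment), memory logging stays off until a valid `sessionId` is provided, and a failed prompt fetch no longer rejects `cl.streamText`/`cl.generateText`. A hedged sketch of that behavior; the model id, slug, and ids are placeholders, and the `messages` passthrough is assumed to behave like the AI SDK's:

```ts
// Placeholders throughout; this only illustrates the 0.1.8 runtime behavior.
const model = cl("gpt-4o-mini", {
  userId: "user-42", // valid id: kept on the model for personalized prompt resolution
  // sessionId omitted: a warning is logged and memory logging is skipped
});

const { text } = await cl.generateText({
  model,
  prompt: { slug: "onboarding-guide" },
  // If the prompt fetch fails, 0.1.8 warns and generates without a system
  // prompt instead of throwing out of the call.
  messages: [{ role: "user", content: "Hello!" }],
});
```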
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@kognitivedev/vercel-ai-provider",
-  "version": "0.1.6",
+  "version": "0.1.8",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "publishConfig": {
package/src/index.ts CHANGED
@@ -15,6 +15,12 @@ import {
  */
 export type LogLevel = 'none' | 'error' | 'warn' | 'info' | 'debug';
 
+function isValidId(value: string | undefined | null): value is string {
+  if (value == null || typeof value !== "string") return false;
+  const trimmed = value.trim();
+  return trimmed !== "" && trimmed !== "null" && trimmed !== "undefined";
+}
+
 const LOG_LEVEL_PRIORITY: Record<LogLevel, number> = {
   none: 0,
   error: 1,
@@ -72,7 +78,7 @@ export interface CognitiveLayerConfig {
   /**
    * Factory for creating a provider that routes through a gateway URL.
    */
-  providerFactory?: (baseURL: string) => (modelId: string) => LanguageModel;
+  providerFactory?: (baseURL: string, apiKey: string) => (modelId: string) => LanguageModel;
 }
 
 export type CLModelWrapper = (
@@ -110,7 +116,7 @@ export interface LogConversationPayload {
 export type CognitiveLayer = CLModelWrapper & {
   streamText: (options: CLStreamTextOptions) => Promise<ReturnType<typeof aiStreamText>>;
   generateText: (options: CLGenerateTextOptions) => ReturnType<typeof aiGenerateText>;
-  resolvePrompt: (slug: string) => Promise<CachedPrompt>;
+  resolvePrompt: (slug: string, userId?: string) => Promise<CachedPrompt>;
   logConversation: (payload: LogConversationPayload) => Promise<void>;
   triggerProcessing: (userId: string, projectId: string, sessionId: string) => void;
   clearPromptCache: () => void;
@@ -188,14 +194,19 @@ export function createCognitiveLayer(config: {
   // Prompt cache: slug → CachedPrompt
   const promptCache = new Map<string, CachedPrompt>();
 
-  const resolvePrompt = async (slug: string): Promise<CachedPrompt> => {
-    const cached = promptCache.get(slug);
+  const resolvePrompt = async (slug: string, userId?: string): Promise<CachedPrompt> => {
+    const cacheKey = userId ? `${slug}:${userId}` : slug;
+    const cached = promptCache.get(cacheKey);
     if (cached && Date.now() - cached.fetchedAt < PROMPT_CACHE_TTL_MS) {
       logger.debug("Using cached prompt", { slug, version: cached.version });
       return cached;
     }
 
-    const res = await fetch(`${baseUrl}/api/cognitive/prompt?slug=${encodeURIComponent(slug)}`, {
+    const url = new URL(`${baseUrl}/api/cognitive/prompt`);
+    url.searchParams.set("slug", slug);
+    if (userId) url.searchParams.set("userId", userId);
+
+    const res = await fetch(url.toString(), {
       headers: { "Authorization": `Bearer ${clConfig.apiKey}` },
     });
     if (!res.ok) {
@@ -212,7 +223,7 @@ export function createCognitiveLayer(config: {
       fetchedAt: Date.now(),
       gatewaySlug: data.gatewaySlug,
     };
-    promptCache.set(slug, entry);
+    promptCache.set(cacheKey, entry);
     logger.info("Prompt resolved", { slug, version: entry.version });
     return entry;
   };
@@ -275,7 +286,7 @@ export function createCognitiveLayer(config: {
   const buildMiddleware = (userId: string | undefined, projectId: string, sessionId: string | undefined, modelId: string) => ({
     specificationVersion: 'v3' as const,
     async transformParams({ params }: { params: any }) {
-      if (!userId) return params;
+      if (!isValidId(userId)) return params;
 
       const incomingMessages = Array.isArray((params as any).prompt)
         ? (params as any).prompt
@@ -373,7 +384,7 @@ ${userContextBlock || "None"}
         throw err;
       }
 
-      if (userId && sessionId) {
+      if (isValidId(userId) && isValidId(sessionId)) {
         const sessionKey = `${userId}:${projectId}:${sessionId}`;
         const promptMeta = sessionPromptMetadata.get(sessionKey);
 
@@ -416,7 +427,7 @@ ${userContextBlock || "None"}
         throw err;
       }
 
-      if (userId && sessionId) {
+      if (isValidId(userId) && isValidId(sessionId)) {
         const sessionKey = `${userId}:${projectId}:${sessionId}`;
         const promptMeta = sessionPromptMetadata.get(sessionKey);
 
@@ -479,7 +490,7 @@ ${userContextBlock || "None"}
     try {
       const gatewayURL = `${baseUrl}/api/cognitive/gateway/${gatewaySlug}`;
       const modelId = (originalModel as any).modelId || 'default';
-      const rawModel = clConfig.providerFactory(gatewayURL)(modelId);
+      const rawModel = clConfig.providerFactory(gatewayURL, clConfig.apiKey)(modelId);
 
       const session = (originalModel as any)[SESSION_KEY];
       if (!session) return rawModel as LanguageModel;
@@ -510,7 +521,7 @@ ${userContextBlock || "None"}
     const userId = settings?.userId;
     const projectId = settings?.projectId || clConfig.projectId || "default";
     const sessionId = settings?.sessionId;
-    const sessionMissing = !!userId && !sessionId;
+    const sessionMissing = isValidId(userId) && !isValidId(sessionId);
 
     if (sessionMissing) {
       logger.warn("sessionId is required to log and process memories; skipping logging until provided.");
@@ -522,8 +533,10 @@ ${userContextBlock || "None"}
     }) as LanguageModel;
 
     // Track session settings on the model for use in cl.streamText/cl.generateText
-    if (userId && sessionId) {
-      (wrappedModel as any)[SESSION_KEY] = { userId, projectId, sessionId };
+    // Always store if userId is valid — sessionId may be missing but userId is still
+    // needed for prompt resolution (e.g. A/B test assignment)
+    if (isValidId(userId)) {
+      (wrappedModel as any)[SESSION_KEY] = { userId, projectId, sessionId: isValidId(sessionId) ? sessionId : undefined };
     }
 
     return wrappedModel;
@@ -532,61 +545,89 @@ ${userContextBlock || "None"}
   const clStreamText = async (options: CLStreamTextOptions) => {
     const { prompt: promptConfig, ...rest } = options;
 
-    // Resolve and interpolate prompt
-    const resolved = await resolvePrompt(promptConfig.slug);
-    const system = promptConfig.variables
-      ? interpolateTemplate(resolved.content, promptConfig.variables)
-      : resolved.content;
-
-    // Store prompt metadata for the session (read by middleware during logging)
-    const session = (options.model as any)[SESSION_KEY] as { userId: string; projectId: string; sessionId: string } | undefined;
-    if (session) {
-      const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
-      sessionPromptMetadata.set(sessionKey, {
-        promptSlug: resolved.slug,
-        promptVersion: resolved.version,
-        promptId: resolved.promptId,
-      });
+    const session = (options.model as any)[SESSION_KEY] as { userId: string; projectId: string; sessionId?: string } | undefined;
+
+    // Resolve and interpolate prompt (graceful fallback on failure)
+    let resolved: CachedPrompt | null = null;
+    try {
+      resolved = await resolvePrompt(promptConfig.slug, session?.userId);
+    } catch (err) {
+      logger.warn(`Failed to resolve prompt "${promptConfig.slug}", streaming without system prompt.`, err);
     }
 
-    logger.info("cl.streamText called", {
-      slug: promptConfig.slug,
-      version: resolved.version,
-      systemLength: system.length,
-    });
+    let system: string | undefined;
+    if (resolved) {
+      system = promptConfig.variables
+        ? interpolateTemplate(resolved.content, promptConfig.variables)
+        : resolved.content;
+
+      // Store prompt metadata for the session (read by middleware during logging)
+      if (session?.sessionId) {
+        const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
+        sessionPromptMetadata.set(sessionKey, {
+          promptSlug: resolved.slug,
+          promptVersion: resolved.version,
+          promptId: resolved.promptId,
+        });
+      }
 
-    const model = resolveModel(options.model, resolved.gatewaySlug);
-    return aiStreamText({ ...rest, model, system } as any);
+      logger.info("cl.streamText called", {
+        slug: promptConfig.slug,
+        version: resolved.version,
+        systemLength: system.length,
+      });
+    } else {
+      logger.info("cl.streamText called without resolved prompt", {
+        slug: promptConfig.slug,
+      });
+    }
+
+    const model = resolveModel(options.model, resolved?.gatewaySlug);
+    return aiStreamText({ ...rest, model, ...(system && { system }) } as any);
   };
 
   const clGenerateText = async (options: CLGenerateTextOptions) => {
     const { prompt: promptConfig, ...rest } = options;
 
-    // Resolve and interpolate prompt
-    const resolved = await resolvePrompt(promptConfig.slug);
-    const system = promptConfig.variables
-      ? interpolateTemplate(resolved.content, promptConfig.variables)
-      : resolved.content;
-
-    // Store prompt metadata for the session (read by middleware during logging)
-    const session = (options.model as any)[SESSION_KEY] as { userId: string; projectId: string; sessionId: string } | undefined;
-    if (session) {
-      const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
-      sessionPromptMetadata.set(sessionKey, {
-        promptSlug: resolved.slug,
-        promptVersion: resolved.version,
-        promptId: resolved.promptId,
-      });
+    const session = (options.model as any)[SESSION_KEY] as { userId: string; projectId: string; sessionId?: string } | undefined;
+
+    // Resolve and interpolate prompt (graceful fallback on failure)
+    let resolved: CachedPrompt | null = null;
+    try {
+      resolved = await resolvePrompt(promptConfig.slug, session?.userId);
+    } catch (err) {
+      logger.warn(`Failed to resolve prompt "${promptConfig.slug}", generating without system prompt.`, err);
     }
 
-    logger.info("cl.generateText called", {
-      slug: promptConfig.slug,
-      version: resolved.version,
-      systemLength: system.length,
-    });
+    let system: string | undefined;
+    if (resolved) {
+      system = promptConfig.variables
+        ? interpolateTemplate(resolved.content, promptConfig.variables)
+        : resolved.content;
+
+      // Store prompt metadata for the session (read by middleware during logging)
+      if (session?.sessionId) {
+        const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
+        sessionPromptMetadata.set(sessionKey, {
+          promptSlug: resolved.slug,
+          promptVersion: resolved.version,
+          promptId: resolved.promptId,
+        });
+      }
+
+      logger.info("cl.generateText called", {
+        slug: promptConfig.slug,
+        version: resolved.version,
+        systemLength: system.length,
+      });
+    } else {
+      logger.info("cl.generateText called without resolved prompt", {
+        slug: promptConfig.slug,
+      });
+    }
 
-    const model = resolveModel(options.model, resolved.gatewaySlug);
-    return aiGenerateText({ ...rest, model, system } as any);
+    const model = resolveModel(options.model, resolved?.gatewaySlug);
+    return aiGenerateText({ ...rest, model, ...(system && { system }) } as any);
   };
 
   // Return the model wrapper function with streamText/generateText attached
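
For reference, the internal `isValidId` guard introduced above is what decides whether a `userId` or `sessionId` participates in memory features: values that commonly leak out of templating, such as the literal strings "null" and "undefined" or whitespace-only strings, are now treated as absent. A hedged illustration with placeholder ids:

```ts
// With 0.1.8's validation, the stringified-undefined sessionId below is
// treated as missing, so the wrapper warns and skips memory logging instead
// of logging under a session key like "user-42:default:undefined".
const model = cl("gpt-4o-mini", {
  userId: "user-42",
  sessionId: String(undefined), // "undefined", accepted in 0.1.6, rejected in 0.1.8
});
```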