@kognitivedev/vercel-ai-provider 0.1.7 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -59,7 +59,7 @@ export interface LogConversationPayload {
 export type CognitiveLayer = CLModelWrapper & {
     streamText: (options: CLStreamTextOptions) => Promise<ReturnType<typeof aiStreamText>>;
    generateText: (options: CLGenerateTextOptions) => ReturnType<typeof aiGenerateText>;
-    resolvePrompt: (slug: string) => Promise<CachedPrompt>;
+    resolvePrompt: (slug: string, userId?: string) => Promise<CachedPrompt>;
     logConversation: (payload: LogConversationPayload) => Promise<void>;
     triggerProcessing: (userId: string, projectId: string, sessionId: string) => void;
     clearPromptCache: () => void;
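
For consumers, the only type-level change in this release is the optional `userId` parameter on `resolvePrompt`; existing one-argument call sites keep compiling. A minimal sketch of both call forms, assuming a layer created with `createCognitiveLayer` (the config shape, slug, and user id are placeholders; only `apiKey` is confirmed by this diff):

```typescript
import { createCognitiveLayer } from "@kognitivedev/vercel-ai-provider";

// Placeholder config: only `apiKey` is confirmed by this diff (it is read
// internally as `clConfig.apiKey`); any other fields are assumptions.
const cl = createCognitiveLayer({ apiKey: process.env.COGNITIVE_API_KEY! });

async function demo() {
  // 0.1.7-style call: still valid, cached under the bare slug.
  const shared = await cl.resolvePrompt("welcome-message");

  // New in 0.1.8: user-scoped resolution, cached under "welcome-message:user_123".
  const perUser = await cl.resolvePrompt("welcome-message", "user_123");

  console.log(shared.version, perUser.version);
}
```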
package/dist/index.js CHANGED
@@ -100,13 +100,18 @@ function createCognitiveLayer(config) {
     };
     // Prompt cache: slug → CachedPrompt
     const promptCache = new Map();
-    const resolvePrompt = async (slug) => {
-        const cached = promptCache.get(slug);
+    const resolvePrompt = async (slug, userId) => {
+        const cacheKey = userId ? `${slug}:${userId}` : slug;
+        const cached = promptCache.get(cacheKey);
         if (cached && Date.now() - cached.fetchedAt < PROMPT_CACHE_TTL_MS) {
             logger.debug("Using cached prompt", { slug, version: cached.version });
             return cached;
         }
-        const res = await fetch(`${baseUrl}/api/cognitive/prompt?slug=${encodeURIComponent(slug)}`, {
+        const url = new URL(`${baseUrl}/api/cognitive/prompt`);
+        url.searchParams.set("slug", slug);
+        if (userId)
+            url.searchParams.set("userId", userId);
+        const res = await fetch(url.toString(), {
             headers: { "Authorization": `Bearer ${clConfig.apiKey}` },
         });
         if (!res.ok) {
@@ -122,7 +127,7 @@ function createCognitiveLayer(config) {
             fetchedAt: Date.now(),
             gatewaySlug: data.gatewaySlug,
         };
-        promptCache.set(slug, entry);
+        promptCache.set(cacheKey, entry);
         logger.info("Prompt resolved", { slug, version: entry.version });
         return entry;
     };
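
The behavioral core of the hunks above is the cache key: a user-scoped resolution is stored and looked up under `${slug}:${userId}`, so it can never serve or overwrite the anonymous entry for the same slug. A standalone restatement of the scheme (not the package's code; the TTL value is an assumption, since `PROMPT_CACHE_TTL_MS` is defined outside this diff):

```typescript
// Illustrative restatement of the keying and freshness rules above.
const PROMPT_CACHE_TTL_MS = 60_000; // assumed value for this sketch

interface Entry { content: string; fetchedAt: number }
const promptCache = new Map<string, Entry>();

function keyFor(slug: string, userId?: string): string {
  return userId ? `${slug}:${userId}` : slug;
}

function getFresh(slug: string, userId?: string): Entry | undefined {
  const hit = promptCache.get(keyFor(slug, userId));
  // Same freshness check as resolvePrompt: stale entries trigger a refetch.
  return hit && Date.now() - hit.fetchedAt < PROMPT_CACHE_TTL_MS ? hit : undefined;
}
```

One consequence: each active user gets its own entry per slug, so cache size now scales with users as well as slugs.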
@@ -376,60 +381,90 @@ ${userContextBlock || "None"}
             middleware: buildMiddleware(userId, projectId, sessionId, modelId),
         });
         // Track session settings on the model for use in cl.streamText/cl.generateText
-        if (isValidId(userId) && isValidId(sessionId)) {
-            wrappedModel[SESSION_KEY] = { userId, projectId, sessionId };
+        // Always store if userId is valid — sessionId may be missing but userId is still
+        // needed for prompt resolution (e.g. A/B test assignment)
+        if (isValidId(userId)) {
+            wrappedModel[SESSION_KEY] = { userId, projectId, sessionId: isValidId(sessionId) ? sessionId : undefined };
         }
         return wrappedModel;
     };
     const clStreamText = async (options) => {
         const { prompt: promptConfig } = options, rest = __rest(options, ["prompt"]);
-        // Resolve and interpolate prompt
-        const resolved = await resolvePrompt(promptConfig.slug);
-        const system = promptConfig.variables
-            ? interpolateTemplate(resolved.content, promptConfig.variables)
-            : resolved.content;
-        // Store prompt metadata for the session (read by middleware during logging)
         const session = options.model[SESSION_KEY];
-        if (session) {
-            const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
-            sessionPromptMetadata.set(sessionKey, {
-                promptSlug: resolved.slug,
-                promptVersion: resolved.version,
-                promptId: resolved.promptId,
+        // Resolve and interpolate prompt (graceful fallback on failure)
+        let resolved = null;
+        try {
+            resolved = await resolvePrompt(promptConfig.slug, session === null || session === void 0 ? void 0 : session.userId);
+        }
+        catch (err) {
+            logger.warn(`Failed to resolve prompt "${promptConfig.slug}", streaming without system prompt.`, err);
+        }
+        let system;
+        if (resolved) {
+            system = promptConfig.variables
+                ? interpolateTemplate(resolved.content, promptConfig.variables)
+                : resolved.content;
+            // Store prompt metadata for the session (read by middleware during logging)
+            if (session === null || session === void 0 ? void 0 : session.sessionId) {
+                const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
+                sessionPromptMetadata.set(sessionKey, {
+                    promptSlug: resolved.slug,
+                    promptVersion: resolved.version,
+                    promptId: resolved.promptId,
+                });
+            }
+            logger.info("cl.streamText called", {
+                slug: promptConfig.slug,
+                version: resolved.version,
+                systemLength: system.length,
             });
         }
-        logger.info("cl.streamText called", {
-            slug: promptConfig.slug,
-            version: resolved.version,
-            systemLength: system.length,
-        });
-        const model = resolveModel(options.model, resolved.gatewaySlug);
-        return (0, ai_1.streamText)(Object.assign(Object.assign({}, rest), { model, system }));
+        else {
+            logger.info("cl.streamText called without resolved prompt", {
+                slug: promptConfig.slug,
+            });
+        }
+        const model = resolveModel(options.model, resolved === null || resolved === void 0 ? void 0 : resolved.gatewaySlug);
+        return (0, ai_1.streamText)(Object.assign(Object.assign(Object.assign({}, rest), { model }), (system && { system })));
     };
     const clGenerateText = async (options) => {
         const { prompt: promptConfig } = options, rest = __rest(options, ["prompt"]);
-        // Resolve and interpolate prompt
-        const resolved = await resolvePrompt(promptConfig.slug);
-        const system = promptConfig.variables
-            ? interpolateTemplate(resolved.content, promptConfig.variables)
-            : resolved.content;
-        // Store prompt metadata for the session (read by middleware during logging)
         const session = options.model[SESSION_KEY];
-        if (session) {
-            const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
-            sessionPromptMetadata.set(sessionKey, {
-                promptSlug: resolved.slug,
-                promptVersion: resolved.version,
-                promptId: resolved.promptId,
+        // Resolve and interpolate prompt (graceful fallback on failure)
+        let resolved = null;
+        try {
+            resolved = await resolvePrompt(promptConfig.slug, session === null || session === void 0 ? void 0 : session.userId);
+        }
+        catch (err) {
+            logger.warn(`Failed to resolve prompt "${promptConfig.slug}", generating without system prompt.`, err);
+        }
+        let system;
+        if (resolved) {
+            system = promptConfig.variables
+                ? interpolateTemplate(resolved.content, promptConfig.variables)
+                : resolved.content;
+            // Store prompt metadata for the session (read by middleware during logging)
+            if (session === null || session === void 0 ? void 0 : session.sessionId) {
+                const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
+                sessionPromptMetadata.set(sessionKey, {
+                    promptSlug: resolved.slug,
+                    promptVersion: resolved.version,
+                    promptId: resolved.promptId,
+                });
+            }
+            logger.info("cl.generateText called", {
+                slug: promptConfig.slug,
+                version: resolved.version,
+                systemLength: system.length,
            });
        }
-        logger.info("cl.generateText called", {
-            slug: promptConfig.slug,
-            version: resolved.version,
-            systemLength: system.length,
-        });
-        const model = resolveModel(options.model, resolved.gatewaySlug);
-        return (0, ai_1.generateText)(Object.assign(Object.assign({}, rest), { model, system }));
+        else {
+            logger.info("cl.generateText called without resolved prompt", {
+                slug: promptConfig.slug,
+            });
+        }
+        const model = resolveModel(options.model, resolved === null || resolved === void 0 ? void 0 : resolved.gatewaySlug);
+        return (0, ai_1.generateText)(Object.assign(Object.assign(Object.assign({}, rest), { model }), (system && { system })));
     };
     // Return the model wrapper function with streamText/generateText attached
     return Object.assign(clWrapper, {
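
The nested `Object.assign(Object.assign(Object.assign({}, rest), { model }), (system && { system }))` in both new return statements is just tsc's down-leveling of a conditional object spread. At source level the pattern reads as follows (a generic sketch, not the package's code):

```typescript
// `...(system && { system })` includes the key only when `system` is truthy.
// When `system` is undefined, the expression evaluates to undefined, and
// spreading undefined into an object literal is a no-op, so the property
// is absent entirely rather than present with an undefined value.
function withOptionalSystem<T extends object>(rest: T, model: unknown, system?: string) {
  return { ...rest, model, ...(system && { system }) };
}
```

That distinction is what lets the new code pass its options straight through to `streamText`/`generateText` with no `system` key at all when prompt resolution failed.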
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@kognitivedev/vercel-ai-provider",
-  "version": "0.1.7",
+  "version": "0.1.8",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "publishConfig": {
package/src/index.ts CHANGED
@@ -116,7 +116,7 @@ export interface LogConversationPayload {
 export type CognitiveLayer = CLModelWrapper & {
   streamText: (options: CLStreamTextOptions) => Promise<ReturnType<typeof aiStreamText>>;
   generateText: (options: CLGenerateTextOptions) => ReturnType<typeof aiGenerateText>;
-  resolvePrompt: (slug: string) => Promise<CachedPrompt>;
+  resolvePrompt: (slug: string, userId?: string) => Promise<CachedPrompt>;
   logConversation: (payload: LogConversationPayload) => Promise<void>;
   triggerProcessing: (userId: string, projectId: string, sessionId: string) => void;
   clearPromptCache: () => void;
@@ -194,14 +194,19 @@ export function createCognitiveLayer(config: {
   // Prompt cache: slug → CachedPrompt
   const promptCache = new Map<string, CachedPrompt>();
 
-  const resolvePrompt = async (slug: string): Promise<CachedPrompt> => {
-    const cached = promptCache.get(slug);
+  const resolvePrompt = async (slug: string, userId?: string): Promise<CachedPrompt> => {
+    const cacheKey = userId ? `${slug}:${userId}` : slug;
+    const cached = promptCache.get(cacheKey);
     if (cached && Date.now() - cached.fetchedAt < PROMPT_CACHE_TTL_MS) {
       logger.debug("Using cached prompt", { slug, version: cached.version });
       return cached;
     }
 
-    const res = await fetch(`${baseUrl}/api/cognitive/prompt?slug=${encodeURIComponent(slug)}`, {
+    const url = new URL(`${baseUrl}/api/cognitive/prompt`);
+    url.searchParams.set("slug", slug);
+    if (userId) url.searchParams.set("userId", userId);
+
+    const res = await fetch(url.toString(), {
       headers: { "Authorization": `Bearer ${clConfig.apiKey}` },
     });
     if (!res.ok) {
@@ -218,7 +223,7 @@ export function createCognitiveLayer(config: {
       fetchedAt: Date.now(),
       gatewaySlug: data.gatewaySlug,
     };
-    promptCache.set(slug, entry);
+    promptCache.set(cacheKey, entry);
     logger.info("Prompt resolved", { slug, version: entry.version });
     return entry;
   };
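
Beyond readability, building the request with the WHATWG `URL` API closes an encoding gap: `searchParams.set` percent-encodes every value it is given, whereas the old template string only ran `encodeURIComponent` over `slug`. A standalone illustration (the host is a placeholder):

```typescript
// URLSearchParams encodes both parameters automatically.
const url = new URL("https://api.example.com/api/cognitive/prompt"); // placeholder host
url.searchParams.set("slug", "welcome message"); // space becomes "+"
url.searchParams.set("userId", "user/123+abc");  // "/" and "+" are escaped

console.log(url.toString());
// https://api.example.com/api/cognitive/prompt?slug=welcome+message&userId=user%2F123%2Babc
```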
@@ -528,8 +533,10 @@ ${userContextBlock || "None"}
    }) as LanguageModel;
 
    // Track session settings on the model for use in cl.streamText/cl.generateText
-    if (isValidId(userId) && isValidId(sessionId)) {
-      (wrappedModel as any)[SESSION_KEY] = { userId, projectId, sessionId };
+    // Always store if userId is valid — sessionId may be missing but userId is still
+    // needed for prompt resolution (e.g. A/B test assignment)
+    if (isValidId(userId)) {
+      (wrappedModel as any)[SESSION_KEY] = { userId, projectId, sessionId: isValidId(sessionId) ? sessionId : undefined };
     }
 
     return wrappedModel;
@@ -538,61 +545,89 @@ ${userContextBlock || "None"}
   const clStreamText = async (options: CLStreamTextOptions) => {
     const { prompt: promptConfig, ...rest } = options;
 
-    // Resolve and interpolate prompt
-    const resolved = await resolvePrompt(promptConfig.slug);
-    const system = promptConfig.variables
-      ? interpolateTemplate(resolved.content, promptConfig.variables)
-      : resolved.content;
-
-    // Store prompt metadata for the session (read by middleware during logging)
-    const session = (options.model as any)[SESSION_KEY] as { userId: string; projectId: string; sessionId: string } | undefined;
-    if (session) {
-      const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
-      sessionPromptMetadata.set(sessionKey, {
-        promptSlug: resolved.slug,
-        promptVersion: resolved.version,
-        promptId: resolved.promptId,
-      });
+    const session = (options.model as any)[SESSION_KEY] as { userId: string; projectId: string; sessionId?: string } | undefined;
+
+    // Resolve and interpolate prompt (graceful fallback on failure)
+    let resolved: CachedPrompt | null = null;
+    try {
+      resolved = await resolvePrompt(promptConfig.slug, session?.userId);
+    } catch (err) {
+      logger.warn(`Failed to resolve prompt "${promptConfig.slug}", streaming without system prompt.`, err);
     }
 
-    logger.info("cl.streamText called", {
-      slug: promptConfig.slug,
-      version: resolved.version,
-      systemLength: system.length,
-    });
+    let system: string | undefined;
+    if (resolved) {
+      system = promptConfig.variables
+        ? interpolateTemplate(resolved.content, promptConfig.variables)
+        : resolved.content;
+
+      // Store prompt metadata for the session (read by middleware during logging)
+      if (session?.sessionId) {
+        const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
+        sessionPromptMetadata.set(sessionKey, {
+          promptSlug: resolved.slug,
+          promptVersion: resolved.version,
+          promptId: resolved.promptId,
+        });
+      }
+
+      logger.info("cl.streamText called", {
+        slug: promptConfig.slug,
+        version: resolved.version,
+        systemLength: system.length,
+      });
+    } else {
+      logger.info("cl.streamText called without resolved prompt", {
+        slug: promptConfig.slug,
+      });
+    }
 
-    const model = resolveModel(options.model, resolved.gatewaySlug);
-    return aiStreamText({ ...rest, model, system } as any);
+    const model = resolveModel(options.model, resolved?.gatewaySlug);
+    return aiStreamText({ ...rest, model, ...(system && { system }) } as any);
   };
 
   const clGenerateText = async (options: CLGenerateTextOptions) => {
     const { prompt: promptConfig, ...rest } = options;
 
-    // Resolve and interpolate prompt
-    const resolved = await resolvePrompt(promptConfig.slug);
-    const system = promptConfig.variables
-      ? interpolateTemplate(resolved.content, promptConfig.variables)
-      : resolved.content;
-
-    // Store prompt metadata for the session (read by middleware during logging)
-    const session = (options.model as any)[SESSION_KEY] as { userId: string; projectId: string; sessionId: string } | undefined;
-    if (session) {
-      const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
-      sessionPromptMetadata.set(sessionKey, {
-        promptSlug: resolved.slug,
-        promptVersion: resolved.version,
-        promptId: resolved.promptId,
-      });
+    const session = (options.model as any)[SESSION_KEY] as { userId: string; projectId: string; sessionId?: string } | undefined;
+
+    // Resolve and interpolate prompt (graceful fallback on failure)
+    let resolved: CachedPrompt | null = null;
+    try {
+      resolved = await resolvePrompt(promptConfig.slug, session?.userId);
+    } catch (err) {
+      logger.warn(`Failed to resolve prompt "${promptConfig.slug}", generating without system prompt.`, err);
     }
 
-    logger.info("cl.generateText called", {
-      slug: promptConfig.slug,
-      version: resolved.version,
-      systemLength: system.length,
-    });
+    let system: string | undefined;
+    if (resolved) {
+      system = promptConfig.variables
+        ? interpolateTemplate(resolved.content, promptConfig.variables)
+        : resolved.content;
+
+      // Store prompt metadata for the session (read by middleware during logging)
+      if (session?.sessionId) {
+        const sessionKey = `${session.userId}:${session.projectId}:${session.sessionId}`;
+        sessionPromptMetadata.set(sessionKey, {
+          promptSlug: resolved.slug,
+          promptVersion: resolved.version,
+          promptId: resolved.promptId,
+        });
+      }
+
+      logger.info("cl.generateText called", {
+        slug: promptConfig.slug,
+        version: resolved.version,
+        systemLength: system.length,
+      });
+    } else {
+      logger.info("cl.generateText called without resolved prompt", {
+        slug: promptConfig.slug,
+      });
+    }
 
-    const model = resolveModel(options.model, resolved.gatewaySlug);
-    return aiGenerateText({ ...rest, model, system } as any);
+    const model = resolveModel(options.model, resolved?.gatewaySlug);
+    return aiGenerateText({ ...rest, model, ...(system && { system }) } as any);
   };
 
   // Return the model wrapper function with streamText/generateText attached
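
Taken together, the two hunks above change the failure mode: a model wrapped with only a valid `userId` now carries session state, and a failed prompt fetch no longer rejects `cl.streamText` or `cl.generateText`; it is logged with `logger.warn` and the call proceeds without a system prompt, with `resolveModel` receiving `undefined` for the gateway slug. A hedged sketch of a call site (the wrapper invocation shape, model id, and ids are assumptions, not confirmed by this diff):

```typescript
// Hypothetical call site; the exact clWrapper signature is assumed.
const model = cl("gpt-4o", { userId: "user_123", projectId: "proj_1" }); // no sessionId

const result = await cl.streamText({
  model,
  prompt: { slug: "support-agent", variables: { tone: "friendly" } },
});
// 0.1.7: without a valid sessionId the session was never stored, and an
// unreachable prompt API made this await reject.
// 0.1.8: userId alone is stored on the model, the prompt is resolved per
// user, and a fetch failure only logs a warning; the stream starts without
// the resolved system prompt instead of throwing.
```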