@kognitivedev/vercel-ai-provider 0.2.13 → 0.2.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,39 @@
 # @kognitivedev/vercel-ai-provider
 
+## 0.2.16
+
+### Patch Changes
+
+- release
+
+- Updated dependencies []:
+  - @kognitivedev/prompthub@0.1.14
+  - @kognitivedev/shared@0.2.16
+
+## 0.2.15
+
+### Patch Changes
+
+- release
+
+- Updated dependencies []:
+  - @kognitivedev/prompthub@0.1.13
+  - @kognitivedev/shared@0.2.15
+
+## 0.2.14
+
+### Patch Changes
+
+- release
+
+- release
+
+- release
+
+- Updated dependencies []:
+  - @kognitivedev/prompthub@0.1.12
+  - @kognitivedev/shared@0.2.14
+
 ## 0.2.13
 
 ### Patch Changes
@@ -1,42 +1,91 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 const vitest_1 = require("vitest");
-const
-    wrapLanguageModel: vitest_1.vi.fn(({ model }) => model),
-    streamText: vitest_1.vi.fn(async (options) => ({ options })),
-    generateText: vitest_1.vi.fn(async (options) => ({ options })),
-}));
+const test_1 = require("ai/test");
 const promptHubMocks = vitest_1.vi.hoisted(() => ({
     resolvePrompt: vitest_1.vi.fn(),
 }));
-vitest_1.vi.mock("ai", () => ({
-    wrapLanguageModel: aiMocks.wrapLanguageModel,
-    streamText: aiMocks.streamText,
-    generateText: aiMocks.generateText,
-}));
 vitest_1.vi.mock("@kognitivedev/prompthub", () => ({
     createPromptHubClient: vitest_1.vi.fn(() => ({
         resolvePrompt: promptHubMocks.resolvePrompt,
     })),
 }));
+vitest_1.vi.mock("ai", async (importOriginal) => {
+    const mod = await importOriginal();
+    return Object.assign(Object.assign({}, mod), { streamText: vitest_1.vi.fn((...args) => mod.streamText(...args)), generateText: vitest_1.vi.fn((...args) => mod.generateText(...args)) });
+});
+const ai_1 = require("ai");
 const index_1 = require("../index");
+function minimalFinishUsage() {
+    return {
+        inputTokens: { total: 1, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },
+        outputTokens: { total: 1, text: undefined, reasoning: undefined },
+    };
+}
+function createDualMockModel(modelId = "mock-model") {
+    return new test_1.MockLanguageModelV3({
+        modelId,
+        doGenerate: async () => ({
+            content: [{ type: "text", text: "ok" }],
+            finishReason: { unified: "stop", raw: undefined },
+            usage: minimalFinishUsage(),
+            warnings: [],
+        }),
+        doStream: async () => ({
+            stream: (0, test_1.convertArrayToReadableStream)([
+                { type: "text-start", id: "t1" },
+                { type: "text-delta", id: "t1", delta: "x" },
+                { type: "text-end", id: "t1" },
+                {
+                    type: "finish",
+                    finishReason: { unified: "stop", raw: undefined },
+                    usage: minimalFinishUsage(),
+                },
+            ]),
+        }),
+    });
+}
+/** Default provider for tests — real LanguageModel so `wrapLanguageModel` middleware runs. */
+function defaultProvider(modelId) {
+    const m = createDualMockModel(modelId);
+    return m;
+}
+function setupSnapshotAndLogFetch() {
+    const fetchSpy = vitest_1.vi.fn(async (url, init) => {
+        const urlStr = typeof url === "string" ? url : url.toString();
+        if (urlStr.includes("/api/cognitive/snapshot")) {
+            return new Response(JSON.stringify({ systemBlock: "", userContextBlock: "" }), { status: 200, headers: { "Content-Type": "application/json" } });
+        }
+        if (urlStr.includes("/api/cognitive/log")) {
+            return new Response(JSON.stringify({ ok: true }), { status: 201 });
+        }
+        if (urlStr.includes("/api/cognitive/process")) {
+            return new Response(JSON.stringify({ ok: true }), { status: 200 });
+        }
+        return new Response("not found", { status: 404 });
+    });
+    vitest_1.vi.stubGlobal("fetch", fetchSpy);
+    return fetchSpy;
+}
 function makeLayer(overrides) {
+    var _a;
     return (0, index_1.createCognitiveLayer)({
-        provider: (
+        provider: (_a = overrides === null || overrides === void 0 ? void 0 : overrides.provider) !== null && _a !== void 0 ? _a : defaultProvider,
         clConfig: Object.assign({ apiKey: "test-api-key", baseUrl: "https://backend.example", projectId: "project-1", processDelayMs: 0, logLevel: "debug" }, overrides),
     });
 }
 (0, vitest_1.describe)("createCognitiveLayer extras", () => {
     (0, vitest_1.beforeEach)(() => {
-
-
-        aiMocks.generateText.mockClear();
+        vitest_1.vi.mocked(ai_1.streamText).mockClear();
+        vitest_1.vi.mocked(ai_1.generateText).mockClear();
         promptHubMocks.resolvePrompt.mockReset();
     });
     (0, vitest_1.afterEach)(() => {
+        vitest_1.vi.unstubAllGlobals();
         vitest_1.vi.restoreAllMocks();
     });
     (0, vitest_1.it)("resolves prompts and renders variables", async () => {
+        setupSnapshotAndLogFetch();
         promptHubMocks.resolvePrompt.mockResolvedValue({
             promptId: "prompt-1",
             slug: "welcome",
@@ -63,7 +112,7 @@ function makeLayer(overrides) {
             userId: "user-1",
             tag: undefined,
         });
-        (0, vitest_1.expect)(
+        (0, vitest_1.expect)(ai_1.streamText).toHaveBeenCalledWith(vitest_1.expect.objectContaining({
             system: "Hello Ada",
             model: vitest_1.expect.objectContaining({ modelId: "mock-model" }),
         }));
@@ -73,6 +122,7 @@ function makeLayer(overrides) {
         cl.clearSessionCache();
     });
     (0, vitest_1.it)("uses a gateway model when providerFactory is available", async () => {
+        setupSnapshotAndLogFetch();
         promptHubMocks.resolvePrompt.mockResolvedValue({
             promptId: "prompt-2",
             slug: "gateway",
@@ -81,7 +131,7 @@ function makeLayer(overrides) {
             fetchedAt: Date.now(),
             gatewaySlug: "gateway-a",
         });
-        const providerFactory = vitest_1.vi.fn(() => vitest_1.vi.fn((
+        const providerFactory = vitest_1.vi.fn(() => vitest_1.vi.fn((mid) => createDualMockModel(mid)));
         const cl = makeLayer({ providerFactory });
         const model = cl("mock-model", {
             userId: "user-1",
@@ -94,13 +144,15 @@ function makeLayer(overrides) {
             prompt: { slug: "gateway" },
         });
        (0, vitest_1.expect)(providerFactory).toHaveBeenCalledWith("https://backend.example/api/cognitive/gateway/gateway-a", "test-api-key");
-        (
-
+        // Gateway path reuses the base model's modelId when calling the factory (routing is via gateway base URL).
+        (0, vitest_1.expect)(ai_1.generateText).toHaveBeenCalledWith(vitest_1.expect.objectContaining({
+            model: vitest_1.expect.objectContaining({ modelId: "mock-model" }),
             system: "Prompt body",
         }));
     });
     (0, vitest_1.it)("passes prompt tag and stores tag/ab metadata in logging payload", async () => {
         var _a, _b, _c, _d;
+        const fetchSpy = setupSnapshotAndLogFetch();
         const backendResponse = {
             promptId: "prompt-4",
             slug: "tagged-welcome",
@@ -113,9 +165,6 @@ function makeLayer(overrides) {
             variant: "variant",
         };
         promptHubMocks.resolvePrompt.mockResolvedValue(backendResponse);
-        const fetchSpy = vitest_1.vi
-            .spyOn(globalThis, "fetch")
-            .mockResolvedValue(new Response(JSON.stringify({ ok: true }), { status: 201 }));
         const cl = makeLayer();
         const model = cl("mock-model", {
             userId: "user-2",
@@ -154,9 +203,9 @@ function makeLayer(overrides) {
         (0, vitest_1.expect)((_b = calledWithBody.metadata) === null || _b === void 0 ? void 0 : _b.promptTag).toBe("production");
         (0, vitest_1.expect)((_c = calledWithBody.metadata) === null || _c === void 0 ? void 0 : _c.abTestId).toBe("ab-test-1");
         (0, vitest_1.expect)((_d = calledWithBody.metadata) === null || _d === void 0 ? void 0 : _d.variant).toBe("variant");
-        fetchSpy.mockRestore();
     });
     (0, vitest_1.it)("falls back to the original model when gateway model creation fails", async () => {
+        setupSnapshotAndLogFetch();
         promptHubMocks.resolvePrompt.mockResolvedValue({
             promptId: "prompt-3",
             slug: "gateway",
@@ -182,13 +231,14 @@ function makeLayer(overrides) {
             prompt: { slug: "gateway" },
         });
         (0, vitest_1.expect)(errorSpy).toHaveBeenCalledWith(vitest_1.expect.stringContaining("Failed to create gateway model, falling back to original"), vitest_1.expect.any(Error));
-        (0, vitest_1.expect)(
+        (0, vitest_1.expect)(ai_1.streamText).toHaveBeenCalledWith(vitest_1.expect.objectContaining({
             model: vitest_1.expect.objectContaining({ modelId: "mock-model" }),
         }));
     });
     (0, vitest_1.it)("falls back cleanly when prompt resolution fails", async () => {
-
+        setupSnapshotAndLogFetch();
         const warnSpy = vitest_1.vi.spyOn(console, "warn").mockImplementation(() => { });
+        promptHubMocks.resolvePrompt.mockRejectedValue(new Error("not found"));
         const cl = makeLayer();
         const model = cl("mock-model", {
             userId: "user-1",
@@ -201,9 +251,9 @@ function makeLayer(overrides) {
             prompt: { slug: "missing" },
         });
         (0, vitest_1.expect)(warnSpy).toHaveBeenCalledWith(vitest_1.expect.stringContaining('Failed to resolve prompt "missing", generating without system prompt.'), vitest_1.expect.any(Error));
-        (0, vitest_1.expect)(
+        (0, vitest_1.expect)(ai_1.generateText).toHaveBeenCalledWith(vitest_1.expect.objectContaining({
             model: vitest_1.expect.objectContaining({ modelId: "mock-model" }),
         }));
-        (0, vitest_1.expect)(
+        (0, vitest_1.expect)(vitest_1.vi.mocked(ai_1.generateText).mock.calls[0][0]).not.toHaveProperty("system");
     });
 });
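
The key change in the test file above is how the "ai" module is mocked: instead of replacing streamText, generateText, and wrapLanguageModel with inert stubs, the new tests wrap the real exports in pass-through vi.fn spies and drive them with a MockLanguageModelV3 from ai/test, so the provider's wrapLanguageModel middleware actually executes. The sketch below condenses that pattern into one standalone test. It is an illustration assembled from the diff, not code shipped in the package, and it assumes the same AI SDK and Vitest versions these tests are written against.

```ts
// Condensed illustration of the partial-mock pattern used in the rewritten tests.
// Assumes an AI SDK version whose `ai/test` exports match the ones in this diff.
import { expect, it, vi } from "vitest";
import { generateText } from "ai";
import { MockLanguageModelV3 } from "ai/test";

// Keep the real "ai" implementation, but record every call through a vi.fn wrapper.
vi.mock("ai", async (importOriginal) => {
  const mod = await importOriginal<typeof import("ai")>();
  return {
    ...mod,
    generateText: vi.fn((...args: Parameters<typeof mod.generateText>) => mod.generateText(...args)),
  };
});

it("runs the real generateText against a mock model while recording the call", async () => {
  // Result shapes copied from the diff's createDualMockModel helper.
  const model = new MockLanguageModelV3({
    modelId: "mock-model",
    doGenerate: async () => ({
      content: [{ type: "text" as const, text: "ok" }],
      finishReason: { unified: "stop" as const, raw: undefined },
      usage: {
        inputTokens: { total: 1, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },
        outputTokens: { total: 1, text: undefined, reasoning: undefined },
      },
      warnings: [],
    }),
  });

  const result = await generateText({ model, prompt: "hi" });

  expect(result.text).toBe("ok");
  // The pass-through spy still lets call-shape assertions work, as in the tests above.
  expect(generateText).toHaveBeenCalledWith(expect.objectContaining({ prompt: "hi" }));
});
```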
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@kognitivedev/vercel-ai-provider",
-  "version": "0.2.
+  "version": "0.2.16",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "publishConfig": {
@@ -13,8 +13,8 @@
     "prepublishOnly": "npm run build"
   },
   "dependencies": {
-    "@kognitivedev/prompthub": "^0.1.
-    "@kognitivedev/shared": "^0.2.
+    "@kognitivedev/prompthub": "^0.1.14",
+    "@kognitivedev/shared": "^0.2.16"
   },
   "peerDependencies": {
     "ai": "^5.0.0 || ^6.0.0"
@@ -1,35 +1,93 @@
 import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
 import type { LanguageModel } from "ai";
-
-const aiMocks = vi.hoisted(() => ({
-  wrapLanguageModel: vi.fn(({ model }: { model: unknown }) => model),
-  streamText: vi.fn(async (options: Record<string, unknown>) => ({ options })),
-  generateText: vi.fn(async (options: Record<string, unknown>) => ({ options })),
-}));
+import { MockLanguageModelV3, convertArrayToReadableStream } from "ai/test";
 
 const promptHubMocks = vi.hoisted(() => ({
   resolvePrompt: vi.fn(),
 }));
 
-vi.mock("ai", () => ({
-  wrapLanguageModel: aiMocks.wrapLanguageModel,
-  streamText: aiMocks.streamText,
-  generateText: aiMocks.generateText,
-}));
-
 vi.mock("@kognitivedev/prompthub", () => ({
   createPromptHubClient: vi.fn(() => ({
     resolvePrompt: promptHubMocks.resolvePrompt,
   })),
 }));
 
+vi.mock("ai", async (importOriginal) => {
+  const mod = await importOriginal<typeof import("ai")>();
+  return {
+    ...mod,
+    streamText: vi.fn((...args: Parameters<typeof mod.streamText>) => mod.streamText(...args)),
+    generateText: vi.fn((...args: Parameters<typeof mod.generateText>) => mod.generateText(...args)),
+  };
+});
+
+import { streamText as aiStreamText, generateText as aiGenerateText } from "ai";
 import { createCognitiveLayer } from "../index";
 
+function minimalFinishUsage() {
+  return {
+    inputTokens: { total: 1, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },
+    outputTokens: { total: 1, text: undefined, reasoning: undefined },
+  } as const;
+}
+
+function createDualMockModel(modelId = "mock-model") {
+  return new MockLanguageModelV3({
+    modelId,
+    doGenerate: async () => ({
+      content: [{ type: "text" as const, text: "ok" }],
+      finishReason: { unified: "stop" as const, raw: undefined },
+      usage: minimalFinishUsage(),
+      warnings: [],
+    }),
+    doStream: async () => ({
+      stream: convertArrayToReadableStream([
+        { type: "text-start" as const, id: "t1" },
+        { type: "text-delta" as const, id: "t1", delta: "x" },
+        { type: "text-end" as const, id: "t1" },
+        {
+          type: "finish" as const,
+          finishReason: { unified: "stop" as const, raw: undefined },
+          usage: minimalFinishUsage(),
+        },
+      ]),
+    }),
+  });
+}
+
+/** Default provider for tests — real LanguageModel so `wrapLanguageModel` middleware runs. */
+function defaultProvider(modelId: string): LanguageModel {
+  const m = createDualMockModel(modelId);
+  return m as unknown as LanguageModel;
+}
+
+function setupSnapshotAndLogFetch() {
+  const fetchSpy = vi.fn(async (url: string | URL | Request, init?: RequestInit) => {
+    const urlStr = typeof url === "string" ? url : url.toString();
+    if (urlStr.includes("/api/cognitive/snapshot")) {
+      return new Response(
+        JSON.stringify({ systemBlock: "", userContextBlock: "" }),
+        { status: 200, headers: { "Content-Type": "application/json" } },
+      );
+    }
+    if (urlStr.includes("/api/cognitive/log")) {
+      return new Response(JSON.stringify({ ok: true }), { status: 201 });
+    }
+    if (urlStr.includes("/api/cognitive/process")) {
+      return new Response(JSON.stringify({ ok: true }), { status: 200 });
+    }
+    return new Response("not found", { status: 404 });
+  });
+  vi.stubGlobal("fetch", fetchSpy);
+  return fetchSpy;
+}
+
 function makeLayer(overrides?: {
   providerFactory?: (baseURL: string, apiKey: string) => (modelId: string) => LanguageModel;
+  provider?: (modelId: string) => LanguageModel;
 }) {
   return createCognitiveLayer({
-    provider:
+    provider: overrides?.provider ?? defaultProvider,
     clConfig: {
       apiKey: "test-api-key",
       baseUrl: "https://backend.example",
@@ -43,17 +101,18 @@ function makeLayer(overrides?: {
 
 describe("createCognitiveLayer extras", () => {
   beforeEach(() => {
-
-
-    aiMocks.generateText.mockClear();
+    vi.mocked(aiStreamText).mockClear();
+    vi.mocked(aiGenerateText).mockClear();
     promptHubMocks.resolvePrompt.mockReset();
   });
 
   afterEach(() => {
+    vi.unstubAllGlobals();
     vi.restoreAllMocks();
   });
 
   it("resolves prompts and renders variables", async () => {
+    setupSnapshotAndLogFetch();
     promptHubMocks.resolvePrompt.mockResolvedValue({
       promptId: "prompt-1",
       slug: "welcome",
@@ -83,14 +142,14 @@ describe("createCognitiveLayer extras", () => {
       userId: "user-1",
       tag: undefined,
     });
-    expect(
+    expect(aiStreamText).toHaveBeenCalledWith(
       expect.objectContaining({
         system: "Hello Ada",
         model: expect.objectContaining({ modelId: "mock-model" }),
-      })
+      }),
     );
     expect(warnSpy).toHaveBeenCalledWith(
-      expect.stringContaining("Gateway config found but no providerFactory provided")
+      expect.stringContaining("Gateway config found but no providerFactory provided"),
     );
     expect(logSpy).toHaveBeenCalled();
     cl.clearSessionCache("user-1:project-1:session-1");
@@ -98,6 +157,7 @@ describe("createCognitiveLayer extras", () => {
   });
 
   it("uses a gateway model when providerFactory is available", async () => {
+    setupSnapshotAndLogFetch();
     promptHubMocks.resolvePrompt.mockResolvedValue({
       promptId: "prompt-2",
       slug: "gateway",
@@ -107,7 +167,7 @@ describe("createCognitiveLayer extras", () => {
       gatewaySlug: "gateway-a",
     });
 
-    const providerFactory = vi.fn(() => vi.fn((
+    const providerFactory = vi.fn(() => vi.fn((mid: string) => createDualMockModel(mid) as unknown as LanguageModel));
     const cl = makeLayer({ providerFactory });
     const model = cl("mock-model", {
       userId: "user-1",
@@ -123,17 +183,20 @@ describe("createCognitiveLayer extras", () => {
 
     expect(providerFactory).toHaveBeenCalledWith(
       "https://backend.example/api/cognitive/gateway/gateway-a",
-      "test-api-key"
+      "test-api-key",
     );
-
+    // Gateway path reuses the base model's modelId when calling the factory (routing is via gateway base URL).
+    expect(aiGenerateText).toHaveBeenCalledWith(
       expect.objectContaining({
-        model: expect.objectContaining({ modelId: "
+        model: expect.objectContaining({ modelId: "mock-model" }),
         system: "Prompt body",
-      })
+      }),
     );
   });
 
   it("passes prompt tag and stores tag/ab metadata in logging payload", async () => {
+    const fetchSpy = setupSnapshotAndLogFetch();
+
     const backendResponse = {
       promptId: "prompt-4",
       slug: "tagged-welcome",
@@ -147,10 +210,6 @@ describe("createCognitiveLayer extras", () => {
     };
     promptHubMocks.resolvePrompt.mockResolvedValue(backendResponse);
 
-    const fetchSpy = vi
-      .spyOn(globalThis, "fetch")
-      .mockResolvedValue(new Response(JSON.stringify({ ok: true }), { status: 201 }));
-
     const cl = makeLayer();
     const model = cl("mock-model", {
       userId: "user-2",
@@ -179,12 +238,10 @@ describe("createCognitiveLayer extras", () => {
         Authorization: "Bearer test-api-key",
       }),
       body: expect.stringContaining('"promptSlug":"tagged-welcome"'),
-      })
+      }),
     );
 
-    const logCall = fetchSpy.mock.calls.find(
-      ([url]) => String(url).includes("api/cognitive/log"),
-    );
+    const logCall = fetchSpy.mock.calls.find(([url]) => String(url).includes("api/cognitive/log"));
     expect(logCall).toBeDefined();
     const calledWithBody = JSON.parse(logCall![1]!.body as string);
     expect(calledWithBody.promptSlug).toBe("tagged-welcome");
@@ -197,11 +254,10 @@ describe("createCognitiveLayer extras", () => {
     expect(calledWithBody.metadata?.promptTag).toBe("production");
     expect(calledWithBody.metadata?.abTestId).toBe("ab-test-1");
     expect(calledWithBody.metadata?.variant).toBe("variant");
-
-    fetchSpy.mockRestore();
   });
 
   it("falls back to the original model when gateway model creation fails", async () => {
+    setupSnapshotAndLogFetch();
     promptHubMocks.resolvePrompt.mockResolvedValue({
       promptId: "prompt-3",
       slug: "gateway",
@@ -231,18 +287,19 @@ describe("createCognitiveLayer extras", () => {
 
     expect(errorSpy).toHaveBeenCalledWith(
       expect.stringContaining("Failed to create gateway model, falling back to original"),
-      expect.any(Error)
+      expect.any(Error),
     );
-    expect(
+    expect(aiStreamText).toHaveBeenCalledWith(
       expect.objectContaining({
         model: expect.objectContaining({ modelId: "mock-model" }),
-      })
+      }),
     );
   });
 
   it("falls back cleanly when prompt resolution fails", async () => {
-
+    setupSnapshotAndLogFetch();
     const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
+    promptHubMocks.resolvePrompt.mockRejectedValue(new Error("not found"));
     const cl = makeLayer();
     const model = cl("mock-model", {
       userId: "user-1",
@@ -258,13 +315,13 @@ describe("createCognitiveLayer extras", () => {
 
     expect(warnSpy).toHaveBeenCalledWith(
       expect.stringContaining('Failed to resolve prompt "missing", generating without system prompt.'),
-      expect.any(Error)
+      expect.any(Error),
    );
-    expect(
+    expect(aiGenerateText).toHaveBeenCalledWith(
       expect.objectContaining({
         model: expect.objectContaining({ modelId: "mock-model" }),
-      })
+      }),
     );
-    expect(
+    expect(vi.mocked(aiGenerateText).mock.calls[0][0]).not.toHaveProperty("system");
   });
 });
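
A second notable refactor across both test files is the new setupSnapshotAndLogFetch helper, which replaces the per-test vi.spyOn(globalThis, "fetch") stubs; payload assertions then read straight out of the spy's recorded calls. The self-contained sketch below illustrates that pattern. The endpoint, header, and payload fields are taken from the assertions in the diff, but the call into the backend is a stand-in, not the package's real code path.

```ts
// Illustrative, self-contained sketch of the fetch-stub pattern introduced above:
// stub global fetch with a vi.fn, let the code under test call it, then decode the
// recorded request body.
import { afterEach, expect, it, vi } from "vitest";

it("reads the logging payload back out of the fetch stub", async () => {
  const fetchSpy = vi.fn(async (_url: string | URL | Request, _init?: RequestInit) => {
    return new Response(JSON.stringify({ ok: true }), { status: 201 });
  });
  vi.stubGlobal("fetch", fetchSpy);

  // Stand-in for the cognitive layer's logging call (the real suite drives this
  // through createCognitiveLayer and generateText/streamText).
  await fetch("https://backend.example/api/cognitive/log", {
    method: "POST",
    headers: { Authorization: "Bearer test-api-key" },
    body: JSON.stringify({ promptSlug: "tagged-welcome", metadata: { promptTag: "production" } }),
  });

  // Find the recorded call that hit the logging endpoint and decode its JSON body.
  const logCall = fetchSpy.mock.calls.find(([url]) => String(url).includes("api/cognitive/log"));
  expect(logCall).toBeDefined();
  const body = JSON.parse(logCall![1]!.body as string);
  expect(body.promptSlug).toBe("tagged-welcome");
  expect(body.metadata?.promptTag).toBe("production");
});

afterEach(() => {
  // Matches the diff: stubbed globals are cleared after every test.
  vi.unstubAllGlobals();
});
```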