@passelin/mock-bff 0.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/ai.js ADDED
@@ -0,0 +1,289 @@
1
+ import { generateText } from "ai";
2
+ import { z } from "zod";
3
+ import { openai, createOpenAI } from "@ai-sdk/openai";
4
+ import { anthropic, createAnthropic } from "@ai-sdk/anthropic";
5
+ import { createOllama } from "ai-sdk-ollama";
6
+ import { shortHash } from "./utils.js";
7
/**
 * Parse model output as JSON, tolerating surrounding prose.
 *
 * Strategy: try the whole (trimmed) text first; if that fails, try the
 * widest `{...}` slice, then the widest `[...]` slice.
 *
 * @param {string} text - Raw model output.
 * @returns {unknown} The parsed JSON value.
 * @throws {Error} "Model output is not valid JSON" when no candidate parses.
 */
function parseJsonValue(text) {
    const trimmed = text.trim();
    try {
        return JSON.parse(trimmed);
    }
    catch {
        // Widest object slice, e.g. `noise {"a":1} noise`.
        const objStart = trimmed.indexOf("{");
        const objEnd = trimmed.lastIndexOf("}");
        if (objStart >= 0 && objEnd > objStart) {
            try {
                return JSON.parse(trimmed.slice(objStart, objEnd + 1));
            }
            catch {
                // Fall through: the brace slice may be junk while a valid
                // array slice still exists (previously this threw a raw
                // SyntaxError and never tried the array).
            }
        }
        // Widest array slice, e.g. `text [1,2] end`.
        const arrStart = trimmed.indexOf("[");
        const arrEnd = trimmed.lastIndexOf("]");
        if (arrStart >= 0 && arrEnd > arrStart) {
            try {
                return JSON.parse(trimmed.slice(arrStart, arrEnd + 1));
            }
            catch {
                // Ignore and report the uniform error below.
            }
        }
        throw new Error("Model output is not valid JSON");
    }
}
26
// Default system prompt used when `config.aiPromptTemplate` is empty or unset
// (see buildPrompt). Placeholders of the form {{key}} are substituted by
// renderPromptTemplate (e.g. {{method}}, {{path}}, {{context}}); escaped
// backticks (\`) render as literal backticks in the final prompt text.
// NOTE: this is a runtime string, not documentation — changing its wording
// changes the behavior of the AI-backed mock server.
const DEFAULT_PROMPT_TEMPLATE = `You are an HTTP server for a Single Page Application.
Read the incoming HTTP request and return the most realistic successful HTTP response for a production-style REST API.

Output requirements:
1. Return exactly one JSON object with these top-level keys:
- \`status\`: number
- \`contentType\`: string (mime-type)
- \`body\`: JSON value or string (depending on content type)
2. Do not include prose, commentary, explanations, or markdown.
3. The response must always be a successful HTTP response (2xx only).

Content negotiation:
1. Inspect the \`Accept\` header to determine the response format.

2. Default behavior (critical):
- If the \`Accept\` header resembles a typical browser request (e.g. includes multiple types like \`text/html\`, \`application/xhtml+xml\`, \`application/xml\`, \`image/*\`, \`*/*\`), treat it as NO explicit preference.
- In these cases, ALWAYS return \`application/json\`.
- If \`*/*\` is present, treat it as no preference and return JSON.

3. Explicit format selection:
- Only return a non-JSON format (e.g. \`text/html\`) if:
- The \`Accept\` header specifies a single clear mime type, OR
- One mime type has a strictly higher q-value than all others and is not a wildcard.
- Examples that should return HTML:
- \`Accept: text/html\`
- \`Accept: text/html;q=1.0, application/json;q=0.5\`

4. Ambiguous or browser-style headers:
- If multiple types are listed without a clear single winner (even if ordered), IGNORE ordering and return JSON.

5. If the requested type is unsupported or unclear, default to \`application/json\`.

6. For non-JSON responses (only when explicitly required), return a realistic representation (e.g. full HTML document as a string).

7. Always set the \`Content-Type\` header accordingly.

Response behavior:
1. Follow standard REST conventions:
- \`POST\` creates a resource and returns the created entity.
- \`GET /collection\` returns an array.
- \`GET /collection/:id\` returns a single entity.
- \`PATCH\` partially updates fields and returns the updated entity.
- \`PUT\` replaces the entity and returns the replaced entity.
- \`DELETE\` returns \`204\` with \`body: null\` or a confirmation object.
2. Support nested resources such as \`/users/:id/comments/:commentId\`.
3. IDs must be unique and realistic.
4. Timestamps must be realistic ISO-8601 strings.
5. Prefer realistic defaults when information is missing.

Conflict resolution:
1. Always return a successful response (2xx). Never return 4xx or 5xx.
2. If format expectations conflict, prioritize:
- Explicit \`Accept\` header rules (as defined above)
- Otherwise default to JSON

Data modeling rules:
1. Use the provided schema and endpoint hints whenever relevant.
2. Preserve field names and types exactly as defined.
3. Populate optional fields only when realistic.
4. Keep generated values internally consistent.
5. IDs should be unique numbers (random).
6. Output VALID JSON ONLY. Do not add ellipsis or other non valid output.

ADDITIONAL CONTEXT:

{{context}}

SIMILAR EXAMPLES:
{{similar_examples_json}}

THE REQUEST:

Timestamp: {{datetime_iso}}
Method: {{method}}
Path: {{path}}
Query params: {{query_json}}
Body: {{body_json}}
Headers: {{headers_json}}`;
104
/**
 * Substitute {{placeholder}} tokens in a prompt template with request data.
 *
 * @param {string} template - Template containing `{{key}}` placeholders.
 * @param {object} input - Request description (method, path, query, body,
 *   requestHeaders, context, nearbyExamples).
 * @param {Date} now - Timestamp used for {{datetime_iso}} / {{date}}.
 * @returns {string} The rendered prompt; unknown placeholders become "".
 */
function renderPromptTemplate(template, input, now) {
    const map = {
        datetime_iso: now.toISOString(),
        date: now.toISOString().slice(0, 10),
        method: input.method,
        path: input.path,
        query_json: JSON.stringify(input.query),
        body_json: JSON.stringify(input.body),
        headers_json: JSON.stringify(input.requestHeaders ?? {}),
        // Keep only the tail of the free-form context to bound prompt size.
        context: (input.context ?? "").slice(-4000),
        // Cap examples so the prompt stays small even with many neighbors.
        similar_examples_json: JSON.stringify((input.nearbyExamples ?? []).slice(0, 6)),
    };
    // Guard with Object.hasOwn: the previous `map[key] ?? ""` resolved
    // inherited members, so {{constructor}} or {{toString}} would inject
    // Object.prototype values into the rendered prompt.
    return template.replace(/\{\{\s*([a-zA-Z0-9_]+)\s*\}\}/g, (_m, key) => (Object.hasOwn(map, key) ? map[key] : ""));
}
118
/**
 * Build the final AI prompt for a mock request.
 *
 * Uses the caller-configured template when it is a non-blank string,
 * otherwise falls back to the built-in default, then substitutes the
 * request placeholders.
 *
 * @param {object} input - Request description passed to the renderer.
 * @param {object} config - May carry an `aiPromptTemplate` override.
 * @param {Date} now - Timestamp injected into the prompt.
 * @returns {string} The fully rendered prompt.
 */
export function buildPrompt(input, config, now) {
    let template = DEFAULT_PROMPT_TEMPLATE;
    if (config.aiPromptTemplate?.trim()) {
        template = config.aiPromptTemplate;
    }
    return renderPromptTemplate(template, input, now);
}
124
/**
 * Convert an arbitrary error `cause` into a JSON-serializable value for
 * structured logging.
 *
 * Errors become plain {name, message, stack, cause} objects (recursively);
 * other objects are round-tripped through JSON (falling back to String()
 * for cyclic/unserializable values); primitives pass through unchanged.
 *
 * @param {unknown} cause
 * @returns {unknown} A JSON-safe representation, or undefined when absent.
 */
function serializeCause(cause) {
    // Only drop truly absent causes. The previous `!cause` check also
    // discarded falsy-but-present values such as 0, "" and false.
    if (cause == null)
        return undefined;
    if (cause instanceof Error) {
        return {
            name: cause.name,
            message: cause.message,
            stack: cause.stack,
            cause: serializeCause(cause.cause),
        };
    }
    if (typeof cause === "object") {
        try {
            // Round-trip to strip functions/symbols and surface cycles.
            return JSON.parse(JSON.stringify(cause));
        }
        catch {
            // Cyclic or otherwise unserializable: best-effort string form.
            return String(cause);
        }
    }
    return cause;
}
145
/**
 * Emit a one-line structured JSON record for an AI-generation failure to
 * stderr.
 *
 * @param {object} event - Fields: level (defaults to "error"), reason,
 *   provider, model, method, path, and an optional `error` whose message,
 *   name, stack and serialized cause are included.
 */
function logAiError(event) {
    const { error: err, reason, provider, model, method, path } = event;
    const record = {
        level: event.level ?? "error",
        ts: new Date().toISOString(),
        kind: "mock-bff-ai-error",
        reason,
        provider,
        model,
        method,
        path,
        message: err?.message,
        name: err?.name,
        stack: err?.stack,
        cause: serializeCause(err?.cause),
    };
    process.stderr.write(`${JSON.stringify(record)}\n`);
}
163
/**
 * Resolve a provider name + model id to an AI SDK model instance.
 *
 * Base URLs are taken from environment variables first, then from
 * `config.providerBaseUrls`; Ollama additionally defaults to the local
 * daemon. Unknown providers fall back to the default OpenAI client.
 *
 * @param {string} provider - "openai" | "anthropic" | "ollama" (others
 *   fall through to OpenAI).
 * @param {string} model - Provider-specific model identifier.
 * @param {object} config - May carry `providerBaseUrls` overrides.
 * @returns {object} An AI SDK language-model instance.
 */
function selectModel(provider, model, config) {
    switch (provider) {
        case "openai": {
            const baseURL = process.env.OPENAI_BASE_URL ?? config.providerBaseUrls?.openai;
            return baseURL ? createOpenAI({ baseURL })(model) : openai(model);
        }
        case "anthropic": {
            const baseURL = process.env.ANTHROPIC_BASE_URL ?? config.providerBaseUrls?.anthropic;
            return baseURL ? createAnthropic({ baseURL })(model) : anthropic(model);
        }
        case "ollama": {
            // Ollama always needs a base URL; default to the local daemon.
            const baseURL = process.env.OLLAMA_BASE_URL ??
                config.providerBaseUrls?.ollama ??
                "http://127.0.0.1:11434";
            return createOllama({ baseURL })(model);
        }
        default:
            return openai(model);
    }
}
184
/**
 * Generate a mock HTTP response for `input` by prompting an AI model.
 *
 * Resolution order for provider and model: MOCK_AI_PROVIDER / MOCK_AI_MODEL
 * env vars, then config (`aiProvider` / `aiModel`), then built-in defaults.
 *
 * @param {object} input - Request description (method, path, query, body,
 *   requestHeaders, context, nearbyExamples).
 * @param {object} config - aiProvider, aiModel, aiSeed, aiStorePrompt,
 *   aiPromptTemplate, providerBaseUrls.
 * @returns {Promise<object|null>} A cacheable mock entry, or null when AI
 *   generation is disabled, misconfigured, or fails (callers fall back).
 */
export async function generateMockResponse(input, config) {
    const provider = process.env.MOCK_AI_PROVIDER ?? config.aiProvider ?? "none";
    if (provider === "none")
        return null;
    // Resolve the model name exactly once. Previously this expression was
    // duplicated for selection and result logging, and the error paths
    // logged a third, truncated variant without the per-provider default.
    const modelName = process.env.MOCK_AI_MODEL ??
        config.aiModel ??
        (provider === "anthropic"
            ? "claude-3-5-sonnet-latest"
            : provider === "ollama"
                ? "llama3.1:8b"
                : "gpt-5.4-mini");
    // Hosted providers need an API key; Ollama is local and does not.
    const providerKeyMissing = (provider === "openai" && !process.env.OPENAI_API_KEY) ||
        (provider === "anthropic" && !process.env.ANTHROPIC_API_KEY);
    if (providerKeyMissing) {
        logAiError({
            level: "warn",
            reason: "missing-provider-key",
            provider,
            model: modelName,
            method: input.method,
            path: input.path,
        });
        return null;
    }
    if (!["openai", "anthropic", "ollama"].includes(provider)) {
        logAiError({
            level: "warn",
            reason: "unsupported-provider",
            provider,
            model: modelName,
            method: input.method,
            path: input.path,
        });
        return null;
    }
    const now = new Date();
    const prompt = buildPrompt(input, config, now);
    try {
        const model = selectModel(provider, modelName, config);
        const result = await generateText({
            model,
            prompt,
            // NOTE(review): the seed is only passed as an OpenAI provider
            // option; other providers ignore it — confirm if deterministic
            // output is expected for anthropic/ollama.
            providerOptions: config.aiSeed !== undefined
                ? { openai: { seed: config.aiSeed } }
                : undefined,
        });
        // Structured info log of the raw generation for debugging.
        process.stderr.write(`${JSON.stringify({
            level: "info",
            ts: new Date().toISOString(),
            kind: "mock-bff-ai-result",
            provider,
            model: modelName,
            method: input.method,
            path: input.path,
            finishReason: result.finishReason,
            text: result.text,
        })}\n`);
        // Validate the model output shape: 2xx status, mime type, any body.
        const parsed = z
            .object({
            status: z.number().int().min(200).max(299),
            contentType: z.string().min(1),
            body: z.unknown(),
        })
            .parse(parseJsonValue(result.text));
        const headers = {
            "content-type": parsed.contentType || "application/json",
            "x-mock-source": "ai",
        };
        return {
            requestSignature: {
                method: input.method,
                path: input.path,
                queryHash: shortHash(JSON.stringify(input.query)),
                bodyHash: shortHash(JSON.stringify(input.body ?? {})),
            },
            requestSnapshot: { query: input.query, body: input.body },
            response: { status: parsed.status, headers, body: parsed.body },
            meta: {
                source: "ai",
                createdAt: new Date().toISOString(),
                seed: config.aiSeed,
                notes: `vercel-ai-sdk:${provider}`,
                // Only persist the (potentially large) prompt on request.
                ...(config.aiStorePrompt ? { prompt } : {}),
            },
        };
    }
    catch (error) {
        // Covers network/SDK failures, unparsable output, and zod rejection.
        logAiError({
            reason: "generateText-failed",
            provider,
            model: modelName,
            method: input.method,
            path: input.path,
            error,
        });
        return null;
    }
}