@firstlovecenter/ai-chat 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1450 @@
1
+ import { AnthropicVertex } from '@anthropic-ai/vertex-sdk';
2
+ import { randomUUID } from 'crypto';
3
+ export { GoogleAuth } from 'google-auth-library';
4
+
5
// src/server/tools/types.ts
// Name of the terminal tool that ends the agent loop.
var TERMINAL_TOOL_NAME = "present";
// Wrap a successful tool payload in the standard result envelope.
function ok(data) {
  const result = { ok: true, data };
  return result;
}
// Wrap a failure in the standard result envelope with a machine-readable code.
function err(code, message) {
  const error = { code, message };
  return { ok: false, error };
}
13
+
14
// src/server/agent.ts
// Default cap on provider round-trips before the agent gives up.
var DEFAULT_MAX_TOOL_TURNS = 12;
// Default per-turn output-token budget passed to the provider.
var DEFAULT_MAX_OUTPUT_TOKENS = 4096;
/**
 * Drives the tool-use loop: repeatedly asks the provider for a turn,
 * executes every tool call it requests, and terminates when the terminal
 * `present` tool succeeds (or a budget/no-present condition is hit).
 *
 * Returns { ok: true, structured, toolCallCount, transcript } on success,
 * or { ok: false, error, transcript } with codes AGENT_NO_PRESENT /
 * AGENT_TURN_LIMIT on failure. The transcript records every user turn,
 * assistant text, tool_use, and tool_result in order.
 */
async function runAgent(input) {
  const provider = input.provider;
  const maxToolTurns = input.maxToolTurns ?? DEFAULT_MAX_TOOL_TURNS;
  const maxOutputTokens = input.maxOutputTokens ?? DEFAULT_MAX_OUTPUT_TOKENS;
  // Audit log returned to the caller regardless of outcome.
  const transcript = [];
  transcript.push({ kind: "user", text: input.question });
  // Provider-facing conversation state; grows by one assistant + one tool
  // message per turn.
  const messages = [{ role: "user", text: input.question }];
  const system = input.systemBlocks;
  const toolSchemas = Object.values(input.tools).map((t) => t.schema);
  let toolCallCount = 0;
  // Payload captured from a successful present() call; non-null ends the loop.
  let presentPayload = null;
  for (let turn = 0; turn < maxToolTurns; turn++) {
    const response = await provider.runTurn({
      system,
      tools: toolSchemas,
      messages,
      maxOutputTokens
    });
    if (response.text) {
      transcript.push({ kind: "assistant_text", text: response.text });
    }
    // No tool calls: either present() already succeeded (break) or the model
    // stopped without producing a terminal answer (error out).
    if (response.toolCalls.length === 0) {
      if (presentPayload) break;
      return {
        ok: false,
        error: {
          code: "AGENT_NO_PRESENT",
          message: "The agent ended without calling present(). Try rephrasing."
        },
        transcript
      };
    }
    messages.push({
      role: "assistant",
      text: response.text,
      toolCalls: response.toolCalls,
      // providerData lets the provider echo its own turn back faithfully
      // (e.g. Gemini thought signatures).
      providerData: response.providerData
    });
    const toolResults = [];
    for (const tc of response.toolCalls) {
      transcript.push({ kind: "tool_use", name: tc.name, input: tc.input });
      const tool = input.tools[tc.name];
      // The model invented a tool name: report it back as an error result
      // instead of aborting the whole run.
      if (!tool) {
        const errResult = {
          ok: false,
          error: { code: "UNKNOWN_TOOL", message: `Unknown tool: ${tc.name}` }
        };
        transcript.push({ kind: "tool_result", name: tc.name, result: errResult });
        toolResults.push({
          toolCallId: tc.id,
          toolName: tc.name,
          isError: true,
          content: JSON.stringify(errResult.error)
        });
        continue;
      }
      // Execute the tool with the shared context plus the running call count
      // (so tools can see how much work has been done).
      const result = await tool.execute(tc.input, {
        ...input.ctx,
        toolCallCount
      });
      transcript.push({ kind: "tool_result", name: tc.name, result });
      toolCallCount += 1;
      if (tc.name === TERMINAL_TOOL_NAME && result.ok) {
        // Self-verification gate: present() is rejected until at least two
        // other tool calls have run (the count here includes present itself).
        if (toolCallCount < 3) {
          const violation = {
            code: "SELF_VERIFY_REQUIRED",
            message: "Per FR-8.3 you must run at least one CROSS-CHECK tool call (a different metric, a different period, or a run_sql sanity-check) before present. Make that extra call now, then call present again."
          };
          toolResults.push({
            toolCallId: tc.id,
            toolName: tc.name,
            isError: true,
            content: JSON.stringify(violation)
          });
          continue;
        }
        // Accepted terminal answer; remaining tool calls in this turn are
        // intentionally skipped.
        presentPayload = result.data;
        break;
      }
      toolResults.push({
        toolCallId: tc.id,
        toolName: tc.name,
        isError: !result.ok,
        content: JSON.stringify(result.ok ? result.data : result.error)
      });
    }
    if (presentPayload) break;
    messages.push({ role: "tool", results: toolResults });
  }
  if (!presentPayload) {
    return {
      ok: false,
      error: {
        code: "AGENT_TURN_LIMIT",
        message: `Agent exceeded the ${maxToolTurns}-turn budget without calling present().`
      },
      transcript
    };
  }
  return { ok: true, structured: presentPayload, toolCallCount, transcript };
}
118
// Tool-use provider backed by Anthropic's Messages API hosted on Vertex AI.
// The underlying AnthropicVertex client is monkey-patched at construction so
// its request paths are rewritten synchronously (see patchVertexBuildRequestSync).
var ClaudeToolProvider = class {
  id = "claude";
  // Patched AnthropicVertex SDK client.
  client;
  // Pinned Claude model id used for every turn.
  modelId;
  constructor(opts) {
    this.modelId = opts.modelId;
    this.client = new AnthropicVertex({
      projectId: opts.projectId,
      region: opts.location,
      googleAuth: opts.auth
    });
    patchVertexBuildRequestSync(this.client);
  }
  // Run one non-streaming agent turn and normalize the response into the
  // provider-agnostic shape used by runAgent.
  async runTurn(input) {
    // System blocks marked `cached` get Anthropic's ephemeral prompt caching.
    const system = input.system.map((b) => ({
      type: "text",
      text: b.text,
      ...b.cached ? { cache_control: { type: "ephemeral" } } : {}
    }));
    const messages = toAnthropicMessages(input.messages);
    const response = await this.client.messages.create({
      model: this.modelId,
      max_tokens: input.maxOutputTokens,
      system,
      tools: input.tools,
      messages
    });
    return fromAnthropicResponse(response);
  }
};
148
// Factory so callers never construct ClaudeToolProvider directly.
function createClaudeProvider(opts) {
  const provider = new ClaudeToolProvider(opts);
  return provider;
}
151
// Convert the provider-agnostic message history into Anthropic Messages API
// shape: user text stays a plain string, assistant turns become text +
// tool_use blocks, and tool turns become user-role tool_result blocks.
function toAnthropicMessages(messages) {
  const converted = [];
  for (const msg of messages) {
    switch (msg.role) {
      case "user":
        converted.push({ role: "user", content: msg.text });
        break;
      case "assistant": {
        const content = [];
        if (msg.text) {
          content.push({ type: "text", text: msg.text });
        }
        for (const call of msg.toolCalls) {
          content.push({
            type: "tool_use",
            id: call.id,
            name: call.name,
            input: call.input
          });
        }
        converted.push({ role: "assistant", content });
        break;
      }
      default: {
        // Tool results are echoed back as a user message of tool_result blocks.
        const content = msg.results.map((res) => ({
          type: "tool_result",
          tool_use_id: res.toolCallId,
          is_error: res.isError,
          content: res.content
        }));
        converted.push({ role: "user", content });
      }
    }
  }
  return converted;
}
182
// Normalize an Anthropic Messages API response into the provider-agnostic
// turn result: concatenated visible text, extracted tool calls, and a
// normalized stop reason.
function fromAnthropicResponse(response) {
  const texts = [];
  const toolCalls = [];
  for (const block of response.content) {
    if (block.type === "text") {
      texts.push(block.text);
    } else if (block.type === "tool_use") {
      toolCalls.push({
        id: block.id,
        name: block.name,
        // Defensive default: the API should always send an input object.
        input: block.input ?? {}
      });
    }
  }
  return {
    text: texts.join(""),
    toolCalls,
    stopReason: normalizeStopReason(response.stop_reason)
  };
}
// Map Anthropic stop reasons onto the small shared vocabulary; anything
// unrecognized collapses to "other".
function normalizeStopReason(reason) {
  if (reason === "tool_use" || reason === "end_turn" || reason === "max_tokens") {
    return reason;
  }
  return "other";
}
215
// Generic Anthropic Messages paths that must be rewritten to Vertex
// project/location-scoped rawPredict paths.
var MODEL_ENDPOINTS = /* @__PURE__ */ new Set(["/v1/messages", "/v1/messages?beta=true"]);
// anthropic_version Vertex expects when the caller did not set one.
var VERTEX_DEFAULT_VERSION = "vertex-2023-10-16";
// True for plain objects (not null/undefined, not arrays).
function isObj(value) {
  return value != null && typeof value === "object" && !Array.isArray(value);
}
/**
 * Monkey-patches the AnthropicVertex client's prototype so buildRequest
 * synchronously rewrites generic Anthropic API paths into Vertex
 * rawPredict / streamRawPredict URLs and injects the default
 * anthropic_version, then delegates to the grandparent (base SDK)
 * implementation.
 *
 * @throws Error when a rewrite is needed but this.projectId is unset.
 */
function patchVertexBuildRequestSync(client) {
  const proto = Object.getPrototypeOf(client);
  const grandparent = Object.getPrototypeOf(proto);
  proto.buildRequest = function patchedBuildRequest(options, extra) {
    // Clone the body before mutating so the caller's object is untouched.
    if (isObj(options.body)) {
      options.body = { ...options.body };
    }
    if (isObj(options.body) && !options.body.anthropic_version) {
      options.body.anthropic_version = VERTEX_DEFAULT_VERSION;
    }
    if (options.path && MODEL_ENDPOINTS.has(options.path) && options.method === "post" && isObj(options.body)) {
      if (!this.projectId) throw new Error("AnthropicVertex: projectId is required");
      // Vertex carries the model in the URL, not the body.
      const model = options.body.model;
      options.body.model = void 0;
      const stream = options.body.stream ?? false;
      const specifier = stream ? "streamRawPredict" : "rawPredict";
      options.path = `/projects/${this.projectId}/locations/${this.region}/publishers/anthropic/models/${model}:${specifier}`;
    }
    // FIX: the original wrote `a || b && options.method === "post"`; since &&
    // binds tighter than ||, any request to the non-beta count_tokens path was
    // rewritten regardless of HTTP method. Parenthesize so the POST check
    // applies to both path variants.
    if ((options.path === "/v1/messages/count_tokens" || options.path === "/v1/messages/count_tokens?beta=true") && options.method === "post") {
      if (!this.projectId) throw new Error("AnthropicVertex: projectId is required");
      options.path = `/projects/${this.projectId}/locations/${this.region}/publishers/anthropic/models/count-tokens:rawPredict`;
    }
    return grandparent.buildRequest.call(this, options, extra);
  };
}
245
+
246
// src/server/providers/schema.ts
// Convert a JSON-Schema-ish tool schema into the subset Gemini's
// functionDeclarations accept; an empty result falls back to a bare object.
function toGeminiSchema(schema) {
  return walk(schema) ?? { type: "object" };
}
251
// Recursively rewrite one schema node into Gemini-compatible form:
// - `const` becomes a single-value `enum` (with an inferred `type`),
// - `oneOf` becomes `anyOf`,
// - `additionalProperties` is dropped (unsupported),
// - non-string enums collapse to minimum/maximum bounds,
// - bare arrays get a default string `items`.
function walk(node) {
  if (Array.isArray(node)) return node.map(walk);
  if (!isObject(node)) return node;
  // Rewrite `const: x` into `enum: [x]` plus an inferred type, then re-walk.
  if ("const" in node) {
    const c = node.const;
    const inferred = inferConstType(c);
    const next = { ...node };
    delete next.const;
    next.type = next.type ?? inferred;
    next.enum = [c];
    return walk(next);
  }
  const out = {};
  for (const [k, v] of Object.entries(node)) {
    if (k === "oneOf") {
      // Gemini only supports anyOf.
      out.anyOf = v.map(walk);
      continue;
    }
    if (k === "additionalProperties") {
      // Not representable in Gemini schemas; drop it.
      continue;
    }
    if (k === "properties" && isObject(v)) {
      const props = {};
      for (const [pk, pv] of Object.entries(v)) {
        props[pk] = walk(pv);
      }
      out.properties = props;
      continue;
    }
    if (k === "items") {
      out.items = walk(v);
      continue;
    }
    if (k === "anyOf" || k === "allOf") {
      out[k] = v.map(walk);
      continue;
    }
    if (k === "enum" && Array.isArray(v)) {
      // String enums pass through; all-number enums are approximated by a
      // min/max range; mixed enums are dropped entirely.
      const allStrings = v.every((x) => typeof x === "string");
      if (allStrings) {
        out.enum = v;
        continue;
      }
      const allNumbers = v.length > 0 && v.every((x) => typeof x === "number");
      if (allNumbers) {
        const nums = v;
        out.minimum = Math.min(...nums);
        out.maximum = Math.max(...nums);
      }
      continue;
    }
    // Everything else is copied through untouched.
    out[k] = v;
  }
  // Gemini requires `items` on arrays; default to strings when absent.
  if (out.type === "array" && out.items === void 0) {
    out.items = { type: "string" };
  }
  return out;
}
309
// Infer a JSON-schema type name for a `const` value; unknown kinds
// (objects, null, ...) default to "string".
function inferConstType(c) {
  switch (typeof c) {
    case "string":
      return "string";
    case "number":
      return Number.isInteger(c) ? "integer" : "number";
    case "boolean":
      return "boolean";
    default:
      return "string";
  }
}
315
// True only for plain objects: excludes null, undefined, arrays, primitives.
function isObject(v) {
  if (v == null) return false;
  return typeof v === "object" && !Array.isArray(v);
}
318
+
319
// src/server/providers/gemini.ts
// Tool-use provider calling Vertex AI Gemini via the raw generateContent
// REST endpoint (no SDK), authenticated with a GoogleAuth access token.
var GeminiToolProvider = class {
  id = "gemini";
  // GoogleAuth instance used to mint bearer tokens per turn.
  auth;
  projectId;
  location;
  modelId;
  // Injectable fetch for testing; defaults to the global fetch.
  fetchImpl;
  constructor(opts) {
    this.auth = opts.auth;
    this.projectId = opts.projectId;
    this.location = opts.location;
    this.modelId = opts.modelId;
    this.fetchImpl = opts.fetchImpl ?? fetch;
  }
  // Run one non-streaming agent turn against generateContent and normalize
  // the response into the provider-agnostic shape used by runAgent.
  // Throws on auth failure, non-2xx responses, or an error payload.
  async runTurn(input) {
    const accessToken = await this.auth.getAccessToken();
    if (!accessToken) throw new Error("Failed to obtain GCP access token");
    const url = `https://${vertexHost(this.location)}/v1/projects/${this.projectId}/locations/${this.location}/publishers/google/models/${this.modelId}:generateContent`;
    const body = {
      // Gemini takes a single system instruction; blocks are joined with
      // blank lines (no per-block caching equivalent here).
      systemInstruction: {
        parts: [{ text: input.system.map((b) => b.text).join("\n\n") }]
      },
      contents: toGeminiContents(input.messages),
      tools: [
        {
          functionDeclarations: input.tools.map((t) => ({
            name: t.name,
            description: t.description,
            parameters: toGeminiSchema(t.input_schema)
          }))
        }
      ],
      toolConfig: {
        functionCallingConfig: { mode: "AUTO" }
      },
      generationConfig: {
        maxOutputTokens: input.maxOutputTokens,
        // Deterministic decoding for the tool-use loop.
        temperature: 0
      }
    };
    const res = await this.fetchImpl(url, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${accessToken}`,
        "Content-Type": "application/json"
      },
      body: JSON.stringify(body)
    });
    if (!res.ok) {
      const detail = await safeReadText(res);
      throw new Error(`Vertex Gemini request failed (${res.status}): ${detail}`);
    }
    const json = await res.json();
    // Vertex can return HTTP 200 with an embedded error object.
    if (json.error) {
      throw new Error(
        `Vertex Gemini returned error: ${json.error.status ?? json.error.code ?? "??"} ${json.error.message ?? ""}`
      );
    }
    return fromGeminiResponse(json);
  }
};
381
// Factory so callers never construct GeminiToolProvider directly.
function createGeminiProvider(opts) {
  const provider = new GeminiToolProvider(opts);
  return provider;
}
384
// Resolve the Vertex AI API host for a location; "global" uses the
// region-less endpoint.
function vertexHost(location) {
  if (location === "global") return "aiplatform.googleapis.com";
  return `${location}-aiplatform.googleapis.com`;
}
387
// Convert the provider-agnostic message history into Gemini `contents`:
// user text, model turns (raw echoed parts when available, else rebuilt
// text/functionCall parts), and tool results as functionResponse parts.
function toGeminiContents(messages) {
  const contents = [];
  for (const msg of messages) {
    if (msg.role === "user") {
      contents.push({ role: "user", parts: [{ text: msg.text }] });
      continue;
    }
    if (msg.role === "assistant") {
      // Prefer the raw parts captured from the previous response so the
      // model turn is echoed back byte-faithfully.
      const rawParts = msg.providerData?.parts;
      if (Array.isArray(rawParts)) {
        contents.push({ role: "model", parts: rawParts });
        continue;
      }
      const parts = [];
      if (msg.text) parts.push({ text: msg.text });
      for (const call of msg.toolCalls) {
        parts.push({ functionCall: { name: call.name, args: call.input } });
      }
      // Gemini rejects empty parts arrays; pad with an empty text part.
      contents.push({ role: "model", parts: parts.length > 0 ? parts : [{ text: "" }] });
      continue;
    }
    // Tool turn: each result becomes a functionResponse in a user message.
    const parts = msg.results.map((res) => ({
      functionResponse: {
        name: res.toolName,
        response: parseToolResultContent(res.content, res.isError)
      }
    }));
    contents.push({ role: "user", parts });
  }
  return contents;
}
// Parse a serialized tool result back into an object Gemini accepts:
// errors wrap as { error }, plain objects pass through, everything else
// (arrays, primitives, unparseable strings) wraps as { result }.
function parseToolResultContent(content, isError) {
  let value;
  try {
    value = JSON.parse(content);
  } catch {
    value = content;
  }
  if (isError) {
    return { error: value };
  }
  const isPlainObject = value !== null && typeof value === "object" && !Array.isArray(value);
  return isPlainObject ? value : { result: value };
}
432
// Normalize a Gemini generateContent response into the provider-agnostic
// turn result. Thought parts (part.thought === true) are excluded from the
// visible text; functionCall parts get synthetic sequential ids.
function fromGeminiResponse(json) {
  const candidate = json.candidates?.[0];
  const rawParts = candidate?.content?.parts ?? [];
  const texts = [];
  const toolCalls = [];
  for (const part of rawParts) {
    if (typeof part.text === "string" && part.thought !== true) {
      texts.push(part.text);
    } else if (part.functionCall) {
      toolCalls.push({
        // Gemini has no call ids; synthesize one from position + name.
        id: `gem_${toolCalls.length}_${part.functionCall.name}`,
        name: part.functionCall.name,
        input: part.functionCall.args ?? {}
      });
    }
  }
  return {
    text: texts.join(""),
    toolCalls,
    stopReason: normalizeFinishReason(candidate?.finishReason, toolCalls.length > 0),
    // Preserve the raw parts so a subsequent runTurn() echoes the
    // assistant turn back faithfully, including any `thoughtSignature`
    // entries thinking-mode requires.
    providerData: { parts: rawParts }
  };
}
// Map Gemini finish reasons onto the shared vocabulary; any tool call wins,
// a missing reason counts as a clean end of turn.
function normalizeFinishReason(reason, hasToolCalls) {
  if (hasToolCalls) return "tool_use";
  if (reason === "STOP") return "end_turn";
  if (reason === "MAX_TOKENS") return "max_tokens";
  return reason ? "other" : "end_turn";
}
471
// Best-effort read of an error response body, truncated to 500 chars;
// any failure yields an empty string instead of throwing.
async function safeReadText(res) {
  try {
    const text = await res.text();
    return text.slice(0, 500);
  } catch {
    return "";
  }
}
478
+
479
// src/server/providers/index.ts
// Registry of selectable tool-use providers; looked up by id (the value
// stored in ai_settings.toolProvider) via getToolProvider.
var toolProviders = [
  {
    id: "claude",
    label: "Claude (Vertex)",
    description: "Anthropic Messages API hosted on GCP Vertex AI.",
    // Build a Claude provider; a per-request location overrides the
    // deployment default.
    createProvider(opts) {
      return createClaudeProvider({
        auth: opts.auth,
        projectId: opts.projectId,
        location: opts.location ?? opts.defaultLocation,
        modelId: opts.modelIds.claude
      });
    }
  },
  {
    id: "gemini",
    label: "Gemini (Vertex)",
    description: "Google Gemini hosted on GCP Vertex AI (raw generateContent).",
    // Build a Gemini provider; same location fallback as above.
    createProvider(opts) {
      return createGeminiProvider({
        auth: opts.auth,
        projectId: opts.projectId,
        location: opts.location ?? opts.defaultLocation,
        modelId: opts.modelIds.gemini
      });
    }
  }
];
508
// Look up a registered tool provider by id; undefined when unknown.
function getToolProvider(id) {
  for (const def of toolProviders) {
    if (def.id === id) return def;
  }
  return undefined;
}
511
// Resolve the Vertex AI API host for a location ("global" has no region
// prefix). Duplicate of vertexHost, kept because the bundler split modules.
function vertexHost2(location) {
  const prefix = location === "global" ? "" : `${location}-`;
  return `${prefix}aiplatform.googleapis.com`;
}
514
// Mint a GCP bearer token, treating a null/empty token as a hard failure.
async function getAccessToken(auth) {
  const token = await auth.getAccessToken();
  if (token) return token;
  throw new Error("Failed to obtain GCP access token");
}
519
// Build an AnthropicVertex client and apply the synchronous buildRequest
// patch before handing it to callers.
function createAnthropicVertexClient(args) {
  const client = new AnthropicVertex({
    googleAuth: args.auth,
    projectId: args.projectId,
    region: args.location
  });
  patchVertexBuildRequestSync2(client);
  return client;
}
528
// Duplicate of the patch in providers/claude (bundler split); see there.
var MODEL_ENDPOINTS2 = /* @__PURE__ */ new Set(["/v1/messages", "/v1/messages?beta=true"]);
var VERTEX_DEFAULT_VERSION2 = "vertex-2023-10-16";
// True for plain objects (not null/undefined, not arrays).
function isObj2(value) {
  return value != null && typeof value === "object" && !Array.isArray(value);
}
/**
 * Monkey-patches the AnthropicVertex client's prototype so buildRequest
 * synchronously rewrites generic Anthropic API paths into Vertex
 * rawPredict / streamRawPredict URLs and injects the default
 * anthropic_version, then delegates to the grandparent (base SDK)
 * implementation.
 *
 * @throws Error when a rewrite is needed but this.projectId is unset.
 */
function patchVertexBuildRequestSync2(client) {
  const proto = Object.getPrototypeOf(client);
  const grandparent = Object.getPrototypeOf(proto);
  proto.buildRequest = function patchedBuildRequest(options, extra) {
    // Clone the body before mutating so the caller's object is untouched.
    if (isObj2(options.body)) {
      options.body = { ...options.body };
    }
    if (isObj2(options.body) && !options.body.anthropic_version) {
      options.body.anthropic_version = VERTEX_DEFAULT_VERSION2;
    }
    if (options.path && MODEL_ENDPOINTS2.has(options.path) && options.method === "post" && isObj2(options.body)) {
      if (!this.projectId) throw new Error("AnthropicVertex: projectId is required");
      // Vertex carries the model in the URL, not the body.
      const model = options.body.model;
      options.body.model = void 0;
      const stream = options.body.stream ?? false;
      const specifier = stream ? "streamRawPredict" : "rawPredict";
      options.path = `/projects/${this.projectId}/locations/${this.region}/publishers/anthropic/models/${model}:${specifier}`;
    }
    // FIX: the original wrote `a || b && options.method === "post"`; since &&
    // binds tighter than ||, any request to the non-beta count_tokens path was
    // rewritten regardless of HTTP method. Parenthesize so the POST check
    // applies to both path variants.
    if ((options.path === "/v1/messages/count_tokens" || options.path === "/v1/messages/count_tokens?beta=true") && options.method === "post") {
      if (!this.projectId) throw new Error("AnthropicVertex: projectId is required");
      options.path = `/projects/${this.projectId}/locations/${this.region}/publishers/anthropic/models/count-tokens:rawPredict`;
    }
    return grandparent.buildRequest.call(this, options, extra);
  };
}
558
+
559
// src/server/narrators/claude.ts
// System prompt for the Claude narrator. This string is sent verbatim to the
// model — do not reformat or translate it.
var NARRATIVE_SYSTEM = `You are the prose narrator for the FLC Data Intelligence agent.

You receive:
- The user's original question.
- The full structured answer the agent already computed (blocks +
raw_numbers).
- One specific paragraph_brief block's topic and key_facts.

Your job: write 2\u20134 short sentences (\u2264 80 words total) of natural,
useful prose for THIS paragraph block.

Rules:
- Use only the facts in key_facts and raw_numbers. Do not invent
numbers, dates, country names, or trends.
- Be neutral and direct. No hedging ("it appears", "perhaps").
- No headings, no markdown, no bullet lists. Plain prose.
- Reference the same numbers verbatim \u2014 if raw_numbers says
1234.56, say "1,234.56" or "$1,235", never "about 1.2k" if that
drifts.
- Do not begin with "Here is", "Below", "Based on the data".
- Stop. Do not repeat yourself.`;
581
// Stream narration tokens from Claude on Vertex for one paragraph_brief
// block. Yields raw text deltas as they arrive; non-text stream events are
// ignored.
async function* streamClaudeNarration(opts) {
  const client = createAnthropicVertexClient({
    projectId: opts.projectId,
    location: opts.location,
    auth: opts.auth
  });
  const stream = await client.messages.stream({
    model: opts.modelId,
    // Hard cap: narration is 2-4 short sentences, 400 tokens is plenty.
    max_tokens: 400,
    system: NARRATIVE_SYSTEM,
    messages: [{ role: "user", content: buildNarrativeUserMessage(opts.input) }]
  });
  for await (const event of stream) {
    // Only surface text deltas; guard each property access because event
    // shapes vary across SDK event types.
    if (event.type === "content_block_delta" && "delta" in event && event.delta && "type" in event.delta && event.delta.type === "text_delta") {
      yield event.delta.text;
    }
  }
}
599
// Assemble the user message shared by all narrators: the question, the
// block's topic and key facts, and the full structured answer for reference.
function buildNarrativeUserMessage(input) {
  const factLines = input.keyFacts.map((fact) => `- ${fact}`).join("\n");
  const lines = [
    `User question: ${input.question}`,
    "",
    `Block topic: ${input.topic}`,
    `Block key_facts:\n${factLines}`,
    "",
    "Full structured answer (for reference; do not restate the whole thing):",
    JSON.stringify(input.structured, null, 2)
  ];
  return lines.join("\n");
}
611
+
612
// src/server/narrators/gemini.ts
// System prompt for the Gemini narrator. Sent verbatim to the model — do not
// reformat or translate it.
var NARRATIVE_SYSTEM2 = `You are the prose narrator for the FLC Data Intelligence agent.

You receive:
- The user's original question.
- The full structured answer the agent already computed (blocks +
raw_numbers).
- One specific paragraph_brief block's topic and key_facts.

Your job: write 2\u20134 short sentences (\u2264 80 words total) of natural,
useful prose for THIS paragraph block.

Rules:
- Use only the facts in key_facts and raw_numbers. Do not invent
numbers, dates, country names, or trends.
- Be neutral and direct. No hedging.
- No headings, no markdown, no bullet lists. Plain prose.
- Reference the same numbers verbatim.
- Do not begin with "Here is" or "Based on the data".
- Stop. Do not repeat yourself.`;
632
// Stream narration tokens from Gemini on Vertex via streamGenerateContent
// with SSE framing (alt=sse). Yields raw text deltas as they arrive.
async function* streamGeminiNarration(opts) {
  const accessToken = await getAccessToken(opts.auth);
  const url = `https://${vertexHost2(opts.location)}/v1/projects/${opts.projectId}/locations/${opts.location}/publishers/google/models/${opts.modelId}:streamGenerateContent?alt=sse`;
  const res = await fetch(url, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${accessToken}`,
      "Content-Type": "application/json"
    },
    body: JSON.stringify({
      systemInstruction: { parts: [{ text: NARRATIVE_SYSTEM2 }] },
      contents: [
        {
          role: "user",
          parts: [{ text: buildNarrativeUserMessage(opts.input) }]
        }
      ],
      // Small cap + temperature 0: narration is short and deterministic.
      generationConfig: { maxOutputTokens: 400, temperature: 0 }
    })
  });
  if (!res.ok || !res.body) {
    const detail = await safeReadText2(res);
    throw new Error(`Vertex Gemini request failed (${res.status}): ${detail}`);
  }
  yield* parseGeminiSseTextDeltas(res.body);
}
658
// Best-effort read of an error response body, truncated to 500 chars;
// never throws.
async function safeReadText2(res) {
  try {
    const text = await res.text();
    return text.slice(0, 500);
  } catch {
    return "";
  }
}
665
// Parse a Gemini SSE byte stream line by line, yielding every non-empty
// text part found in `data:` events. Malformed JSON payloads are skipped.
async function* parseGeminiSseTextDeltas(body) {
  const reader = body.getReader();
  const decoder = new TextDecoder();
  let pending = "";
  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    pending += decoder.decode(value, { stream: true });
    // Drain every complete line currently buffered.
    for (;;) {
      const nl = pending.indexOf("\n");
      if (nl === -1) break;
      const line = pending.slice(0, nl).trim();
      pending = pending.slice(nl + 1);
      if (!line.startsWith("data:")) continue;
      const payload = line.slice(5).trim();
      if (!payload) continue;
      let chunk;
      try {
        chunk = JSON.parse(payload);
      } catch {
        continue;
      }
      const parts = chunk.candidates?.[0]?.content?.parts ?? [];
      for (const part of parts) {
        if (typeof part.text === "string" && part.text.length > 0) {
          yield part.text;
        }
      }
    }
  }
}
693
+
694
// src/server/narrators/grok.ts
// System prompt for the Grok narrator. Sent verbatim to the model — do not
// reformat or translate it.
var NARRATIVE_SYSTEM3 = `You are the prose narrator for the FLC Data Intelligence agent.

You receive:
- The user's original question.
- The full structured answer the agent already computed (blocks +
raw_numbers).
- One specific paragraph_brief block's topic and key_facts.

Your job: write 2\u20134 short sentences (\u2264 80 words total) of natural,
useful prose for THIS paragraph block.

Rules:
- Use only the facts in key_facts and raw_numbers. Do not invent
numbers, dates, country names, or trends.
- Be neutral and direct. No hedging.
- No headings, no markdown, no bullet lists. Plain prose.
- Reference the same numbers verbatim.
- Do not begin with "Here is" or "Based on the data".
- Stop. Do not repeat yourself.`;
714
// Stream narration tokens from Grok via Vertex's OpenAI-compatible
// chat/completions endpoint (v1beta1, SSE). Yields raw content deltas.
async function* streamGrokNarration(opts) {
  const accessToken = await getAccessToken(opts.auth);
  const url = `https://${vertexHost2(opts.location)}/v1beta1/projects/${opts.projectId}/locations/${opts.location}/endpoints/openapi/chat/completions`;
  const res = await fetch(url, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${accessToken}`,
      "Content-Type": "application/json"
    },
    body: JSON.stringify({
      model: opts.modelId,
      // Narration is 2-4 short sentences; 400 tokens is plenty.
      max_tokens: 400,
      stream: true,
      messages: [
        { role: "system", content: NARRATIVE_SYSTEM3 },
        { role: "user", content: buildNarrativeUserMessage(opts.input) }
      ]
    })
  });
  if (!res.ok || !res.body) {
    const detail = await safeReadText3(res);
    throw new Error(`Vertex Grok request failed (${res.status}): ${detail}`);
  }
  yield* parseSseDeltas(res.body);
}
739
// Best-effort read of an error response body, truncated to 500 chars;
// never throws.
async function safeReadText3(res) {
  try {
    const text = await res.text();
    return text.slice(0, 500);
  } catch {
    return "";
  }
}
746
// Parse an OpenAI-style SSE stream, yielding each non-empty delta.content
// from `data:` events. Stops at the `[DONE]` sentinel; malformed JSON
// payloads are skipped.
async function* parseSseDeltas(body) {
  const reader = body.getReader();
  const decoder = new TextDecoder();
  let pending = "";
  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    pending += decoder.decode(value, { stream: true });
    // Drain every complete line currently buffered.
    for (;;) {
      const nl = pending.indexOf("\n");
      if (nl === -1) break;
      const line = pending.slice(0, nl).trim();
      pending = pending.slice(nl + 1);
      if (!line.startsWith("data:")) continue;
      const payload = line.slice(5).trim();
      if (payload === "[DONE]") return;
      if (!payload) continue;
      let chunk;
      try {
        chunk = JSON.parse(payload);
      } catch {
        continue;
      }
      const delta = chunk.choices?.[0]?.delta?.content;
      if (typeof delta === "string" && delta.length > 0) yield delta;
    }
  }
}
773
+
774
// src/server/narrators/index.ts
// Resolve a narrator id to its streaming generator; throws for unknown ids.
function getNarrator(id) {
  if (id === "claude") return streamClaudeNarration;
  if (id === "gemini") return streamGeminiNarration;
  if (id === "grok") return streamGrokNarration;
  throw new Error(`Unknown narrator id: ${String(id)}`);
}
789
+
790
// src/server/routes/agent-custom.ts
// The narrator ids this route can stream prose from.
var NARRATOR_IDS = /* @__PURE__ */ new Set(["claude", "gemini", "grok"]);
// Type-guard style check used to derive a narrator from the tool provider id.
function isNarratorId(value) {
  return NARRATOR_IDS.has(value);
}
799
// Build a JSON error Response with the standard { error: { code, message } }
// envelope.
function jsonError(status, code, message) {
  const payload = JSON.stringify({ error: { code, message } });
  return new Response(payload, {
    status,
    headers: { "Content-Type": "application/json" }
  });
}
805
// Generate a 16-hex-char session id from a UUID v4 (dashes stripped).
function defaultGenerateSessionId() {
  const uuid = randomUUID().replace(/-/g, "");
  return uuid.slice(0, 16);
}
808
// Resolve the model id for the chosen narrator from VertexPort.modelIds.
// Grok is optional config, so it is validated at runtime and throws when
// the model id is missing or empty.
function pickNarratorModelId(vertex, narratorId) {
  switch (narratorId) {
    case "claude":
      return vertex.modelIds.claude;
    case "gemini":
      return vertex.modelIds.gemini;
    default: {
      const grok = vertex.modelIds.grok;
      if (typeof grok === "string" && grok.length > 0) {
        return grok;
      }
      throw new Error(
        "Narrator 'grok' selected but VertexPort.modelIds.grok is not pinned."
      );
    }
  }
}
819
+ function createAgentCustomRoutes(ctx) {
820
+ const { persistence, auth, scope, tools, vertex, logger, hooks } = ctx;
821
+ return {
822
+ /** Next.js-compatible POST handler. */
823
+ POST: async (req) => {
824
+ if (hooks?.onRequest) {
825
+ const short = await hooks.onRequest(req);
826
+ if (short) return short;
827
+ }
828
+ const authResult = await auth.requireAuth(req);
829
+ if (!authResult.ok) return authResult.response;
830
+ const { scope: callerScope, userId } = authResult;
831
+ if (hooks?.onAuthenticated) {
832
+ const short = await hooks.onAuthenticated({
833
+ req,
834
+ scope: callerScope,
835
+ userId
836
+ });
837
+ if (short) return short;
838
+ }
839
+ const body = await req.json().catch(() => null);
840
+ const question = typeof body?.question === "string" ? body.question.trim() : "";
841
+ if (!question) {
842
+ return jsonError(
843
+ 400,
844
+ "VALIDATION_FAILED",
845
+ "question must be a non-empty string."
846
+ );
847
+ }
848
+ const rawChatSessionId = body?.chatSessionId;
849
+ const incomingChatSessionId = typeof rawChatSessionId === "number" && Number.isInteger(rawChatSessionId) ? rawChatSessionId : null;
850
+ const aiSettings = await persistence.getAiSettings();
851
+ let chatSessionId;
852
+ if (incomingChatSessionId !== null) {
853
+ const owned = await persistence.getSession(incomingChatSessionId, userId);
854
+ if (!owned) {
855
+ return jsonError(404, "NOT_FOUND", "Chat session not found.");
856
+ }
857
+ chatSessionId = owned.id;
858
+ } else {
859
+ const created = await persistence.createSession({
860
+ userId,
861
+ title: question.slice(0, 200)
862
+ });
863
+ chatSessionId = created.id;
864
+ }
865
+ await persistence.appendMessage({
866
+ sessionId: chatSessionId,
867
+ role: "user",
868
+ question
869
+ });
870
+ const sessionId = hooks?.generateSessionId ? await hooks.generateSessionId({
871
+ scope: callerScope,
872
+ userId,
873
+ chatSessionId: incomingChatSessionId
874
+ }) : defaultGenerateSessionId();
875
+ const scopeSummary = await scope.buildScopeSummary(callerScope);
876
+ const scopeLabel = await scope.resolveScopeLabel(callerScope);
877
+ const toolContext = {
878
+ scope: callerScope,
879
+ sessionId,
880
+ scopeSummary,
881
+ toolCallCount: 0
882
+ };
883
+ const systemBlocks = await tools.buildSystemBlocks(toolContext);
884
+ const def = getToolProvider(aiSettings.toolProvider);
885
+ if (!def) {
886
+ return jsonError(
887
+ 400,
888
+ "INVALID_PROVIDER",
889
+ `Unknown tool provider in ai_settings: ${aiSettings.toolProvider}`
890
+ );
891
+ }
892
+ const provider = def.createProvider({
893
+ auth: vertex.auth,
894
+ projectId: vertex.projectId,
895
+ defaultLocation: vertex.defaultLocation,
896
+ modelIds: vertex.modelIds,
897
+ location: aiSettings.gcpLocation
898
+ });
899
+ let narratorId;
900
+ if (ctx.resolveNarratorId) {
901
+ narratorId = await ctx.resolveNarratorId(callerScope);
902
+ } else if (isNarratorId(aiSettings.toolProvider)) {
903
+ narratorId = aiSettings.toolProvider;
904
+ } else {
905
+ return jsonError(
906
+ 400,
907
+ "INVALID_NARRATOR",
908
+ `Cannot derive narrator from tool provider: ${aiSettings.toolProvider}`
909
+ );
910
+ }
911
+ const stream = new ReadableStream({
912
+ async start(controller) {
913
+ const encoder = new TextEncoder();
914
+ let closed = false;
915
+ const send = (event, data) => {
916
+ if (closed) return;
917
+ try {
918
+ controller.enqueue(
919
+ encoder.encode(
920
+ `event: ${event}
921
+ data: ${JSON.stringify(data)}
922
+
923
+ `
924
+ )
925
+ );
926
+ } catch {
927
+ }
928
+ };
929
+ const persistedBlocks = [];
930
+ const persistedProse = {};
931
+ let persistedError = null;
932
+ let sessionStarted = false;
933
+ try {
934
+ if (hooks?.onSessionStart) {
935
+ await hooks.onSessionStart({
936
+ scope: callerScope,
937
+ sessionId,
938
+ userId
939
+ });
940
+ }
941
+ sessionStarted = true;
942
+ send("meta", { chatSessionId, scopeLabel });
943
+ const agentResult = await runAgent({
944
+ question,
945
+ ctx: toolContext,
946
+ tools: tools.tools,
947
+ systemBlocks,
948
+ provider
949
+ });
950
+ if (!agentResult.ok) {
951
+ persistedError = agentResult.error;
952
+ send("error", agentResult.error);
953
+ send("done", {});
954
+ return;
955
+ }
956
+ const { structured } = agentResult;
957
+ let narratorModelId = null;
958
+ const narratorFn = getNarrator(narratorId);
959
+ for (let i = 0; i < structured.blocks.length; i++) {
960
+ const block = structured.blocks[i];
961
+ persistedBlocks[i] = block;
962
+ send("block", { index: i, ...block });
963
+ if (block.kind === "paragraph_brief") {
964
+ persistedProse[i] = "";
965
+ try {
966
+ if (narratorModelId === null) {
967
+ narratorModelId = pickNarratorModelId(vertex, narratorId);
968
+ }
969
+ for await (const token of narratorFn({
970
+ auth: vertex.auth,
971
+ projectId: vertex.projectId,
972
+ location: aiSettings.gcpLocation,
973
+ modelId: narratorModelId,
974
+ input: {
975
+ question,
976
+ structured,
977
+ topic: block.topic,
978
+ keyFacts: block.key_facts,
979
+ blockIndex: i
980
+ }
981
+ })) {
982
+ persistedProse[i] += token;
983
+ send("prose", { block_index: i, delta: token });
984
+ }
985
+ } catch (e) {
986
+ const fallback = block.key_facts.join(". ") + ".";
987
+ persistedProse[i] = fallback;
988
+ send("prose", {
989
+ block_index: i,
990
+ delta: block.key_facts.join(". ")
991
+ });
992
+ send("prose", { block_index: i, delta: "." });
993
+ send("error", {
994
+ code: "NARRATOR_FAILED",
995
+ message: `Prose stream failed (${e.message}); fell back to key facts.`
996
+ });
997
+ }
998
+ }
999
+ }
1000
+ send("done", {});
1001
+ } catch (e) {
1002
+ const message = e.message ?? "Internal error";
1003
+ persistedError = { code: "INTERNAL", message };
1004
+ logger?.error?.(
1005
+ { chatSessionId, sessionId, err: message },
1006
+ "[agent-custom] stream errored"
1007
+ );
1008
+ try {
1009
+ controller.enqueue(
1010
+ encoder.encode(
1011
+ `event: error
1012
+ data: ${JSON.stringify({ code: "INTERNAL", message })}
1013
+
1014
+ `
1015
+ )
1016
+ );
1017
+ controller.enqueue(encoder.encode(`event: done
1018
+ data: {}
1019
+
1020
+ `));
1021
+ } catch {
1022
+ }
1023
+ } finally {
1024
+ if (hooks?.onSessionEnd) {
1025
+ const cause = req.signal.aborted ? "abort" : persistedError ? "error" : "complete";
1026
+ try {
1027
+ await hooks.onSessionEnd({
1028
+ scope: callerScope,
1029
+ sessionId,
1030
+ userId,
1031
+ cause
1032
+ });
1033
+ } catch (err2) {
1034
+ logger?.warn?.(
1035
+ {
1036
+ chatSessionId,
1037
+ sessionId,
1038
+ sessionStarted,
1039
+ err: err2.message
1040
+ },
1041
+ "[agent-custom] onSessionEnd hook failed"
1042
+ );
1043
+ }
1044
+ }
1045
+ try {
1046
+ await persistence.appendMessage({
1047
+ sessionId: chatSessionId,
1048
+ role: "assistant",
1049
+ blocks: persistedBlocks.length ? persistedBlocks : null,
1050
+ prose: Object.keys(persistedProse).length ? persistedProse : null,
1051
+ errorJson: persistedError
1052
+ });
1053
+ } catch (err2) {
1054
+ logger?.warn?.(
1055
+ { chatSessionId, sessionId, err: err2.message },
1056
+ "[agent-custom] failed to persist assistant turn"
1057
+ );
1058
+ }
1059
+ try {
1060
+ closed = true;
1061
+ controller.close();
1062
+ } catch {
1063
+ }
1064
+ }
1065
+ }
1066
+ });
1067
+ return new Response(stream, {
1068
+ headers: {
1069
+ "Content-Type": "text/event-stream; charset=utf-8",
1070
+ "Cache-Control": "no-cache, no-transform",
1071
+ Connection: "keep-alive"
1072
+ }
1073
+ });
1074
+ }
1075
+ };
1076
+ }
1077
+
1078
+ // src/server/routes/chat-sessions.ts
1079
// Fallback title applied when a client creates a session with a blank title.
var DEFAULT_TITLE = "New chat";
// Hard cap (characters, after trimming) on stored session titles.
var TITLE_MAX = 200;
// Maps this module's API error codes to the HTTP status apiError() sends.
var STATUS_BY_CODE = {
  UNAUTHORIZED: 401,
  NOT_FOUND: 404,
  VALIDATION_FAILED: 400,
  INTERNAL: 500
};
1087
/**
 * Build a JSON error Response in the chat-sessions wire shape
 * (`{ error: { code, message, details } }`); the HTTP status is looked up
 * from the code via STATUS_BY_CODE.
 */
function apiError(code, message) {
  const payload = { error: { code, message, details: {} } };
  const status = STATUS_BY_CODE[code];
  return Response.json(payload, { status });
}
1093
/** Wrap a success payload in a 200 JSON Response. */
function okJson(data) {
  return Response.json(data);
}
1096
/**
 * Shape a persisted session row for the wire: id/title plus ISO-8601
 * timestamp strings (null when the row carries no timestamp).
 */
function serializeSession(s) {
  const toIso = (d) => (d ? d.toISOString() : null);
  return {
    id: s.id,
    title: s.title,
    createdAt: toIso(s.createdAt),
    updatedAt: toIso(s.updatedAt)
  };
}
1104
/**
 * Parse a route param into a positive integer session id.
 * Anything non-numeric, fractional, zero or negative maps to null.
 */
function parseSessionId(raw) {
  const parsed = Number(raw);
  return Number.isInteger(parsed) && parsed > 0 ? parsed : null;
}
1109
/**
 * Route factory for the chat-session CRUD API.
 *
 * `ctx` supplies:
 *   - persistence: session/message storage adapter (list/create/get/update/delete)
 *   - auth: `requireAuth(req)` resolving `{ ok, scope, userId }` or
 *     `{ ok: false, response }`
 *   - logger: optional; only `.error` is called here
 *   - hooks: optional `onRequest` / `onAuthenticated` interceptors that can
 *     short-circuit a request by returning a Response
 *
 * Returns `{ list, detail }` handler maps (per-route contracts in the
 * JSDoc on each handler below).
 */
function createChatSessionsRoutes(ctx) {
  const { persistence, auth, logger, hooks } = ctx;
  // Shared per-request gate, run in order: onRequest hook -> auth ->
  // onAuthenticated hook. Resolves `{ short: Response }` when the request
  // must stop early, else `{ ok: true, scope, userId }` for the caller.
  async function gate(req) {
    if (hooks?.onRequest) {
      const r = await hooks.onRequest(req);
      if (r) return { short: r };
    }
    const authed = await auth.requireAuth(req);
    if (!authed.ok) return { short: authed.response };
    if (hooks?.onAuthenticated) {
      const r = await hooks.onAuthenticated({
        req,
        scope: authed.scope,
        userId: authed.userId
      });
      if (r) return { short: r };
    }
    return { ok: true, scope: authed.scope, userId: authed.userId };
  }
  const list = {
    /**
     * `GET /api/chat/sessions` — caller's recent sessions, newest first,
     * capped at 100. Response: `{ sessions: [{ id, title, createdAt, updatedAt }] }`.
     */
    GET: async (req) => {
      try {
        const g = await gate(req);
        if ("short" in g) return g.short;
        const rows = await persistence.listSessionsForUser(g.userId, { limit: 100 });
        return okJson({ sessions: rows.map(serializeSession) });
      } catch (err2) {
        logger?.error("[chat-sessions] list.GET failed", err2);
        const msg = err2 instanceof Error ? err2.message : "Internal error";
        return apiError("INTERNAL", msg);
      }
    },
    /**
     * `POST /api/chat/sessions` — body `{ title?: string }`. Trims and caps
     * title at 200 chars; defaults to "New chat" when blank.
     * Response: `{ session: { id, title, createdAt, updatedAt } }`.
     */
    POST: async (req) => {
      try {
        const g = await gate(req);
        if ("short" in g) return g.short;
        // Malformed/absent JSON bodies degrade to {} -> default title.
        const body = await req.json().catch(() => ({}));
        const rawTitle = typeof body.title === "string" ? body.title.trim() : "";
        const title = rawTitle ? rawTitle.slice(0, TITLE_MAX) : DEFAULT_TITLE;
        const created = await persistence.createSession({ userId: g.userId, title });
        return okJson({ session: serializeSession(created) });
      } catch (err2) {
        logger?.error("[chat-sessions] list.POST failed", err2);
        const msg = err2 instanceof Error ? err2.message : "Internal error";
        return apiError("INTERNAL", msg);
      }
    }
  };
  const detail = {
    /**
     * `GET /api/chat/sessions/[id]` — session metadata + ordered messages.
     * 404 when the id doesn't exist or doesn't belong to the caller (we never
     * differentiate the two, to avoid leaking the id space).
     * Response: `{ session: { id, title, createdAt, updatedAt },
     *   messages: [{ id, role, question, blocks, prose, errorJson, createdAt }] }`.
     */
    GET: async (req, params) => {
      try {
        const g = await gate(req);
        if ("short" in g) return g.short;
        const id = parseSessionId(params.id);
        if (id === null) return apiError("NOT_FOUND", "Chat session not found.");
        // Ownership check: getSession is scoped to the caller's userId.
        const meta = await persistence.getSession(id, g.userId);
        if (!meta) return apiError("NOT_FOUND", "Chat session not found.");
        const messages = await persistence.listMessagesForSession(id, g.userId);
        return okJson({
          session: serializeSession(meta),
          messages: messages.map((m) => ({
            id: m.id,
            role: m.role,
            question: m.question,
            blocks: m.blocks,
            prose: m.prose,
            errorJson: m.errorJson,
            createdAt: m.createdAt ? m.createdAt.toISOString() : null
          }))
        });
      } catch (err2) {
        logger?.error("[chat-sessions] detail.GET failed", err2);
        const msg = err2 instanceof Error ? err2.message : "Internal error";
        return apiError("INTERNAL", msg);
      }
    },
    /**
     * `PATCH /api/chat/sessions/[id]` — rename. Body `{ title: string }`,
     * trimmed and capped at 200 chars. Response: `{ ok: true }`.
     */
    PATCH: async (req, params) => {
      try {
        const g = await gate(req);
        if ("short" in g) return g.short;
        const id = parseSessionId(params.id);
        if (id === null) return apiError("NOT_FOUND", "Chat session not found.");
        const meta = await persistence.getSession(id, g.userId);
        if (!meta) return apiError("NOT_FOUND", "Chat session not found.");
        const body = await req.json().catch(() => ({}));
        if (typeof body.title !== "string") {
          return apiError("VALIDATION_FAILED", "title must be a string.");
        }
        const trimmed = body.title.trim();
        if (!trimmed) {
          return apiError("VALIDATION_FAILED", "title must not be empty.");
        }
        await persistence.updateSession(id, g.userId, {
          title: trimmed.slice(0, TITLE_MAX)
        });
        return okJson({ ok: true });
      } catch (err2) {
        logger?.error("[chat-sessions] detail.PATCH failed", err2);
        const msg = err2 instanceof Error ? err2.message : "Internal error";
        return apiError("INTERNAL", msg);
      }
    },
    /**
     * `DELETE /api/chat/sessions/[id]` — drop session and its messages.
     * Response: `{ ok: true }`.
     */
    DELETE: async (req, params) => {
      try {
        const g = await gate(req);
        if ("short" in g) return g.short;
        const id = parseSessionId(params.id);
        if (id === null) return apiError("NOT_FOUND", "Chat session not found.");
        const meta = await persistence.getSession(id, g.userId);
        if (!meta) return apiError("NOT_FOUND", "Chat session not found.");
        await persistence.deleteSession(id, g.userId);
        return okJson({ ok: true });
      } catch (err2) {
        logger?.error("[chat-sessions] detail.DELETE failed", err2);
        const msg = err2 instanceof Error ? err2.message : "Internal error";
        return apiError("INTERNAL", msg);
      }
    }
  };
  return { list, detail };
}
1254
+
1255
+ // src/server/routes/admin-settings.ts
1256
// GCP locations the admin API accepts for `gcp_location` in a PATCH.
var VALID_LOCATIONS = ["us-east5", "global"];
1257
/** True when `v` is a plain object (non-null, not an array) — i.e. a JSON object. */
function isStringRecord(v) {
  if (v === null || Array.isArray(v)) return false;
  return typeof v === "object";
}
1260
/** Serialize `body` as JSON into a Response with the given status (default 200). */
function jsonResponse(body, status = 200) {
  const headers = { "content-type": "application/json" };
  return new Response(JSON.stringify(body), { status, headers });
}
1266
/**
 * Convert a camelCase AiSettings record into the snake_case wire format,
 * rendering `updatedAt` as an ISO-8601 string (null when absent).
 */
function toWire(settings) {
  const updatedAt = settings.updatedAt ? settings.updatedAt.toISOString() : null;
  return {
    tool_provider: settings.toolProvider,
    gcp_location: settings.gcpLocation,
    chat_interface: settings.chatInterface,
    updated_at: updatedAt,
    updated_by_user_id: settings.updatedByUserId
  };
}
1275
/**
 * Route factory for the super-admin AI settings API (`GET`/`PATCH` on the
 * ai_settings singleton).
 *
 * `ctx` supplies persistence, auth, the registered `toolProviders` and
 * `chatInterfaces` (used to validate PATCH values), an optional logger, and
 * optional `onRequest`/`onAuthenticated` hooks that can short-circuit a
 * request by returning a Response.
 *
 * Fix: GET and PATCH previously duplicated the whole hooks/auth/super-admin
 * gate verbatim; it is now shared via `gate`, matching the pattern used by
 * createChatSessionsRoutes. Behavior and wire strings are unchanged.
 */
function createAdminSettingsRoutes(ctx) {
  const { persistence, auth, toolProviders: toolProviders2, chatInterfaces, logger, hooks } = ctx;
  // Shared gate, run in order: onRequest hook -> auth -> onAuthenticated
  // hook -> super-admin check. Resolves `{ short: Response }` when the
  // request must stop early, else `{ userId }` for the authenticated caller.
  // `forbiddenMessage` customizes the 403 body per verb.
  async function gate(req, forbiddenMessage) {
    if (hooks?.onRequest) {
      const short = await hooks.onRequest(req);
      if (short) return { short };
    }
    const result = await auth.requireAuth(req);
    if (!result.ok) return { short: result.response };
    if (hooks?.onAuthenticated) {
      const short = await hooks.onAuthenticated({
        req,
        scope: result.scope,
        userId: result.userId
      });
      if (short) return { short };
    }
    if (!auth.isSuperAdmin(result.scope)) {
      return {
        short: jsonResponse({ error: "forbidden", message: forbiddenMessage }, 403)
      };
    }
    return { userId: result.userId };
  }
  /** `GET` — current settings in wire (snake_case) form. */
  async function GET(req) {
    const g = await gate(req, "Only super admins can read AI settings.");
    if (g.short) return g.short;
    const settings = await persistence.getAiSettings();
    return jsonResponse(toWire(settings));
  }
  /**
   * `PATCH` — partial update. Accepts any subset of `tool_provider`,
   * `gcp_location`, `chat_interface`; each value is validated against the
   * corresponding registry before anything is written. An empty patch is a
   * 400. Responds with the updated settings in wire form.
   */
  async function PATCH(req) {
    const g = await gate(req, "Only super admins can change AI settings.");
    if (g.short) return g.short;
    const body = await req.json().catch(() => null);
    if (!isStringRecord(body)) {
      return jsonResponse({ error: "invalid_body", message: "Body must be JSON." }, 400);
    }
    const patch = {};
    if ("tool_provider" in body) {
      const v = body.tool_provider;
      const validIds = toolProviders2.map((p) => p.id);
      if (typeof v !== "string" || !validIds.includes(v)) {
        return jsonResponse({ error: "invalid_tool_provider" }, 400);
      }
      patch.toolProvider = v;
    }
    if ("gcp_location" in body) {
      const v = body.gcp_location;
      if (typeof v !== "string" || !VALID_LOCATIONS.includes(v)) {
        return jsonResponse({ error: "invalid_gcp_location" }, 400);
      }
      patch.gcpLocation = v;
    }
    if ("chat_interface" in body) {
      const v = body.chat_interface;
      const validIds = chatInterfaces.map((i) => i.id);
      if (typeof v !== "string" || !validIds.includes(v)) {
        return jsonResponse({ error: "invalid_chat_interface" }, 400);
      }
      patch.chatInterface = v;
    }
    if (patch.toolProvider === void 0 && patch.gcpLocation === void 0 && patch.chatInterface === void 0) {
      return jsonResponse(
        {
          error: "empty_patch",
          message: "Body must set at least one of tool_provider, gcp_location, chat_interface."
        },
        400
      );
    }
    try {
      const updated = await persistence.updateAiSettings(patch, g.userId);
      return jsonResponse(toWire(updated));
    } catch (err2) {
      // Log with context, then let the framework surface the failure.
      logger?.error("admin-settings PATCH failed", err2);
      throw err2;
    }
  }
  return { GET, PATCH };
}
1369
+
1370
+ // src/server/configure.ts
1371
// Chat UI implementations shipped with this package; used as the default
// `chatInterfaces` registry when the host app does not supply its own.
var BUILTIN_CHAT_INTERFACE_IDS = ["custom", "vercel"];
1372
/**
 * Wire the package together for a host application: merge the built-in and
 * host-supplied tool-provider registries, bind `runAgent` to the host's
 * persistence/vertex configuration, and construct the three route groups
 * (agent streaming, chat sessions, admin settings).
 */
function configureAiChat(opts) {
  const registry = [...toolProviders, ...(opts.extraToolProviders ?? [])];
  const interfaces = opts.chatInterfaces ?? BUILTIN_CHAT_INTERFACE_IDS.map((id) => ({ id }));
  // Resolve a provider id against the merged registry first, then built-ins.
  const resolveProvider = (id) => {
    const hit = registry.find((p) => p.id === id);
    return hit ?? getToolProvider(id);
  };
  // runAgent bound to host config; provider and location fall back to the
  // persisted ai_settings row when not passed explicitly.
  const boundRunAgent = async ({
    question,
    ctx,
    providerId,
    location,
    maxToolTurns,
    maxOutputTokens
  }) => {
    const settings = await opts.persistence.getAiSettings();
    const effectiveId = providerId ?? settings.toolProvider;
    const def = resolveProvider(effectiveId);
    if (!def) {
      const registered = registry.map((p) => p.id).join(", ");
      throw new Error(
        `Unknown tool provider '${effectiveId}'. Registered: ${registered}.`
      );
    }
    const provider = def.createProvider({
      auth: opts.vertex.auth,
      projectId: opts.vertex.projectId,
      defaultLocation: opts.vertex.defaultLocation,
      modelIds: opts.vertex.modelIds,
      location: location ?? settings.gcpLocation
    });
    const systemBlocks = await opts.tools.buildSystemBlocks(ctx);
    return runAgent({
      question,
      ctx,
      tools: opts.tools.tools,
      systemBlocks,
      provider,
      maxToolTurns,
      maxOutputTokens
    });
  };
  // Session/admin routes only receive the request-level hooks.
  const sharedHooks = opts.hooks
    ? {
        onRequest: opts.hooks.onRequest,
        onAuthenticated: opts.hooks.onAuthenticated
      }
    : void 0;
  const agentCustom = createAgentCustomRoutes({
    persistence: opts.persistence,
    auth: opts.auth,
    scope: opts.scope,
    tools: opts.tools,
    vertex: opts.vertex,
    logger: opts.logger,
    resolveNarratorId: opts.resolveNarratorId,
    hooks: opts.hooks
  });
  const chatSessions = createChatSessionsRoutes({
    persistence: opts.persistence,
    auth: opts.auth,
    logger: opts.logger,
    hooks: sharedHooks
  });
  const adminSettings = createAdminSettingsRoutes({
    persistence: opts.persistence,
    auth: opts.auth,
    toolProviders: registry,
    chatInterfaces: interfaces,
    logger: opts.logger,
    hooks: sharedHooks
  });
  return {
    runAgent: boundRunAgent,
    routes: { agentCustom, chatSessions, adminSettings },
    registries: { toolProviders: registry, chatInterfaces: interfaces }
  };
}
1447
+
1448
+ export { BUILTIN_CHAT_INTERFACE_IDS, DEFAULT_MAX_OUTPUT_TOKENS, DEFAULT_MAX_TOOL_TURNS, TERMINAL_TOOL_NAME, configureAiChat, err, getToolProvider, ok, runAgent, toolProviders };
1449
//# sourceMappingURL=index.js.map