@fgv/ts-extras 5.1.0-16 → 5.1.0-17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.browser.js +2 -1
- package/dist/packlets/ai-assist/apiClient.js +570 -58
- package/dist/packlets/ai-assist/chatRequestBuilders.js +180 -0
- package/dist/packlets/ai-assist/index.js +4 -3
- package/dist/packlets/ai-assist/model.js +20 -3
- package/dist/packlets/ai-assist/registry.js +66 -10
- package/dist/packlets/ai-assist/sseParser.js +122 -0
- package/dist/packlets/ai-assist/streamingAdapters/anthropic.js +192 -0
- package/dist/packlets/ai-assist/streamingAdapters/common.js +77 -0
- package/dist/packlets/ai-assist/streamingAdapters/gemini.js +160 -0
- package/dist/packlets/ai-assist/streamingAdapters/openaiChat.js +149 -0
- package/dist/packlets/ai-assist/streamingAdapters/openaiResponses.js +163 -0
- package/dist/packlets/ai-assist/streamingAdapters/proxy.js +157 -0
- package/dist/packlets/ai-assist/streamingClient.js +88 -0
- package/dist/packlets/conversion/converters.js +1 -1
- package/dist/packlets/zip-file-tree/zipFileTreeAccessors.js +2 -2
- package/dist/ts-extras.d.ts +512 -5
- package/lib/index.browser.d.ts +2 -1
- package/lib/index.browser.js +3 -1
- package/lib/packlets/ai-assist/apiClient.d.ts +103 -1
- package/lib/packlets/ai-assist/apiClient.js +574 -58
- package/lib/packlets/ai-assist/chatRequestBuilders.d.ts +89 -0
- package/lib/packlets/ai-assist/chatRequestBuilders.js +189 -0
- package/lib/packlets/ai-assist/index.d.ts +4 -3
- package/lib/packlets/ai-assist/index.js +10 -1
- package/lib/packlets/ai-assist/model.d.ts +271 -2
- package/lib/packlets/ai-assist/model.js +21 -3
- package/lib/packlets/ai-assist/registry.d.ts +10 -1
- package/lib/packlets/ai-assist/registry.js +67 -11
- package/lib/packlets/ai-assist/sseParser.d.ts +45 -0
- package/lib/packlets/ai-assist/sseParser.js +127 -0
- package/lib/packlets/ai-assist/streamingAdapters/anthropic.d.ts +18 -0
- package/lib/packlets/ai-assist/streamingAdapters/anthropic.js +195 -0
- package/lib/packlets/ai-assist/streamingAdapters/common.d.ts +71 -0
- package/lib/packlets/ai-assist/streamingAdapters/common.js +81 -0
- package/lib/packlets/ai-assist/streamingAdapters/gemini.d.ts +19 -0
- package/lib/packlets/ai-assist/streamingAdapters/gemini.js +163 -0
- package/lib/packlets/ai-assist/streamingAdapters/openaiChat.d.ts +18 -0
- package/lib/packlets/ai-assist/streamingAdapters/openaiChat.js +152 -0
- package/lib/packlets/ai-assist/streamingAdapters/openaiResponses.d.ts +19 -0
- package/lib/packlets/ai-assist/streamingAdapters/openaiResponses.js +166 -0
- package/lib/packlets/ai-assist/streamingAdapters/proxy.d.ts +34 -0
- package/lib/packlets/ai-assist/streamingAdapters/proxy.js +160 -0
- package/lib/packlets/ai-assist/streamingClient.d.ts +33 -0
- package/lib/packlets/ai-assist/streamingClient.js +93 -0
- package/lib/packlets/conversion/converters.d.ts +1 -1
- package/lib/packlets/conversion/converters.js +1 -1
- package/lib/packlets/zip-file-tree/zipFileTreeAccessors.d.ts +2 -2
- package/lib/packlets/zip-file-tree/zipFileTreeAccessors.js +2 -2
- package/package.json +7 -7
@@ -20,7 +20,11 @@
 // SOFTWARE.
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.callProviderCompletion = callProviderCompletion;
+exports.callProviderImageGeneration = callProviderImageGeneration;
+exports.callProviderListModels = callProviderListModels;
+exports.callProxiedListModels = callProxiedListModels;
 exports.callProxiedCompletion = callProxiedCompletion;
+exports.callProxiedImageGeneration = callProxiedImageGeneration;
 /**
  * Chat completion client for AI assist with support for multiple provider APIs.
  *
@@ -36,31 +40,17 @@ exports.callProxiedCompletion = callProxiedCompletion;
 const ts_json_base_1 = require("@fgv/ts-json-base");
 const ts_utils_1 = require("@fgv/ts-utils");
 const model_1 = require("./model");
+const chatRequestBuilders_1 = require("./chatRequestBuilders");
+const registry_1 = require("./registry");
 const toolFormats_1 = require("./toolFormats");
 // ============================================================================
 // Shared helpers
 // ============================================================================
-/**
- * Builds the messages array from prompt + optional correction messages.
- * @internal
- */
-function buildMessages(prompt, additionalMessages) {
-    const messages = [
-        { role: 'system', content: prompt.system },
-        { role: 'user', content: prompt.user }
-    ];
-    if (additionalMessages) {
-        for (const msg of additionalMessages) {
-            messages.push({ role: msg.role, content: msg.content });
-        }
-    }
-    return messages;
-}
 /**
  * Makes an HTTP request and returns the parsed JSON, or a failure.
  * @internal
  */
-async function fetchJson(url, headers, body, logger) {
+async function fetchJson(url, headers, body, logger, signal) {
     /* c8 ignore next 1 - optional logger */
     logger === null || logger === void 0 ? void 0 : logger.detail(`AI API request: POST ${url}`);
     let response;
@@ -68,7 +58,8 @@ async function fetchJson(url, headers, body, logger) {
         response = await fetch(url, {
             method: 'POST',
             headers: Object.assign({ 'Content-Type': 'application/json' }, headers),
-            body: JSON.stringify(body)
+            body: JSON.stringify(body),
+            signal
         });
     }
     catch (err) {
@@ -101,6 +92,47 @@ async function fetchJson(url, headers, body, logger) {
     }
     return (0, ts_utils_1.succeed)(json);
 }
+/**
+ * Makes an HTTP GET request and returns the parsed JSON, or a failure.
+ * @internal
+ */
+async function fetchGetJson(url, headers, logger, signal) {
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.detail(`AI API request: GET ${url}`);
+    let response;
+    try {
+        response = await fetch(url, { method: 'GET', headers, signal });
+    }
+    catch (err) {
+        const detail = err instanceof Error ? err.message : String(err);
+        /* c8 ignore next 1 - optional logger */
+        logger === null || logger === void 0 ? void 0 : logger.error(`AI API request failed: ${detail}`);
+        return (0, ts_utils_1.fail)(`AI API request failed: ${detail}`);
+    }
+    if (!response.ok) {
+        const errorText = await response.text().catch(() => 'unknown error');
+        /* c8 ignore next 1 - optional logger */
+        logger === null || logger === void 0 ? void 0 : logger.error(`AI API returned ${response.status}: ${errorText}`);
+        return (0, ts_utils_1.fail)(`AI API returned ${response.status}: ${errorText}`);
+    }
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.detail(`AI API response: ${response.status}`);
+    let json;
+    try {
+        json = await response.json();
+    }
+    catch (_a) {
+        /* c8 ignore next 1 - optional logger */
+        logger === null || logger === void 0 ? void 0 : logger.error('AI API returned invalid JSON response');
+        return (0, ts_utils_1.fail)('AI API returned invalid JSON response');
+    }
+    if (!(0, ts_json_base_1.isJsonObject)(json)) {
+        /* c8 ignore next 1 - optional logger */
+        logger === null || logger === void 0 ? void 0 : logger.error('AI API returned non-object JSON response');
+        return (0, ts_utils_1.fail)('AI API returned non-object JSON response');
+    }
+    return (0, ts_utils_1.succeed)(json);
+}
 const openAiMessage = ts_utils_1.Validators.object({
     content: ts_utils_1.Validators.string
 });
@@ -153,16 +185,18 @@ const geminiResponse = ts_utils_1.Validators.object({
  * Works for xAI Grok, OpenAI, Groq, and Mistral.
  * @internal
  */
-async function callOpenAiCompletion(config, prompt, additionalMessages, temperature = 0.7, logger) {
+async function callOpenAiCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, signal) {
     const url = `${config.baseUrl}/chat/completions`;
-    const messages = buildMessages(prompt, additionalMessages);
+    const messages = (0, chatRequestBuilders_1.buildMessages)(prompt.system, (0, chatRequestBuilders_1.buildOpenAiChatUserContent)(prompt), {
+        tail: additionalMessages
+    });
     const body = { model: config.model, messages, temperature };
     const headers = {
         Authorization: `Bearer ${config.apiKey}`
     };
     /* c8 ignore next 1 - optional logger */
     logger === null || logger === void 0 ? void 0 : logger.info(`OpenAI completion: model=${config.model}`);
-    const jsonResult = await fetchJson(url, headers, body, logger);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
     if (jsonResult.isFailure()) {
         return (0, ts_utils_1.fail)(jsonResult.message);
     }
@@ -201,9 +235,11 @@ function extractResponsesApiText(output) {
  * Used when tools are configured for an openai-format provider.
  * @internal
  */
-async function callOpenAiResponsesCompletion(config, prompt, tools, additionalMessages, temperature = 0.7, logger) {
+async function callOpenAiResponsesCompletion(config, prompt, tools, additionalMessages, temperature = 0.7, logger, signal) {
     const url = `${config.baseUrl}/responses`;
-    const input = buildMessages(prompt, additionalMessages);
+    const input = (0, chatRequestBuilders_1.buildMessages)(prompt.system, (0, chatRequestBuilders_1.buildOpenAiResponsesUserContent)(prompt), {
+        tail: additionalMessages
+    });
     const body = {
         model: config.model,
         input,
@@ -215,7 +251,7 @@ async function callOpenAiResponsesCompletion(config, prompt, tools, additionalMe
     };
     /* c8 ignore next 1 - optional logger */
     logger === null || logger === void 0 ? void 0 : logger.info(`OpenAI Responses API: model=${config.model}, tools=${tools.map((t) => t.type).join(',')}`);
-    const jsonResult = await fetchJson(url, headers, body, logger);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
     if (jsonResult.isFailure()) {
         return (0, ts_utils_1.fail)(jsonResult.message);
     }
@@ -260,18 +296,10 @@ function extractAnthropicText(content) {
  * mixed content block responses.
  * @internal
 */
-async function callAnthropicCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools) {
+async function callAnthropicCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools, signal) {
     const url = `${config.baseUrl}/messages`;
     // Anthropic uses system as a top-level field, not in messages
-    const messages = [{ role: 'user', content: prompt.user }];
-    if (additionalMessages) {
-        for (const msg of additionalMessages) {
-            // Anthropic doesn't have a system role in messages
-            if (msg.role !== 'system') {
-                messages.push({ role: msg.role, content: msg.content });
-            }
-        }
-    }
+    const messages = (0, chatRequestBuilders_1.buildAnthropicMessages)(prompt, { tail: additionalMessages });
     const body = {
         model: config.model,
         system: prompt.system,
@@ -293,7 +321,7 @@ async function callAnthropicCompletion(config, prompt, additionalMessages, tempe
         'anthropic-version': '2023-06-01',
         'anthropic-dangerous-direct-browser-access': 'true'
     };
-    const jsonResult = await fetchJson(url, headers, body, logger);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
     if (jsonResult.isFailure()) {
         return (0, ts_utils_1.fail)(jsonResult.message);
     }
@@ -328,22 +356,10 @@ async function callAnthropicCompletion(config, prompt, additionalMessages, tempe
  * When tools are configured, includes Google Search grounding.
  * @internal
 */
-async function callGeminiCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools) {
+async function callGeminiCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools, signal) {
     const url = `${config.baseUrl}/models/${config.model}:generateContent`;
     // Gemini uses 'contents' with 'parts', and 'model' role instead of 'assistant'
-    const contents = [
-        { role: 'user', parts: [{ text: prompt.user }] }
-    ];
-    if (additionalMessages) {
-        for (const msg of additionalMessages) {
-            if (msg.role !== 'system') {
-                contents.push({
-                    role: msg.role === 'assistant' ? 'model' : msg.role,
-                    parts: [{ text: msg.content }]
-                });
-            }
-        }
-    }
+    const contents = (0, chatRequestBuilders_1.buildGeminiContents)(prompt, { tail: additionalMessages });
     const body = {
         systemInstruction: { parts: [{ text: prompt.system }] },
         contents,
@@ -361,7 +377,7 @@ async function callGeminiCompletion(config, prompt, additionalMessages, temperat
     const headers = {
         'x-goog-api-key': config.apiKey
     };
-    const jsonResult = await fetchJson(url, headers, body, logger);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
     if (jsonResult.isFailure()) {
         return (0, ts_utils_1.fail)(jsonResult.message);
     }
@@ -397,10 +413,13 @@ async function callGeminiCompletion(config, prompt, additionalMessages, temperat
  * @public
  */
 async function callProviderCompletion(params) {
-    const { descriptor, apiKey, prompt, additionalMessages, temperature = 0.7, modelOverride, logger, tools } = params;
+    const { descriptor, apiKey, prompt, additionalMessages, temperature = 0.7, modelOverride, logger, tools, signal } = params;
     if (!descriptor.baseUrl) {
         return (0, ts_utils_1.fail)(`provider "${descriptor.id}" has no API endpoint configured`);
     }
+    if (prompt.attachments.length > 0 && !descriptor.acceptsImageInput) {
+        return (0, ts_utils_1.fail)(`provider "${descriptor.id}" does not accept image input`);
+    }
     const hasTools = tools !== undefined && tools.length > 0;
     const modelContext = hasTools ? 'tools' : undefined;
     const config = {
@@ -418,19 +437,465 @@ async function callProviderCompletion(params) {
     switch (descriptor.apiFormat) {
         case 'openai':
             if (hasTools) {
-                return callOpenAiResponsesCompletion(config, prompt, tools, additionalMessages, temperature, logger);
+                return callOpenAiResponsesCompletion(config, prompt, tools, additionalMessages, temperature, logger, signal);
+            }
+            return callOpenAiCompletion(config, prompt, additionalMessages, temperature, logger, signal);
+        case 'anthropic':
+            return callAnthropicCompletion(config, prompt, additionalMessages, temperature, logger, tools, signal);
+        case 'gemini':
+            return callGeminiCompletion(config, prompt, additionalMessages, temperature, logger, tools, signal);
+        /* c8 ignore next 4 - defensive coding: exhaustive switch guaranteed by TypeScript */
+        default: {
+            const _exhaustive = descriptor.apiFormat;
+            return (0, ts_utils_1.fail)(`unsupported API format: ${String(_exhaustive)}`);
+        }
+    }
+}
+const openAiImageItem = ts_utils_1.Validators.object({
+    b64_json: ts_utils_1.Validators.string,
+    revised_prompt: ts_utils_1.Validators.string.optional()
+});
+const openAiImageResponse = ts_utils_1.Validators.object({
+    data: ts_utils_1.Validators.arrayOf(openAiImageItem).withConstraint((arr) => arr.length > 0)
+});
+const imagenPrediction = ts_utils_1.Validators.object({
+    bytesBase64Encoded: ts_utils_1.Validators.string,
+    mimeType: ts_utils_1.Validators.string.optional()
+});
+const imagenResponse = ts_utils_1.Validators.object({
+    predictions: ts_utils_1.Validators.arrayOf(imagenPrediction).withConstraint((arr) => arr.length > 0)
+});
+// ---- Proxied image generation response ----
+const proxiedGeneratedImage = ts_utils_1.Validators.object({
+    mimeType: ts_utils_1.Validators.string,
+    base64: ts_utils_1.Validators.string,
+    revisedPrompt: ts_utils_1.Validators.string.optional()
+});
+const proxiedImageGenerationResponse = ts_utils_1.Validators.object({
+    images: ts_utils_1.Validators.arrayOf(proxiedGeneratedImage).withConstraint((arr) => arr.length > 0)
+});
+const proxiedListModelsEntry = ts_utils_1.Validators.object({
+    id: ts_utils_1.Validators.string,
+    capabilities: ts_utils_1.Validators.arrayOf(ts_utils_1.Validators.enumeratedValue(['chat', 'tools', 'vision', 'image-generation'])),
+    displayName: ts_utils_1.Validators.string.optional()
+});
+const proxiedListModelsResponse = ts_utils_1.Validators.object({
+    models: ts_utils_1.Validators.arrayOf(proxiedListModelsEntry)
+});
+// ============================================================================
+// Image generation — adapters
+// ============================================================================
+/**
+ * Calls the OpenAI Images API. Used for both `openai-images` and `xai-images`
+ * formats — the request shape is the same; the only difference is whether the
+ * `size` field is honored (OpenAI: yes, xAI: ignored at the provider).
+ *
+ * @internal
+ */
+async function callOpenAiImageGeneration(config, request, defaultMimeType, logger, signal) {
+    var _a, _b;
+    const url = `${config.baseUrl}/images/generations`;
+    const opts = (_a = request.options) !== null && _a !== void 0 ? _a : {};
+    const body = {
+        model: config.model,
+        prompt: request.prompt,
+        n: (_b = opts.count) !== null && _b !== void 0 ? _b : 1,
+        response_format: 'b64_json'
+    };
+    if (opts.size !== undefined) {
+        body.size = opts.size;
+    }
+    if (opts.quality !== undefined) {
+        body.quality = opts.quality;
+    }
+    if (opts.seed !== undefined) {
+        body.seed = opts.seed;
+    }
+    const headers = {
+        Authorization: `Bearer ${config.apiKey}`
+    };
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`Image generation: model=${config.model}, n=${body.n}`);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
+    if (jsonResult.isFailure()) {
+        return (0, ts_utils_1.fail)(jsonResult.message);
+    }
+    return openAiImageResponse
+        .validate(jsonResult.value)
+        .withErrorFormat((msg) => `OpenAI images API response: ${msg}`)
+        .onSuccess((response) => {
+        const images = response.data.map((item) => (Object.assign({ mimeType: defaultMimeType, base64: item.b64_json }, (item.revised_prompt !== undefined ? { revisedPrompt: item.revised_prompt } : {}))));
+        return (0, ts_utils_1.succeed)({ images });
+    });
+}
+/**
+ * Calls the Gemini Imagen `:predict` endpoint.
+ * @internal
+ */
+async function callImagenGeneration(config, request, logger, signal) {
+    var _a, _b, _c, _d;
+    const url = `${config.baseUrl}/models/${config.model}:predict`;
+    const opts = (_a = request.options) !== null && _a !== void 0 ? _a : {};
+    const parameters = {
+        sampleCount: (_b = opts.count) !== null && _b !== void 0 ? _b : 1
+    };
+    if (((_c = opts.imagen) === null || _c === void 0 ? void 0 : _c.aspectRatio) !== undefined) {
+        parameters.aspectRatio = opts.imagen.aspectRatio;
+    }
+    if (((_d = opts.imagen) === null || _d === void 0 ? void 0 : _d.negativePrompt) !== undefined) {
+        parameters.negativePrompt = opts.imagen.negativePrompt;
+    }
+    if (opts.seed !== undefined) {
+        parameters.seed = opts.seed;
+    }
+    const body = {
+        instances: [{ prompt: request.prompt }],
+        parameters
+    };
+    const headers = {
+        'x-goog-api-key': config.apiKey
+    };
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`Imagen generation: model=${config.model}, n=${parameters.sampleCount}`);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
+    if (jsonResult.isFailure()) {
+        return (0, ts_utils_1.fail)(jsonResult.message);
+    }
+    return imagenResponse
+        .validate(jsonResult.value)
+        .withErrorFormat((msg) => `Imagen API response: ${msg}`)
+        .onSuccess((response) => {
+        const images = response.predictions.map((p) => {
+            var _a;
+            return ({
+                mimeType: (_a = p.mimeType) !== null && _a !== void 0 ? _a : 'image/png',
+                base64: p.bytesBase64Encoded
+            });
+        });
+        return (0, ts_utils_1.succeed)({ images });
+    });
+}
+// ============================================================================
+// Image generation — dispatcher
+// ============================================================================
+/**
+ * Calls the appropriate image-generation API for a given provider.
+ *
+ * Routes based on `descriptor.imageApiFormat`:
+ * - `'openai-images'` for OpenAI (DALL-E, gpt-image-1)
+ * - `'xai-images'` for xAI Grok image models
+ * - `'gemini-imagen'` for Google Imagen
+ *
+ * Image-model selection reuses the existing `'image'` {@link ModelSpecKey}.
+ *
+ * @param params - Request parameters including descriptor, API key, and prompt
+ * @returns The generated images, or a failure
+ * @public
+ */
+async function callProviderImageGeneration(params) {
+    const { descriptor, apiKey, params: request, modelOverride, logger, signal } = params;
+    if (descriptor.imageApiFormat === undefined) {
+        return (0, ts_utils_1.fail)(`provider "${descriptor.id}" does not support image generation`);
+    }
+    if (!descriptor.baseUrl) {
+        return (0, ts_utils_1.fail)(`provider "${descriptor.id}" has no API endpoint configured`);
+    }
+    const config = {
+        baseUrl: descriptor.baseUrl,
+        apiKey,
+        model: (0, model_1.resolveModel)(modelOverride !== null && modelOverride !== void 0 ? modelOverride : descriptor.defaultModel, 'image')
+    };
+    /* c8 ignore next 6 - optional logger diagnostic output */
+    if (logger) {
+        logger.info(`AI image generation: provider=${descriptor.id}, format=${descriptor.imageApiFormat}, ` +
+            `model=${config.model}`);
+    }
+    switch (descriptor.imageApiFormat) {
+        case 'openai-images':
+            return callOpenAiImageGeneration(config, request, 'image/png', logger, signal);
+        case 'xai-images':
+            return callOpenAiImageGeneration(config, request, 'image/jpeg', logger, signal);
+        case 'gemini-imagen':
+            return callImagenGeneration(config, request, logger, signal);
+        /* c8 ignore next 4 - defensive coding: exhaustive switch guaranteed by TypeScript */
+        default: {
+            const _exhaustive = descriptor.imageApiFormat;
+            return (0, ts_utils_1.fail)(`unsupported image API format: ${String(_exhaustive)}`);
+        }
+    }
+}
+const openAiListEntry = ts_utils_1.Validators.object({
+    id: ts_utils_1.Validators.string
+});
+const openAiListResponse = ts_utils_1.Validators.object({
+    data: ts_utils_1.Validators.arrayOf(openAiListEntry)
+});
+const anthropicListEntry = ts_utils_1.Validators.object({
+    id: ts_utils_1.Validators.string,
+    display_name: ts_utils_1.Validators.string.optional()
+});
+const anthropicListResponse = ts_utils_1.Validators.object({
+    data: ts_utils_1.Validators.arrayOf(anthropicListEntry)
+});
+const geminiListEntry = ts_utils_1.Validators.object({
+    name: ts_utils_1.Validators.string,
+    displayName: ts_utils_1.Validators.string.optional(),
+    supportedGenerationMethods: ts_utils_1.Validators.arrayOf(ts_utils_1.Validators.string).optional()
+});
+const geminiListResponse = ts_utils_1.Validators.object({
+    models: ts_utils_1.Validators.arrayOf(geminiListEntry)
+});
+// ============================================================================
+// List models — capability resolution
+// ============================================================================
+/**
+ * Translates Gemini's `supportedGenerationMethods` strings into our abstract
+ * capability vocabulary. Methods without a mapping are ignored.
+ * @internal
+ */
+function geminiMethodsToCapabilities(methods) {
+    const out = [];
+    for (const m of methods) {
+        if (m === 'generateContent') {
+            out.push('chat');
+        }
+        else if (m === 'predict') {
+            out.push('image-generation');
+        }
+    }
+    return out;
+}
+/**
+ * Strips the `models/` prefix Gemini includes on listed model names.
+ * @internal
+ */
+function geminiBareId(name) {
+    return name.startsWith('models/') ? name.substring('models/'.length) : name;
+}
+/**
+ * Applies a capability config to a model id. Walks per-provider rules then
+ * global rules; unions all matching rules' capabilities. Returns the union
+ * and the first matching `displayName` (if any).
+ * @internal
+ */
+function applyCapabilityConfig(config, providerId, modelId) {
+    var _a, _b, _c;
+    const caps = new Set();
+    let displayName;
+    const rulesets = [
+        (_b = (_a = config.perProvider) === null || _a === void 0 ? void 0 : _a[providerId]) !== null && _b !== void 0 ? _b : [],
+        (_c = config.global) !== null && _c !== void 0 ? _c : []
+    ];
+    for (const rules of rulesets) {
+        for (const rule of rules) {
+            rule.idPattern.lastIndex = 0;
+            if (rule.idPattern.test(modelId)) {
+                for (const cap of rule.capabilities) {
+                    caps.add(cap);
+                }
+                if (displayName === undefined && rule.displayName !== undefined) {
+                    displayName = typeof rule.displayName === 'function' ? rule.displayName(modelId) : rule.displayName;
+                }
             }
-            return callOpenAiCompletion(config, prompt, additionalMessages, temperature, logger);
+        }
+    }
+    return { capabilities: Array.from(caps), displayName };
+}
+/**
+ * Combines provider-native capability info (when supplied) and config-derived
+ * capability info into a final {@link IAiModelInfo}.
+ * @internal
+ */
+function buildModelInfo(providerId, id, nativeCapabilities, nativeDisplayName, config) {
+    const fromConfig = applyCapabilityConfig(config, providerId, id);
+    const all = new Set([...nativeCapabilities, ...fromConfig.capabilities]);
+    return Object.assign({ id, capabilities: all }, (nativeDisplayName !== undefined
+        ? { displayName: nativeDisplayName }
+        : fromConfig.displayName !== undefined
+            ? { displayName: fromConfig.displayName }
+            : {}));
+}
+// ============================================================================
+// List models — adapters
+// ============================================================================
+/**
+ * Calls the OpenAI-style `GET /models` endpoint. Used by openai, xai-grok,
+ * groq, and mistral. Provider supplies no capability info — capabilities are
+ * derived entirely from the config.
+ * @internal
+ */
+async function callOpenAiListModels(config, providerId, capabilityConfig, logger, signal) {
+    const url = `${config.baseUrl}/models`;
+    const headers = {
+        Authorization: `Bearer ${config.apiKey}`
+    };
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`List models: provider=${providerId}, format=openai`);
+    const jsonResult = await fetchGetJson(url, headers, logger, signal);
+    if (jsonResult.isFailure()) {
+        return (0, ts_utils_1.fail)(jsonResult.message);
+    }
+    return openAiListResponse
+        .validate(jsonResult.value)
+        .withErrorFormat((msg) => `OpenAI models API response: ${msg}`)
+        .onSuccess((response) => {
+        const models = response.data.map((entry) => buildModelInfo(providerId, entry.id, [], undefined, capabilityConfig));
+        return (0, ts_utils_1.succeed)(models);
+    });
+}
+/**
+ * Calls the Anthropic `GET /models` endpoint. Provider supplies a
+ * `display_name` but no native capability info.
+ * @internal
+ */
+async function callAnthropicListModels(config, providerId, capabilityConfig, logger, signal) {
+    const url = `${config.baseUrl}/models`;
+    const headers = {
+        'x-api-key': config.apiKey,
+        'anthropic-version': '2023-06-01',
+        'anthropic-dangerous-direct-browser-access': 'true'
+    };
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`List models: provider=${providerId}, format=anthropic`);
+    const jsonResult = await fetchGetJson(url, headers, logger, signal);
+    if (jsonResult.isFailure()) {
+        return (0, ts_utils_1.fail)(jsonResult.message);
+    }
+    return anthropicListResponse
+        .validate(jsonResult.value)
+        .withErrorFormat((msg) => `Anthropic models API response: ${msg}`)
+        .onSuccess((response) => {
+        const models = response.data.map((entry) => buildModelInfo(providerId, entry.id, [], entry.display_name, capabilityConfig));
+        return (0, ts_utils_1.succeed)(models);
+    });
+}
+/**
+ * Calls the Gemini `GET /models` endpoint. Provider supplies both a
+ * `displayName` and `supportedGenerationMethods` — translated to native
+ * capabilities and unioned with config-derived capabilities.
+ * @internal
+ */
+async function callGeminiListModels(config, providerId, capabilityConfig, logger, signal) {
+    const url = `${config.baseUrl}/models`;
+    const headers = {
+        'x-goog-api-key': config.apiKey
+    };
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`List models: provider=${providerId}, format=gemini`);
+    const jsonResult = await fetchGetJson(url, headers, logger, signal);
+    if (jsonResult.isFailure()) {
+        return (0, ts_utils_1.fail)(jsonResult.message);
+    }
+    return geminiListResponse
+        .validate(jsonResult.value)
+        .withErrorFormat((msg) => `Gemini models API response: ${msg}`)
+        .onSuccess((response) => {
+        const models = response.models.map((entry) => {
+            const id = geminiBareId(entry.name);
+            const native = entry.supportedGenerationMethods
+                ? geminiMethodsToCapabilities(entry.supportedGenerationMethods)
+                : [];
+            return buildModelInfo(providerId, id, native, entry.displayName, capabilityConfig);
+        });
+        return (0, ts_utils_1.succeed)(models);
+    });
+}
+// ============================================================================
+// List models — dispatcher
+// ============================================================================
+/**
+ * Lists models available from a provider, with capabilities resolved from
+ * native provider info (where supplied) and a configurable rule set.
+ *
+ * Routes based on `descriptor.apiFormat` — listing reuses the existing
+ * format dispatch and does not require a separate descriptor field.
+ *
+ * @param params - Request parameters including descriptor, API key, and optional capability filter
+ * @returns The resolved model list, or a failure
+ * @public
+ */
+async function callProviderListModels(params) {
+    const { descriptor, apiKey, capability, capabilityConfig, logger, signal } = params;
+    if (!descriptor.baseUrl) {
+        return (0, ts_utils_1.fail)(`provider "${descriptor.id}" has no API endpoint configured`);
+    }
+    const config = {
+        baseUrl: descriptor.baseUrl,
+        apiKey,
+        model: '' // unused by listing
+    };
+    const effectiveConfig = capabilityConfig !== null && capabilityConfig !== void 0 ? capabilityConfig : registry_1.DEFAULT_MODEL_CAPABILITY_CONFIG;
+    let listResult;
+    switch (descriptor.apiFormat) {
+        case 'openai':
+            listResult = await callOpenAiListModels(config, descriptor.id, effectiveConfig, logger, signal);
+            break;
         case 'anthropic':
-            return callAnthropicCompletion(config, prompt, additionalMessages, temperature, logger, tools);
+            listResult = await callAnthropicListModels(config, descriptor.id, effectiveConfig, logger, signal);
+            break;
         case 'gemini':
-            return callGeminiCompletion(config, prompt, additionalMessages, temperature, logger, tools);
+            listResult = await callGeminiListModels(config, descriptor.id, effectiveConfig, logger, signal);
+            break;
         /* c8 ignore next 4 - defensive coding: exhaustive switch guaranteed by TypeScript */
         default: {
             const _exhaustive = descriptor.apiFormat;
             return (0, ts_utils_1.fail)(`unsupported API format: ${String(_exhaustive)}`);
         }
     }
+    if (listResult.isFailure()) {
+        return listResult;
+    }
+    if (capability === undefined) {
+        return listResult;
+    }
+    return (0, ts_utils_1.succeed)(listResult.value.filter((m) => m.capabilities.has(capability)));
+}
+// ============================================================================
+// Proxied list models
+// ============================================================================
+/**
+ * Calls the model-listing endpoint on a proxy server.
+ *
+ * @remarks
+ * Proxy contract:
+ * - Endpoint: `POST ${proxyUrl}/api/ai/list-models`
+ * - Request body: `{providerId, apiKey, capability?}`. Capability config is
+ *   not forwarded — the proxy applies its own (typically the same default
+ *   the library ships).
+ * - Success response body: an `IAiModelInfo[]` (under key `models`) where
+ *   `capabilities` is serialized as a string array (not Set, which doesn't
+ *   round-trip through JSON).
+ * - Error response body: `{error: string}`, surfaced as `proxy: ${error}`.
+ *
+ * @public
+ */
+async function callProxiedListModels(proxyUrl, params) {
+    const { descriptor, apiKey, capability, logger, signal } = params;
+    const body = {
+        providerId: descriptor.id,
+        apiKey
+    };
+    if (capability !== undefined) {
+        body.capability = capability;
+    }
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`AI list-models proxy request: provider=${descriptor.id}, proxy=${proxyUrl}`);
+    const url = `${proxyUrl}/api/ai/list-models`;
+    const jsonResult = await fetchJson(url, {}, body, logger, signal);
+    if (jsonResult.isFailure()) {
+        return (0, ts_utils_1.fail)(jsonResult.message);
+    }
+    const response = jsonResult.value;
+    if (typeof response.error === 'string') {
+        return (0, ts_utils_1.fail)(`proxy: ${response.error}`);
+    }
+    return proxiedListModelsResponse
+        .validate(response)
+        .withErrorFormat((msg) => `proxy returned invalid response: ${msg}`)
+        .onSuccess((parsed) => {
+        const models = parsed.models.map((m) => (Object.assign({ id: m.id, capabilities: new Set(m.capabilities) }, (m.displayName !== undefined ? { displayName: m.displayName } : {}))));
+        return (0, ts_utils_1.succeed)(models);
+    });
 }
 // ============================================================================
 // Proxied completion (routes through a backend server)
@@ -449,11 +914,15 @@
  * @public
  */
 async function callProxiedCompletion(proxyUrl, params) {
-    const { descriptor, apiKey, prompt, additionalMessages, temperature, modelOverride, logger, tools } = params;
+    const { descriptor, apiKey, prompt, additionalMessages, temperature, modelOverride, logger, tools, signal } = params;
+    const promptBody = { system: prompt.system, user: prompt.user };
+    if (prompt.attachments.length > 0) {
+        promptBody.attachments = prompt.attachments;
+    }
     const body = {
         providerId: descriptor.id,
         apiKey,
-        prompt: { system: prompt.system, user: prompt.user },
+        prompt: promptBody,
         temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7
     };
     if (additionalMessages && additionalMessages.length > 0) {
@@ -468,7 +937,7 @@ async function callProxiedCompletion(proxyUrl, params) {
     /* c8 ignore next 1 - optional logger */
     logger === null || logger === void 0 ? void 0 : logger.info(`AI proxy request: provider=${descriptor.id}, proxy=${proxyUrl}`);
     const url = `${proxyUrl}/api/ai/completion`;
-    const jsonResult = await fetchJson(url, {}, body, logger);
+    const jsonResult = await fetchJson(url, {}, body, logger, signal);
     if (jsonResult.isFailure()) {
         return (0, ts_utils_1.fail)(jsonResult.message);
     }
@@ -485,4 +954,51 @@
         truncated: response.truncated === true
     });
 }
+// ============================================================================
+// Proxied image generation
+// ============================================================================
+/**
+ * Calls the image-generation endpoint on a proxy server instead of calling
+ * the provider API directly from the browser.
+ *
+ * @remarks
+ * The proxy contract:
+ * - Endpoint: `POST ${proxyUrl}/api/ai/image-generation`
+ * - Request body: `{providerId, apiKey, params, modelOverride?}`
+ * - Success response body: an {@link IAiImageGenerationResponse}
+ * - Error response body: `{error: string}` (surfaced as `proxy: ${error}`)
+ *
+ * The proxy server is responsible for descriptor lookup, model resolution,
+ * provider dispatch, and response normalization.
+ *
+ * @param proxyUrl - Base URL of the proxy server (e.g. `http://localhost:3001`)
+ * @param params - Same parameters as {@link callProviderImageGeneration}
+ * @returns The generated images, or a failure
+ * @public
+ */
+async function callProxiedImageGeneration(proxyUrl, params) {
+    const { descriptor, apiKey, params: request, modelOverride, logger, signal } = params;
+    const body = {
+        providerId: descriptor.id,
+        apiKey,
+        params: request
+    };
+    if (modelOverride !== undefined) {
+        body.modelOverride = modelOverride;
+    }
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`AI image proxy request: provider=${descriptor.id}, proxy=${proxyUrl}`);
+    const url = `${proxyUrl}/api/ai/image-generation`;
+    const jsonResult = await fetchJson(url, {}, body, logger, signal);
+    if (jsonResult.isFailure()) {
+        return (0, ts_utils_1.fail)(jsonResult.message);
+    }
+    const response = jsonResult.value;
+    if (typeof response.error === 'string') {
+        return (0, ts_utils_1.fail)(`proxy: ${response.error}`);
+    }
+    return proxiedImageGenerationResponse
+        .validate(response)
+        .withErrorFormat((msg) => `proxy returned invalid response: ${msg}`);
+}
 //# sourceMappingURL=apiClient.js.map
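
For orientation, here is a minimal sketch of how the model-listing surface added in 5.1.0-17 might be called, including cancellation through the new signal parameter that is now threaded into every underlying fetch(). It is inferred only from the compiled output above; the import path, the descriptor shape, and the exact TypeScript types are assumptions (the ai-assist packlet's index.js also changed in this release), so consult ts-extras.d.ts in the package for the real signatures.

// Sketch only — names and types are inferred from the compiled diff above,
// not from package documentation.
import { callProviderListModels } from '@fgv/ts-extras'; // assumed re-export path

// `descriptor` would come from the ai-assist registry; its fields used by the
// compiled code include id, baseUrl, and apiFormat. Typed `any` here because
// the real descriptor type lives in ts-extras.d.ts.
async function listImageModels(descriptor: any, apiKey: string): Promise<void> {
    const controller = new AbortController();
    // Abort the in-flight provider request after 10s via the new signal option.
    const timer = setTimeout(() => controller.abort(), 10000);
    const result = await callProviderListModels({
        descriptor,
        apiKey,
        capability: 'image-generation', // optional post-filter on resolved capabilities
        signal: controller.signal
    });
    clearTimeout(timer);
    if (result.isFailure()) {
        console.error(result.message); // Result-style error from @fgv/ts-utils
        return;
    }
    for (const model of result.value) {
        // capabilities round-trips as a Set; displayName is present only when resolved
        console.log(model.id, [...model.capabilities], model.displayName ?? '');
    }
}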