@fgv/ts-extras 5.1.0-16 → 5.1.0-17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.browser.js +2 -1
- package/dist/packlets/ai-assist/apiClient.js +570 -58
- package/dist/packlets/ai-assist/chatRequestBuilders.js +180 -0
- package/dist/packlets/ai-assist/index.js +4 -3
- package/dist/packlets/ai-assist/model.js +20 -3
- package/dist/packlets/ai-assist/registry.js +66 -10
- package/dist/packlets/ai-assist/sseParser.js +122 -0
- package/dist/packlets/ai-assist/streamingAdapters/anthropic.js +192 -0
- package/dist/packlets/ai-assist/streamingAdapters/common.js +77 -0
- package/dist/packlets/ai-assist/streamingAdapters/gemini.js +160 -0
- package/dist/packlets/ai-assist/streamingAdapters/openaiChat.js +149 -0
- package/dist/packlets/ai-assist/streamingAdapters/openaiResponses.js +163 -0
- package/dist/packlets/ai-assist/streamingAdapters/proxy.js +157 -0
- package/dist/packlets/ai-assist/streamingClient.js +88 -0
- package/dist/packlets/conversion/converters.js +1 -1
- package/dist/packlets/zip-file-tree/zipFileTreeAccessors.js +2 -2
- package/dist/ts-extras.d.ts +512 -5
- package/lib/index.browser.d.ts +2 -1
- package/lib/index.browser.js +3 -1
- package/lib/packlets/ai-assist/apiClient.d.ts +103 -1
- package/lib/packlets/ai-assist/apiClient.js +574 -58
- package/lib/packlets/ai-assist/chatRequestBuilders.d.ts +89 -0
- package/lib/packlets/ai-assist/chatRequestBuilders.js +189 -0
- package/lib/packlets/ai-assist/index.d.ts +4 -3
- package/lib/packlets/ai-assist/index.js +10 -1
- package/lib/packlets/ai-assist/model.d.ts +271 -2
- package/lib/packlets/ai-assist/model.js +21 -3
- package/lib/packlets/ai-assist/registry.d.ts +10 -1
- package/lib/packlets/ai-assist/registry.js +67 -11
- package/lib/packlets/ai-assist/sseParser.d.ts +45 -0
- package/lib/packlets/ai-assist/sseParser.js +127 -0
- package/lib/packlets/ai-assist/streamingAdapters/anthropic.d.ts +18 -0
- package/lib/packlets/ai-assist/streamingAdapters/anthropic.js +195 -0
- package/lib/packlets/ai-assist/streamingAdapters/common.d.ts +71 -0
- package/lib/packlets/ai-assist/streamingAdapters/common.js +81 -0
- package/lib/packlets/ai-assist/streamingAdapters/gemini.d.ts +19 -0
- package/lib/packlets/ai-assist/streamingAdapters/gemini.js +163 -0
- package/lib/packlets/ai-assist/streamingAdapters/openaiChat.d.ts +18 -0
- package/lib/packlets/ai-assist/streamingAdapters/openaiChat.js +152 -0
- package/lib/packlets/ai-assist/streamingAdapters/openaiResponses.d.ts +19 -0
- package/lib/packlets/ai-assist/streamingAdapters/openaiResponses.js +166 -0
- package/lib/packlets/ai-assist/streamingAdapters/proxy.d.ts +34 -0
- package/lib/packlets/ai-assist/streamingAdapters/proxy.js +160 -0
- package/lib/packlets/ai-assist/streamingClient.d.ts +33 -0
- package/lib/packlets/ai-assist/streamingClient.js +93 -0
- package/lib/packlets/conversion/converters.d.ts +1 -1
- package/lib/packlets/conversion/converters.js +1 -1
- package/lib/packlets/zip-file-tree/zipFileTreeAccessors.d.ts +2 -2
- package/lib/packlets/zip-file-tree/zipFileTreeAccessors.js +2 -2
- package/package.json +7 -7
@@ -32,31 +32,17 @@
 import { isJsonObject } from '@fgv/ts-json-base';
 import { fail, succeed, Validators } from '@fgv/ts-utils';
 import { resolveModel } from './model';
+import { buildAnthropicMessages, buildGeminiContents, buildMessages, buildOpenAiChatUserContent, buildOpenAiResponsesUserContent } from './chatRequestBuilders';
+import { DEFAULT_MODEL_CAPABILITY_CONFIG } from './registry';
 import { toAnthropicTools, toGeminiTools, toResponsesApiTools } from './toolFormats';
 // ============================================================================
 // Shared helpers
 // ============================================================================
-/**
- * Builds the messages array from prompt + optional correction messages.
- * @internal
- */
-function buildMessages(prompt, additionalMessages) {
-    const messages = [
-        { role: 'system', content: prompt.system },
-        { role: 'user', content: prompt.user }
-    ];
-    if (additionalMessages) {
-        for (const msg of additionalMessages) {
-            messages.push({ role: msg.role, content: msg.content });
-        }
-    }
-    return messages;
-}
 /**
  * Makes an HTTP request and returns the parsed JSON, or a failure.
  * @internal
  */
-async function fetchJson(url, headers, body, logger) {
+async function fetchJson(url, headers, body, logger, signal) {
     /* c8 ignore next 1 - optional logger */
     logger === null || logger === void 0 ? void 0 : logger.detail(`AI API request: POST ${url}`);
     let response;
@@ -64,7 +50,8 @@ async function fetchJson(url, headers, body, logger) {
         response = await fetch(url, {
             method: 'POST',
             headers: Object.assign({ 'Content-Type': 'application/json' }, headers),
-            body: JSON.stringify(body)
+            body: JSON.stringify(body),
+            signal
         });
     }
     catch (err) {
@@ -97,6 +84,47 @@ async function fetchJson(url, headers, body, logger) {
     }
     return succeed(json);
 }
+/**
+ * Makes an HTTP GET request and returns the parsed JSON, or a failure.
+ * @internal
+ */
+async function fetchGetJson(url, headers, logger, signal) {
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.detail(`AI API request: GET ${url}`);
+    let response;
+    try {
+        response = await fetch(url, { method: 'GET', headers, signal });
+    }
+    catch (err) {
+        const detail = err instanceof Error ? err.message : String(err);
+        /* c8 ignore next 1 - optional logger */
+        logger === null || logger === void 0 ? void 0 : logger.error(`AI API request failed: ${detail}`);
+        return fail(`AI API request failed: ${detail}`);
+    }
+    if (!response.ok) {
+        const errorText = await response.text().catch(() => 'unknown error');
+        /* c8 ignore next 1 - optional logger */
+        logger === null || logger === void 0 ? void 0 : logger.error(`AI API returned ${response.status}: ${errorText}`);
+        return fail(`AI API returned ${response.status}: ${errorText}`);
+    }
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.detail(`AI API response: ${response.status}`);
+    let json;
+    try {
+        json = await response.json();
+    }
+    catch (_a) {
+        /* c8 ignore next 1 - optional logger */
+        logger === null || logger === void 0 ? void 0 : logger.error('AI API returned invalid JSON response');
+        return fail('AI API returned invalid JSON response');
+    }
+    if (!isJsonObject(json)) {
+        /* c8 ignore next 1 - optional logger */
+        logger === null || logger === void 0 ? void 0 : logger.error('AI API returned non-object JSON response');
+        return fail('AI API returned non-object JSON response');
+    }
+    return succeed(json);
+}
 const openAiMessage = Validators.object({
     content: Validators.string
 });
@@ -149,16 +177,18 @@ const geminiResponse = Validators.object({
  * Works for xAI Grok, OpenAI, Groq, and Mistral.
  * @internal
  */
-async function callOpenAiCompletion(config, prompt, additionalMessages, temperature = 0.7, logger) {
+async function callOpenAiCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, signal) {
     const url = `${config.baseUrl}/chat/completions`;
-    const messages = buildMessages(prompt, additionalMessages);
+    const messages = buildMessages(prompt.system, buildOpenAiChatUserContent(prompt), {
+        tail: additionalMessages
+    });
     const body = { model: config.model, messages, temperature };
     const headers = {
         Authorization: `Bearer ${config.apiKey}`
     };
     /* c8 ignore next 1 - optional logger */
     logger === null || logger === void 0 ? void 0 : logger.info(`OpenAI completion: model=${config.model}`);
-    const jsonResult = await fetchJson(url, headers, body, logger);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
     if (jsonResult.isFailure()) {
         return fail(jsonResult.message);
     }
@@ -197,9 +227,11 @@ function extractResponsesApiText(output) {
  * Used when tools are configured for an openai-format provider.
  * @internal
  */
-async function callOpenAiResponsesCompletion(config, prompt, tools, additionalMessages, temperature = 0.7, logger) {
+async function callOpenAiResponsesCompletion(config, prompt, tools, additionalMessages, temperature = 0.7, logger, signal) {
     const url = `${config.baseUrl}/responses`;
-    const input = buildMessages(prompt, additionalMessages);
+    const input = buildMessages(prompt.system, buildOpenAiResponsesUserContent(prompt), {
+        tail: additionalMessages
+    });
     const body = {
         model: config.model,
         input,
@@ -211,7 +243,7 @@ async function callOpenAiResponsesCompletion(config, prompt, tools, additionalMessages, temperature = 0.7, logger) {
     };
     /* c8 ignore next 1 - optional logger */
     logger === null || logger === void 0 ? void 0 : logger.info(`OpenAI Responses API: model=${config.model}, tools=${tools.map((t) => t.type).join(',')}`);
-    const jsonResult = await fetchJson(url, headers, body, logger);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
     if (jsonResult.isFailure()) {
         return fail(jsonResult.message);
     }
@@ -256,18 +288,10 @@ function extractAnthropicText(content) {
  * mixed content block responses.
  * @internal
  */
-async function callAnthropicCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools) {
+async function callAnthropicCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools, signal) {
     const url = `${config.baseUrl}/messages`;
     // Anthropic uses system as a top-level field, not in messages
-    const messages = [{ role: 'user', content: prompt.user }];
-    if (additionalMessages) {
-        for (const msg of additionalMessages) {
-            // Anthropic doesn't have a system role in messages
-            if (msg.role !== 'system') {
-                messages.push({ role: msg.role, content: msg.content });
-            }
-        }
-    }
+    const messages = buildAnthropicMessages(prompt, { tail: additionalMessages });
     const body = {
         model: config.model,
         system: prompt.system,
@@ -289,7 +313,7 @@ async function callAnthropicCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools) {
         'anthropic-version': '2023-06-01',
         'anthropic-dangerous-direct-browser-access': 'true'
     };
-    const jsonResult = await fetchJson(url, headers, body, logger);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
     if (jsonResult.isFailure()) {
         return fail(jsonResult.message);
     }
@@ -324,22 +348,10 @@ async function callAnthropicCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools) {
  * When tools are configured, includes Google Search grounding.
  * @internal
 */
-async function callGeminiCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools) {
+async function callGeminiCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools, signal) {
     const url = `${config.baseUrl}/models/${config.model}:generateContent`;
     // Gemini uses 'contents' with 'parts', and 'model' role instead of 'assistant'
-    const contents = [
-        { role: 'user', parts: [{ text: prompt.user }] }
-    ];
-    if (additionalMessages) {
-        for (const msg of additionalMessages) {
-            if (msg.role !== 'system') {
-                contents.push({
-                    role: msg.role === 'assistant' ? 'model' : msg.role,
-                    parts: [{ text: msg.content }]
-                });
-            }
-        }
-    }
+    const contents = buildGeminiContents(prompt, { tail: additionalMessages });
     const body = {
         systemInstruction: { parts: [{ text: prompt.system }] },
         contents,
@@ -357,7 +369,7 @@ async function callGeminiCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools) {
     const headers = {
         'x-goog-api-key': config.apiKey
     };
-    const jsonResult = await fetchJson(url, headers, body, logger);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
     if (jsonResult.isFailure()) {
         return fail(jsonResult.message);
     }
@@ -393,10 +405,13 @@ async function callGeminiCompletion(config, prompt, additionalMessages, temperature = 0.7, logger, tools) {
  * @public
  */
 export async function callProviderCompletion(params) {
-    const { descriptor, apiKey, prompt, additionalMessages, temperature = 0.7, modelOverride, logger, tools } = params;
+    const { descriptor, apiKey, prompt, additionalMessages, temperature = 0.7, modelOverride, logger, tools, signal } = params;
     if (!descriptor.baseUrl) {
         return fail(`provider "${descriptor.id}" has no API endpoint configured`);
     }
+    if (prompt.attachments.length > 0 && !descriptor.acceptsImageInput) {
+        return fail(`provider "${descriptor.id}" does not accept image input`);
+    }
     const hasTools = tools !== undefined && tools.length > 0;
     const modelContext = hasTools ? 'tools' : undefined;
     const config = {
@@ -414,19 +429,465 @@ export async function callProviderCompletion(params) {
     switch (descriptor.apiFormat) {
         case 'openai':
             if (hasTools) {
-                return callOpenAiResponsesCompletion(config, prompt, tools, additionalMessages, temperature, logger);
+                return callOpenAiResponsesCompletion(config, prompt, tools, additionalMessages, temperature, logger, signal);
+            }
+            return callOpenAiCompletion(config, prompt, additionalMessages, temperature, logger, signal);
+        case 'anthropic':
+            return callAnthropicCompletion(config, prompt, additionalMessages, temperature, logger, tools, signal);
+        case 'gemini':
+            return callGeminiCompletion(config, prompt, additionalMessages, temperature, logger, tools, signal);
+        /* c8 ignore next 4 - defensive coding: exhaustive switch guaranteed by TypeScript */
+        default: {
+            const _exhaustive = descriptor.apiFormat;
+            return fail(`unsupported API format: ${String(_exhaustive)}`);
+        }
+    }
+}
+const openAiImageItem = Validators.object({
+    b64_json: Validators.string,
+    revised_prompt: Validators.string.optional()
+});
+const openAiImageResponse = Validators.object({
+    data: Validators.arrayOf(openAiImageItem).withConstraint((arr) => arr.length > 0)
+});
+const imagenPrediction = Validators.object({
+    bytesBase64Encoded: Validators.string,
+    mimeType: Validators.string.optional()
+});
+const imagenResponse = Validators.object({
+    predictions: Validators.arrayOf(imagenPrediction).withConstraint((arr) => arr.length > 0)
+});
+// ---- Proxied image generation response ----
+const proxiedGeneratedImage = Validators.object({
+    mimeType: Validators.string,
+    base64: Validators.string,
+    revisedPrompt: Validators.string.optional()
+});
+const proxiedImageGenerationResponse = Validators.object({
+    images: Validators.arrayOf(proxiedGeneratedImage).withConstraint((arr) => arr.length > 0)
+});
+const proxiedListModelsEntry = Validators.object({
+    id: Validators.string,
+    capabilities: Validators.arrayOf(Validators.enumeratedValue(['chat', 'tools', 'vision', 'image-generation'])),
+    displayName: Validators.string.optional()
+});
+const proxiedListModelsResponse = Validators.object({
+    models: Validators.arrayOf(proxiedListModelsEntry)
+});
+// ============================================================================
+// Image generation — adapters
+// ============================================================================
+/**
+ * Calls the OpenAI Images API. Used for both `openai-images` and `xai-images`
+ * formats — the request shape is the same; the only difference is whether the
+ * `size` field is honored (OpenAI: yes, xAI: ignored at the provider).
+ *
+ * @internal
+ */
+async function callOpenAiImageGeneration(config, request, defaultMimeType, logger, signal) {
+    var _a, _b;
+    const url = `${config.baseUrl}/images/generations`;
+    const opts = (_a = request.options) !== null && _a !== void 0 ? _a : {};
+    const body = {
+        model: config.model,
+        prompt: request.prompt,
+        n: (_b = opts.count) !== null && _b !== void 0 ? _b : 1,
+        response_format: 'b64_json'
+    };
+    if (opts.size !== undefined) {
+        body.size = opts.size;
+    }
+    if (opts.quality !== undefined) {
+        body.quality = opts.quality;
+    }
+    if (opts.seed !== undefined) {
+        body.seed = opts.seed;
+    }
+    const headers = {
+        Authorization: `Bearer ${config.apiKey}`
+    };
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`Image generation: model=${config.model}, n=${body.n}`);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
+    if (jsonResult.isFailure()) {
+        return fail(jsonResult.message);
+    }
+    return openAiImageResponse
+        .validate(jsonResult.value)
+        .withErrorFormat((msg) => `OpenAI images API response: ${msg}`)
+        .onSuccess((response) => {
+        const images = response.data.map((item) => (Object.assign({ mimeType: defaultMimeType, base64: item.b64_json }, (item.revised_prompt !== undefined ? { revisedPrompt: item.revised_prompt } : {}))));
+        return succeed({ images });
+    });
+}
+/**
+ * Calls the Gemini Imagen `:predict` endpoint.
+ * @internal
+ */
+async function callImagenGeneration(config, request, logger, signal) {
+    var _a, _b, _c, _d;
+    const url = `${config.baseUrl}/models/${config.model}:predict`;
+    const opts = (_a = request.options) !== null && _a !== void 0 ? _a : {};
+    const parameters = {
+        sampleCount: (_b = opts.count) !== null && _b !== void 0 ? _b : 1
+    };
+    if (((_c = opts.imagen) === null || _c === void 0 ? void 0 : _c.aspectRatio) !== undefined) {
+        parameters.aspectRatio = opts.imagen.aspectRatio;
+    }
+    if (((_d = opts.imagen) === null || _d === void 0 ? void 0 : _d.negativePrompt) !== undefined) {
+        parameters.negativePrompt = opts.imagen.negativePrompt;
+    }
+    if (opts.seed !== undefined) {
+        parameters.seed = opts.seed;
+    }
+    const body = {
+        instances: [{ prompt: request.prompt }],
+        parameters
+    };
+    const headers = {
+        'x-goog-api-key': config.apiKey
+    };
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`Imagen generation: model=${config.model}, n=${parameters.sampleCount}`);
+    const jsonResult = await fetchJson(url, headers, body, logger, signal);
+    if (jsonResult.isFailure()) {
+        return fail(jsonResult.message);
+    }
+    return imagenResponse
+        .validate(jsonResult.value)
+        .withErrorFormat((msg) => `Imagen API response: ${msg}`)
+        .onSuccess((response) => {
+        const images = response.predictions.map((p) => {
+            var _a;
+            return ({
+                mimeType: (_a = p.mimeType) !== null && _a !== void 0 ? _a : 'image/png',
+                base64: p.bytesBase64Encoded
+            });
+        });
+        return succeed({ images });
+    });
+}
+// ============================================================================
+// Image generation — dispatcher
+// ============================================================================
+/**
+ * Calls the appropriate image-generation API for a given provider.
+ *
+ * Routes based on `descriptor.imageApiFormat`:
+ * - `'openai-images'` for OpenAI (DALL-E, gpt-image-1)
+ * - `'xai-images'` for xAI Grok image models
+ * - `'gemini-imagen'` for Google Imagen
+ *
+ * Image-model selection reuses the existing `'image'` {@link ModelSpecKey}.
+ *
+ * @param params - Request parameters including descriptor, API key, and prompt
+ * @returns The generated images, or a failure
+ * @public
+ */
+export async function callProviderImageGeneration(params) {
+    const { descriptor, apiKey, params: request, modelOverride, logger, signal } = params;
+    if (descriptor.imageApiFormat === undefined) {
+        return fail(`provider "${descriptor.id}" does not support image generation`);
+    }
+    if (!descriptor.baseUrl) {
+        return fail(`provider "${descriptor.id}" has no API endpoint configured`);
+    }
+    const config = {
+        baseUrl: descriptor.baseUrl,
+        apiKey,
+        model: resolveModel(modelOverride !== null && modelOverride !== void 0 ? modelOverride : descriptor.defaultModel, 'image')
+    };
+    /* c8 ignore next 6 - optional logger diagnostic output */
+    if (logger) {
+        logger.info(`AI image generation: provider=${descriptor.id}, format=${descriptor.imageApiFormat}, ` +
+            `model=${config.model}`);
+    }
+    switch (descriptor.imageApiFormat) {
+        case 'openai-images':
+            return callOpenAiImageGeneration(config, request, 'image/png', logger, signal);
+        case 'xai-images':
+            return callOpenAiImageGeneration(config, request, 'image/jpeg', logger, signal);
+        case 'gemini-imagen':
+            return callImagenGeneration(config, request, logger, signal);
+        /* c8 ignore next 4 - defensive coding: exhaustive switch guaranteed by TypeScript */
+        default: {
+            const _exhaustive = descriptor.imageApiFormat;
+            return fail(`unsupported image API format: ${String(_exhaustive)}`);
+        }
+    }
+}
+const openAiListEntry = Validators.object({
+    id: Validators.string
+});
+const openAiListResponse = Validators.object({
+    data: Validators.arrayOf(openAiListEntry)
+});
+const anthropicListEntry = Validators.object({
+    id: Validators.string,
+    display_name: Validators.string.optional()
+});
+const anthropicListResponse = Validators.object({
+    data: Validators.arrayOf(anthropicListEntry)
+});
+const geminiListEntry = Validators.object({
+    name: Validators.string,
+    displayName: Validators.string.optional(),
+    supportedGenerationMethods: Validators.arrayOf(Validators.string).optional()
+});
+const geminiListResponse = Validators.object({
+    models: Validators.arrayOf(geminiListEntry)
+});
+// ============================================================================
+// List models — capability resolution
+// ============================================================================
+/**
+ * Translates Gemini's `supportedGenerationMethods` strings into our abstract
+ * capability vocabulary. Methods without a mapping are ignored.
+ * @internal
+ */
+function geminiMethodsToCapabilities(methods) {
+    const out = [];
+    for (const m of methods) {
+        if (m === 'generateContent') {
+            out.push('chat');
+        }
+        else if (m === 'predict') {
+            out.push('image-generation');
+        }
+    }
+    return out;
+}
+/**
+ * Strips the `models/` prefix Gemini includes on listed model names.
+ * @internal
+ */
+function geminiBareId(name) {
+    return name.startsWith('models/') ? name.substring('models/'.length) : name;
+}
+/**
+ * Applies a capability config to a model id. Walks per-provider rules then
+ * global rules; unions all matching rules' capabilities. Returns the union
+ * and the first matching `displayName` (if any).
+ * @internal
+ */
+function applyCapabilityConfig(config, providerId, modelId) {
+    var _a, _b, _c;
+    const caps = new Set();
+    let displayName;
+    const rulesets = [
+        (_b = (_a = config.perProvider) === null || _a === void 0 ? void 0 : _a[providerId]) !== null && _b !== void 0 ? _b : [],
+        (_c = config.global) !== null && _c !== void 0 ? _c : []
+    ];
+    for (const rules of rulesets) {
+        for (const rule of rules) {
+            rule.idPattern.lastIndex = 0;
+            if (rule.idPattern.test(modelId)) {
+                for (const cap of rule.capabilities) {
+                    caps.add(cap);
+                }
+                if (displayName === undefined && rule.displayName !== undefined) {
+                    displayName = typeof rule.displayName === 'function' ? rule.displayName(modelId) : rule.displayName;
+                }
             }
-            return callOpenAiCompletion(config, prompt, additionalMessages, temperature, logger);
+        }
+    }
+    return { capabilities: Array.from(caps), displayName };
+}
+/**
+ * Combines provider-native capability info (when supplied) and config-derived
+ * capability info into a final {@link IAiModelInfo}.
+ * @internal
+ */
+function buildModelInfo(providerId, id, nativeCapabilities, nativeDisplayName, config) {
+    const fromConfig = applyCapabilityConfig(config, providerId, id);
+    const all = new Set([...nativeCapabilities, ...fromConfig.capabilities]);
+    return Object.assign({ id, capabilities: all }, (nativeDisplayName !== undefined
+        ? { displayName: nativeDisplayName }
+        : fromConfig.displayName !== undefined
+            ? { displayName: fromConfig.displayName }
+            : {}));
+}
+// ============================================================================
+// List models — adapters
+// ============================================================================
+/**
+ * Calls the OpenAI-style `GET /models` endpoint. Used by openai, xai-grok,
+ * groq, and mistral. Provider supplies no capability info — capabilities are
+ * derived entirely from the config.
+ * @internal
+ */
+async function callOpenAiListModels(config, providerId, capabilityConfig, logger, signal) {
+    const url = `${config.baseUrl}/models`;
+    const headers = {
+        Authorization: `Bearer ${config.apiKey}`
+    };
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`List models: provider=${providerId}, format=openai`);
+    const jsonResult = await fetchGetJson(url, headers, logger, signal);
+    if (jsonResult.isFailure()) {
+        return fail(jsonResult.message);
+    }
+    return openAiListResponse
+        .validate(jsonResult.value)
+        .withErrorFormat((msg) => `OpenAI models API response: ${msg}`)
+        .onSuccess((response) => {
+        const models = response.data.map((entry) => buildModelInfo(providerId, entry.id, [], undefined, capabilityConfig));
+        return succeed(models);
+    });
+}
+/**
+ * Calls the Anthropic `GET /models` endpoint. Provider supplies a
+ * `display_name` but no native capability info.
+ * @internal
+ */
+async function callAnthropicListModels(config, providerId, capabilityConfig, logger, signal) {
+    const url = `${config.baseUrl}/models`;
+    const headers = {
+        'x-api-key': config.apiKey,
+        'anthropic-version': '2023-06-01',
+        'anthropic-dangerous-direct-browser-access': 'true'
+    };
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`List models: provider=${providerId}, format=anthropic`);
+    const jsonResult = await fetchGetJson(url, headers, logger, signal);
+    if (jsonResult.isFailure()) {
+        return fail(jsonResult.message);
+    }
+    return anthropicListResponse
+        .validate(jsonResult.value)
+        .withErrorFormat((msg) => `Anthropic models API response: ${msg}`)
+        .onSuccess((response) => {
+        const models = response.data.map((entry) => buildModelInfo(providerId, entry.id, [], entry.display_name, capabilityConfig));
+        return succeed(models);
+    });
+}
+/**
+ * Calls the Gemini `GET /models` endpoint. Provider supplies both a
+ * `displayName` and `supportedGenerationMethods` — translated to native
+ * capabilities and unioned with config-derived capabilities.
+ * @internal
+ */
+async function callGeminiListModels(config, providerId, capabilityConfig, logger, signal) {
+    const url = `${config.baseUrl}/models`;
+    const headers = {
+        'x-goog-api-key': config.apiKey
+    };
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`List models: provider=${providerId}, format=gemini`);
+    const jsonResult = await fetchGetJson(url, headers, logger, signal);
+    if (jsonResult.isFailure()) {
+        return fail(jsonResult.message);
+    }
+    return geminiListResponse
+        .validate(jsonResult.value)
+        .withErrorFormat((msg) => `Gemini models API response: ${msg}`)
+        .onSuccess((response) => {
+        const models = response.models.map((entry) => {
+            const id = geminiBareId(entry.name);
+            const native = entry.supportedGenerationMethods
+                ? geminiMethodsToCapabilities(entry.supportedGenerationMethods)
+                : [];
+            return buildModelInfo(providerId, id, native, entry.displayName, capabilityConfig);
+        });
+        return succeed(models);
+    });
+}
+// ============================================================================
+// List models — dispatcher
+// ============================================================================
+/**
+ * Lists models available from a provider, with capabilities resolved from
+ * native provider info (where supplied) and a configurable rule set.
+ *
+ * Routes based on `descriptor.apiFormat` — listing reuses the existing
+ * format dispatch and does not require a separate descriptor field.
+ *
+ * @param params - Request parameters including descriptor, API key, and optional capability filter
+ * @returns The resolved model list, or a failure
+ * @public
+ */
+export async function callProviderListModels(params) {
+    const { descriptor, apiKey, capability, capabilityConfig, logger, signal } = params;
+    if (!descriptor.baseUrl) {
+        return fail(`provider "${descriptor.id}" has no API endpoint configured`);
+    }
+    const config = {
+        baseUrl: descriptor.baseUrl,
+        apiKey,
+        model: '' // unused by listing
+    };
+    const effectiveConfig = capabilityConfig !== null && capabilityConfig !== void 0 ? capabilityConfig : DEFAULT_MODEL_CAPABILITY_CONFIG;
+    let listResult;
+    switch (descriptor.apiFormat) {
+        case 'openai':
+            listResult = await callOpenAiListModels(config, descriptor.id, effectiveConfig, logger, signal);
+            break;
         case 'anthropic':
-            return callAnthropicCompletion(config, prompt, additionalMessages, temperature, logger, tools);
+            listResult = await callAnthropicListModels(config, descriptor.id, effectiveConfig, logger, signal);
+            break;
         case 'gemini':
-            return callGeminiCompletion(config, prompt, additionalMessages, temperature, logger, tools);
+            listResult = await callGeminiListModels(config, descriptor.id, effectiveConfig, logger, signal);
+            break;
         /* c8 ignore next 4 - defensive coding: exhaustive switch guaranteed by TypeScript */
         default: {
             const _exhaustive = descriptor.apiFormat;
             return fail(`unsupported API format: ${String(_exhaustive)}`);
         }
     }
+    if (listResult.isFailure()) {
+        return listResult;
+    }
+    if (capability === undefined) {
+        return listResult;
+    }
+    return succeed(listResult.value.filter((m) => m.capabilities.has(capability)));
+}
+// ============================================================================
+// Proxied list models
+// ============================================================================
+/**
+ * Calls the model-listing endpoint on a proxy server.
+ *
+ * @remarks
+ * Proxy contract:
+ * - Endpoint: `POST ${proxyUrl}/api/ai/list-models`
+ * - Request body: `{providerId, apiKey, capability?}`. Capability config is
+ *   not forwarded — the proxy applies its own (typically the same default
+ *   the library ships).
+ * - Success response body: an `IAiModelInfo[]` (under key `models`) where
+ *   `capabilities` is serialized as a string array (not Set, which doesn't
+ *   round-trip through JSON).
+ * - Error response body: `{error: string}`, surfaced as `proxy: ${error}`.
+ *
+ * @public
+ */
+export async function callProxiedListModels(proxyUrl, params) {
+    const { descriptor, apiKey, capability, logger, signal } = params;
+    const body = {
+        providerId: descriptor.id,
+        apiKey
+    };
+    if (capability !== undefined) {
+        body.capability = capability;
+    }
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`AI list-models proxy request: provider=${descriptor.id}, proxy=${proxyUrl}`);
+    const url = `${proxyUrl}/api/ai/list-models`;
+    const jsonResult = await fetchJson(url, {}, body, logger, signal);
+    if (jsonResult.isFailure()) {
+        return fail(jsonResult.message);
+    }
+    const response = jsonResult.value;
+    if (typeof response.error === 'string') {
+        return fail(`proxy: ${response.error}`);
+    }
+    return proxiedListModelsResponse
+        .validate(response)
+        .withErrorFormat((msg) => `proxy returned invalid response: ${msg}`)
+        .onSuccess((parsed) => {
+        const models = parsed.models.map((m) => (Object.assign({ id: m.id, capabilities: new Set(m.capabilities) }, (m.displayName !== undefined ? { displayName: m.displayName } : {}))));
+        return succeed(models);
+    });
 }
 // ============================================================================
 // Proxied completion (routes through a backend server)
@@ -445,11 +906,15 @@ export async function callProviderCompletion(params) {
  * @public
  */
 export async function callProxiedCompletion(proxyUrl, params) {
-    const { descriptor, apiKey, prompt, additionalMessages, temperature, modelOverride, logger, tools } = params;
+    const { descriptor, apiKey, prompt, additionalMessages, temperature, modelOverride, logger, tools, signal } = params;
+    const promptBody = { system: prompt.system, user: prompt.user };
+    if (prompt.attachments.length > 0) {
+        promptBody.attachments = prompt.attachments;
+    }
     const body = {
         providerId: descriptor.id,
         apiKey,
-        prompt: { system: prompt.system, user: prompt.user },
+        prompt: promptBody,
         temperature: temperature !== null && temperature !== void 0 ? temperature : 0.7
     };
     if (additionalMessages && additionalMessages.length > 0) {
@@ -464,7 +929,7 @@ export async function callProxiedCompletion(proxyUrl, params) {
     /* c8 ignore next 1 - optional logger */
     logger === null || logger === void 0 ? void 0 : logger.info(`AI proxy request: provider=${descriptor.id}, proxy=${proxyUrl}`);
    const url = `${proxyUrl}/api/ai/completion`;
-    const jsonResult = await fetchJson(url, {}, body, logger);
+    const jsonResult = await fetchJson(url, {}, body, logger, signal);
     if (jsonResult.isFailure()) {
         return fail(jsonResult.message);
     }
@@ -481,4 +946,51 @@ export async function callProxiedCompletion(proxyUrl, params) {
         truncated: response.truncated === true
     });
 }
+// ============================================================================
+// Proxied image generation
+// ============================================================================
+/**
+ * Calls the image-generation endpoint on a proxy server instead of calling
+ * the provider API directly from the browser.
+ *
+ * @remarks
+ * The proxy contract:
+ * - Endpoint: `POST ${proxyUrl}/api/ai/image-generation`
+ * - Request body: `{providerId, apiKey, params, modelOverride?}`
+ * - Success response body: an {@link IAiImageGenerationResponse}
+ * - Error response body: `{error: string}` (surfaced as `proxy: ${error}`)
+ *
+ * The proxy server is responsible for descriptor lookup, model resolution,
+ * provider dispatch, and response normalization.
+ *
+ * @param proxyUrl - Base URL of the proxy server (e.g. `http://localhost:3001`)
+ * @param params - Same parameters as {@link callProviderImageGeneration}
+ * @returns The generated images, or a failure
+ * @public
+ */
+export async function callProxiedImageGeneration(proxyUrl, params) {
+    const { descriptor, apiKey, params: request, modelOverride, logger, signal } = params;
+    const body = {
+        providerId: descriptor.id,
+        apiKey,
+        params: request
+    };
+    if (modelOverride !== undefined) {
+        body.modelOverride = modelOverride;
+    }
+    /* c8 ignore next 1 - optional logger */
+    logger === null || logger === void 0 ? void 0 : logger.info(`AI image proxy request: provider=${descriptor.id}, proxy=${proxyUrl}`);
+    const url = `${proxyUrl}/api/ai/image-generation`;
+    const jsonResult = await fetchJson(url, {}, body, logger, signal);
+    if (jsonResult.isFailure()) {
+        return fail(jsonResult.message);
+    }
+    const response = jsonResult.value;
+    if (typeof response.error === 'string') {
+        return fail(`proxy: ${response.error}`);
+    }
+    return proxiedImageGenerationResponse
+        .validate(response)
+        .withErrorFormat((msg) => `proxy returned invalid response: ${msg}`);
+}
 //# sourceMappingURL=apiClient.js.map
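The diff above threads an optional `signal` through every fetch call and adds image generation plus model listing with capability resolution. A minimal usage sketch follows; it assumes the functions are re-exported from the package root and that `descriptor` is a provider descriptor obtained elsewhere (both assumptions, not shown in this diff), and the capability string comes from the diff's own vocabulary ('chat' | 'tools' | 'vision' | 'image-generation').

    // Sketch only: parameter shapes are inferred from this diff, not from published docs.
    import { callProviderCompletion, callProviderListModels } from '@fgv/ts-extras'; // assumed export path

    async function demo(descriptor: any, apiKey: string): Promise<void> {
        // `signal` is forwarded to fetch(), so a standard AbortController cancels the HTTP request.
        const controller = new AbortController();
        const timer = setTimeout(() => controller.abort(), 30_000);
        const completion = await callProviderCompletion({
            descriptor,
            apiKey,
            prompt: { system: 'You are terse.', user: 'Say hello.', attachments: [] },
            temperature: 0.2,
            signal: controller.signal
        });
        clearTimeout(timer);
        if (completion.isFailure()) {
            console.error(completion.message);
            return;
        }
        // New in this version: list the provider's models, filtered to one capability.
        const vision = await callProviderListModels({ descriptor, apiKey, capability: 'vision' });
        if (!vision.isFailure()) {
            for (const m of vision.value) {
                console.log(m.id, Array.from(m.capabilities).join(', '));
            }
        }
    }

The proxied variants (callProxiedCompletion, callProxiedListModels, callProxiedImageGeneration) take the same shapes and also accept `signal`, routing the request through the proxy endpoints documented in the @remarks blocks above.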