@llmgateway/ai-sdk-provider 1.0.3 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/index.d.mts +96 -64
- package/dist/index.d.ts +96 -64
- package/dist/index.js +1571 -673
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1532 -662
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +50 -22
- package/dist/internal/index.d.ts +50 -22
- package/dist/internal/index.js +1517 -660
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1479 -650
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +6 -8
package/dist/index.js
CHANGED
|
@@ -1,10 +1,12 @@
|
|
|
1
1
|
"use strict";
|
|
2
|
+
var __create = Object.create;
|
|
2
3
|
var __defProp = Object.defineProperty;
|
|
3
4
|
var __defProps = Object.defineProperties;
|
|
4
5
|
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
5
6
|
var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
|
|
6
7
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
7
8
|
var __getOwnPropSymbols = Object.getOwnPropertySymbols;
|
|
9
|
+
var __getProtoOf = Object.getPrototypeOf;
|
|
8
10
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
9
11
|
var __propIsEnum = Object.prototype.propertyIsEnumerable;
|
|
10
12
|
var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
|
|
@@ -20,21 +22,9 @@ var __spreadValues = (a, b) => {
|
|
|
20
22
|
return a;
|
|
21
23
|
};
|
|
22
24
|
var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
|
|
23
|
-
var __objRest = (source, exclude) => {
|
|
24
|
-
var target = {};
|
|
25
|
-
for (var prop in source)
|
|
26
|
-
if (__hasOwnProp.call(source, prop) && exclude.indexOf(prop) < 0)
|
|
27
|
-
target[prop] = source[prop];
|
|
28
|
-
if (source != null && __getOwnPropSymbols)
|
|
29
|
-
for (var prop of __getOwnPropSymbols(source)) {
|
|
30
|
-
if (exclude.indexOf(prop) < 0 && __propIsEnum.call(source, prop))
|
|
31
|
-
target[prop] = source[prop];
|
|
32
|
-
}
|
|
33
|
-
return target;
|
|
34
|
-
};
|
|
35
25
|
var __export = (target, all) => {
|
|
36
|
-
for (var
|
|
37
|
-
__defProp(target,
|
|
26
|
+
for (var name14 in all)
|
|
27
|
+
__defProp(target, name14, { get: all[name14], enumerable: true });
|
|
38
28
|
};
|
|
39
29
|
var __copyProps = (to, from, except, desc) => {
|
|
40
30
|
if (from && typeof from === "object" || typeof from === "function") {
|
|
@@ -44,6 +34,14 @@ var __copyProps = (to, from, except, desc) => {
|
|
|
44
34
|
}
|
|
45
35
|
return to;
|
|
46
36
|
};
|
|
37
|
+
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
|
|
38
|
+
// If the importer is in node compatibility mode or this is not an ESM
|
|
39
|
+
// file that has been converted to a CommonJS file using a Babel-
|
|
40
|
+
// compatible transform (i.e. "__esModule" has not been set), then set
|
|
41
|
+
// "default" to the CommonJS "module.exports" for node compatibility.
|
|
42
|
+
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
|
|
43
|
+
mod
|
|
44
|
+
));
|
|
47
45
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
48
46
|
|
|
49
47
|
// src/index.ts
|
|
@@ -55,75 +53,1005 @@ __export(index_exports, {
|
|
|
55
53
|
});
|
|
56
54
|
module.exports = __toCommonJS(index_exports);
|
|
57
55
|
|
|
58
|
-
//
|
|
59
|
-
var
|
|
56
|
+
// node_modules/.pnpm/@ai-sdk+provider@2.0.0-beta.1/node_modules/@ai-sdk/provider/dist/index.mjs
|
|
57
|
+
var marker = "vercel.ai.error";
|
|
58
|
+
var symbol = Symbol.for(marker);
|
|
59
|
+
var _a;
|
|
60
|
+
var _AISDKError = class _AISDKError2 extends Error {
|
|
61
|
+
/**
|
|
62
|
+
* Creates an AI SDK Error.
|
|
63
|
+
*
|
|
64
|
+
* @param {Object} params - The parameters for creating the error.
|
|
65
|
+
* @param {string} params.name - The name of the error.
|
|
66
|
+
* @param {string} params.message - The error message.
|
|
67
|
+
* @param {unknown} [params.cause] - The underlying cause of the error.
|
|
68
|
+
*/
|
|
69
|
+
constructor({
|
|
70
|
+
name: name14,
|
|
71
|
+
message,
|
|
72
|
+
cause
|
|
73
|
+
}) {
|
|
74
|
+
super(message);
|
|
75
|
+
this[_a] = true;
|
|
76
|
+
this.name = name14;
|
|
77
|
+
this.cause = cause;
|
|
78
|
+
}
|
|
79
|
+
/**
|
|
80
|
+
* Checks if the given error is an AI SDK Error.
|
|
81
|
+
* @param {unknown} error - The error to check.
|
|
82
|
+
* @returns {boolean} True if the error is an AI SDK Error, false otherwise.
|
|
83
|
+
*/
|
|
84
|
+
static isInstance(error) {
|
|
85
|
+
return _AISDKError2.hasMarker(error, marker);
|
|
86
|
+
}
|
|
87
|
+
static hasMarker(error, marker15) {
|
|
88
|
+
const markerSymbol = Symbol.for(marker15);
|
|
89
|
+
return error != null && typeof error === "object" && markerSymbol in error && typeof error[markerSymbol] === "boolean" && error[markerSymbol] === true;
|
|
90
|
+
}
|
|
91
|
+
};
|
|
92
|
+
_a = symbol;
|
|
93
|
+
var AISDKError = _AISDKError;
|
|
94
|
+
var name = "AI_APICallError";
|
|
95
|
+
var marker2 = `vercel.ai.error.${name}`;
|
|
96
|
+
var symbol2 = Symbol.for(marker2);
|
|
97
|
+
var _a2;
|
|
98
|
+
var APICallError = class extends AISDKError {
|
|
99
|
+
constructor({
|
|
100
|
+
message,
|
|
101
|
+
url,
|
|
102
|
+
requestBodyValues,
|
|
103
|
+
statusCode,
|
|
104
|
+
responseHeaders,
|
|
105
|
+
responseBody,
|
|
106
|
+
cause,
|
|
107
|
+
isRetryable = statusCode != null && (statusCode === 408 || // request timeout
|
|
108
|
+
statusCode === 409 || // conflict
|
|
109
|
+
statusCode === 429 || // too many requests
|
|
110
|
+
statusCode >= 500),
|
|
111
|
+
// server error
|
|
112
|
+
data
|
|
113
|
+
}) {
|
|
114
|
+
super({ name, message, cause });
|
|
115
|
+
this[_a2] = true;
|
|
116
|
+
this.url = url;
|
|
117
|
+
this.requestBodyValues = requestBodyValues;
|
|
118
|
+
this.statusCode = statusCode;
|
|
119
|
+
this.responseHeaders = responseHeaders;
|
|
120
|
+
this.responseBody = responseBody;
|
|
121
|
+
this.isRetryable = isRetryable;
|
|
122
|
+
this.data = data;
|
|
123
|
+
}
|
|
124
|
+
static isInstance(error) {
|
|
125
|
+
return AISDKError.hasMarker(error, marker2);
|
|
126
|
+
}
|
|
127
|
+
};
|
|
128
|
+
_a2 = symbol2;
|
|
129
|
+
var name2 = "AI_EmptyResponseBodyError";
|
|
130
|
+
var marker3 = `vercel.ai.error.${name2}`;
|
|
131
|
+
var symbol3 = Symbol.for(marker3);
|
|
132
|
+
var _a3;
|
|
133
|
+
var EmptyResponseBodyError = class extends AISDKError {
|
|
134
|
+
// used in isInstance
|
|
135
|
+
constructor({ message = "Empty response body" } = {}) {
|
|
136
|
+
super({ name: name2, message });
|
|
137
|
+
this[_a3] = true;
|
|
138
|
+
}
|
|
139
|
+
static isInstance(error) {
|
|
140
|
+
return AISDKError.hasMarker(error, marker3);
|
|
141
|
+
}
|
|
142
|
+
};
|
|
143
|
+
_a3 = symbol3;
|
|
144
|
+
function getErrorMessage(error) {
|
|
145
|
+
if (error == null) {
|
|
146
|
+
return "unknown error";
|
|
147
|
+
}
|
|
148
|
+
if (typeof error === "string") {
|
|
149
|
+
return error;
|
|
150
|
+
}
|
|
151
|
+
if (error instanceof Error) {
|
|
152
|
+
return error.message;
|
|
153
|
+
}
|
|
154
|
+
return JSON.stringify(error);
|
|
155
|
+
}
|
|
156
|
+
var name3 = "AI_InvalidArgumentError";
|
|
157
|
+
var marker4 = `vercel.ai.error.${name3}`;
|
|
158
|
+
var symbol4 = Symbol.for(marker4);
|
|
159
|
+
var _a4;
|
|
160
|
+
var InvalidArgumentError = class extends AISDKError {
|
|
161
|
+
constructor({
|
|
162
|
+
message,
|
|
163
|
+
cause,
|
|
164
|
+
argument
|
|
165
|
+
}) {
|
|
166
|
+
super({ name: name3, message, cause });
|
|
167
|
+
this[_a4] = true;
|
|
168
|
+
this.argument = argument;
|
|
169
|
+
}
|
|
170
|
+
static isInstance(error) {
|
|
171
|
+
return AISDKError.hasMarker(error, marker4);
|
|
172
|
+
}
|
|
173
|
+
};
|
|
174
|
+
_a4 = symbol4;
|
|
175
|
+
var name4 = "AI_InvalidPromptError";
|
|
176
|
+
var marker5 = `vercel.ai.error.${name4}`;
|
|
177
|
+
var symbol5 = Symbol.for(marker5);
|
|
178
|
+
var _a5;
|
|
179
|
+
var InvalidPromptError = class extends AISDKError {
|
|
180
|
+
constructor({
|
|
181
|
+
prompt,
|
|
182
|
+
message,
|
|
183
|
+
cause
|
|
184
|
+
}) {
|
|
185
|
+
super({ name: name4, message: `Invalid prompt: ${message}`, cause });
|
|
186
|
+
this[_a5] = true;
|
|
187
|
+
this.prompt = prompt;
|
|
188
|
+
}
|
|
189
|
+
static isInstance(error) {
|
|
190
|
+
return AISDKError.hasMarker(error, marker5);
|
|
191
|
+
}
|
|
192
|
+
};
|
|
193
|
+
_a5 = symbol5;
|
|
194
|
+
var name5 = "AI_InvalidResponseDataError";
|
|
195
|
+
var marker6 = `vercel.ai.error.${name5}`;
|
|
196
|
+
var symbol6 = Symbol.for(marker6);
|
|
197
|
+
var _a6;
|
|
198
|
+
var InvalidResponseDataError = class extends AISDKError {
|
|
199
|
+
constructor({
|
|
200
|
+
data,
|
|
201
|
+
message = `Invalid response data: ${JSON.stringify(data)}.`
|
|
202
|
+
}) {
|
|
203
|
+
super({ name: name5, message });
|
|
204
|
+
this[_a6] = true;
|
|
205
|
+
this.data = data;
|
|
206
|
+
}
|
|
207
|
+
static isInstance(error) {
|
|
208
|
+
return AISDKError.hasMarker(error, marker6);
|
|
209
|
+
}
|
|
210
|
+
};
|
|
211
|
+
_a6 = symbol6;
|
|
212
|
+
var name6 = "AI_JSONParseError";
|
|
213
|
+
var marker7 = `vercel.ai.error.${name6}`;
|
|
214
|
+
var symbol7 = Symbol.for(marker7);
|
|
215
|
+
var _a7;
|
|
216
|
+
var JSONParseError = class extends AISDKError {
|
|
217
|
+
constructor({ text, cause }) {
|
|
218
|
+
super({
|
|
219
|
+
name: name6,
|
|
220
|
+
message: `JSON parsing failed: Text: ${text}.
|
|
221
|
+
Error message: ${getErrorMessage(cause)}`,
|
|
222
|
+
cause
|
|
223
|
+
});
|
|
224
|
+
this[_a7] = true;
|
|
225
|
+
this.text = text;
|
|
226
|
+
}
|
|
227
|
+
static isInstance(error) {
|
|
228
|
+
return AISDKError.hasMarker(error, marker7);
|
|
229
|
+
}
|
|
230
|
+
};
|
|
231
|
+
_a7 = symbol7;
|
|
232
|
+
var name7 = "AI_LoadAPIKeyError";
|
|
233
|
+
var marker8 = `vercel.ai.error.${name7}`;
|
|
234
|
+
var symbol8 = Symbol.for(marker8);
|
|
235
|
+
var _a8;
|
|
236
|
+
var LoadAPIKeyError = class extends AISDKError {
|
|
237
|
+
// used in isInstance
|
|
238
|
+
constructor({ message }) {
|
|
239
|
+
super({ name: name7, message });
|
|
240
|
+
this[_a8] = true;
|
|
241
|
+
}
|
|
242
|
+
static isInstance(error) {
|
|
243
|
+
return AISDKError.hasMarker(error, marker8);
|
|
244
|
+
}
|
|
245
|
+
};
|
|
246
|
+
_a8 = symbol8;
|
|
247
|
+
var name8 = "AI_LoadSettingError";
|
|
248
|
+
var marker9 = `vercel.ai.error.${name8}`;
|
|
249
|
+
var symbol9 = Symbol.for(marker9);
|
|
250
|
+
var _a9;
|
|
251
|
+
_a9 = symbol9;
|
|
252
|
+
var name9 = "AI_NoContentGeneratedError";
|
|
253
|
+
var marker10 = `vercel.ai.error.${name9}`;
|
|
254
|
+
var symbol10 = Symbol.for(marker10);
|
|
255
|
+
var _a10;
|
|
256
|
+
_a10 = symbol10;
|
|
257
|
+
var name10 = "AI_NoSuchModelError";
|
|
258
|
+
var marker11 = `vercel.ai.error.${name10}`;
|
|
259
|
+
var symbol11 = Symbol.for(marker11);
|
|
260
|
+
var _a11;
|
|
261
|
+
_a11 = symbol11;
|
|
262
|
+
var name11 = "AI_TooManyEmbeddingValuesForCallError";
|
|
263
|
+
var marker12 = `vercel.ai.error.${name11}`;
|
|
264
|
+
var symbol12 = Symbol.for(marker12);
|
|
265
|
+
var _a12;
|
|
266
|
+
_a12 = symbol12;
|
|
267
|
+
var name12 = "AI_TypeValidationError";
|
|
268
|
+
var marker13 = `vercel.ai.error.${name12}`;
|
|
269
|
+
var symbol13 = Symbol.for(marker13);
|
|
270
|
+
var _a13;
|
|
271
|
+
var _TypeValidationError = class _TypeValidationError2 extends AISDKError {
|
|
272
|
+
constructor({ value, cause }) {
|
|
273
|
+
super({
|
|
274
|
+
name: name12,
|
|
275
|
+
message: `Type validation failed: Value: ${JSON.stringify(value)}.
|
|
276
|
+
Error message: ${getErrorMessage(cause)}`,
|
|
277
|
+
cause
|
|
278
|
+
});
|
|
279
|
+
this[_a13] = true;
|
|
280
|
+
this.value = value;
|
|
281
|
+
}
|
|
282
|
+
static isInstance(error) {
|
|
283
|
+
return AISDKError.hasMarker(error, marker13);
|
|
284
|
+
}
|
|
285
|
+
/**
|
|
286
|
+
* Wraps an error into a TypeValidationError.
|
|
287
|
+
* If the cause is already a TypeValidationError with the same value, it returns the cause.
|
|
288
|
+
* Otherwise, it creates a new TypeValidationError.
|
|
289
|
+
*
|
|
290
|
+
* @param {Object} params - The parameters for wrapping the error.
|
|
291
|
+
* @param {unknown} params.value - The value that failed validation.
|
|
292
|
+
* @param {unknown} params.cause - The original error or cause of the validation failure.
|
|
293
|
+
* @returns {TypeValidationError} A TypeValidationError instance.
|
|
294
|
+
*/
|
|
295
|
+
static wrap({
|
|
296
|
+
value,
|
|
297
|
+
cause
|
|
298
|
+
}) {
|
|
299
|
+
return _TypeValidationError2.isInstance(cause) && cause.value === value ? cause : new _TypeValidationError2({ value, cause });
|
|
300
|
+
}
|
|
301
|
+
};
|
|
302
|
+
_a13 = symbol13;
|
|
303
|
+
var TypeValidationError = _TypeValidationError;
|
|
304
|
+
var name13 = "AI_UnsupportedFunctionalityError";
|
|
305
|
+
var marker14 = `vercel.ai.error.${name13}`;
|
|
306
|
+
var symbol14 = Symbol.for(marker14);
|
|
307
|
+
var _a14;
|
|
308
|
+
var UnsupportedFunctionalityError = class extends AISDKError {
|
|
309
|
+
constructor({
|
|
310
|
+
functionality,
|
|
311
|
+
message = `'${functionality}' functionality not supported.`
|
|
312
|
+
}) {
|
|
313
|
+
super({ name: name13, message });
|
|
314
|
+
this[_a14] = true;
|
|
315
|
+
this.functionality = functionality;
|
|
316
|
+
}
|
|
317
|
+
static isInstance(error) {
|
|
318
|
+
return AISDKError.hasMarker(error, marker14);
|
|
319
|
+
}
|
|
320
|
+
};
|
|
321
|
+
_a14 = symbol14;
|
|
60
322
|
|
|
61
|
-
//
|
|
323
|
+
// node_modules/.pnpm/eventsource-parser@3.0.3/node_modules/eventsource-parser/dist/index.js
|
|
324
|
+
var ParseError = class extends Error {
|
|
325
|
+
constructor(message, options) {
|
|
326
|
+
super(message), this.name = "ParseError", this.type = options.type, this.field = options.field, this.value = options.value, this.line = options.line;
|
|
327
|
+
}
|
|
328
|
+
};
|
|
329
|
+
function noop(_arg) {
|
|
330
|
+
}
|
|
331
|
+
function createParser(callbacks) {
|
|
332
|
+
if (typeof callbacks == "function")
|
|
333
|
+
throw new TypeError(
|
|
334
|
+
"`callbacks` must be an object, got a function instead. Did you mean `{onEvent: fn}`?"
|
|
335
|
+
);
|
|
336
|
+
const { onEvent = noop, onError = noop, onRetry = noop, onComment } = callbacks;
|
|
337
|
+
let incompleteLine = "", isFirstChunk = true, id, data = "", eventType = "";
|
|
338
|
+
function feed(newChunk) {
|
|
339
|
+
const chunk = isFirstChunk ? newChunk.replace(/^\xEF\xBB\xBF/, "") : newChunk, [complete, incomplete] = splitLines(`${incompleteLine}${chunk}`);
|
|
340
|
+
for (const line of complete)
|
|
341
|
+
parseLine(line);
|
|
342
|
+
incompleteLine = incomplete, isFirstChunk = false;
|
|
343
|
+
}
|
|
344
|
+
function parseLine(line) {
|
|
345
|
+
if (line === "") {
|
|
346
|
+
dispatchEvent();
|
|
347
|
+
return;
|
|
348
|
+
}
|
|
349
|
+
if (line.startsWith(":")) {
|
|
350
|
+
onComment && onComment(line.slice(line.startsWith(": ") ? 2 : 1));
|
|
351
|
+
return;
|
|
352
|
+
}
|
|
353
|
+
const fieldSeparatorIndex = line.indexOf(":");
|
|
354
|
+
if (fieldSeparatorIndex !== -1) {
|
|
355
|
+
const field = line.slice(0, fieldSeparatorIndex), offset = line[fieldSeparatorIndex + 1] === " " ? 2 : 1, value = line.slice(fieldSeparatorIndex + offset);
|
|
356
|
+
processField(field, value, line);
|
|
357
|
+
return;
|
|
358
|
+
}
|
|
359
|
+
processField(line, "", line);
|
|
360
|
+
}
|
|
361
|
+
function processField(field, value, line) {
|
|
362
|
+
switch (field) {
|
|
363
|
+
case "event":
|
|
364
|
+
eventType = value;
|
|
365
|
+
break;
|
|
366
|
+
case "data":
|
|
367
|
+
data = `${data}${value}
|
|
368
|
+
`;
|
|
369
|
+
break;
|
|
370
|
+
case "id":
|
|
371
|
+
id = value.includes("\0") ? void 0 : value;
|
|
372
|
+
break;
|
|
373
|
+
case "retry":
|
|
374
|
+
/^\d+$/.test(value) ? onRetry(parseInt(value, 10)) : onError(
|
|
375
|
+
new ParseError(`Invalid \`retry\` value: "${value}"`, {
|
|
376
|
+
type: "invalid-retry",
|
|
377
|
+
value,
|
|
378
|
+
line
|
|
379
|
+
})
|
|
380
|
+
);
|
|
381
|
+
break;
|
|
382
|
+
default:
|
|
383
|
+
onError(
|
|
384
|
+
new ParseError(
|
|
385
|
+
`Unknown field "${field.length > 20 ? `${field.slice(0, 20)}\u2026` : field}"`,
|
|
386
|
+
{ type: "unknown-field", field, value, line }
|
|
387
|
+
)
|
|
388
|
+
);
|
|
389
|
+
break;
|
|
390
|
+
}
|
|
391
|
+
}
|
|
392
|
+
function dispatchEvent() {
|
|
393
|
+
data.length > 0 && onEvent({
|
|
394
|
+
id,
|
|
395
|
+
event: eventType || void 0,
|
|
396
|
+
// If the data buffer's last character is a U+000A LINE FEED (LF) character,
|
|
397
|
+
// then remove the last character from the data buffer.
|
|
398
|
+
data: data.endsWith(`
|
|
399
|
+
`) ? data.slice(0, -1) : data
|
|
400
|
+
}), id = void 0, data = "", eventType = "";
|
|
401
|
+
}
|
|
402
|
+
function reset(options = {}) {
|
|
403
|
+
incompleteLine && options.consume && parseLine(incompleteLine), isFirstChunk = true, id = void 0, data = "", eventType = "", incompleteLine = "";
|
|
404
|
+
}
|
|
405
|
+
return { feed, reset };
|
|
406
|
+
}
|
|
407
|
+
function splitLines(chunk) {
|
|
408
|
+
const lines = [];
|
|
409
|
+
let incompleteLine = "", searchIndex = 0;
|
|
410
|
+
for (; searchIndex < chunk.length; ) {
|
|
411
|
+
const crIndex = chunk.indexOf("\r", searchIndex), lfIndex = chunk.indexOf(`
|
|
412
|
+
`, searchIndex);
|
|
413
|
+
let lineEnd = -1;
|
|
414
|
+
if (crIndex !== -1 && lfIndex !== -1 ? lineEnd = Math.min(crIndex, lfIndex) : crIndex !== -1 ? lineEnd = crIndex : lfIndex !== -1 && (lineEnd = lfIndex), lineEnd === -1) {
|
|
415
|
+
incompleteLine = chunk.slice(searchIndex);
|
|
416
|
+
break;
|
|
417
|
+
} else {
|
|
418
|
+
const line = chunk.slice(searchIndex, lineEnd);
|
|
419
|
+
lines.push(line), searchIndex = lineEnd + 1, chunk[searchIndex - 1] === "\r" && chunk[searchIndex] === `
|
|
420
|
+
` && searchIndex++;
|
|
421
|
+
}
|
|
422
|
+
}
|
|
423
|
+
return [lines, incompleteLine];
|
|
424
|
+
}
|
|
425
|
+
|
|
426
|
+
// node_modules/.pnpm/eventsource-parser@3.0.3/node_modules/eventsource-parser/dist/stream.js
|
|
427
|
+
var EventSourceParserStream = class extends TransformStream {
|
|
428
|
+
constructor({ onError, onRetry, onComment } = {}) {
|
|
429
|
+
let parser;
|
|
430
|
+
super({
|
|
431
|
+
start(controller) {
|
|
432
|
+
parser = createParser({
|
|
433
|
+
onEvent: (event) => {
|
|
434
|
+
controller.enqueue(event);
|
|
435
|
+
},
|
|
436
|
+
onError(error) {
|
|
437
|
+
onError === "terminate" ? controller.error(error) : typeof onError == "function" && onError(error);
|
|
438
|
+
},
|
|
439
|
+
onRetry,
|
|
440
|
+
onComment
|
|
441
|
+
});
|
|
442
|
+
},
|
|
443
|
+
transform(chunk) {
|
|
444
|
+
parser.feed(chunk);
|
|
445
|
+
}
|
|
446
|
+
});
|
|
447
|
+
}
|
|
448
|
+
};
|
|
449
|
+
|
|
450
|
+
// node_modules/.pnpm/@ai-sdk+provider-utils@3.0.0-beta.5_zod@3.25.34/node_modules/@ai-sdk/provider-utils/dist/index.mjs
|
|
451
|
+
var z4 = __toESM(require("zod/v4"), 1);
|
|
452
|
+
|
|
453
|
+
// node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/Options.js
|
|
454
|
+
var ignoreOverride = Symbol("Let zodToJsonSchema decide on which parser to use");
|
|
455
|
+
|
|
456
|
+
// node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/selectParser.js
|
|
457
|
+
var import_zod4 = require("zod");
|
|
458
|
+
|
|
459
|
+
// node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/array.js
|
|
62
460
|
var import_zod = require("zod");
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
461
|
+
|
|
462
|
+
// node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/record.js
|
|
463
|
+
var import_zod2 = require("zod");
|
|
464
|
+
|
|
465
|
+
// node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/string.js
|
|
466
|
+
var ALPHA_NUMERIC = new Set("ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvxyz0123456789");
|
|
467
|
+
|
|
468
|
+
// node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/object.js
|
|
469
|
+
var import_zod3 = require("zod");
|
|
470
|
+
|
|
471
|
+
// node_modules/.pnpm/@ai-sdk+provider-utils@3.0.0-beta.5_zod@3.25.34/node_modules/@ai-sdk/provider-utils/dist/index.mjs
|
|
472
|
+
function combineHeaders(...headers) {
|
|
473
|
+
return headers.reduce(
|
|
474
|
+
(combinedHeaders, currentHeaders) => __spreadValues(__spreadValues({}, combinedHeaders), currentHeaders != null ? currentHeaders : {}),
|
|
475
|
+
{}
|
|
476
|
+
);
|
|
477
|
+
}
|
|
478
|
+
function extractResponseHeaders(response) {
|
|
479
|
+
return Object.fromEntries([...response.headers]);
|
|
480
|
+
}
|
|
481
|
+
var createIdGenerator = ({
|
|
482
|
+
prefix,
|
|
483
|
+
size = 16,
|
|
484
|
+
alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
|
|
485
|
+
separator = "-"
|
|
486
|
+
} = {}) => {
|
|
487
|
+
const generator = () => {
|
|
488
|
+
const alphabetLength = alphabet.length;
|
|
489
|
+
const chars = new Array(size);
|
|
490
|
+
for (let i = 0; i < size; i++) {
|
|
491
|
+
chars[i] = alphabet[Math.random() * alphabetLength | 0];
|
|
492
|
+
}
|
|
493
|
+
return chars.join("");
|
|
494
|
+
};
|
|
495
|
+
if (prefix == null) {
|
|
496
|
+
return generator;
|
|
497
|
+
}
|
|
498
|
+
if (alphabet.includes(separator)) {
|
|
499
|
+
throw new InvalidArgumentError({
|
|
500
|
+
argument: "separator",
|
|
501
|
+
message: `The separator "${separator}" must not be part of the alphabet "${alphabet}".`
|
|
502
|
+
});
|
|
503
|
+
}
|
|
504
|
+
return () => `${prefix}${separator}${generator()}`;
|
|
505
|
+
};
|
|
506
|
+
var generateId = createIdGenerator();
|
|
507
|
+
function isAbortError(error) {
|
|
508
|
+
return error instanceof Error && (error.name === "AbortError" || error.name === "ResponseAborted" || // Next.js
|
|
509
|
+
error.name === "TimeoutError");
|
|
510
|
+
}
|
|
511
|
+
var FETCH_FAILED_ERROR_MESSAGES = ["fetch failed", "failed to fetch"];
|
|
512
|
+
function handleFetchError({
|
|
513
|
+
error,
|
|
514
|
+
url,
|
|
515
|
+
requestBodyValues
|
|
516
|
+
}) {
|
|
517
|
+
if (isAbortError(error)) {
|
|
518
|
+
return error;
|
|
519
|
+
}
|
|
520
|
+
if (error instanceof TypeError && FETCH_FAILED_ERROR_MESSAGES.includes(error.message.toLowerCase())) {
|
|
521
|
+
const cause = error.cause;
|
|
522
|
+
if (cause != null) {
|
|
523
|
+
return new APICallError({
|
|
524
|
+
message: `Cannot connect to API: ${cause.message}`,
|
|
525
|
+
cause,
|
|
526
|
+
url,
|
|
527
|
+
requestBodyValues,
|
|
528
|
+
isRetryable: true
|
|
529
|
+
// retry when network error
|
|
530
|
+
});
|
|
531
|
+
}
|
|
532
|
+
}
|
|
533
|
+
return error;
|
|
534
|
+
}
|
|
535
|
+
function removeUndefinedEntries(record) {
|
|
536
|
+
return Object.fromEntries(
|
|
537
|
+
Object.entries(record).filter(([_key, value]) => value != null)
|
|
538
|
+
);
|
|
539
|
+
}
|
|
540
|
+
function loadApiKey({
|
|
541
|
+
apiKey,
|
|
542
|
+
environmentVariableName,
|
|
543
|
+
apiKeyParameterName = "apiKey",
|
|
544
|
+
description
|
|
545
|
+
}) {
|
|
546
|
+
if (typeof apiKey === "string") {
|
|
547
|
+
return apiKey;
|
|
548
|
+
}
|
|
549
|
+
if (apiKey != null) {
|
|
550
|
+
throw new LoadAPIKeyError({
|
|
551
|
+
message: `${description} API key must be a string.`
|
|
552
|
+
});
|
|
553
|
+
}
|
|
554
|
+
if (typeof process === "undefined") {
|
|
555
|
+
throw new LoadAPIKeyError({
|
|
556
|
+
message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`
|
|
557
|
+
});
|
|
558
|
+
}
|
|
559
|
+
apiKey = process.env[environmentVariableName];
|
|
560
|
+
if (apiKey == null) {
|
|
561
|
+
throw new LoadAPIKeyError({
|
|
562
|
+
message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or the ${environmentVariableName} environment variable.`
|
|
563
|
+
});
|
|
564
|
+
}
|
|
565
|
+
if (typeof apiKey !== "string") {
|
|
566
|
+
throw new LoadAPIKeyError({
|
|
567
|
+
message: `${description} API key must be a string. The value of the ${environmentVariableName} environment variable is not a string.`
|
|
568
|
+
});
|
|
569
|
+
}
|
|
570
|
+
return apiKey;
|
|
571
|
+
}
|
|
572
|
+
var suspectProtoRx = /"__proto__"\s*:/;
|
|
573
|
+
var suspectConstructorRx = /"constructor"\s*:/;
|
|
574
|
+
function _parse(text) {
|
|
575
|
+
const obj = JSON.parse(text);
|
|
576
|
+
if (obj === null || typeof obj !== "object") {
|
|
577
|
+
return obj;
|
|
578
|
+
}
|
|
579
|
+
if (suspectProtoRx.test(text) === false && suspectConstructorRx.test(text) === false) {
|
|
580
|
+
return obj;
|
|
581
|
+
}
|
|
582
|
+
return filter(obj);
|
|
583
|
+
}
|
|
584
|
+
function filter(obj) {
|
|
585
|
+
let next = [obj];
|
|
586
|
+
while (next.length) {
|
|
587
|
+
const nodes = next;
|
|
588
|
+
next = [];
|
|
589
|
+
for (const node of nodes) {
|
|
590
|
+
if (Object.prototype.hasOwnProperty.call(node, "__proto__")) {
|
|
591
|
+
throw new SyntaxError("Object contains forbidden prototype property");
|
|
592
|
+
}
|
|
593
|
+
if (Object.prototype.hasOwnProperty.call(node, "constructor") && Object.prototype.hasOwnProperty.call(node.constructor, "prototype")) {
|
|
594
|
+
throw new SyntaxError("Object contains forbidden prototype property");
|
|
595
|
+
}
|
|
596
|
+
for (const key in node) {
|
|
597
|
+
const value = node[key];
|
|
598
|
+
if (value && typeof value === "object") {
|
|
599
|
+
next.push(value);
|
|
600
|
+
}
|
|
601
|
+
}
|
|
602
|
+
}
|
|
603
|
+
}
|
|
604
|
+
return obj;
|
|
605
|
+
}
|
|
606
|
+
function secureJsonParse(text) {
|
|
607
|
+
const { stackTraceLimit } = Error;
|
|
608
|
+
Error.stackTraceLimit = 0;
|
|
609
|
+
try {
|
|
610
|
+
return _parse(text);
|
|
611
|
+
} finally {
|
|
612
|
+
Error.stackTraceLimit = stackTraceLimit;
|
|
613
|
+
}
|
|
614
|
+
}
|
|
615
|
+
var validatorSymbol = Symbol.for("vercel.ai.validator");
|
|
616
|
+
function validator(validate) {
|
|
617
|
+
return { [validatorSymbol]: true, validate };
|
|
618
|
+
}
|
|
619
|
+
function isValidator(value) {
|
|
620
|
+
return typeof value === "object" && value !== null && validatorSymbol in value && value[validatorSymbol] === true && "validate" in value;
|
|
621
|
+
}
|
|
622
|
+
function asValidator(value) {
|
|
623
|
+
return isValidator(value) ? value : standardSchemaValidator(value);
|
|
624
|
+
}
|
|
625
|
+
function standardSchemaValidator(standardSchema) {
|
|
626
|
+
return validator(async (value) => {
|
|
627
|
+
const result = await standardSchema["~standard"].validate(value);
|
|
628
|
+
return result.issues == null ? { success: true, value: result.value } : {
|
|
629
|
+
success: false,
|
|
630
|
+
error: new TypeValidationError({
|
|
631
|
+
value,
|
|
632
|
+
cause: result.issues
|
|
633
|
+
})
|
|
634
|
+
};
|
|
635
|
+
});
|
|
636
|
+
}
|
|
637
|
+
async function validateTypes({
|
|
638
|
+
value,
|
|
639
|
+
schema
|
|
640
|
+
}) {
|
|
641
|
+
const result = await safeValidateTypes({ value, schema });
|
|
642
|
+
if (!result.success) {
|
|
643
|
+
throw TypeValidationError.wrap({ value, cause: result.error });
|
|
644
|
+
}
|
|
645
|
+
return result.value;
|
|
646
|
+
}
|
|
647
|
+
async function safeValidateTypes({
|
|
648
|
+
value,
|
|
649
|
+
schema
|
|
650
|
+
}) {
|
|
651
|
+
const validator2 = asValidator(schema);
|
|
652
|
+
try {
|
|
653
|
+
if (validator2.validate == null) {
|
|
654
|
+
return { success: true, value, rawValue: value };
|
|
655
|
+
}
|
|
656
|
+
const result = await validator2.validate(value);
|
|
657
|
+
if (result.success) {
|
|
658
|
+
return { success: true, value: result.value, rawValue: value };
|
|
659
|
+
}
|
|
660
|
+
return {
|
|
661
|
+
success: false,
|
|
662
|
+
error: TypeValidationError.wrap({ value, cause: result.error }),
|
|
663
|
+
rawValue: value
|
|
664
|
+
};
|
|
665
|
+
} catch (error) {
|
|
666
|
+
return {
|
|
667
|
+
success: false,
|
|
668
|
+
error: TypeValidationError.wrap({ value, cause: error }),
|
|
669
|
+
rawValue: value
|
|
670
|
+
};
|
|
671
|
+
}
|
|
672
|
+
}
|
|
673
|
+
async function parseJSON({
|
|
674
|
+
text,
|
|
675
|
+
schema
|
|
676
|
+
}) {
|
|
677
|
+
try {
|
|
678
|
+
const value = secureJsonParse(text);
|
|
679
|
+
if (schema == null) {
|
|
680
|
+
return value;
|
|
681
|
+
}
|
|
682
|
+
return validateTypes({ value, schema });
|
|
683
|
+
} catch (error) {
|
|
684
|
+
if (JSONParseError.isInstance(error) || TypeValidationError.isInstance(error)) {
|
|
685
|
+
throw error;
|
|
686
|
+
}
|
|
687
|
+
throw new JSONParseError({ text, cause: error });
|
|
688
|
+
}
|
|
689
|
+
}
|
|
690
|
+
async function safeParseJSON({
|
|
691
|
+
text,
|
|
692
|
+
schema
|
|
693
|
+
}) {
|
|
694
|
+
try {
|
|
695
|
+
const value = secureJsonParse(text);
|
|
696
|
+
if (schema == null) {
|
|
697
|
+
return { success: true, value, rawValue: value };
|
|
698
|
+
}
|
|
699
|
+
return await safeValidateTypes({ value, schema });
|
|
700
|
+
} catch (error) {
|
|
701
|
+
return {
|
|
702
|
+
success: false,
|
|
703
|
+
error: JSONParseError.isInstance(error) ? error : new JSONParseError({ text, cause: error }),
|
|
704
|
+
rawValue: void 0
|
|
705
|
+
};
|
|
706
|
+
}
|
|
707
|
+
}
|
|
708
|
+
function isParsableJson(input) {
|
|
709
|
+
try {
|
|
710
|
+
secureJsonParse(input);
|
|
711
|
+
return true;
|
|
712
|
+
} catch (e) {
|
|
713
|
+
return false;
|
|
714
|
+
}
|
|
715
|
+
}
|
|
716
|
+
function parseJsonEventStream({
|
|
717
|
+
stream,
|
|
718
|
+
schema
|
|
719
|
+
}) {
|
|
720
|
+
return stream.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream()).pipeThrough(
|
|
721
|
+
new TransformStream({
|
|
722
|
+
async transform({ data }, controller) {
|
|
723
|
+
if (data === "[DONE]") {
|
|
724
|
+
return;
|
|
725
|
+
}
|
|
726
|
+
controller.enqueue(await safeParseJSON({ text: data, schema }));
|
|
727
|
+
}
|
|
728
|
+
})
|
|
729
|
+
);
|
|
730
|
+
}
|
|
731
|
+
var getOriginalFetch2 = () => globalThis.fetch;
|
|
732
|
+
var postJsonToApi = async ({
|
|
733
|
+
url,
|
|
734
|
+
headers,
|
|
735
|
+
body,
|
|
736
|
+
failedResponseHandler,
|
|
737
|
+
successfulResponseHandler,
|
|
738
|
+
abortSignal,
|
|
739
|
+
fetch
|
|
740
|
+
}) => postToApi({
|
|
741
|
+
url,
|
|
742
|
+
headers: __spreadValues({
|
|
743
|
+
"Content-Type": "application/json"
|
|
744
|
+
}, headers),
|
|
745
|
+
body: {
|
|
746
|
+
content: JSON.stringify(body),
|
|
747
|
+
values: body
|
|
748
|
+
},
|
|
749
|
+
failedResponseHandler,
|
|
750
|
+
successfulResponseHandler,
|
|
751
|
+
abortSignal,
|
|
752
|
+
fetch
|
|
66
753
|
});
|
|
67
|
-
var
|
|
68
|
-
|
|
69
|
-
|
|
754
|
+
// Core POST helper. Sends `body.content` with undefined-valued headers
// stripped, then dispatches to the failure handler (non-2xx) or the success
// handler. Handler failures are wrapped in APICallError unless they are
// abort errors or already APICallErrors, which pass through untouched.
var postToApi = async ({
  url,
  headers = {},
  body,
  successfulResponseHandler,
  failedResponseHandler,
  abortSignal,
  fetch = getOriginalFetch2()
}) => {
  try {
    const response = await fetch(url, {
      method: "POST",
      headers: removeUndefinedEntries(headers),
      body: body.content,
      signal: abortSignal
    });
    const responseHeaders = extractResponseHeaders(response);
    if (!response.ok) {
      let failureInfo;
      try {
        failureInfo = await failedResponseHandler({
          response,
          url,
          requestBodyValues: body.values
        });
      } catch (handlerError) {
        if (isAbortError(handlerError) || APICallError.isInstance(handlerError)) {
          throw handlerError;
        }
        throw new APICallError({
          message: "Failed to process error response",
          cause: handlerError,
          statusCode: response.status,
          url,
          responseHeaders,
          requestBodyValues: body.values
        });
      }
      throw failureInfo.value;
    }
    try {
      return await successfulResponseHandler({
        response,
        url,
        requestBodyValues: body.values
      });
    } catch (handlerError) {
      const isKnownError = handlerError instanceof Error && (isAbortError(handlerError) || APICallError.isInstance(handlerError));
      if (isKnownError) {
        throw handlerError;
      }
      throw new APICallError({
        message: "Failed to process successful response",
        cause: handlerError,
        statusCode: response.status,
        url,
        responseHeaders,
        requestBodyValues: body.values
      });
    }
  } catch (error) {
    // Every failure path funnels through handleFetchError so network-level
    // failures (DNS, TLS, abort) are normalized like handler-level ones.
    throw handleFetchError({ error, url, requestBodyValues: body.values });
  }
};
|
|
819
|
+
// Builds a failed-response handler that parses the error payload with
// `errorSchema` and converts it into an APICallError. When the body is empty
// or cannot be parsed/validated, it falls back to the HTTP status text.
var createJsonErrorResponseHandler = ({
  errorSchema,
  errorToMessage,
  isRetryable
}) => async ({ response, url, requestBodyValues }) => {
  const responseBody = await response.text();
  const responseHeaders = extractResponseHeaders(response);
  // Shared fallback: a status-text error without structured `data`.
  const statusTextError = () => ({
    responseHeaders,
    value: new APICallError({
      message: response.statusText,
      url,
      requestBodyValues,
      statusCode: response.status,
      responseHeaders,
      responseBody,
      isRetryable: isRetryable == null ? void 0 : isRetryable(response)
    })
  });
  if (responseBody.trim() === "") {
    return statusTextError();
  }
  try {
    const parsedError = await parseJSON({
      text: responseBody,
      schema: errorSchema
    });
    return {
      responseHeaders,
      value: new APICallError({
        message: errorToMessage(parsedError),
        url,
        requestBodyValues,
        statusCode: response.status,
        responseHeaders,
        responseBody,
        data: parsedError,
        isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
      })
    };
  } catch {
    // Parse/validation failure: report the raw body with the status text.
    return statusTextError();
  }
};
|
|
873
|
+
// Builds a success handler that exposes the response body as a stream of
// schema-validated SSE chunks. Throws EmptyResponseBodyError when the
// response carries no body.
var createEventSourceResponseHandler = (chunkSchema) => async ({ response }) => {
  const responseHeaders = extractResponseHeaders(response);
  const bodyStream = response.body;
  if (bodyStream == null) {
    throw new EmptyResponseBodyError({});
  }
  return {
    responseHeaders,
    value: parseJsonEventStream({
      stream: bodyStream,
      schema: chunkSchema
    })
  };
};
|
|
886
|
+
// Builds a success handler that reads the body as text, validates it against
// `responseSchema`, and returns both the parsed value and the raw value.
// Validation failure surfaces as an APICallError carrying the raw body.
var createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
  const responseBody = await response.text();
  const parsedResult = await safeParseJSON({
    text: responseBody,
    schema: responseSchema
  });
  const responseHeaders = extractResponseHeaders(response);
  if (parsedResult.success) {
    return {
      responseHeaders,
      value: parsedResult.value,
      rawValue: parsedResult.rawValue
    };
  }
  throw new APICallError({
    message: "Invalid JSON response",
    cause: parsedResult.error,
    statusCode: response.status,
    responseHeaders,
    responseBody,
    url,
    requestBodyValues
  });
};
|
|
910
|
+
// Registry symbol used by the AI SDK to tag validation schemas.
var schemaSymbol = Symbol.for("vercel.ai.schema");
// Capture the platform's base64 helpers once at module load.
var { btoa, atob } = globalThis;
|
|
912
|
+
// Encodes a byte array as base64 by first building the latin-1 string
// (one character per byte) that btoa expects.
function convertUint8ArrayToBase64(array) {
  const chars = [];
  for (const byte of array) {
    chars.push(String.fromCodePoint(byte));
  }
  return btoa(chars.join(""));
}
|
|
919
|
+
// Strips a single trailing "/" from `url`; null/undefined input yields
// undefined.
function withoutTrailingSlash(url) {
  if (url == null) {
    return void 0;
  }
  return url.replace(/\/$/, "");
}
|
|
922
|
+
|
|
923
|
+
// src/schemas/reasoning-details.ts
var import_v4 = require("zod/v4");
// One schema per reasoning-detail wire type. The string literals mirror the
// ReasoningDetailType enum values from the TypeScript source (see the
// inline /* Summary */ etc. markers left by the bundler).
var ReasoningDetailSummarySchema = import_v4.z.object({
  type: import_v4.z.literal("reasoning.summary" /* Summary */),
  summary: import_v4.z.string()
});
var ReasoningDetailEncryptedSchema = import_v4.z.object({
  type: import_v4.z.literal("reasoning.encrypted" /* Encrypted */),
  data: import_v4.z.string()
});
var ReasoningDetailTextSchema = import_v4.z.object({
  type: import_v4.z.literal("reasoning.text" /* Text */),
  // text/signature are nullish-tolerant: providers may omit either field.
  text: import_v4.z.string().nullish(),
  signature: import_v4.z.string().nullish()
});
var ReasoningDetailUnionSchema = import_v4.z.union([
  ReasoningDetailSummarySchema,
  ReasoningDetailEncryptedSchema,
  ReasoningDetailTextSchema
]);
// Unknown detail shapes are transformed to null instead of failing
// validation...
var ReasoningDetailsWithUnknownSchema = import_v4.z.union([
  ReasoningDetailUnionSchema,
  import_v4.z.unknown().transform(() => null)
]);
// ...and the nulls are filtered out here, so consumers of the array only
// ever see recognized reasoning details.
var ReasoningDetailArraySchema = import_v4.z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
|
|
86
948
|
|
|
87
|
-
// src/
|
|
88
|
-
var
|
|
89
|
-
var
|
|
90
|
-
|
|
949
|
+
// src/schemas/error-response.ts
var import_v42 = require("zod/v4");
// Shape of an LLMGateway error payload. Only `message` is required; the
// remaining fields default to null so partial error bodies still validate.
var LLMGatewayErrorResponseSchema = import_v42.z.object({
  error: import_v42.z.object({
    code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullable().optional().default(null),
    message: import_v42.z.string(),
    type: import_v42.z.string().nullable().optional().default(null),
    param: import_v42.z.any().nullable().optional().default(null)
  })
});
// Shared failed-response handler: parses the body with the schema above and
// surfaces `error.message` as the APICallError message.
var llmgatewayFailedResponseHandler = createJsonErrorResponseHandler({
  errorSchema: LLMGatewayErrorResponseSchema,
  errorToMessage: (data) => data.error.message
});
|
|
963
|
+
|
|
964
|
+
// src/utils/map-finish-reason.ts
// Translates an LLMGateway (OpenAI-compatible) finish reason into the AI
// SDK's canonical vocabulary; anything unrecognized maps to "unknown".
function mapLLMGatewayFinishReason(finishReason) {
  const known = new Map([
    ["stop", "stop"],
    ["length", "length"],
    ["content_filter", "content-filter"],
    // Both legacy function calls and modern tool calls map to "tool-calls".
    ["function_call", "tool-calls"],
    ["tool_calls", "tool-calls"]
  ]);
  const mapped = known.get(finishReason);
  return mapped === void 0 ? "unknown" : mapped;
}
|
|
91
980
|
|
|
92
|
-
// src/
|
|
93
|
-
|
|
981
|
+
// src/chat/is-url.ts
// Returns true when `url` parses as an absolute URL whose protocol
// (including the trailing ":") is in the `protocols` set; any string that
// the URL constructor rejects yields false.
function isUrl({
  url,
  protocols
}) {
  try {
    const { protocol } = new URL(url);
    return protocols.has(protocol);
  } catch {
    return false;
  }
}
|
|
993
|
+
|
|
994
|
+
// src/chat/file-url-utils.ts
// Normalizes a file part's `data` into something the API accepts:
// - Uint8Array        -> data: URL with base64 payload
// - http(s) URL       -> passed through unchanged
// - data: URL string  -> passed through unchanged
// - anything else     -> assumed to already be base64, wrapped in a data: URL
function getFileUrl({
  part,
  defaultMediaType
}) {
  const mediaType = part.mediaType != null ? part.mediaType : defaultMediaType;
  if (part.data instanceof Uint8Array) {
    return `data:${mediaType};base64,${convertUint8ArrayToBase64(part.data)}`;
  }
  const stringUrl = part.data.toString();
  const httpProtocols = /* @__PURE__ */ new Set(["http:", "https:"]);
  if (isUrl({ url: stringUrl, protocols: httpProtocols })) {
    return stringUrl;
  }
  if (stringUrl.startsWith("data:")) {
    return stringUrl;
  }
  return `data:${mediaType};base64,${stringUrl}`;
}
|
|
1013
|
+
|
|
1014
|
+
// src/chat/convert-to-llmgateway-chat-messages.ts
// Looks up a cache-control hint from provider metadata, preferring the
// llmgateway namespace over anthropic, and camelCase over snake_case keys.
// The final fallback is returned as-is (it may be an explicit null).
function getCacheControl(providerMetadata) {
  const anthropic = providerMetadata == null ? void 0 : providerMetadata.anthropic;
  const llmgateway2 = providerMetadata == null ? void 0 : providerMetadata.llmgateway;
  const preferred = [
    llmgateway2 == null ? void 0 : llmgateway2.cacheControl,
    llmgateway2 == null ? void 0 : llmgateway2.cache_control,
    anthropic == null ? void 0 : anthropic.cacheControl
  ];
  for (const candidate of preferred) {
    if (candidate != null) {
      return candidate;
    }
  }
  return anthropic == null ? void 0 : anthropic.cache_control;
}
|
|
100
1021
|
function convertToLLMGatewayChatMessages(prompt) {
|
|
101
|
-
var
|
|
1022
|
+
var _a15, _b, _c;
|
|
102
1023
|
const messages = [];
|
|
103
|
-
for (const { role, content,
|
|
1024
|
+
for (const { role, content, providerOptions } of prompt) {
|
|
104
1025
|
switch (role) {
|
|
105
1026
|
case "system": {
|
|
106
1027
|
messages.push({
|
|
107
1028
|
role: "system",
|
|
108
1029
|
content,
|
|
109
|
-
cache_control: getCacheControl(
|
|
1030
|
+
cache_control: getCacheControl(providerOptions)
|
|
110
1031
|
});
|
|
111
1032
|
break;
|
|
112
1033
|
}
|
|
113
1034
|
case "user": {
|
|
114
|
-
if (content.length === 1 && ((
|
|
1035
|
+
if (content.length === 1 && ((_a15 = content[0]) == null ? void 0 : _a15.type) === "text") {
|
|
1036
|
+
const cacheControl = (_b = getCacheControl(providerOptions)) != null ? _b : getCacheControl(content[0].providerOptions);
|
|
1037
|
+
const contentWithCacheControl = cacheControl ? [
|
|
1038
|
+
{
|
|
1039
|
+
type: "text",
|
|
1040
|
+
text: content[0].text,
|
|
1041
|
+
cache_control: cacheControl
|
|
1042
|
+
}
|
|
1043
|
+
] : content[0].text;
|
|
115
1044
|
messages.push({
|
|
116
1045
|
role: "user",
|
|
117
|
-
content:
|
|
118
|
-
cache_control: (_b = getCacheControl(providerMetadata)) != null ? _b : getCacheControl(content[0].providerMetadata)
|
|
1046
|
+
content: contentWithCacheControl
|
|
119
1047
|
});
|
|
120
1048
|
break;
|
|
121
1049
|
}
|
|
122
|
-
const messageCacheControl = getCacheControl(
|
|
1050
|
+
const messageCacheControl = getCacheControl(providerOptions);
|
|
123
1051
|
const contentParts = content.map(
|
|
124
1052
|
(part) => {
|
|
125
|
-
var
|
|
126
|
-
const cacheControl = (
|
|
1053
|
+
var _a16, _b2, _c2, _d, _e, _f;
|
|
1054
|
+
const cacheControl = (_a16 = getCacheControl(part.providerOptions)) != null ? _a16 : messageCacheControl;
|
|
127
1055
|
switch (part.type) {
|
|
128
1056
|
case "text":
|
|
129
1057
|
return {
|
|
@@ -132,33 +1060,55 @@ function convertToLLMGatewayChatMessages(prompt) {
|
|
|
132
1060
|
// For text parts, only use part-specific cache control
|
|
133
1061
|
cache_control: cacheControl
|
|
134
1062
|
};
|
|
135
|
-
case "
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
1063
|
+
case "file": {
|
|
1064
|
+
if ((_b2 = part.mediaType) == null ? void 0 : _b2.startsWith("image/")) {
|
|
1065
|
+
const url = getFileUrl({
|
|
1066
|
+
part,
|
|
1067
|
+
defaultMediaType: "image/jpeg"
|
|
1068
|
+
});
|
|
1069
|
+
return {
|
|
1070
|
+
type: "image_url",
|
|
1071
|
+
image_url: {
|
|
1072
|
+
url
|
|
1073
|
+
},
|
|
1074
|
+
// For image parts, use part-specific or message-level cache control
|
|
1075
|
+
cache_control: cacheControl
|
|
1076
|
+
};
|
|
1077
|
+
}
|
|
1078
|
+
const fileName = String(
|
|
1079
|
+
(_f = (_e = (_d = (_c2 = part.providerOptions) == null ? void 0 : _c2.llmgateway) == null ? void 0 : _d.filename) != null ? _e : part.filename) != null ? _f : ""
|
|
1080
|
+
);
|
|
1081
|
+
const fileData = getFileUrl({
|
|
1082
|
+
part,
|
|
1083
|
+
defaultMediaType: "application/pdf"
|
|
1084
|
+
});
|
|
1085
|
+
if (isUrl({
|
|
1086
|
+
url: fileData,
|
|
1087
|
+
protocols: /* @__PURE__ */ new Set(["http:", "https:"])
|
|
1088
|
+
})) {
|
|
1089
|
+
return {
|
|
1090
|
+
type: "file",
|
|
1091
|
+
file: {
|
|
1092
|
+
filename: fileName,
|
|
1093
|
+
file_data: fileData
|
|
1094
|
+
}
|
|
1095
|
+
};
|
|
1096
|
+
}
|
|
147
1097
|
return {
|
|
148
1098
|
type: "file",
|
|
149
1099
|
file: {
|
|
150
|
-
filename:
|
|
151
|
-
|
|
152
|
-
),
|
|
153
|
-
file_data: part.data instanceof Uint8Array ? `data:${part.mimeType};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.data)}` : `data:${part.mimeType};base64,${part.data}`
|
|
1100
|
+
filename: fileName,
|
|
1101
|
+
file_data: fileData
|
|
154
1102
|
},
|
|
155
1103
|
cache_control: cacheControl
|
|
156
1104
|
};
|
|
1105
|
+
}
|
|
157
1106
|
default: {
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
1107
|
+
return {
|
|
1108
|
+
type: "text",
|
|
1109
|
+
text: "",
|
|
1110
|
+
cache_control: cacheControl
|
|
1111
|
+
};
|
|
162
1112
|
}
|
|
163
1113
|
}
|
|
164
1114
|
}
|
|
@@ -186,7 +1136,7 @@ function convertToLLMGatewayChatMessages(prompt) {
|
|
|
186
1136
|
type: "function",
|
|
187
1137
|
function: {
|
|
188
1138
|
name: part.toolName,
|
|
189
|
-
arguments: JSON.stringify(part.
|
|
1139
|
+
arguments: JSON.stringify(part.input)
|
|
190
1140
|
}
|
|
191
1141
|
});
|
|
192
1142
|
break;
|
|
@@ -195,23 +1145,14 @@ function convertToLLMGatewayChatMessages(prompt) {
|
|
|
195
1145
|
reasoning += part.text;
|
|
196
1146
|
reasoningDetails.push({
|
|
197
1147
|
type: "reasoning.text" /* Text */,
|
|
198
|
-
text: part.text
|
|
199
|
-
signature: part.signature
|
|
200
|
-
});
|
|
201
|
-
break;
|
|
202
|
-
}
|
|
203
|
-
case "redacted-reasoning": {
|
|
204
|
-
reasoningDetails.push({
|
|
205
|
-
type: "reasoning.encrypted" /* Encrypted */,
|
|
206
|
-
data: part.data
|
|
1148
|
+
text: part.text
|
|
207
1149
|
});
|
|
208
1150
|
break;
|
|
209
1151
|
}
|
|
210
1152
|
case "file":
|
|
211
1153
|
break;
|
|
212
1154
|
default: {
|
|
213
|
-
|
|
214
|
-
throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
|
|
1155
|
+
break;
|
|
215
1156
|
}
|
|
216
1157
|
}
|
|
217
1158
|
}
|
|
@@ -221,95 +1162,188 @@ function convertToLLMGatewayChatMessages(prompt) {
|
|
|
221
1162
|
tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
|
|
222
1163
|
reasoning: reasoning || void 0,
|
|
223
1164
|
reasoning_details: reasoningDetails.length > 0 ? reasoningDetails : void 0,
|
|
224
|
-
cache_control: getCacheControl(
|
|
1165
|
+
cache_control: getCacheControl(providerOptions)
|
|
225
1166
|
});
|
|
226
1167
|
break;
|
|
227
1168
|
}
|
|
228
1169
|
case "tool": {
|
|
229
1170
|
for (const toolResponse of content) {
|
|
1171
|
+
const content2 = getToolResultContent(toolResponse);
|
|
230
1172
|
messages.push({
|
|
231
1173
|
role: "tool",
|
|
232
1174
|
tool_call_id: toolResponse.toolCallId,
|
|
233
|
-
content:
|
|
234
|
-
cache_control: (_c = getCacheControl(
|
|
1175
|
+
content: content2,
|
|
1176
|
+
cache_control: (_c = getCacheControl(providerOptions)) != null ? _c : getCacheControl(toolResponse.providerOptions)
|
|
235
1177
|
});
|
|
236
1178
|
}
|
|
237
1179
|
break;
|
|
238
1180
|
}
|
|
239
1181
|
default: {
|
|
240
|
-
|
|
241
|
-
throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
|
|
1182
|
+
break;
|
|
242
1183
|
}
|
|
243
1184
|
}
|
|
244
1185
|
}
|
|
245
1186
|
return messages;
|
|
246
1187
|
}
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
function mapLLMGatewayChatLogProbsOutput(logprobs) {
|
|
250
|
-
var _a, _b;
|
|
251
|
-
return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
|
|
252
|
-
token,
|
|
253
|
-
logprob,
|
|
254
|
-
topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
|
|
255
|
-
token: token2,
|
|
256
|
-
logprob: logprob2
|
|
257
|
-
})) : []
|
|
258
|
-
}))) != null ? _b : void 0;
|
|
1188
|
+
// Serializes a tool result for the wire: text outputs pass through verbatim,
// every other output type is JSON-encoded.
function getToolResultContent(input) {
  const { output } = input;
  if (output.type === "text") {
    return output.value;
  }
  return JSON.stringify(output.value);
}
|
|
260
1191
|
|
|
261
|
-
// src/
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
1192
|
+
// src/chat/get-tool-choice.ts
var import_v43 = require("zod/v4");
// Wire format for the chat-completions tool_choice field: one of the three
// string modes, or an object pinning a specific function by name.
var ChatCompletionToolChoiceSchema = import_v43.z.union([
  import_v43.z.literal("auto"),
  import_v43.z.literal("none"),
  import_v43.z.literal("required"),
  import_v43.z.object({
    type: import_v43.z.literal("function"),
    function: import_v43.z.object({
      name: import_v43.z.string()
    })
  })
]);
|
|
1205
|
+
// Maps the AI SDK toolChoice union onto the chat-completions wire format:
// the three string modes pass through, "tool" becomes a function selector,
// and anything else is rejected.
function getChatCompletionToolChoice(toolChoice) {
  const { type } = toolChoice;
  if (type === "auto" || type === "none" || type === "required") {
    return type;
  }
  if (type === "tool") {
    return {
      type: "function",
      function: { name: toolChoice.toolName }
    };
  }
  throw new Error(`Invalid tool choice type: ${toolChoice}`);
}
|
|
277
1223
|
|
|
278
|
-
// src/
|
|
279
|
-
var
|
|
280
|
-
var
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
1224
|
+
// src/chat/schemas.ts
var import_v44 = require("zod/v4");
// Fields shared by streaming and non-streaming responses. The usage block
// is nullish-tolerant throughout because gateways differ in which token
// accounting fields they report.
var LLMGatewayChatCompletionBaseResponseSchema = import_v44.z.object({
  id: import_v44.z.string().optional(),
  model: import_v44.z.string().optional(),
  usage: import_v44.z.object({
    prompt_tokens: import_v44.z.number(),
    prompt_tokens_details: import_v44.z.object({
      cached_tokens: import_v44.z.number()
    }).nullish(),
    completion_tokens: import_v44.z.number(),
    completion_tokens_details: import_v44.z.object({
      reasoning_tokens: import_v44.z.number()
    }).nullish(),
    total_tokens: import_v44.z.number(),
    cost: import_v44.z.number().optional(),
    cost_details: import_v44.z.object({
      upstream_inference_cost: import_v44.z.number().nullish()
    }).nullish()
  }).nullish()
});
// Non-streaming response: choices carry a full assistant `message` with
// optional reasoning, reasoning_details, tool calls, and logprobs.
var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
  choices: import_v44.z.array(
    import_v44.z.object({
      message: import_v44.z.object({
        role: import_v44.z.literal("assistant"),
        content: import_v44.z.string().nullable().optional(),
        reasoning: import_v44.z.string().nullable().optional(),
        reasoning_details: ReasoningDetailArraySchema.nullish(),
        tool_calls: import_v44.z.array(
          import_v44.z.object({
            id: import_v44.z.string().optional().nullable(),
            type: import_v44.z.literal("function"),
            function: import_v44.z.object({
              name: import_v44.z.string(),
              arguments: import_v44.z.string()
            })
          })
        ).optional()
      }),
      index: import_v44.z.number().nullish(),
      logprobs: import_v44.z.object({
        content: import_v44.z.array(
          import_v44.z.object({
            token: import_v44.z.string(),
            logprob: import_v44.z.number(),
            top_logprobs: import_v44.z.array(
              import_v44.z.object({
                token: import_v44.z.string(),
                logprob: import_v44.z.number()
              })
            )
          })
        ).nullable()
      }).nullable().optional(),
      finish_reason: import_v44.z.string().optional().nullable()
    })
  )
});
// Streaming chunk: either a delta-bearing completion chunk (tool-call
// fragments arrive with an `index` and partially-filled function fields),
// or an error payload — the union accepts both.
var LLMGatewayStreamChatCompletionChunkSchema = import_v44.z.union([
  LLMGatewayChatCompletionBaseResponseSchema.extend({
    choices: import_v44.z.array(
      import_v44.z.object({
        delta: import_v44.z.object({
          role: import_v44.z.enum(["assistant"]).optional(),
          content: import_v44.z.string().nullish(),
          reasoning: import_v44.z.string().nullish().optional(),
          reasoning_details: ReasoningDetailArraySchema.nullish(),
          tool_calls: import_v44.z.array(
            import_v44.z.object({
              index: import_v44.z.number().nullish(),
              id: import_v44.z.string().nullish(),
              type: import_v44.z.literal("function").optional(),
              function: import_v44.z.object({
                name: import_v44.z.string().nullish(),
                arguments: import_v44.z.string().nullish()
              })
            })
          ).nullish()
        }).nullish(),
        logprobs: import_v44.z.object({
          content: import_v44.z.array(
            import_v44.z.object({
              token: import_v44.z.string(),
              logprob: import_v44.z.number(),
              top_logprobs: import_v44.z.array(
                import_v44.z.object({
                  token: import_v44.z.string(),
                  logprob: import_v44.z.number()
                })
              )
            })
          ).nullable()
        }).nullish(),
        finish_reason: import_v44.z.string().nullable().optional(),
        index: import_v44.z.number().nullish()
      })
    )
  }),
  LLMGatewayErrorResponseSchema
]);
|
|
293
1325
|
|
|
294
|
-
// src/
|
|
295
|
-
function isFunctionTool(tool) {
|
|
296
|
-
return "parameters" in tool;
|
|
297
|
-
}
|
|
1326
|
+
// src/chat/index.ts
|
|
298
1327
|
var LLMGatewayChatLanguageModel = class {
|
|
299
1328
|
constructor(modelId, settings, config) {
|
|
300
|
-
this.specificationVersion = "
|
|
1329
|
+
this.specificationVersion = "v2";
|
|
1330
|
+
this.provider = "llmgateway";
|
|
301
1331
|
this.defaultObjectGenerationMode = "tool";
|
|
1332
|
+
this.supportedUrls = {
|
|
1333
|
+
"image/*": [
|
|
1334
|
+
/^data:image\/[a-zA-Z]+;base64,/,
|
|
1335
|
+
/^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i
|
|
1336
|
+
],
|
|
1337
|
+
// 'text/*': [/^data:text\//, /^https?:\/\/.+$/],
|
|
1338
|
+
"application/*": [/^data:application\//, /^https?:\/\/.+$/]
|
|
1339
|
+
};
|
|
302
1340
|
this.modelId = modelId;
|
|
303
1341
|
this.settings = settings;
|
|
304
1342
|
this.config = config;
|
|
305
1343
|
}
|
|
306
|
-
get provider() {
|
|
307
|
-
return this.config.provider;
|
|
308
|
-
}
|
|
309
1344
|
getArgs({
|
|
310
|
-
mode,
|
|
311
1345
|
prompt,
|
|
312
|
-
|
|
1346
|
+
maxOutputTokens,
|
|
313
1347
|
temperature,
|
|
314
1348
|
topP,
|
|
315
1349
|
frequencyPenalty,
|
|
@@ -318,12 +1352,10 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
318
1352
|
stopSequences,
|
|
319
1353
|
responseFormat,
|
|
320
1354
|
topK,
|
|
321
|
-
|
|
1355
|
+
tools,
|
|
1356
|
+
toolChoice
|
|
322
1357
|
}) {
|
|
323
|
-
|
|
324
|
-
const type = mode.type;
|
|
325
|
-
const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
|
|
326
|
-
const baseArgs = __spreadValues(__spreadValues(__spreadValues({
|
|
1358
|
+
const baseArgs = __spreadValues(__spreadValues({
|
|
327
1359
|
// model id:
|
|
328
1360
|
model: this.modelId,
|
|
329
1361
|
models: this.settings.models,
|
|
@@ -334,7 +1366,7 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
334
1366
|
user: this.settings.user,
|
|
335
1367
|
parallel_tool_calls: this.settings.parallelToolCalls,
|
|
336
1368
|
// standardized settings:
|
|
337
|
-
max_tokens:
|
|
1369
|
+
max_tokens: maxOutputTokens,
|
|
338
1370
|
temperature,
|
|
339
1371
|
top_p: topP,
|
|
340
1372
|
frequency_penalty: frequencyPenalty,
|
|
@@ -349,97 +1381,72 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
349
1381
|
include_reasoning: this.settings.includeReasoning,
|
|
350
1382
|
reasoning: this.settings.reasoning,
|
|
351
1383
|
usage: this.settings.usage
|
|
352
|
-
}, this.config.extraBody), this.settings.extraBody)
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
}
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
parameters: mode.tool.parameters
|
|
372
|
-
}
|
|
373
|
-
}
|
|
374
|
-
]
|
|
375
|
-
});
|
|
376
|
-
}
|
|
377
|
-
// Handle all non-text types with a single default case
|
|
378
|
-
default: {
|
|
379
|
-
const _exhaustiveCheck = type;
|
|
380
|
-
throw new import_provider.UnsupportedFunctionalityError({
|
|
381
|
-
functionality: `${_exhaustiveCheck} mode`
|
|
382
|
-
});
|
|
383
|
-
}
|
|
1384
|
+
}, this.config.extraBody), this.settings.extraBody);
|
|
1385
|
+
if ((responseFormat == null ? void 0 : responseFormat.type) === "json") {
|
|
1386
|
+
return __spreadProps(__spreadValues({}, baseArgs), {
|
|
1387
|
+
response_format: { type: "json_object" }
|
|
1388
|
+
});
|
|
1389
|
+
}
|
|
1390
|
+
if (tools && tools.length > 0) {
|
|
1391
|
+
const mappedTools = tools.filter((tool) => tool.type === "function").map((tool) => ({
|
|
1392
|
+
type: "function",
|
|
1393
|
+
function: {
|
|
1394
|
+
name: tool.name,
|
|
1395
|
+
description: tool.type,
|
|
1396
|
+
parameters: tool.inputSchema
|
|
1397
|
+
}
|
|
1398
|
+
}));
|
|
1399
|
+
return __spreadProps(__spreadValues({}, baseArgs), {
|
|
1400
|
+
tools: mappedTools,
|
|
1401
|
+
tool_choice: toolChoice ? getChatCompletionToolChoice(toolChoice) : void 0
|
|
1402
|
+
});
|
|
384
1403
|
}
|
|
1404
|
+
return baseArgs;
|
|
385
1405
|
}
|
|
386
1406
|
async doGenerate(options) {
|
|
387
|
-
var _b, _c, _d, _e, _f, _g, _h, _i, _j;
|
|
388
|
-
const
|
|
389
|
-
const
|
|
1407
|
+
var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
|
|
1408
|
+
const providerOptions = options.providerOptions || {};
|
|
1409
|
+
const llmgatewayOptions = providerOptions.llmgateway || {};
|
|
1410
|
+
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
|
|
1411
|
+
const { value: response, responseHeaders } = await postJsonToApi({
|
|
390
1412
|
url: this.config.url({
|
|
391
1413
|
path: "/chat/completions",
|
|
392
1414
|
modelId: this.modelId
|
|
393
1415
|
}),
|
|
394
|
-
headers:
|
|
1416
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
395
1417
|
body: args,
|
|
396
1418
|
failedResponseHandler: llmgatewayFailedResponseHandler,
|
|
397
|
-
successfulResponseHandler:
|
|
1419
|
+
successfulResponseHandler: createJsonResponseHandler(
|
|
398
1420
|
LLMGatewayNonStreamChatCompletionResponseSchema
|
|
399
1421
|
),
|
|
400
1422
|
abortSignal: options.abortSignal,
|
|
401
1423
|
fetch: this.config.fetch
|
|
402
1424
|
});
|
|
403
|
-
const _a = args, { messages: rawPrompt } = _a, rawSettings = __objRest(_a, ["messages"]);
|
|
404
1425
|
const choice = response.choices[0];
|
|
405
1426
|
if (!choice) {
|
|
406
1427
|
throw new Error("No choice in response");
|
|
407
1428
|
}
|
|
408
1429
|
const usageInfo = response.usage ? {
|
|
409
|
-
|
|
410
|
-
|
|
1430
|
+
inputTokens: (_a15 = response.usage.prompt_tokens) != null ? _a15 : 0,
|
|
1431
|
+
outputTokens: (_b = response.usage.completion_tokens) != null ? _b : 0,
|
|
1432
|
+
totalTokens: ((_c = response.usage.prompt_tokens) != null ? _c : 0) + ((_d = response.usage.completion_tokens) != null ? _d : 0),
|
|
1433
|
+
reasoningTokens: (_f = (_e = response.usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0,
|
|
1434
|
+
cachedInputTokens: (_h = (_g = response.usage.prompt_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : 0
|
|
411
1435
|
} : {
|
|
412
|
-
|
|
413
|
-
|
|
1436
|
+
inputTokens: 0,
|
|
1437
|
+
outputTokens: 0,
|
|
1438
|
+
totalTokens: 0,
|
|
1439
|
+
reasoningTokens: 0,
|
|
1440
|
+
cachedInputTokens: 0
|
|
414
1441
|
};
|
|
415
|
-
const
|
|
416
|
-
if (response.usage && ((_d = this.settings.usage) == null ? void 0 : _d.include)) {
|
|
417
|
-
providerMetadata.llmgateway = {
|
|
418
|
-
usage: {
|
|
419
|
-
promptTokens: response.usage.prompt_tokens,
|
|
420
|
-
promptTokensDetails: response.usage.prompt_tokens_details ? {
|
|
421
|
-
cachedTokens: (_e = response.usage.prompt_tokens_details.cached_tokens) != null ? _e : 0
|
|
422
|
-
} : void 0,
|
|
423
|
-
completionTokens: response.usage.completion_tokens,
|
|
424
|
-
completionTokensDetails: response.usage.completion_tokens_details ? {
|
|
425
|
-
reasoningTokens: (_f = response.usage.completion_tokens_details.reasoning_tokens) != null ? _f : 0
|
|
426
|
-
} : void 0,
|
|
427
|
-
cost: response.usage.cost,
|
|
428
|
-
totalTokens: (_g = response.usage.total_tokens) != null ? _g : 0
|
|
429
|
-
}
|
|
430
|
-
};
|
|
431
|
-
}
|
|
432
|
-
const hasProviderMetadata = Object.keys(providerMetadata).length > 0;
|
|
433
|
-
const reasoningDetails = (_h = choice.message.reasoning_details) != null ? _h : [];
|
|
1442
|
+
const reasoningDetails = (_i = choice.message.reasoning_details) != null ? _i : [];
|
|
434
1443
|
const reasoning = reasoningDetails.length > 0 ? reasoningDetails.map((detail) => {
|
|
435
|
-
var _a2;
|
|
436
1444
|
switch (detail.type) {
|
|
437
1445
|
case "reasoning.text" /* Text */: {
|
|
438
1446
|
if (detail.text) {
|
|
439
1447
|
return {
|
|
440
|
-
type: "
|
|
441
|
-
text: detail.text
|
|
442
|
-
signature: (_a2 = detail.signature) != null ? _a2 : void 0
|
|
1448
|
+
type: "reasoning",
|
|
1449
|
+
text: detail.text
|
|
443
1450
|
};
|
|
444
1451
|
}
|
|
445
1452
|
break;
|
|
@@ -447,7 +1454,7 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
447
1454
|
case "reasoning.summary" /* Summary */: {
|
|
448
1455
|
if (detail.summary) {
|
|
449
1456
|
return {
|
|
450
|
-
type: "
|
|
1457
|
+
type: "reasoning",
|
|
451
1458
|
text: detail.summary
|
|
452
1459
|
};
|
|
453
1460
|
}
|
|
@@ -456,85 +1463,119 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
456
1463
|
case "reasoning.encrypted" /* Encrypted */: {
|
|
457
1464
|
if (detail.data) {
|
|
458
1465
|
return {
|
|
459
|
-
type: "
|
|
460
|
-
|
|
1466
|
+
type: "reasoning",
|
|
1467
|
+
text: "[REDACTED]"
|
|
461
1468
|
};
|
|
462
1469
|
}
|
|
463
1470
|
break;
|
|
464
1471
|
}
|
|
465
1472
|
default: {
|
|
466
|
-
detail;
|
|
1473
|
+
const _exhaustiveCheck = detail;
|
|
1474
|
+
return _exhaustiveCheck;
|
|
467
1475
|
}
|
|
468
1476
|
}
|
|
469
1477
|
return null;
|
|
470
1478
|
}).filter((p) => p !== null) : choice.message.reasoning ? [
|
|
471
1479
|
{
|
|
472
|
-
type: "
|
|
1480
|
+
type: "reasoning",
|
|
473
1481
|
text: choice.message.reasoning
|
|
474
1482
|
}
|
|
475
1483
|
] : [];
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
1484
|
+
const content = [];
|
|
1485
|
+
content.push(...reasoning);
|
|
1486
|
+
if (choice.message.content) {
|
|
1487
|
+
content.push({
|
|
1488
|
+
type: "text",
|
|
1489
|
+
text: choice.message.content
|
|
1490
|
+
});
|
|
1491
|
+
}
|
|
1492
|
+
if (choice.message.tool_calls) {
|
|
1493
|
+
for (const toolCall of choice.message.tool_calls) {
|
|
1494
|
+
content.push({
|
|
1495
|
+
type: "tool-call",
|
|
1496
|
+
toolCallId: (_j = toolCall.id) != null ? _j : generateId(),
|
|
488
1497
|
toolName: toolCall.function.name,
|
|
489
|
-
|
|
490
|
-
};
|
|
491
|
-
}
|
|
1498
|
+
input: toolCall.function.arguments
|
|
1499
|
+
});
|
|
1500
|
+
}
|
|
1501
|
+
}
|
|
1502
|
+
return {
|
|
1503
|
+
content,
|
|
492
1504
|
finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
|
|
493
1505
|
usage: usageInfo,
|
|
494
|
-
rawCall: { rawPrompt, rawSettings },
|
|
495
|
-
rawResponse: { headers: responseHeaders },
|
|
496
1506
|
warnings: [],
|
|
497
|
-
|
|
498
|
-
|
|
1507
|
+
providerMetadata: {
|
|
1508
|
+
llmgateway: {
|
|
1509
|
+
usage: {
|
|
1510
|
+
promptTokens: (_k = usageInfo.inputTokens) != null ? _k : 0,
|
|
1511
|
+
completionTokens: (_l = usageInfo.outputTokens) != null ? _l : 0,
|
|
1512
|
+
totalTokens: (_m = usageInfo.totalTokens) != null ? _m : 0,
|
|
1513
|
+
cost: (_n = response.usage) == null ? void 0 : _n.cost,
|
|
1514
|
+
promptTokensDetails: {
|
|
1515
|
+
cachedTokens: (_q = (_p = (_o = response.usage) == null ? void 0 : _o.prompt_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : 0
|
|
1516
|
+
},
|
|
1517
|
+
completionTokensDetails: {
|
|
1518
|
+
reasoningTokens: (_t = (_s = (_r = response.usage) == null ? void 0 : _r.completion_tokens_details) == null ? void 0 : _s.reasoning_tokens) != null ? _t : 0
|
|
1519
|
+
},
|
|
1520
|
+
costDetails: {
|
|
1521
|
+
upstreamInferenceCost: (_w = (_v = (_u = response.usage) == null ? void 0 : _u.cost_details) == null ? void 0 : _v.upstream_inference_cost) != null ? _w : 0
|
|
1522
|
+
}
|
|
1523
|
+
}
|
|
1524
|
+
}
|
|
1525
|
+
},
|
|
1526
|
+
request: { body: args },
|
|
1527
|
+
response: {
|
|
1528
|
+
id: response.id,
|
|
1529
|
+
modelId: response.model,
|
|
1530
|
+
headers: responseHeaders
|
|
1531
|
+
}
|
|
1532
|
+
};
|
|
499
1533
|
}
|
|
500
1534
|
async doStream(options) {
|
|
501
|
-
var
|
|
502
|
-
const
|
|
503
|
-
const
|
|
1535
|
+
var _a15;
|
|
1536
|
+
const providerOptions = options.providerOptions || {};
|
|
1537
|
+
const llmgatewayOptions = providerOptions.llmgateway || {};
|
|
1538
|
+
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
|
|
1539
|
+
const { value: response, responseHeaders } = await postJsonToApi({
|
|
504
1540
|
url: this.config.url({
|
|
505
1541
|
path: "/chat/completions",
|
|
506
1542
|
modelId: this.modelId
|
|
507
1543
|
}),
|
|
508
|
-
headers:
|
|
1544
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
509
1545
|
body: __spreadProps(__spreadValues({}, args), {
|
|
510
1546
|
stream: true,
|
|
511
1547
|
// only include stream_options when in strict compatibility mode:
|
|
512
1548
|
stream_options: this.config.compatibility === "strict" ? __spreadValues({
|
|
513
1549
|
include_usage: true
|
|
514
|
-
}, ((
|
|
1550
|
+
}, ((_a15 = this.settings.usage) == null ? void 0 : _a15.include) ? { include_usage: true } : {}) : void 0
|
|
515
1551
|
}),
|
|
516
1552
|
failedResponseHandler: llmgatewayFailedResponseHandler,
|
|
517
|
-
successfulResponseHandler:
|
|
1553
|
+
successfulResponseHandler: createEventSourceResponseHandler(
|
|
518
1554
|
LLMGatewayStreamChatCompletionChunkSchema
|
|
519
1555
|
),
|
|
520
1556
|
abortSignal: options.abortSignal,
|
|
521
1557
|
fetch: this.config.fetch
|
|
522
1558
|
});
|
|
523
|
-
const _b = args, { messages: rawPrompt } = _b, rawSettings = __objRest(_b, ["messages"]);
|
|
524
1559
|
const toolCalls = [];
|
|
525
1560
|
let finishReason = "other";
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
1561
|
+
const usage = {
|
|
1562
|
+
inputTokens: Number.NaN,
|
|
1563
|
+
outputTokens: Number.NaN,
|
|
1564
|
+
totalTokens: Number.NaN,
|
|
1565
|
+
reasoningTokens: Number.NaN,
|
|
1566
|
+
cachedInputTokens: Number.NaN
|
|
529
1567
|
};
|
|
530
|
-
let logprobs;
|
|
531
1568
|
const llmgatewayUsage = {};
|
|
532
|
-
|
|
1569
|
+
let textStarted = false;
|
|
1570
|
+
let reasoningStarted = false;
|
|
1571
|
+
let textId;
|
|
1572
|
+
let reasoningId;
|
|
1573
|
+
let llmgatewayResponseId;
|
|
533
1574
|
return {
|
|
534
1575
|
stream: response.pipeThrough(
|
|
535
1576
|
new TransformStream({
|
|
536
1577
|
transform(chunk, controller) {
|
|
537
|
-
var
|
|
1578
|
+
var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
|
|
538
1579
|
if (!chunk.success) {
|
|
539
1580
|
finishReason = "error";
|
|
540
1581
|
controller.enqueue({ type: "error", error: chunk.error });
|
|
@@ -547,6 +1588,7 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
547
1588
|
return;
|
|
548
1589
|
}
|
|
549
1590
|
if (value.id) {
|
|
1591
|
+
llmgatewayResponseId = value.id;
|
|
550
1592
|
controller.enqueue({
|
|
551
1593
|
type: "response-metadata",
|
|
552
1594
|
id: value.id
|
|
@@ -559,20 +1601,23 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
559
1601
|
});
|
|
560
1602
|
}
|
|
561
1603
|
if (value.usage != null) {
|
|
562
|
-
usage =
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
};
|
|
1604
|
+
usage.inputTokens = value.usage.prompt_tokens;
|
|
1605
|
+
usage.outputTokens = value.usage.completion_tokens;
|
|
1606
|
+
usage.totalTokens = value.usage.prompt_tokens + value.usage.completion_tokens;
|
|
566
1607
|
llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
|
|
567
1608
|
if (value.usage.prompt_tokens_details) {
|
|
1609
|
+
const cachedInputTokens = (_a16 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a16 : 0;
|
|
1610
|
+
usage.cachedInputTokens = cachedInputTokens;
|
|
568
1611
|
llmgatewayUsage.promptTokensDetails = {
|
|
569
|
-
cachedTokens:
|
|
1612
|
+
cachedTokens: cachedInputTokens
|
|
570
1613
|
};
|
|
571
1614
|
}
|
|
572
1615
|
llmgatewayUsage.completionTokens = value.usage.completion_tokens;
|
|
573
1616
|
if (value.usage.completion_tokens_details) {
|
|
1617
|
+
const reasoningTokens = (_b = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b : 0;
|
|
1618
|
+
usage.reasoningTokens = reasoningTokens;
|
|
574
1619
|
llmgatewayUsage.completionTokensDetails = {
|
|
575
|
-
reasoningTokens
|
|
1620
|
+
reasoningTokens
|
|
576
1621
|
};
|
|
577
1622
|
}
|
|
578
1623
|
llmgatewayUsage.cost = value.usage.cost;
|
|
@@ -586,51 +1631,39 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
586
1631
|
return;
|
|
587
1632
|
}
|
|
588
1633
|
const delta = choice.delta;
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
1634
|
+
const emitReasoningChunk = (chunkText) => {
|
|
1635
|
+
if (!reasoningStarted) {
|
|
1636
|
+
reasoningId = llmgatewayResponseId || generateId();
|
|
1637
|
+
controller.enqueue({
|
|
1638
|
+
type: "reasoning-start",
|
|
1639
|
+
id: reasoningId
|
|
1640
|
+
});
|
|
1641
|
+
reasoningStarted = true;
|
|
1642
|
+
}
|
|
596
1643
|
controller.enqueue({
|
|
597
|
-
type: "reasoning",
|
|
598
|
-
|
|
1644
|
+
type: "reasoning-delta",
|
|
1645
|
+
delta: chunkText,
|
|
1646
|
+
id: reasoningId || generateId()
|
|
599
1647
|
});
|
|
600
|
-
}
|
|
1648
|
+
};
|
|
601
1649
|
if (delta.reasoning_details && delta.reasoning_details.length > 0) {
|
|
602
1650
|
for (const detail of delta.reasoning_details) {
|
|
603
1651
|
switch (detail.type) {
|
|
604
1652
|
case "reasoning.text" /* Text */: {
|
|
605
1653
|
if (detail.text) {
|
|
606
|
-
|
|
607
|
-
type: "reasoning",
|
|
608
|
-
textDelta: detail.text
|
|
609
|
-
});
|
|
610
|
-
}
|
|
611
|
-
if (detail.signature) {
|
|
612
|
-
controller.enqueue({
|
|
613
|
-
type: "reasoning-signature",
|
|
614
|
-
signature: detail.signature
|
|
615
|
-
});
|
|
1654
|
+
emitReasoningChunk(detail.text);
|
|
616
1655
|
}
|
|
617
1656
|
break;
|
|
618
1657
|
}
|
|
619
1658
|
case "reasoning.encrypted" /* Encrypted */: {
|
|
620
1659
|
if (detail.data) {
|
|
621
|
-
|
|
622
|
-
type: "redacted-reasoning",
|
|
623
|
-
data: detail.data
|
|
624
|
-
});
|
|
1660
|
+
emitReasoningChunk("[REDACTED]");
|
|
625
1661
|
}
|
|
626
1662
|
break;
|
|
627
1663
|
}
|
|
628
1664
|
case "reasoning.summary" /* Summary */: {
|
|
629
1665
|
if (detail.summary) {
|
|
630
|
-
|
|
631
|
-
type: "reasoning",
|
|
632
|
-
textDelta: detail.summary
|
|
633
|
-
});
|
|
1666
|
+
emitReasoningChunk(detail.summary);
|
|
634
1667
|
}
|
|
635
1668
|
break;
|
|
636
1669
|
}
|
|
@@ -640,34 +1673,42 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
640
1673
|
}
|
|
641
1674
|
}
|
|
642
1675
|
}
|
|
1676
|
+
} else if (delta.reasoning != null) {
|
|
1677
|
+
emitReasoningChunk(delta.reasoning);
|
|
643
1678
|
}
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
1679
|
+
if (delta.content != null) {
|
|
1680
|
+
if (!textStarted) {
|
|
1681
|
+
textId = llmgatewayResponseId || generateId();
|
|
1682
|
+
controller.enqueue({
|
|
1683
|
+
type: "text-start",
|
|
1684
|
+
id: textId
|
|
1685
|
+
});
|
|
1686
|
+
textStarted = true;
|
|
650
1687
|
}
|
|
651
|
-
|
|
1688
|
+
controller.enqueue({
|
|
1689
|
+
type: "text-delta",
|
|
1690
|
+
delta: delta.content,
|
|
1691
|
+
id: textId || generateId()
|
|
1692
|
+
});
|
|
652
1693
|
}
|
|
653
1694
|
if (delta.tool_calls != null) {
|
|
654
1695
|
for (const toolCallDelta of delta.tool_calls) {
|
|
655
|
-
const index = toolCallDelta.index;
|
|
1696
|
+
const index = (_c = toolCallDelta.index) != null ? _c : toolCalls.length - 1;
|
|
656
1697
|
if (toolCalls[index] == null) {
|
|
657
1698
|
if (toolCallDelta.type !== "function") {
|
|
658
|
-
throw new
|
|
1699
|
+
throw new InvalidResponseDataError({
|
|
659
1700
|
data: toolCallDelta,
|
|
660
1701
|
message: `Expected 'function' type.`
|
|
661
1702
|
});
|
|
662
1703
|
}
|
|
663
1704
|
if (toolCallDelta.id == null) {
|
|
664
|
-
throw new
|
|
1705
|
+
throw new InvalidResponseDataError({
|
|
665
1706
|
data: toolCallDelta,
|
|
666
1707
|
message: `Expected 'id' to be a string.`
|
|
667
1708
|
});
|
|
668
1709
|
}
|
|
669
|
-
if (((
|
|
670
|
-
throw new
|
|
1710
|
+
if (((_d = toolCallDelta.function) == null ? void 0 : _d.name) == null) {
|
|
1711
|
+
throw new InvalidResponseDataError({
|
|
671
1712
|
data: toolCallDelta,
|
|
672
1713
|
message: `Expected 'function.name' to be a string.`
|
|
673
1714
|
});
|
|
@@ -677,28 +1718,36 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
677
1718
|
type: "function",
|
|
678
1719
|
function: {
|
|
679
1720
|
name: toolCallDelta.function.name,
|
|
680
|
-
arguments: (
|
|
1721
|
+
arguments: (_e = toolCallDelta.function.arguments) != null ? _e : ""
|
|
681
1722
|
},
|
|
1723
|
+
inputStarted: false,
|
|
682
1724
|
sent: false
|
|
683
1725
|
};
|
|
684
1726
|
const toolCall2 = toolCalls[index];
|
|
685
1727
|
if (toolCall2 == null) {
|
|
686
1728
|
throw new Error("Tool call is missing");
|
|
687
1729
|
}
|
|
688
|
-
if (((
|
|
1730
|
+
if (((_f = toolCall2.function) == null ? void 0 : _f.name) != null && ((_g = toolCall2.function) == null ? void 0 : _g.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
|
|
1731
|
+
toolCall2.inputStarted = true;
|
|
689
1732
|
controller.enqueue({
|
|
690
|
-
type: "tool-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
1733
|
+
type: "tool-input-start",
|
|
1734
|
+
id: toolCall2.id,
|
|
1735
|
+
toolName: toolCall2.function.name
|
|
1736
|
+
});
|
|
1737
|
+
controller.enqueue({
|
|
1738
|
+
type: "tool-input-delta",
|
|
1739
|
+
id: toolCall2.id,
|
|
1740
|
+
delta: toolCall2.function.arguments
|
|
1741
|
+
});
|
|
1742
|
+
controller.enqueue({
|
|
1743
|
+
type: "tool-input-end",
|
|
1744
|
+
id: toolCall2.id
|
|
695
1745
|
});
|
|
696
1746
|
controller.enqueue({
|
|
697
1747
|
type: "tool-call",
|
|
698
|
-
|
|
699
|
-
toolCallId: (_g = toolCall2.id) != null ? _g : (0, import_provider_utils3.generateId)(),
|
|
1748
|
+
toolCallId: toolCall2.id,
|
|
700
1749
|
toolName: toolCall2.function.name,
|
|
701
|
-
|
|
1750
|
+
input: toolCall2.function.arguments
|
|
702
1751
|
});
|
|
703
1752
|
toolCall2.sent = true;
|
|
704
1753
|
}
|
|
@@ -708,23 +1757,28 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
708
1757
|
if (toolCall == null) {
|
|
709
1758
|
throw new Error("Tool call is missing");
|
|
710
1759
|
}
|
|
1760
|
+
if (!toolCall.inputStarted) {
|
|
1761
|
+
toolCall.inputStarted = true;
|
|
1762
|
+
controller.enqueue({
|
|
1763
|
+
type: "tool-input-start",
|
|
1764
|
+
id: toolCall.id,
|
|
1765
|
+
toolName: toolCall.function.name
|
|
1766
|
+
});
|
|
1767
|
+
}
|
|
711
1768
|
if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
|
|
712
1769
|
toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
|
|
713
1770
|
}
|
|
714
1771
|
controller.enqueue({
|
|
715
|
-
type: "tool-
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
toolName: toolCall.function.name,
|
|
719
|
-
argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
|
|
1772
|
+
type: "tool-input-delta",
|
|
1773
|
+
id: toolCall.id,
|
|
1774
|
+
delta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
|
|
720
1775
|
});
|
|
721
|
-
if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null &&
|
|
1776
|
+
if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && isParsableJson(toolCall.function.arguments)) {
|
|
722
1777
|
controller.enqueue({
|
|
723
1778
|
type: "tool-call",
|
|
724
|
-
|
|
725
|
-
toolCallId: (_n = toolCall.id) != null ? _n : (0, import_provider_utils3.generateId)(),
|
|
1779
|
+
toolCallId: (_n = toolCall.id) != null ? _n : generateId(),
|
|
726
1780
|
toolName: toolCall.function.name,
|
|
727
|
-
|
|
1781
|
+
input: toolCall.function.arguments
|
|
728
1782
|
});
|
|
729
1783
|
toolCall.sent = true;
|
|
730
1784
|
}
|
|
@@ -732,198 +1786,54 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
732
1786
|
}
|
|
733
1787
|
},
|
|
734
1788
|
flush(controller) {
|
|
735
|
-
var
|
|
1789
|
+
var _a16;
|
|
736
1790
|
if (finishReason === "tool-calls") {
|
|
737
1791
|
for (const toolCall of toolCalls) {
|
|
738
|
-
if (!toolCall.sent) {
|
|
1792
|
+
if (toolCall && !toolCall.sent) {
|
|
739
1793
|
controller.enqueue({
|
|
740
1794
|
type: "tool-call",
|
|
741
|
-
|
|
742
|
-
toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
|
|
1795
|
+
toolCallId: (_a16 = toolCall.id) != null ? _a16 : generateId(),
|
|
743
1796
|
toolName: toolCall.function.name,
|
|
744
1797
|
// Coerce invalid arguments to an empty JSON object
|
|
745
|
-
|
|
1798
|
+
input: isParsableJson(toolCall.function.arguments) ? toolCall.function.arguments : "{}"
|
|
746
1799
|
});
|
|
747
1800
|
toolCall.sent = true;
|
|
748
1801
|
}
|
|
749
1802
|
}
|
|
750
1803
|
}
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
};
|
|
1804
|
+
if (textStarted) {
|
|
1805
|
+
controller.enqueue({
|
|
1806
|
+
type: "text-end",
|
|
1807
|
+
id: textId || generateId()
|
|
1808
|
+
});
|
|
1809
|
+
}
|
|
1810
|
+
if (reasoningStarted) {
|
|
1811
|
+
controller.enqueue({
|
|
1812
|
+
type: "reasoning-end",
|
|
1813
|
+
id: reasoningId || generateId()
|
|
1814
|
+
});
|
|
756
1815
|
}
|
|
757
|
-
|
|
758
|
-
controller.enqueue(__spreadValues({
|
|
1816
|
+
controller.enqueue({
|
|
759
1817
|
type: "finish",
|
|
760
1818
|
finishReason,
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
1819
|
+
usage,
|
|
1820
|
+
providerMetadata: {
|
|
1821
|
+
llmgateway: {
|
|
1822
|
+
usage: llmgatewayUsage
|
|
1823
|
+
}
|
|
1824
|
+
}
|
|
1825
|
+
});
|
|
764
1826
|
}
|
|
765
1827
|
})
|
|
766
1828
|
),
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
1829
|
+
warnings: [],
|
|
1830
|
+
request: { body: args },
|
|
1831
|
+
response: { headers: responseHeaders }
|
|
770
1832
|
};
|
|
771
1833
|
}
|
|
772
1834
|
};
|
|
773
|
-
var LLMGatewayChatCompletionBaseResponseSchema = import_zod3.z.object({
|
|
774
|
-
id: import_zod3.z.string().optional(),
|
|
775
|
-
model: import_zod3.z.string().optional(),
|
|
776
|
-
usage: import_zod3.z.object({
|
|
777
|
-
prompt_tokens: import_zod3.z.number(),
|
|
778
|
-
prompt_tokens_details: import_zod3.z.object({
|
|
779
|
-
cached_tokens: import_zod3.z.number()
|
|
780
|
-
}).nullish(),
|
|
781
|
-
completion_tokens: import_zod3.z.number(),
|
|
782
|
-
completion_tokens_details: import_zod3.z.object({
|
|
783
|
-
reasoning_tokens: import_zod3.z.number()
|
|
784
|
-
}).nullish(),
|
|
785
|
-
total_tokens: import_zod3.z.number(),
|
|
786
|
-
cost: import_zod3.z.number().optional()
|
|
787
|
-
}).nullish()
|
|
788
|
-
});
|
|
789
|
-
var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
|
|
790
|
-
choices: import_zod3.z.array(
|
|
791
|
-
import_zod3.z.object({
|
|
792
|
-
message: import_zod3.z.object({
|
|
793
|
-
role: import_zod3.z.literal("assistant"),
|
|
794
|
-
content: import_zod3.z.string().nullable().optional(),
|
|
795
|
-
reasoning: import_zod3.z.string().nullable().optional(),
|
|
796
|
-
reasoning_details: ReasoningDetailArraySchema.nullish(),
|
|
797
|
-
tool_calls: import_zod3.z.array(
|
|
798
|
-
import_zod3.z.object({
|
|
799
|
-
id: import_zod3.z.string().optional().nullable(),
|
|
800
|
-
type: import_zod3.z.literal("function"),
|
|
801
|
-
function: import_zod3.z.object({
|
|
802
|
-
name: import_zod3.z.string(),
|
|
803
|
-
arguments: import_zod3.z.string()
|
|
804
|
-
})
|
|
805
|
-
})
|
|
806
|
-
).optional()
|
|
807
|
-
}),
|
|
808
|
-
index: import_zod3.z.number(),
|
|
809
|
-
logprobs: import_zod3.z.object({
|
|
810
|
-
content: import_zod3.z.array(
|
|
811
|
-
import_zod3.z.object({
|
|
812
|
-
token: import_zod3.z.string(),
|
|
813
|
-
logprob: import_zod3.z.number(),
|
|
814
|
-
top_logprobs: import_zod3.z.array(
|
|
815
|
-
import_zod3.z.object({
|
|
816
|
-
token: import_zod3.z.string(),
|
|
817
|
-
logprob: import_zod3.z.number()
|
|
818
|
-
})
|
|
819
|
-
)
|
|
820
|
-
})
|
|
821
|
-
).nullable()
|
|
822
|
-
}).nullable().optional(),
|
|
823
|
-
finish_reason: import_zod3.z.string().optional().nullable()
|
|
824
|
-
})
|
|
825
|
-
)
|
|
826
|
-
});
|
|
827
|
-
var LLMGatewayStreamChatCompletionChunkSchema = import_zod3.z.union([
|
|
828
|
-
LLMGatewayChatCompletionBaseResponseSchema.extend({
|
|
829
|
-
choices: import_zod3.z.array(
|
|
830
|
-
import_zod3.z.object({
|
|
831
|
-
delta: import_zod3.z.object({
|
|
832
|
-
role: import_zod3.z.enum(["assistant"]).optional(),
|
|
833
|
-
content: import_zod3.z.string().nullish(),
|
|
834
|
-
reasoning: import_zod3.z.string().nullish().optional(),
|
|
835
|
-
reasoning_details: ReasoningDetailArraySchema.nullish(),
|
|
836
|
-
tool_calls: import_zod3.z.array(
|
|
837
|
-
import_zod3.z.object({
|
|
838
|
-
index: import_zod3.z.number(),
|
|
839
|
-
id: import_zod3.z.string().nullish(),
|
|
840
|
-
type: import_zod3.z.literal("function").optional(),
|
|
841
|
-
function: import_zod3.z.object({
|
|
842
|
-
name: import_zod3.z.string().nullish(),
|
|
843
|
-
arguments: import_zod3.z.string().nullish()
|
|
844
|
-
})
|
|
845
|
-
})
|
|
846
|
-
).nullish()
|
|
847
|
-
}).nullish(),
|
|
848
|
-
logprobs: import_zod3.z.object({
|
|
849
|
-
content: import_zod3.z.array(
|
|
850
|
-
import_zod3.z.object({
|
|
851
|
-
token: import_zod3.z.string(),
|
|
852
|
-
logprob: import_zod3.z.number(),
|
|
853
|
-
top_logprobs: import_zod3.z.array(
|
|
854
|
-
import_zod3.z.object({
|
|
855
|
-
token: import_zod3.z.string(),
|
|
856
|
-
logprob: import_zod3.z.number()
|
|
857
|
-
})
|
|
858
|
-
)
|
|
859
|
-
})
|
|
860
|
-
).nullable()
|
|
861
|
-
}).nullish(),
|
|
862
|
-
finish_reason: import_zod3.z.string().nullable().optional(),
|
|
863
|
-
index: import_zod3.z.number()
|
|
864
|
-
})
|
|
865
|
-
)
|
|
866
|
-
}),
|
|
867
|
-
LLMGatewayErrorResponseSchema
|
|
868
|
-
]);
|
|
869
|
-
function prepareToolsAndToolChoice(mode) {
|
|
870
|
-
var _a;
|
|
871
|
-
const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
|
|
872
|
-
if (tools == null) {
|
|
873
|
-
return { tools: void 0, tool_choice: void 0 };
|
|
874
|
-
}
|
|
875
|
-
const mappedTools = tools.map((tool) => {
|
|
876
|
-
if (isFunctionTool(tool)) {
|
|
877
|
-
return {
|
|
878
|
-
type: "function",
|
|
879
|
-
function: {
|
|
880
|
-
name: tool.name,
|
|
881
|
-
description: tool.description,
|
|
882
|
-
parameters: tool.parameters
|
|
883
|
-
}
|
|
884
|
-
};
|
|
885
|
-
}
|
|
886
|
-
return {
|
|
887
|
-
type: "function",
|
|
888
|
-
function: {
|
|
889
|
-
name: tool.name
|
|
890
|
-
}
|
|
891
|
-
};
|
|
892
|
-
});
|
|
893
|
-
const toolChoice = mode.toolChoice;
|
|
894
|
-
if (toolChoice == null) {
|
|
895
|
-
return { tools: mappedTools, tool_choice: void 0 };
|
|
896
|
-
}
|
|
897
|
-
const type = toolChoice.type;
|
|
898
|
-
switch (type) {
|
|
899
|
-
case "auto":
|
|
900
|
-
case "none":
|
|
901
|
-
case "required":
|
|
902
|
-
return { tools: mappedTools, tool_choice: type };
|
|
903
|
-
case "tool":
|
|
904
|
-
return {
|
|
905
|
-
tools: mappedTools,
|
|
906
|
-
tool_choice: {
|
|
907
|
-
type: "function",
|
|
908
|
-
function: {
|
|
909
|
-
name: toolChoice.toolName
|
|
910
|
-
}
|
|
911
|
-
}
|
|
912
|
-
};
|
|
913
|
-
default: {
|
|
914
|
-
const _exhaustiveCheck = type;
|
|
915
|
-
throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`);
|
|
916
|
-
}
|
|
917
|
-
}
|
|
918
|
-
}
|
|
919
|
-
|
|
920
|
-
// src/llmgateway-completion-language-model.ts
|
|
921
|
-
var import_provider3 = require("@ai-sdk/provider");
|
|
922
|
-
var import_provider_utils4 = require("@ai-sdk/provider-utils");
|
|
923
|
-
var import_zod4 = require("zod");
|
|
924
1835
|
|
|
925
|
-
// src/convert-to-llmgateway-completion-prompt.ts
|
|
926
|
-
var import_provider2 = require("@ai-sdk/provider");
|
|
1836
|
+
// src/completion/convert-to-llmgateway-completion-prompt.ts
|
|
927
1837
|
function convertToLLMGatewayCompletionPrompt({
|
|
928
1838
|
prompt,
|
|
929
1839
|
inputFormat,
|
|
@@ -943,8 +1853,8 @@ function convertToLLMGatewayCompletionPrompt({
|
|
|
943
1853
|
for (const { role, content } of prompt) {
|
|
944
1854
|
switch (role) {
|
|
945
1855
|
case "system": {
|
|
946
|
-
throw new
|
|
947
|
-
message:
|
|
1856
|
+
throw new InvalidPromptError({
|
|
1857
|
+
message: `Unexpected system message in prompt: ${content}`,
|
|
948
1858
|
prompt
|
|
949
1859
|
});
|
|
950
1860
|
}
|
|
@@ -954,21 +1864,13 @@ function convertToLLMGatewayCompletionPrompt({
|
|
|
954
1864
|
case "text": {
|
|
955
1865
|
return part.text;
|
|
956
1866
|
}
|
|
957
|
-
case "image": {
|
|
958
|
-
throw new import_provider2.UnsupportedFunctionalityError({
|
|
959
|
-
functionality: "images"
|
|
960
|
-
});
|
|
961
|
-
}
|
|
962
1867
|
case "file": {
|
|
963
|
-
throw new
|
|
1868
|
+
throw new UnsupportedFunctionalityError({
|
|
964
1869
|
functionality: "file attachments"
|
|
965
1870
|
});
|
|
966
1871
|
}
|
|
967
1872
|
default: {
|
|
968
|
-
|
|
969
|
-
throw new Error(
|
|
970
|
-
`Unsupported content type: ${_exhaustiveCheck}`
|
|
971
|
-
);
|
|
1873
|
+
return "";
|
|
972
1874
|
}
|
|
973
1875
|
}
|
|
974
1876
|
}).join("");
|
|
@@ -979,39 +1881,38 @@ ${userMessage}
|
|
|
979
1881
|
break;
|
|
980
1882
|
}
|
|
981
1883
|
case "assistant": {
|
|
982
|
-
const assistantMessage = content.map(
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
|
|
1010
|
-
|
|
1011
|
-
);
|
|
1884
|
+
const assistantMessage = content.map(
|
|
1885
|
+
(part) => {
|
|
1886
|
+
switch (part.type) {
|
|
1887
|
+
case "text": {
|
|
1888
|
+
return part.text;
|
|
1889
|
+
}
|
|
1890
|
+
case "tool-call": {
|
|
1891
|
+
throw new UnsupportedFunctionalityError({
|
|
1892
|
+
functionality: "tool-call messages"
|
|
1893
|
+
});
|
|
1894
|
+
}
|
|
1895
|
+
case "tool-result": {
|
|
1896
|
+
throw new UnsupportedFunctionalityError({
|
|
1897
|
+
functionality: "tool-result messages"
|
|
1898
|
+
});
|
|
1899
|
+
}
|
|
1900
|
+
case "reasoning": {
|
|
1901
|
+
throw new UnsupportedFunctionalityError({
|
|
1902
|
+
functionality: "reasoning messages"
|
|
1903
|
+
});
|
|
1904
|
+
}
|
|
1905
|
+
case "file": {
|
|
1906
|
+
throw new UnsupportedFunctionalityError({
|
|
1907
|
+
functionality: "file attachments"
|
|
1908
|
+
});
|
|
1909
|
+
}
|
|
1910
|
+
default: {
|
|
1911
|
+
return "";
|
|
1912
|
+
}
|
|
1012
1913
|
}
|
|
1013
1914
|
}
|
|
1014
|
-
|
|
1915
|
+
).join("");
|
|
1015
1916
|
text += `${assistant}:
|
|
1016
1917
|
${assistantMessage}
|
|
1017
1918
|
|
|
@@ -1019,13 +1920,12 @@ ${assistantMessage}
|
|
|
1019
1920
|
break;
|
|
1020
1921
|
}
|
|
1021
1922
|
case "tool": {
|
|
1022
|
-
throw new
|
|
1923
|
+
throw new UnsupportedFunctionalityError({
|
|
1023
1924
|
functionality: "tool messages"
|
|
1024
1925
|
});
|
|
1025
1926
|
}
|
|
1026
1927
|
default: {
|
|
1027
|
-
|
|
1028
|
-
throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
|
|
1928
|
+
break;
|
|
1029
1929
|
}
|
|
1030
1930
|
}
|
|
1031
1931
|
}
|
|
@@ -1036,40 +1936,63 @@ ${assistantMessage}
|
|
|
1036
1936
|
};
|
|
1037
1937
|
}
|
|
1038
1938
|
|
|
1039
|
-
// src/
|
|
1040
|
-
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
(
|
|
1048
|
-
|
|
1049
|
-
|
|
1050
|
-
|
|
1051
|
-
|
|
1052
|
-
|
|
1053
|
-
|
|
1054
|
-
|
|
1939
|
+
// src/completion/schemas.ts
|
|
1940
|
+
var import_v45 = require("zod/v4");
|
|
1941
|
+
var LLMGatewayCompletionChunkSchema = import_v45.z.union([
|
|
1942
|
+
import_v45.z.object({
|
|
1943
|
+
id: import_v45.z.string().optional(),
|
|
1944
|
+
model: import_v45.z.string().optional(),
|
|
1945
|
+
choices: import_v45.z.array(
|
|
1946
|
+
import_v45.z.object({
|
|
1947
|
+
text: import_v45.z.string(),
|
|
1948
|
+
reasoning: import_v45.z.string().nullish().optional(),
|
|
1949
|
+
reasoning_details: ReasoningDetailArraySchema.nullish(),
|
|
1950
|
+
finish_reason: import_v45.z.string().nullish(),
|
|
1951
|
+
index: import_v45.z.number().nullish(),
|
|
1952
|
+
logprobs: import_v45.z.object({
|
|
1953
|
+
tokens: import_v45.z.array(import_v45.z.string()),
|
|
1954
|
+
token_logprobs: import_v45.z.array(import_v45.z.number()),
|
|
1955
|
+
top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullable()
|
|
1956
|
+
}).nullable().optional()
|
|
1957
|
+
})
|
|
1958
|
+
),
|
|
1959
|
+
usage: import_v45.z.object({
|
|
1960
|
+
prompt_tokens: import_v45.z.number(),
|
|
1961
|
+
prompt_tokens_details: import_v45.z.object({
|
|
1962
|
+
cached_tokens: import_v45.z.number()
|
|
1963
|
+
}).nullish(),
|
|
1964
|
+
completion_tokens: import_v45.z.number(),
|
|
1965
|
+
completion_tokens_details: import_v45.z.object({
|
|
1966
|
+
reasoning_tokens: import_v45.z.number()
|
|
1967
|
+
}).nullish(),
|
|
1968
|
+
total_tokens: import_v45.z.number(),
|
|
1969
|
+
cost: import_v45.z.number().optional()
|
|
1970
|
+
}).nullish()
|
|
1971
|
+
}),
|
|
1972
|
+
LLMGatewayErrorResponseSchema
|
|
1973
|
+
]);
|
|
1055
1974
|
|
|
1056
|
-
// src/
|
|
1975
|
+
// src/completion/index.ts
|
|
1057
1976
|
var LLMGatewayCompletionLanguageModel = class {
|
|
1058
1977
|
constructor(modelId, settings, config) {
|
|
1059
|
-
this.specificationVersion = "
|
|
1978
|
+
this.specificationVersion = "v2";
|
|
1979
|
+
this.provider = "llmgateway";
|
|
1980
|
+
this.supportedUrls = {
|
|
1981
|
+
"image/*": [
|
|
1982
|
+
/^data:image\/[a-zA-Z]+;base64,/,
|
|
1983
|
+
/^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i
|
|
1984
|
+
],
|
|
1985
|
+
"text/*": [/^data:text\//, /^https?:\/\/.+$/],
|
|
1986
|
+
"application/*": [/^data:application\//, /^https?:\/\/.+$/]
|
|
1987
|
+
};
|
|
1060
1988
|
this.defaultObjectGenerationMode = void 0;
|
|
1061
1989
|
this.modelId = modelId;
|
|
1062
1990
|
this.settings = settings;
|
|
1063
1991
|
this.config = config;
|
|
1064
1992
|
}
|
|
1065
|
-
get provider() {
|
|
1066
|
-
return this.config.provider;
|
|
1067
|
-
}
|
|
1068
1993
|
getArgs({
|
|
1069
|
-
mode,
|
|
1070
|
-
inputFormat,
|
|
1071
1994
|
prompt,
|
|
1072
|
-
|
|
1995
|
+
maxOutputTokens,
|
|
1073
1996
|
temperature,
|
|
1074
1997
|
topP,
|
|
1075
1998
|
frequencyPenalty,
|
|
@@ -1078,16 +2001,24 @@ var LLMGatewayCompletionLanguageModel = class {
|
|
|
1078
2001
|
responseFormat,
|
|
1079
2002
|
topK,
|
|
1080
2003
|
stopSequences,
|
|
1081
|
-
|
|
2004
|
+
tools,
|
|
2005
|
+
toolChoice
|
|
1082
2006
|
}) {
|
|
1083
|
-
var _a, _b;
|
|
1084
|
-
const type = mode.type;
|
|
1085
|
-
const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
|
|
1086
2007
|
const { prompt: completionPrompt } = convertToLLMGatewayCompletionPrompt({
|
|
1087
2008
|
prompt,
|
|
1088
|
-
inputFormat
|
|
2009
|
+
inputFormat: "prompt"
|
|
1089
2010
|
});
|
|
1090
|
-
|
|
2011
|
+
if (tools == null ? void 0 : tools.length) {
|
|
2012
|
+
throw new UnsupportedFunctionalityError({
|
|
2013
|
+
functionality: "tools"
|
|
2014
|
+
});
|
|
2015
|
+
}
|
|
2016
|
+
if (toolChoice) {
|
|
2017
|
+
throw new UnsupportedFunctionalityError({
|
|
2018
|
+
functionality: "toolChoice"
|
|
2019
|
+
});
|
|
2020
|
+
}
|
|
2021
|
+
return __spreadValues(__spreadValues({
|
|
1091
2022
|
// model id:
|
|
1092
2023
|
model: this.modelId,
|
|
1093
2024
|
models: this.settings.models,
|
|
@@ -1097,7 +2028,7 @@ var LLMGatewayCompletionLanguageModel = class {
|
|
|
1097
2028
|
suffix: this.settings.suffix,
|
|
1098
2029
|
user: this.settings.user,
|
|
1099
2030
|
// standardized settings:
|
|
1100
|
-
max_tokens:
|
|
2031
|
+
max_tokens: maxOutputTokens,
|
|
1101
2032
|
temperature,
|
|
1102
2033
|
top_p: topP,
|
|
1103
2034
|
frequency_penalty: frequencyPenalty,
|
|
@@ -1107,59 +2038,31 @@ var LLMGatewayCompletionLanguageModel = class {
|
|
|
1107
2038
|
response_format: responseFormat,
|
|
1108
2039
|
top_k: topK,
|
|
1109
2040
|
// prompt:
|
|
1110
|
-
prompt: completionPrompt
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
|
|
1114
|
-
|
|
1115
|
-
throw new import_provider3.UnsupportedFunctionalityError({
|
|
1116
|
-
functionality: "tools"
|
|
1117
|
-
});
|
|
1118
|
-
}
|
|
1119
|
-
if (mode.toolChoice) {
|
|
1120
|
-
throw new import_provider3.UnsupportedFunctionalityError({
|
|
1121
|
-
functionality: "toolChoice"
|
|
1122
|
-
});
|
|
1123
|
-
}
|
|
1124
|
-
return baseArgs;
|
|
1125
|
-
}
|
|
1126
|
-
case "object-json": {
|
|
1127
|
-
throw new import_provider3.UnsupportedFunctionalityError({
|
|
1128
|
-
functionality: "object-json mode"
|
|
1129
|
-
});
|
|
1130
|
-
}
|
|
1131
|
-
case "object-tool": {
|
|
1132
|
-
throw new import_provider3.UnsupportedFunctionalityError({
|
|
1133
|
-
functionality: "object-tool mode"
|
|
1134
|
-
});
|
|
1135
|
-
}
|
|
1136
|
-
// Handle all non-text types with a single default case
|
|
1137
|
-
default: {
|
|
1138
|
-
const _exhaustiveCheck = type;
|
|
1139
|
-
throw new import_provider3.UnsupportedFunctionalityError({
|
|
1140
|
-
functionality: `${_exhaustiveCheck} mode`
|
|
1141
|
-
});
|
|
1142
|
-
}
|
|
1143
|
-
}
|
|
2041
|
+
prompt: completionPrompt,
|
|
2042
|
+
// LLMGateway specific settings:
|
|
2043
|
+
include_reasoning: this.settings.includeReasoning,
|
|
2044
|
+
reasoning: this.settings.reasoning
|
|
2045
|
+
}, this.config.extraBody), this.settings.extraBody);
|
|
1144
2046
|
}
|
|
1145
2047
|
async doGenerate(options) {
|
|
1146
|
-
var _b, _c, _d, _e, _f;
|
|
1147
|
-
const
|
|
1148
|
-
const
|
|
2048
|
+
var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
|
|
2049
|
+
const providerOptions = options.providerOptions || {};
|
|
2050
|
+
const llmgatewayOptions = providerOptions.llmgateway || {};
|
|
2051
|
+
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
|
|
2052
|
+
const { value: response, responseHeaders } = await postJsonToApi({
|
|
1149
2053
|
url: this.config.url({
|
|
1150
2054
|
path: "/completions",
|
|
1151
2055
|
modelId: this.modelId
|
|
1152
2056
|
}),
|
|
1153
|
-
headers:
|
|
2057
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
1154
2058
|
body: args,
|
|
1155
2059
|
failedResponseHandler: llmgatewayFailedResponseHandler,
|
|
1156
|
-
successfulResponseHandler:
|
|
2060
|
+
successfulResponseHandler: createJsonResponseHandler(
|
|
1157
2061
|
LLMGatewayCompletionChunkSchema
|
|
1158
2062
|
),
|
|
1159
2063
|
abortSignal: options.abortSignal,
|
|
1160
2064
|
fetch: this.config.fetch
|
|
1161
2065
|
});
|
|
1162
|
-
const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
|
|
1163
2066
|
if ("error" in response) {
|
|
1164
2067
|
throw new Error(`${response.error.message}`);
|
|
1165
2068
|
}
|
|
@@ -1168,54 +2071,62 @@ var LLMGatewayCompletionLanguageModel = class {
|
|
|
1168
2071
|
throw new Error("No choice in LLMGateway completion response");
|
|
1169
2072
|
}
|
|
1170
2073
|
return {
|
|
1171
|
-
|
|
1172
|
-
|
|
1173
|
-
|
|
1174
|
-
|
|
1175
|
-
|
|
1176
|
-
|
|
2074
|
+
content: [
|
|
2075
|
+
{
|
|
2076
|
+
type: "text",
|
|
2077
|
+
text: (_a15 = choice.text) != null ? _a15 : ""
|
|
2078
|
+
}
|
|
2079
|
+
],
|
|
2080
|
+
finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
|
|
1177
2081
|
usage: {
|
|
1178
|
-
|
|
1179
|
-
|
|
2082
|
+
inputTokens: (_c = (_b = response.usage) == null ? void 0 : _b.prompt_tokens) != null ? _c : 0,
|
|
2083
|
+
outputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.completion_tokens) != null ? _e : 0,
|
|
2084
|
+
totalTokens: ((_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : 0) + ((_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : 0),
|
|
2085
|
+
reasoningTokens: (_l = (_k = (_j = response.usage) == null ? void 0 : _j.completion_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : 0,
|
|
2086
|
+
cachedInputTokens: (_o = (_n = (_m = response.usage) == null ? void 0 : _m.prompt_tokens_details) == null ? void 0 : _n.cached_tokens) != null ? _o : 0
|
|
1180
2087
|
},
|
|
1181
|
-
|
|
1182
|
-
|
|
1183
|
-
|
|
1184
|
-
|
|
1185
|
-
warnings: []
|
|
2088
|
+
warnings: [],
|
|
2089
|
+
response: {
|
|
2090
|
+
headers: responseHeaders
|
|
2091
|
+
}
|
|
1186
2092
|
};
|
|
1187
2093
|
}
|
|
1188
2094
|
async doStream(options) {
|
|
1189
|
-
const
|
|
1190
|
-
const
|
|
2095
|
+
const providerOptions = options.providerOptions || {};
|
|
2096
|
+
const llmgatewayOptions = providerOptions.llmgateway || {};
|
|
2097
|
+
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
|
|
2098
|
+
const { value: response, responseHeaders } = await postJsonToApi({
|
|
1191
2099
|
url: this.config.url({
|
|
1192
2100
|
path: "/completions",
|
|
1193
2101
|
modelId: this.modelId
|
|
1194
2102
|
}),
|
|
1195
|
-
headers:
|
|
1196
|
-
body: __spreadProps(__spreadValues({},
|
|
2103
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
2104
|
+
body: __spreadProps(__spreadValues({}, args), {
|
|
1197
2105
|
stream: true,
|
|
1198
2106
|
// only include stream_options when in strict compatibility mode:
|
|
1199
2107
|
stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
|
|
1200
2108
|
}),
|
|
1201
2109
|
failedResponseHandler: llmgatewayFailedResponseHandler,
|
|
1202
|
-
successfulResponseHandler:
|
|
2110
|
+
successfulResponseHandler: createEventSourceResponseHandler(
|
|
1203
2111
|
LLMGatewayCompletionChunkSchema
|
|
1204
2112
|
),
|
|
1205
2113
|
abortSignal: options.abortSignal,
|
|
1206
2114
|
fetch: this.config.fetch
|
|
1207
2115
|
});
|
|
1208
|
-
const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
|
|
1209
2116
|
let finishReason = "other";
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
2117
|
+
const usage = {
|
|
2118
|
+
inputTokens: Number.NaN,
|
|
2119
|
+
outputTokens: Number.NaN,
|
|
2120
|
+
totalTokens: Number.NaN,
|
|
2121
|
+
reasoningTokens: Number.NaN,
|
|
2122
|
+
cachedInputTokens: Number.NaN
|
|
1213
2123
|
};
|
|
1214
|
-
|
|
2124
|
+
const llmgatewayUsage = {};
|
|
1215
2125
|
return {
|
|
1216
2126
|
stream: response.pipeThrough(
|
|
1217
2127
|
new TransformStream({
|
|
1218
2128
|
transform(chunk, controller) {
|
|
2129
|
+
var _a15, _b;
|
|
1219
2130
|
if (!chunk.success) {
|
|
1220
2131
|
finishReason = "error";
|
|
1221
2132
|
controller.enqueue({ type: "error", error: chunk.error });
|
|
@@ -1228,10 +2139,27 @@ var LLMGatewayCompletionLanguageModel = class {
|
|
|
1228
2139
|
return;
|
|
1229
2140
|
}
|
|
1230
2141
|
if (value.usage != null) {
|
|
1231
|
-
usage =
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
2142
|
+
usage.inputTokens = value.usage.prompt_tokens;
|
|
2143
|
+
usage.outputTokens = value.usage.completion_tokens;
|
|
2144
|
+
usage.totalTokens = value.usage.prompt_tokens + value.usage.completion_tokens;
|
|
2145
|
+
llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
|
|
2146
|
+
if (value.usage.prompt_tokens_details) {
|
|
2147
|
+
const cachedInputTokens = (_a15 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a15 : 0;
|
|
2148
|
+
usage.cachedInputTokens = cachedInputTokens;
|
|
2149
|
+
llmgatewayUsage.promptTokensDetails = {
|
|
2150
|
+
cachedTokens: cachedInputTokens
|
|
2151
|
+
};
|
|
2152
|
+
}
|
|
2153
|
+
llmgatewayUsage.completionTokens = value.usage.completion_tokens;
|
|
2154
|
+
if (value.usage.completion_tokens_details) {
|
|
2155
|
+
const reasoningTokens = (_b = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b : 0;
|
|
2156
|
+
usage.reasoningTokens = reasoningTokens;
|
|
2157
|
+
llmgatewayUsage.completionTokensDetails = {
|
|
2158
|
+
reasoningTokens
|
|
2159
|
+
};
|
|
2160
|
+
}
|
|
2161
|
+
llmgatewayUsage.cost = value.usage.cost;
|
|
2162
|
+
llmgatewayUsage.totalTokens = value.usage.total_tokens;
|
|
1235
2163
|
}
|
|
1236
2164
|
const choice = value.choices[0];
|
|
1237
2165
|
if ((choice == null ? void 0 : choice.finish_reason) != null) {
|
|
@@ -1240,69 +2168,40 @@ var LLMGatewayCompletionLanguageModel = class {
|
|
|
1240
2168
|
if ((choice == null ? void 0 : choice.text) != null) {
|
|
1241
2169
|
controller.enqueue({
|
|
1242
2170
|
type: "text-delta",
|
|
1243
|
-
|
|
2171
|
+
delta: choice.text,
|
|
2172
|
+
id: generateId()
|
|
1244
2173
|
});
|
|
1245
2174
|
}
|
|
1246
|
-
const mappedLogprobs = mapLLMGatewayCompletionLogprobs(
|
|
1247
|
-
choice == null ? void 0 : choice.logprobs
|
|
1248
|
-
);
|
|
1249
|
-
if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
|
|
1250
|
-
if (logprobs === void 0) {
|
|
1251
|
-
logprobs = [];
|
|
1252
|
-
}
|
|
1253
|
-
logprobs.push(...mappedLogprobs);
|
|
1254
|
-
}
|
|
1255
2175
|
},
|
|
1256
2176
|
flush(controller) {
|
|
1257
2177
|
controller.enqueue({
|
|
1258
2178
|
type: "finish",
|
|
1259
2179
|
finishReason,
|
|
1260
|
-
|
|
1261
|
-
|
|
2180
|
+
usage,
|
|
2181
|
+
providerMetadata: {
|
|
2182
|
+
llmgateway: {
|
|
2183
|
+
usage: llmgatewayUsage
|
|
2184
|
+
}
|
|
2185
|
+
}
|
|
1262
2186
|
});
|
|
1263
2187
|
}
|
|
1264
2188
|
})
|
|
1265
2189
|
),
|
|
1266
|
-
|
|
1267
|
-
|
|
1268
|
-
|
|
2190
|
+
response: {
|
|
2191
|
+
headers: responseHeaders
|
|
2192
|
+
}
|
|
1269
2193
|
};
|
|
1270
2194
|
}
|
|
1271
2195
|
};
|
|
1272
|
-
var LLMGatewayCompletionChunkSchema = import_zod4.z.union([
|
|
1273
|
-
import_zod4.z.object({
|
|
1274
|
-
id: import_zod4.z.string().optional(),
|
|
1275
|
-
model: import_zod4.z.string().optional(),
|
|
1276
|
-
choices: import_zod4.z.array(
|
|
1277
|
-
import_zod4.z.object({
|
|
1278
|
-
text: import_zod4.z.string(),
|
|
1279
|
-
reasoning: import_zod4.z.string().nullish().optional(),
|
|
1280
|
-
reasoning_details: ReasoningDetailArraySchema.nullish(),
|
|
1281
|
-
finish_reason: import_zod4.z.string().nullish(),
|
|
1282
|
-
index: import_zod4.z.number(),
|
|
1283
|
-
logprobs: import_zod4.z.object({
|
|
1284
|
-
tokens: import_zod4.z.array(import_zod4.z.string()),
|
|
1285
|
-
token_logprobs: import_zod4.z.array(import_zod4.z.number()),
|
|
1286
|
-
top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
|
|
1287
|
-
}).nullable().optional()
|
|
1288
|
-
})
|
|
1289
|
-
),
|
|
1290
|
-
usage: import_zod4.z.object({
|
|
1291
|
-
prompt_tokens: import_zod4.z.number(),
|
|
1292
|
-
completion_tokens: import_zod4.z.number()
|
|
1293
|
-
}).optional().nullable()
|
|
1294
|
-
}),
|
|
1295
|
-
LLMGatewayErrorResponseSchema
|
|
1296
|
-
]);
|
|
1297
2196
|
|
|
1298
|
-
// src/
|
|
2197
|
+
// src/facade.ts
|
|
1299
2198
|
var LLMGateway = class {
|
|
1300
2199
|
/**
|
|
1301
2200
|
* Creates a new LLMGateway provider instance.
|
|
1302
2201
|
*/
|
|
1303
2202
|
constructor(options = {}) {
|
|
1304
|
-
var
|
|
1305
|
-
this.baseURL = (_b =
|
|
2203
|
+
var _a15, _b;
|
|
2204
|
+
this.baseURL = (_b = withoutTrailingSlash((_a15 = options.baseURL) != null ? _a15 : options.baseUrl)) != null ? _b : "https://api.llmgateway.io/v1";
|
|
1306
2205
|
this.apiKey = options.apiKey;
|
|
1307
2206
|
this.headers = options.headers;
|
|
1308
2207
|
}
|
|
@@ -1310,9 +2209,9 @@ var LLMGateway = class {
|
|
|
1310
2209
|
return {
|
|
1311
2210
|
baseURL: this.baseURL,
|
|
1312
2211
|
headers: () => __spreadValues({
|
|
1313
|
-
Authorization: `Bearer ${
|
|
2212
|
+
Authorization: `Bearer ${loadApiKey({
|
|
1314
2213
|
apiKey: this.apiKey,
|
|
1315
|
-
environmentVariableName: "
|
|
2214
|
+
environmentVariableName: "LLM_GATEWAY_API_KEY",
|
|
1316
2215
|
description: "LLMGateway"
|
|
1317
2216
|
})}`
|
|
1318
2217
|
}, this.headers)
|
|
@@ -1336,16 +2235,15 @@ var LLMGateway = class {
|
|
|
1336
2235
|
}
|
|
1337
2236
|
};
|
|
1338
2237
|
|
|
1339
|
-
// src/
|
|
1340
|
-
var import_provider_utils6 = require("@ai-sdk/provider-utils");
|
|
2238
|
+
// src/provider.ts
|
|
1341
2239
|
function createLLMGateway(options = {}) {
|
|
1342
|
-
var
|
|
1343
|
-
const baseURL = (_b =
|
|
2240
|
+
var _a15, _b, _c;
|
|
2241
|
+
const baseURL = (_b = withoutTrailingSlash((_a15 = options.baseURL) != null ? _a15 : options.baseUrl)) != null ? _b : "https://api.llmgateway.io/v1";
|
|
1344
2242
|
const compatibility = (_c = options.compatibility) != null ? _c : "compatible";
|
|
1345
2243
|
const getHeaders = () => __spreadValues({
|
|
1346
|
-
Authorization: `Bearer ${
|
|
2244
|
+
Authorization: `Bearer ${loadApiKey({
|
|
1347
2245
|
apiKey: options.apiKey,
|
|
1348
|
-
environmentVariableName: "
|
|
2246
|
+
environmentVariableName: "LLM_GATEWAY_API_KEY",
|
|
1349
2247
|
description: "LLMGateway"
|
|
1350
2248
|
})}`
|
|
1351
2249
|
}, options.headers);
|