@llmgateway/ai-sdk-provider 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/README.md +187 -0
- package/dist/index.cjs +1398 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +263 -0
- package/dist/index.d.ts +263 -0
- package/dist/index.js +1390 -0
- package/dist/index.js.map +1 -0
- package/dist/internal/index.cjs +1298 -0
- package/dist/internal/index.cjs.map +1 -0
- package/dist/internal/index.d.cts +184 -0
- package/dist/internal/index.d.ts +184 -0
- package/dist/internal/index.js +1291 -0
- package/dist/internal/index.js.map +1 -0
- package/package.json +82 -0
package/dist/index.cjs
ADDED
|
@@ -0,0 +1,1398 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __defProp = Object.defineProperty;
|
|
3
|
+
var __defProps = Object.defineProperties;
|
|
4
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
5
|
+
var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
|
|
6
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
7
|
+
var __getOwnPropSymbols = Object.getOwnPropertySymbols;
|
|
8
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
9
|
+
var __propIsEnum = Object.prototype.propertyIsEnumerable;
|
|
10
|
+
var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
|
|
11
|
+
var __spreadValues = (a, b) => {
|
|
12
|
+
for (var prop in b || (b = {}))
|
|
13
|
+
if (__hasOwnProp.call(b, prop))
|
|
14
|
+
__defNormalProp(a, prop, b[prop]);
|
|
15
|
+
if (__getOwnPropSymbols)
|
|
16
|
+
for (var prop of __getOwnPropSymbols(b)) {
|
|
17
|
+
if (__propIsEnum.call(b, prop))
|
|
18
|
+
__defNormalProp(a, prop, b[prop]);
|
|
19
|
+
}
|
|
20
|
+
return a;
|
|
21
|
+
};
|
|
22
|
+
var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
|
|
23
|
+
var __objRest = (source, exclude) => {
|
|
24
|
+
var target = {};
|
|
25
|
+
for (var prop in source)
|
|
26
|
+
if (__hasOwnProp.call(source, prop) && exclude.indexOf(prop) < 0)
|
|
27
|
+
target[prop] = source[prop];
|
|
28
|
+
if (source != null && __getOwnPropSymbols)
|
|
29
|
+
for (var prop of __getOwnPropSymbols(source)) {
|
|
30
|
+
if (exclude.indexOf(prop) < 0 && __propIsEnum.call(source, prop))
|
|
31
|
+
target[prop] = source[prop];
|
|
32
|
+
}
|
|
33
|
+
return target;
|
|
34
|
+
};
|
|
35
|
+
var __export = (target, all) => {
|
|
36
|
+
for (var name in all)
|
|
37
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
38
|
+
};
|
|
39
|
+
var __copyProps = (to, from, except, desc) => {
|
|
40
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
41
|
+
for (let key of __getOwnPropNames(from))
|
|
42
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
43
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
44
|
+
}
|
|
45
|
+
return to;
|
|
46
|
+
};
|
|
47
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
48
|
+
|
|
49
|
+
// src/index.ts
|
|
50
|
+
var index_exports = {};
|
|
51
|
+
__export(index_exports, {
|
|
52
|
+
LLMGateway: () => LLMGateway,
|
|
53
|
+
createLLMGateway: () => createLLMGateway,
|
|
54
|
+
llmgateway: () => llmgateway
|
|
55
|
+
});
|
|
56
|
+
module.exports = __toCommonJS(index_exports);
|
|
57
|
+
|
|
58
|
+
// src/llmgateway-facade.ts
|
|
59
|
+
var import_provider_utils5 = require("@ai-sdk/provider-utils");
|
|
60
|
+
|
|
61
|
+
// src/schemas/reasoning-details.ts
|
|
62
|
+
var import_zod = require("zod");
|
|
63
|
+
var ReasoningDetailSummarySchema = import_zod.z.object({
|
|
64
|
+
type: import_zod.z.literal("reasoning.summary" /* Summary */),
|
|
65
|
+
summary: import_zod.z.string()
|
|
66
|
+
});
|
|
67
|
+
var ReasoningDetailEncryptedSchema = import_zod.z.object({
|
|
68
|
+
type: import_zod.z.literal("reasoning.encrypted" /* Encrypted */),
|
|
69
|
+
data: import_zod.z.string()
|
|
70
|
+
});
|
|
71
|
+
var ReasoningDetailTextSchema = import_zod.z.object({
|
|
72
|
+
type: import_zod.z.literal("reasoning.text" /* Text */),
|
|
73
|
+
text: import_zod.z.string().nullish(),
|
|
74
|
+
signature: import_zod.z.string().nullish()
|
|
75
|
+
});
|
|
76
|
+
var ReasoningDetailUnionSchema = import_zod.z.union([
|
|
77
|
+
ReasoningDetailSummarySchema,
|
|
78
|
+
ReasoningDetailEncryptedSchema,
|
|
79
|
+
ReasoningDetailTextSchema
|
|
80
|
+
]);
|
|
81
|
+
var ReasoningDetailsWithUnknownSchema = import_zod.z.union([
|
|
82
|
+
ReasoningDetailUnionSchema,
|
|
83
|
+
import_zod.z.unknown().transform(() => null)
|
|
84
|
+
]);
|
|
85
|
+
var ReasoningDetailArraySchema = import_zod.z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
|
|
86
|
+
|
|
87
|
+
// src/llmgateway-chat-language-model.ts
|
|
88
|
+
var import_provider = require("@ai-sdk/provider");
|
|
89
|
+
var import_provider_utils3 = require("@ai-sdk/provider-utils");
|
|
90
|
+
var import_zod3 = require("zod");
|
|
91
|
+
|
|
92
|
+
// src/convert-to-llmgateway-chat-messages.ts
|
|
93
|
+
var import_provider_utils = require("@ai-sdk/provider-utils");
|
|
94
|
+
function getCacheControl(providerMetadata) {
|
|
95
|
+
var _a, _b, _c;
|
|
96
|
+
const anthropic = providerMetadata == null ? void 0 : providerMetadata.anthropic;
|
|
97
|
+
const llmgateway2 = providerMetadata == null ? void 0 : providerMetadata.llmgateway;
|
|
98
|
+
return (_c = (_b = (_a = llmgateway2 == null ? void 0 : llmgateway2.cacheControl) != null ? _a : llmgateway2 == null ? void 0 : llmgateway2.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
|
|
99
|
+
}
|
|
100
|
+
function convertToLLMGatewayChatMessages(prompt) {
|
|
101
|
+
var _a, _b, _c;
|
|
102
|
+
const messages = [];
|
|
103
|
+
for (const { role, content, providerMetadata } of prompt) {
|
|
104
|
+
switch (role) {
|
|
105
|
+
case "system": {
|
|
106
|
+
messages.push({
|
|
107
|
+
role: "system",
|
|
108
|
+
content,
|
|
109
|
+
cache_control: getCacheControl(providerMetadata)
|
|
110
|
+
});
|
|
111
|
+
break;
|
|
112
|
+
}
|
|
113
|
+
case "user": {
|
|
114
|
+
if (content.length === 1 && ((_a = content[0]) == null ? void 0 : _a.type) === "text") {
|
|
115
|
+
messages.push({
|
|
116
|
+
role: "user",
|
|
117
|
+
content: content[0].text,
|
|
118
|
+
cache_control: (_b = getCacheControl(providerMetadata)) != null ? _b : getCacheControl(content[0].providerMetadata)
|
|
119
|
+
});
|
|
120
|
+
break;
|
|
121
|
+
}
|
|
122
|
+
const messageCacheControl = getCacheControl(providerMetadata);
|
|
123
|
+
const contentParts = content.map(
|
|
124
|
+
(part) => {
|
|
125
|
+
var _a2, _b2, _c2, _d;
|
|
126
|
+
const cacheControl = (_a2 = getCacheControl(part.providerMetadata)) != null ? _a2 : messageCacheControl;
|
|
127
|
+
switch (part.type) {
|
|
128
|
+
case "text":
|
|
129
|
+
return {
|
|
130
|
+
type: "text",
|
|
131
|
+
text: part.text,
|
|
132
|
+
// For text parts, only use part-specific cache control
|
|
133
|
+
cache_control: cacheControl
|
|
134
|
+
};
|
|
135
|
+
case "image":
|
|
136
|
+
return {
|
|
137
|
+
type: "image_url",
|
|
138
|
+
image_url: {
|
|
139
|
+
url: part.image instanceof URL ? part.image.toString() : `data:${(_b2 = part.mimeType) != null ? _b2 : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(
|
|
140
|
+
part.image
|
|
141
|
+
)}`
|
|
142
|
+
},
|
|
143
|
+
// For image parts, use part-specific or message-level cache control
|
|
144
|
+
cache_control: cacheControl
|
|
145
|
+
};
|
|
146
|
+
case "file":
|
|
147
|
+
return {
|
|
148
|
+
type: "file",
|
|
149
|
+
file: {
|
|
150
|
+
filename: String(
|
|
151
|
+
(_d = (_c2 = part.providerMetadata) == null ? void 0 : _c2.llmgateway) == null ? void 0 : _d.filename
|
|
152
|
+
),
|
|
153
|
+
file_data: part.data instanceof Uint8Array ? `data:${part.mimeType};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.data)}` : `data:${part.mimeType};base64,${part.data}`
|
|
154
|
+
},
|
|
155
|
+
cache_control: cacheControl
|
|
156
|
+
};
|
|
157
|
+
default: {
|
|
158
|
+
const _exhaustiveCheck = part;
|
|
159
|
+
throw new Error(
|
|
160
|
+
`Unsupported content part type: ${_exhaustiveCheck}`
|
|
161
|
+
);
|
|
162
|
+
}
|
|
163
|
+
}
|
|
164
|
+
}
|
|
165
|
+
);
|
|
166
|
+
messages.push({
|
|
167
|
+
role: "user",
|
|
168
|
+
content: contentParts
|
|
169
|
+
});
|
|
170
|
+
break;
|
|
171
|
+
}
|
|
172
|
+
case "assistant": {
|
|
173
|
+
let text = "";
|
|
174
|
+
let reasoning = "";
|
|
175
|
+
const reasoningDetails = [];
|
|
176
|
+
const toolCalls = [];
|
|
177
|
+
for (const part of content) {
|
|
178
|
+
switch (part.type) {
|
|
179
|
+
case "text": {
|
|
180
|
+
text += part.text;
|
|
181
|
+
break;
|
|
182
|
+
}
|
|
183
|
+
case "tool-call": {
|
|
184
|
+
toolCalls.push({
|
|
185
|
+
id: part.toolCallId,
|
|
186
|
+
type: "function",
|
|
187
|
+
function: {
|
|
188
|
+
name: part.toolName,
|
|
189
|
+
arguments: JSON.stringify(part.args)
|
|
190
|
+
}
|
|
191
|
+
});
|
|
192
|
+
break;
|
|
193
|
+
}
|
|
194
|
+
case "reasoning": {
|
|
195
|
+
reasoning += part.text;
|
|
196
|
+
reasoningDetails.push({
|
|
197
|
+
type: "reasoning.text" /* Text */,
|
|
198
|
+
text: part.text,
|
|
199
|
+
signature: part.signature
|
|
200
|
+
});
|
|
201
|
+
break;
|
|
202
|
+
}
|
|
203
|
+
case "redacted-reasoning": {
|
|
204
|
+
reasoningDetails.push({
|
|
205
|
+
type: "reasoning.encrypted" /* Encrypted */,
|
|
206
|
+
data: part.data
|
|
207
|
+
});
|
|
208
|
+
break;
|
|
209
|
+
}
|
|
210
|
+
case "file":
|
|
211
|
+
break;
|
|
212
|
+
default: {
|
|
213
|
+
const _exhaustiveCheck = part;
|
|
214
|
+
throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
|
|
215
|
+
}
|
|
216
|
+
}
|
|
217
|
+
}
|
|
218
|
+
messages.push({
|
|
219
|
+
role: "assistant",
|
|
220
|
+
content: text,
|
|
221
|
+
tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
|
|
222
|
+
reasoning: reasoning || void 0,
|
|
223
|
+
reasoning_details: reasoningDetails.length > 0 ? reasoningDetails : void 0,
|
|
224
|
+
cache_control: getCacheControl(providerMetadata)
|
|
225
|
+
});
|
|
226
|
+
break;
|
|
227
|
+
}
|
|
228
|
+
case "tool": {
|
|
229
|
+
for (const toolResponse of content) {
|
|
230
|
+
messages.push({
|
|
231
|
+
role: "tool",
|
|
232
|
+
tool_call_id: toolResponse.toolCallId,
|
|
233
|
+
content: JSON.stringify(toolResponse.result),
|
|
234
|
+
cache_control: (_c = getCacheControl(providerMetadata)) != null ? _c : getCacheControl(toolResponse.providerMetadata)
|
|
235
|
+
});
|
|
236
|
+
}
|
|
237
|
+
break;
|
|
238
|
+
}
|
|
239
|
+
default: {
|
|
240
|
+
const _exhaustiveCheck = role;
|
|
241
|
+
throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
|
|
242
|
+
}
|
|
243
|
+
}
|
|
244
|
+
}
|
|
245
|
+
return messages;
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
// src/map-llmgateway-chat-logprobs.ts
|
|
249
|
+
function mapLLMGatewayChatLogProbsOutput(logprobs) {
|
|
250
|
+
var _a, _b;
|
|
251
|
+
return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
|
|
252
|
+
token,
|
|
253
|
+
logprob,
|
|
254
|
+
topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
|
|
255
|
+
token: token2,
|
|
256
|
+
logprob: logprob2
|
|
257
|
+
})) : []
|
|
258
|
+
}))) != null ? _b : void 0;
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
// src/map-llmgateway-finish-reason.ts
|
|
262
|
+
function mapLLMGatewayFinishReason(finishReason) {
|
|
263
|
+
switch (finishReason) {
|
|
264
|
+
case "stop":
|
|
265
|
+
return "stop";
|
|
266
|
+
case "length":
|
|
267
|
+
return "length";
|
|
268
|
+
case "content_filter":
|
|
269
|
+
return "content-filter";
|
|
270
|
+
case "function_call":
|
|
271
|
+
case "tool_calls":
|
|
272
|
+
return "tool-calls";
|
|
273
|
+
default:
|
|
274
|
+
return "unknown";
|
|
275
|
+
}
|
|
276
|
+
}
|
|
277
|
+
|
|
278
|
+
// src/llmgateway-error.ts
|
|
279
|
+
var import_provider_utils2 = require("@ai-sdk/provider-utils");
|
|
280
|
+
var import_zod2 = require("zod");
|
|
281
|
+
var LLMGatewayErrorResponseSchema = import_zod2.z.object({
|
|
282
|
+
error: import_zod2.z.object({
|
|
283
|
+
message: import_zod2.z.string(),
|
|
284
|
+
type: import_zod2.z.string(),
|
|
285
|
+
param: import_zod2.z.any().nullable(),
|
|
286
|
+
code: import_zod2.z.string().nullable()
|
|
287
|
+
})
|
|
288
|
+
});
|
|
289
|
+
var llmgatewayFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
|
|
290
|
+
errorSchema: LLMGatewayErrorResponseSchema,
|
|
291
|
+
errorToMessage: (data) => data.error.message
|
|
292
|
+
});
|
|
293
|
+
|
|
294
|
+
// src/llmgateway-chat-language-model.ts
|
|
295
|
+
function isFunctionTool(tool) {
|
|
296
|
+
return "parameters" in tool;
|
|
297
|
+
}
|
|
298
|
+
var LLMGatewayChatLanguageModel = class {
|
|
299
|
+
constructor(modelId, settings, config) {
|
|
300
|
+
this.specificationVersion = "v1";
|
|
301
|
+
this.defaultObjectGenerationMode = "tool";
|
|
302
|
+
this.modelId = modelId;
|
|
303
|
+
this.settings = settings;
|
|
304
|
+
this.config = config;
|
|
305
|
+
}
|
|
306
|
+
get provider() {
|
|
307
|
+
return this.config.provider;
|
|
308
|
+
}
|
|
309
|
+
getArgs({
|
|
310
|
+
mode,
|
|
311
|
+
prompt,
|
|
312
|
+
maxTokens,
|
|
313
|
+
temperature,
|
|
314
|
+
topP,
|
|
315
|
+
frequencyPenalty,
|
|
316
|
+
presencePenalty,
|
|
317
|
+
seed,
|
|
318
|
+
stopSequences,
|
|
319
|
+
responseFormat,
|
|
320
|
+
topK,
|
|
321
|
+
providerMetadata
|
|
322
|
+
}) {
|
|
323
|
+
var _a;
|
|
324
|
+
const type = mode.type;
|
|
325
|
+
const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
|
|
326
|
+
const baseArgs = __spreadValues(__spreadValues(__spreadValues({
|
|
327
|
+
// model id:
|
|
328
|
+
model: this.modelId,
|
|
329
|
+
models: this.settings.models,
|
|
330
|
+
// model specific settings:
|
|
331
|
+
logit_bias: this.settings.logitBias,
|
|
332
|
+
logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
|
|
333
|
+
top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
|
|
334
|
+
user: this.settings.user,
|
|
335
|
+
parallel_tool_calls: this.settings.parallelToolCalls,
|
|
336
|
+
// standardized settings:
|
|
337
|
+
max_tokens: maxTokens,
|
|
338
|
+
temperature,
|
|
339
|
+
top_p: topP,
|
|
340
|
+
frequency_penalty: frequencyPenalty,
|
|
341
|
+
presence_penalty: presencePenalty,
|
|
342
|
+
seed,
|
|
343
|
+
stop: stopSequences,
|
|
344
|
+
response_format: responseFormat,
|
|
345
|
+
top_k: topK,
|
|
346
|
+
// messages:
|
|
347
|
+
messages: convertToLLMGatewayChatMessages(prompt),
|
|
348
|
+
// LLMGateway specific settings:
|
|
349
|
+
include_reasoning: this.settings.includeReasoning,
|
|
350
|
+
reasoning: this.settings.reasoning,
|
|
351
|
+
usage: this.settings.usage
|
|
352
|
+
}, this.config.extraBody), this.settings.extraBody), extraCallingBody);
|
|
353
|
+
switch (type) {
|
|
354
|
+
case "regular": {
|
|
355
|
+
return __spreadValues(__spreadValues({}, baseArgs), prepareToolsAndToolChoice(mode));
|
|
356
|
+
}
|
|
357
|
+
case "object-json": {
|
|
358
|
+
return __spreadProps(__spreadValues({}, baseArgs), {
|
|
359
|
+
response_format: { type: "json_object" }
|
|
360
|
+
});
|
|
361
|
+
}
|
|
362
|
+
case "object-tool": {
|
|
363
|
+
return __spreadProps(__spreadValues({}, baseArgs), {
|
|
364
|
+
tool_choice: { type: "function", function: { name: mode.tool.name } },
|
|
365
|
+
tools: [
|
|
366
|
+
{
|
|
367
|
+
type: "function",
|
|
368
|
+
function: {
|
|
369
|
+
name: mode.tool.name,
|
|
370
|
+
description: mode.tool.description,
|
|
371
|
+
parameters: mode.tool.parameters
|
|
372
|
+
}
|
|
373
|
+
}
|
|
374
|
+
]
|
|
375
|
+
});
|
|
376
|
+
}
|
|
377
|
+
// Handle all non-text types with a single default case
|
|
378
|
+
default: {
|
|
379
|
+
const _exhaustiveCheck = type;
|
|
380
|
+
throw new import_provider.UnsupportedFunctionalityError({
|
|
381
|
+
functionality: `${_exhaustiveCheck} mode`
|
|
382
|
+
});
|
|
383
|
+
}
|
|
384
|
+
}
|
|
385
|
+
}
|
|
386
|
+
async doGenerate(options) {
|
|
387
|
+
var _b, _c, _d, _e, _f, _g, _h, _i, _j;
|
|
388
|
+
const args = this.getArgs(options);
|
|
389
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
|
|
390
|
+
url: this.config.url({
|
|
391
|
+
path: "/chat/completions",
|
|
392
|
+
modelId: this.modelId
|
|
393
|
+
}),
|
|
394
|
+
headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
|
|
395
|
+
body: args,
|
|
396
|
+
failedResponseHandler: llmgatewayFailedResponseHandler,
|
|
397
|
+
successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
|
|
398
|
+
LLMGatewayNonStreamChatCompletionResponseSchema
|
|
399
|
+
),
|
|
400
|
+
abortSignal: options.abortSignal,
|
|
401
|
+
fetch: this.config.fetch
|
|
402
|
+
});
|
|
403
|
+
const _a = args, { messages: rawPrompt } = _a, rawSettings = __objRest(_a, ["messages"]);
|
|
404
|
+
const choice = response.choices[0];
|
|
405
|
+
if (!choice) {
|
|
406
|
+
throw new Error("No choice in response");
|
|
407
|
+
}
|
|
408
|
+
const usageInfo = response.usage ? {
|
|
409
|
+
promptTokens: (_b = response.usage.prompt_tokens) != null ? _b : 0,
|
|
410
|
+
completionTokens: (_c = response.usage.completion_tokens) != null ? _c : 0
|
|
411
|
+
} : {
|
|
412
|
+
promptTokens: 0,
|
|
413
|
+
completionTokens: 0
|
|
414
|
+
};
|
|
415
|
+
const providerMetadata = {};
|
|
416
|
+
if (response.usage && ((_d = this.settings.usage) == null ? void 0 : _d.include)) {
|
|
417
|
+
providerMetadata.llmgateway = {
|
|
418
|
+
usage: {
|
|
419
|
+
promptTokens: response.usage.prompt_tokens,
|
|
420
|
+
promptTokensDetails: response.usage.prompt_tokens_details ? {
|
|
421
|
+
cachedTokens: (_e = response.usage.prompt_tokens_details.cached_tokens) != null ? _e : 0
|
|
422
|
+
} : void 0,
|
|
423
|
+
completionTokens: response.usage.completion_tokens,
|
|
424
|
+
completionTokensDetails: response.usage.completion_tokens_details ? {
|
|
425
|
+
reasoningTokens: (_f = response.usage.completion_tokens_details.reasoning_tokens) != null ? _f : 0
|
|
426
|
+
} : void 0,
|
|
427
|
+
cost: response.usage.cost,
|
|
428
|
+
totalTokens: (_g = response.usage.total_tokens) != null ? _g : 0
|
|
429
|
+
}
|
|
430
|
+
};
|
|
431
|
+
}
|
|
432
|
+
const hasProviderMetadata = Object.keys(providerMetadata).length > 0;
|
|
433
|
+
const reasoningDetails = (_h = choice.message.reasoning_details) != null ? _h : [];
|
|
434
|
+
const reasoning = reasoningDetails.length > 0 ? reasoningDetails.map((detail) => {
|
|
435
|
+
var _a2;
|
|
436
|
+
switch (detail.type) {
|
|
437
|
+
case "reasoning.text" /* Text */: {
|
|
438
|
+
if (detail.text) {
|
|
439
|
+
return {
|
|
440
|
+
type: "text",
|
|
441
|
+
text: detail.text,
|
|
442
|
+
signature: (_a2 = detail.signature) != null ? _a2 : void 0
|
|
443
|
+
};
|
|
444
|
+
}
|
|
445
|
+
break;
|
|
446
|
+
}
|
|
447
|
+
case "reasoning.summary" /* Summary */: {
|
|
448
|
+
if (detail.summary) {
|
|
449
|
+
return {
|
|
450
|
+
type: "text",
|
|
451
|
+
text: detail.summary
|
|
452
|
+
};
|
|
453
|
+
}
|
|
454
|
+
break;
|
|
455
|
+
}
|
|
456
|
+
case "reasoning.encrypted" /* Encrypted */: {
|
|
457
|
+
if (detail.data) {
|
|
458
|
+
return {
|
|
459
|
+
type: "redacted",
|
|
460
|
+
data: detail.data
|
|
461
|
+
};
|
|
462
|
+
}
|
|
463
|
+
break;
|
|
464
|
+
}
|
|
465
|
+
default: {
|
|
466
|
+
detail;
|
|
467
|
+
}
|
|
468
|
+
}
|
|
469
|
+
return null;
|
|
470
|
+
}).filter((p) => p !== null) : choice.message.reasoning ? [
|
|
471
|
+
{
|
|
472
|
+
type: "text",
|
|
473
|
+
text: choice.message.reasoning
|
|
474
|
+
}
|
|
475
|
+
] : [];
|
|
476
|
+
return __spreadValues({
|
|
477
|
+
response: {
|
|
478
|
+
id: response.id,
|
|
479
|
+
modelId: response.model
|
|
480
|
+
},
|
|
481
|
+
text: (_i = choice.message.content) != null ? _i : void 0,
|
|
482
|
+
reasoning,
|
|
483
|
+
toolCalls: (_j = choice.message.tool_calls) == null ? void 0 : _j.map((toolCall) => {
|
|
484
|
+
var _a2;
|
|
485
|
+
return {
|
|
486
|
+
toolCallType: "function",
|
|
487
|
+
toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
|
|
488
|
+
toolName: toolCall.function.name,
|
|
489
|
+
args: toolCall.function.arguments
|
|
490
|
+
};
|
|
491
|
+
}),
|
|
492
|
+
finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
|
|
493
|
+
usage: usageInfo,
|
|
494
|
+
rawCall: { rawPrompt, rawSettings },
|
|
495
|
+
rawResponse: { headers: responseHeaders },
|
|
496
|
+
warnings: [],
|
|
497
|
+
logprobs: mapLLMGatewayChatLogProbsOutput(choice.logprobs)
|
|
498
|
+
}, hasProviderMetadata ? { providerMetadata } : {});
|
|
499
|
+
}
|
|
500
|
+
async doStream(options) {
|
|
501
|
+
var _a, _c;
|
|
502
|
+
const args = this.getArgs(options);
|
|
503
|
+
const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
|
|
504
|
+
url: this.config.url({
|
|
505
|
+
path: "/chat/completions",
|
|
506
|
+
modelId: this.modelId
|
|
507
|
+
}),
|
|
508
|
+
headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
|
|
509
|
+
body: __spreadProps(__spreadValues({}, args), {
|
|
510
|
+
stream: true,
|
|
511
|
+
// only include stream_options when in strict compatibility mode:
|
|
512
|
+
stream_options: this.config.compatibility === "strict" ? __spreadValues({
|
|
513
|
+
include_usage: true
|
|
514
|
+
}, ((_a = this.settings.usage) == null ? void 0 : _a.include) ? { include_usage: true } : {}) : void 0
|
|
515
|
+
}),
|
|
516
|
+
failedResponseHandler: llmgatewayFailedResponseHandler,
|
|
517
|
+
successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
|
|
518
|
+
LLMGatewayStreamChatCompletionChunkSchema
|
|
519
|
+
),
|
|
520
|
+
abortSignal: options.abortSignal,
|
|
521
|
+
fetch: this.config.fetch
|
|
522
|
+
});
|
|
523
|
+
const _b = args, { messages: rawPrompt } = _b, rawSettings = __objRest(_b, ["messages"]);
|
|
524
|
+
const toolCalls = [];
|
|
525
|
+
let finishReason = "other";
|
|
526
|
+
let usage = {
|
|
527
|
+
promptTokens: Number.NaN,
|
|
528
|
+
completionTokens: Number.NaN
|
|
529
|
+
};
|
|
530
|
+
let logprobs;
|
|
531
|
+
const llmgatewayUsage = {};
|
|
532
|
+
const shouldIncludeUsageAccounting = !!((_c = this.settings.usage) == null ? void 0 : _c.include);
|
|
533
|
+
return {
|
|
534
|
+
stream: response.pipeThrough(
|
|
535
|
+
new TransformStream({
|
|
536
|
+
transform(chunk, controller) {
|
|
537
|
+
var _a2, _b2, _c2, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
|
|
538
|
+
if (!chunk.success) {
|
|
539
|
+
finishReason = "error";
|
|
540
|
+
controller.enqueue({ type: "error", error: chunk.error });
|
|
541
|
+
return;
|
|
542
|
+
}
|
|
543
|
+
const value = chunk.value;
|
|
544
|
+
if ("error" in value) {
|
|
545
|
+
finishReason = "error";
|
|
546
|
+
controller.enqueue({ type: "error", error: value.error });
|
|
547
|
+
return;
|
|
548
|
+
}
|
|
549
|
+
if (value.id) {
|
|
550
|
+
controller.enqueue({
|
|
551
|
+
type: "response-metadata",
|
|
552
|
+
id: value.id
|
|
553
|
+
});
|
|
554
|
+
}
|
|
555
|
+
if (value.model) {
|
|
556
|
+
controller.enqueue({
|
|
557
|
+
type: "response-metadata",
|
|
558
|
+
modelId: value.model
|
|
559
|
+
});
|
|
560
|
+
}
|
|
561
|
+
if (value.usage != null) {
|
|
562
|
+
usage = {
|
|
563
|
+
promptTokens: value.usage.prompt_tokens,
|
|
564
|
+
completionTokens: value.usage.completion_tokens
|
|
565
|
+
};
|
|
566
|
+
llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
|
|
567
|
+
if (value.usage.prompt_tokens_details) {
|
|
568
|
+
llmgatewayUsage.promptTokensDetails = {
|
|
569
|
+
cachedTokens: (_a2 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a2 : 0
|
|
570
|
+
};
|
|
571
|
+
}
|
|
572
|
+
llmgatewayUsage.completionTokens = value.usage.completion_tokens;
|
|
573
|
+
if (value.usage.completion_tokens_details) {
|
|
574
|
+
llmgatewayUsage.completionTokensDetails = {
|
|
575
|
+
reasoningTokens: (_b2 = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b2 : 0
|
|
576
|
+
};
|
|
577
|
+
}
|
|
578
|
+
llmgatewayUsage.cost = value.usage.cost;
|
|
579
|
+
llmgatewayUsage.totalTokens = value.usage.total_tokens;
|
|
580
|
+
}
|
|
581
|
+
const choice = value.choices[0];
|
|
582
|
+
if ((choice == null ? void 0 : choice.finish_reason) != null) {
|
|
583
|
+
finishReason = mapLLMGatewayFinishReason(choice.finish_reason);
|
|
584
|
+
}
|
|
585
|
+
if ((choice == null ? void 0 : choice.delta) == null) {
|
|
586
|
+
return;
|
|
587
|
+
}
|
|
588
|
+
const delta = choice.delta;
|
|
589
|
+
if (delta.content != null) {
|
|
590
|
+
controller.enqueue({
|
|
591
|
+
type: "text-delta",
|
|
592
|
+
textDelta: delta.content
|
|
593
|
+
});
|
|
594
|
+
}
|
|
595
|
+
if (delta.reasoning != null) {
|
|
596
|
+
controller.enqueue({
|
|
597
|
+
type: "reasoning",
|
|
598
|
+
textDelta: delta.reasoning
|
|
599
|
+
});
|
|
600
|
+
}
|
|
601
|
+
if (delta.reasoning_details && delta.reasoning_details.length > 0) {
|
|
602
|
+
for (const detail of delta.reasoning_details) {
|
|
603
|
+
switch (detail.type) {
|
|
604
|
+
case "reasoning.text" /* Text */: {
|
|
605
|
+
if (detail.text) {
|
|
606
|
+
controller.enqueue({
|
|
607
|
+
type: "reasoning",
|
|
608
|
+
textDelta: detail.text
|
|
609
|
+
});
|
|
610
|
+
}
|
|
611
|
+
if (detail.signature) {
|
|
612
|
+
controller.enqueue({
|
|
613
|
+
type: "reasoning-signature",
|
|
614
|
+
signature: detail.signature
|
|
615
|
+
});
|
|
616
|
+
}
|
|
617
|
+
break;
|
|
618
|
+
}
|
|
619
|
+
case "reasoning.encrypted" /* Encrypted */: {
|
|
620
|
+
if (detail.data) {
|
|
621
|
+
controller.enqueue({
|
|
622
|
+
type: "redacted-reasoning",
|
|
623
|
+
data: detail.data
|
|
624
|
+
});
|
|
625
|
+
}
|
|
626
|
+
break;
|
|
627
|
+
}
|
|
628
|
+
case "reasoning.summary" /* Summary */: {
|
|
629
|
+
if (detail.summary) {
|
|
630
|
+
controller.enqueue({
|
|
631
|
+
type: "reasoning",
|
|
632
|
+
textDelta: detail.summary
|
|
633
|
+
});
|
|
634
|
+
}
|
|
635
|
+
break;
|
|
636
|
+
}
|
|
637
|
+
default: {
|
|
638
|
+
detail;
|
|
639
|
+
break;
|
|
640
|
+
}
|
|
641
|
+
}
|
|
642
|
+
}
|
|
643
|
+
}
|
|
644
|
+
const mappedLogprobs = mapLLMGatewayChatLogProbsOutput(
|
|
645
|
+
choice == null ? void 0 : choice.logprobs
|
|
646
|
+
);
|
|
647
|
+
if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
|
|
648
|
+
if (logprobs === void 0) {
|
|
649
|
+
logprobs = [];
|
|
650
|
+
}
|
|
651
|
+
logprobs.push(...mappedLogprobs);
|
|
652
|
+
}
|
|
653
|
+
if (delta.tool_calls != null) {
|
|
654
|
+
for (const toolCallDelta of delta.tool_calls) {
|
|
655
|
+
const index = toolCallDelta.index;
|
|
656
|
+
if (toolCalls[index] == null) {
|
|
657
|
+
if (toolCallDelta.type !== "function") {
|
|
658
|
+
throw new import_provider.InvalidResponseDataError({
|
|
659
|
+
data: toolCallDelta,
|
|
660
|
+
message: `Expected 'function' type.`
|
|
661
|
+
});
|
|
662
|
+
}
|
|
663
|
+
if (toolCallDelta.id == null) {
|
|
664
|
+
throw new import_provider.InvalidResponseDataError({
|
|
665
|
+
data: toolCallDelta,
|
|
666
|
+
message: `Expected 'id' to be a string.`
|
|
667
|
+
});
|
|
668
|
+
}
|
|
669
|
+
if (((_c2 = toolCallDelta.function) == null ? void 0 : _c2.name) == null) {
|
|
670
|
+
throw new import_provider.InvalidResponseDataError({
|
|
671
|
+
data: toolCallDelta,
|
|
672
|
+
message: `Expected 'function.name' to be a string.`
|
|
673
|
+
});
|
|
674
|
+
}
|
|
675
|
+
toolCalls[index] = {
|
|
676
|
+
id: toolCallDelta.id,
|
|
677
|
+
type: "function",
|
|
678
|
+
function: {
|
|
679
|
+
name: toolCallDelta.function.name,
|
|
680
|
+
arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
|
|
681
|
+
},
|
|
682
|
+
sent: false
|
|
683
|
+
};
|
|
684
|
+
const toolCall2 = toolCalls[index];
|
|
685
|
+
if (toolCall2 == null) {
|
|
686
|
+
throw new Error("Tool call is missing");
|
|
687
|
+
}
|
|
688
|
+
if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
|
|
689
|
+
controller.enqueue({
|
|
690
|
+
type: "tool-call-delta",
|
|
691
|
+
toolCallType: "function",
|
|
692
|
+
toolCallId: toolCall2.id,
|
|
693
|
+
toolName: toolCall2.function.name,
|
|
694
|
+
argsTextDelta: toolCall2.function.arguments
|
|
695
|
+
});
|
|
696
|
+
controller.enqueue({
|
|
697
|
+
type: "tool-call",
|
|
698
|
+
toolCallType: "function",
|
|
699
|
+
toolCallId: (_g = toolCall2.id) != null ? _g : (0, import_provider_utils3.generateId)(),
|
|
700
|
+
toolName: toolCall2.function.name,
|
|
701
|
+
args: toolCall2.function.arguments
|
|
702
|
+
});
|
|
703
|
+
toolCall2.sent = true;
|
|
704
|
+
}
|
|
705
|
+
continue;
|
|
706
|
+
}
|
|
707
|
+
const toolCall = toolCalls[index];
|
|
708
|
+
if (toolCall == null) {
|
|
709
|
+
throw new Error("Tool call is missing");
|
|
710
|
+
}
|
|
711
|
+
if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
|
|
712
|
+
toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
|
|
713
|
+
}
|
|
714
|
+
controller.enqueue({
|
|
715
|
+
type: "tool-call-delta",
|
|
716
|
+
toolCallType: "function",
|
|
717
|
+
toolCallId: toolCall.id,
|
|
718
|
+
toolName: toolCall.function.name,
|
|
719
|
+
argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
|
|
720
|
+
});
|
|
721
|
+
if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
|
|
722
|
+
controller.enqueue({
|
|
723
|
+
type: "tool-call",
|
|
724
|
+
toolCallType: "function",
|
|
725
|
+
toolCallId: (_n = toolCall.id) != null ? _n : (0, import_provider_utils3.generateId)(),
|
|
726
|
+
toolName: toolCall.function.name,
|
|
727
|
+
args: toolCall.function.arguments
|
|
728
|
+
});
|
|
729
|
+
toolCall.sent = true;
|
|
730
|
+
}
|
|
731
|
+
}
|
|
732
|
+
}
|
|
733
|
+
},
|
|
734
|
+
flush(controller) {
|
|
735
|
+
var _a2;
|
|
736
|
+
if (finishReason === "tool-calls") {
|
|
737
|
+
for (const toolCall of toolCalls) {
|
|
738
|
+
if (!toolCall.sent) {
|
|
739
|
+
controller.enqueue({
|
|
740
|
+
type: "tool-call",
|
|
741
|
+
toolCallType: "function",
|
|
742
|
+
toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
|
|
743
|
+
toolName: toolCall.function.name,
|
|
744
|
+
// Coerce invalid arguments to an empty JSON object
|
|
745
|
+
args: (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments) ? toolCall.function.arguments : "{}"
|
|
746
|
+
});
|
|
747
|
+
toolCall.sent = true;
|
|
748
|
+
}
|
|
749
|
+
}
|
|
750
|
+
}
|
|
751
|
+
const providerMetadata = {};
|
|
752
|
+
if (shouldIncludeUsageAccounting && (llmgatewayUsage.totalTokens !== void 0 || llmgatewayUsage.cost !== void 0 || llmgatewayUsage.promptTokensDetails !== void 0 || llmgatewayUsage.completionTokensDetails !== void 0)) {
|
|
753
|
+
providerMetadata.llmgateway = {
|
|
754
|
+
usage: llmgatewayUsage
|
|
755
|
+
};
|
|
756
|
+
}
|
|
757
|
+
const hasProviderMetadata = Object.keys(providerMetadata).length > 0 && shouldIncludeUsageAccounting;
|
|
758
|
+
controller.enqueue(__spreadValues({
|
|
759
|
+
type: "finish",
|
|
760
|
+
finishReason,
|
|
761
|
+
logprobs,
|
|
762
|
+
usage
|
|
763
|
+
}, hasProviderMetadata ? { providerMetadata } : {}));
|
|
764
|
+
}
|
|
765
|
+
})
|
|
766
|
+
),
|
|
767
|
+
rawCall: { rawPrompt, rawSettings },
|
|
768
|
+
rawResponse: { headers: responseHeaders },
|
|
769
|
+
warnings: []
|
|
770
|
+
};
|
|
771
|
+
}
|
|
772
|
+
};
|
|
773
|
+
// Base zod schema shared by both the non-streaming response and the stream
// chunk schemas below. Validates the common envelope fields of an
// OpenAI-compatible chat completion payload (id, model, usage accounting).
var LLMGatewayChatCompletionBaseResponseSchema = import_zod3.z.object({
  id: import_zod3.z.string().optional(),
  model: import_zod3.z.string().optional(),
  // Token/cost accounting; `.nullish()` because some upstream providers omit
  // usage entirely or send an explicit null.
  usage: import_zod3.z.object({
    prompt_tokens: import_zod3.z.number(),
    // Cache-hit breakdown of the prompt tokens (optional provider extension).
    prompt_tokens_details: import_zod3.z.object({
      cached_tokens: import_zod3.z.number()
    }).nullish(),
    completion_tokens: import_zod3.z.number(),
    // Reasoning-token breakdown of the completion tokens (optional extension).
    completion_tokens_details: import_zod3.z.object({
      reasoning_tokens: import_zod3.z.number()
    }).nullish(),
    total_tokens: import_zod3.z.number(),
    // Monetary cost reported by the gateway, when available.
    cost: import_zod3.z.number().optional()
  }).nullish()
});
|
|
789
|
+
// Schema for a complete (non-streaming) chat completion response.
// Extends the base envelope with the `choices` array carrying full
// assistant messages, optional tool calls, and optional logprobs.
var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
  choices: import_zod3.z.array(
    import_zod3.z.object({
      message: import_zod3.z.object({
        role: import_zod3.z.literal("assistant"),
        // Text content may be absent/null when the model only emits tool calls.
        content: import_zod3.z.string().nullable().optional(),
        // Provider-specific reasoning text, when the model exposes it.
        reasoning: import_zod3.z.string().nullable().optional(),
        reasoning_details: ReasoningDetailArraySchema.nullish(),
        tool_calls: import_zod3.z.array(
          import_zod3.z.object({
            // Some providers omit the tool-call id; callers generate one.
            id: import_zod3.z.string().optional().nullable(),
            type: import_zod3.z.literal("function"),
            function: import_zod3.z.object({
              name: import_zod3.z.string(),
              // Raw JSON string as produced by the model.
              arguments: import_zod3.z.string()
            })
          })
        ).optional()
      }),
      index: import_zod3.z.number(),
      // Per-token log probabilities (OpenAI-compatible shape), when requested.
      logprobs: import_zod3.z.object({
        content: import_zod3.z.array(
          import_zod3.z.object({
            token: import_zod3.z.string(),
            logprob: import_zod3.z.number(),
            top_logprobs: import_zod3.z.array(
              import_zod3.z.object({
                token: import_zod3.z.string(),
                logprob: import_zod3.z.number()
              })
            )
          })
        ).nullable()
      }).nullable().optional(),
      finish_reason: import_zod3.z.string().optional().nullable()
    })
  )
});
|
|
827
|
+
// Schema for one SSE chunk of a streaming chat completion. A chunk is either
// a delta payload (first union member) or a gateway error payload
// (LLMGatewayErrorResponseSchema), so the stream transform can branch on it.
var LLMGatewayStreamChatCompletionChunkSchema = import_zod3.z.union([
  LLMGatewayChatCompletionBaseResponseSchema.extend({
    choices: import_zod3.z.array(
      import_zod3.z.object({
        // Incremental message delta; nullish for keep-alive/usage-only chunks.
        delta: import_zod3.z.object({
          role: import_zod3.z.enum(["assistant"]).optional(),
          content: import_zod3.z.string().nullish(),
          reasoning: import_zod3.z.string().nullish().optional(),
          reasoning_details: ReasoningDetailArraySchema.nullish(),
          // Tool-call fragments are keyed by `index` and accumulated by the
          // consumer; name/arguments arrive piecemeal and may be nullish.
          tool_calls: import_zod3.z.array(
            import_zod3.z.object({
              index: import_zod3.z.number(),
              id: import_zod3.z.string().nullish(),
              type: import_zod3.z.literal("function").optional(),
              function: import_zod3.z.object({
                name: import_zod3.z.string().nullish(),
                arguments: import_zod3.z.string().nullish()
              })
            })
          ).nullish()
        }).nullish(),
        // Per-token log probabilities for the tokens in this chunk.
        logprobs: import_zod3.z.object({
          content: import_zod3.z.array(
            import_zod3.z.object({
              token: import_zod3.z.string(),
              logprob: import_zod3.z.number(),
              top_logprobs: import_zod3.z.array(
                import_zod3.z.object({
                  token: import_zod3.z.string(),
                  logprob: import_zod3.z.number()
                })
              )
            })
          ).nullable()
        }).nullish(),
        finish_reason: import_zod3.z.string().nullable().optional(),
        index: import_zod3.z.number()
      })
    )
  }),
  LLMGatewayErrorResponseSchema
]);
|
|
869
|
+
/**
 * Maps the AI SDK `mode.tools` / `mode.toolChoice` settings onto the
 * OpenAI-compatible `tools` / `tool_choice` request fields.
 *
 * Returns `{ tools: undefined, tool_choice: undefined }` when no tools are
 * configured; otherwise returns the mapped tool definitions plus the
 * translated tool-choice directive. Throws on an unrecognized choice type.
 */
function prepareToolsAndToolChoice(mode) {
  const rawTools = mode.tools;
  // An absent or empty tools array means "no tools" for the request body.
  if (rawTools == null || !rawTools.length) {
    return { tools: void 0, tool_choice: void 0 };
  }
  const mappedTools = rawTools.map((tool) => {
    if (isFunctionTool(tool)) {
      // Full function tool: carry description and JSON-schema parameters.
      return {
        type: "function",
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.parameters
        }
      };
    }
    // Provider-defined tool: only the name is forwarded.
    return {
      type: "function",
      function: {
        name: tool.name
      }
    };
  });
  const choice = mode.toolChoice;
  if (choice == null) {
    return { tools: mappedTools, tool_choice: void 0 };
  }
  switch (choice.type) {
    // These three pass through verbatim as string directives.
    case "auto":
    case "none":
    case "required":
      return { tools: mappedTools, tool_choice: choice.type };
    case "tool":
      // Force a specific named function.
      return {
        tools: mappedTools,
        tool_choice: {
          type: "function",
          function: {
            name: choice.toolName
          }
        }
      };
    default: {
      const unknownType = choice.type;
      throw new Error(`Unsupported tool choice type: ${unknownType}`);
    }
  }
}
|
|
919
|
+
|
|
920
|
+
// src/llmgateway-completion-language-model.ts
|
|
921
|
+
var import_provider3 = require("@ai-sdk/provider");
|
|
922
|
+
var import_provider_utils4 = require("@ai-sdk/provider-utils");
|
|
923
|
+
var import_zod4 = require("zod");
|
|
924
|
+
|
|
925
|
+
// src/convert-to-llmgateway-completion-prompt.ts
|
|
926
|
+
var import_provider2 = require("@ai-sdk/provider");
|
|
927
|
+
/**
 * Converts an AI SDK message-array prompt into a single plain-text
 * completion prompt of the form:
 *
 *   <system text>\n\nuser:\n<text>\n\nassistant:\n<text>\n\n...assistant:\n
 *
 * A single text-only user message in "prompt" input format is passed through
 * verbatim. Only text parts are supported; images, files, tool calls/results
 * and reasoning parts raise UnsupportedFunctionalityError.
 *
 * Fix: the "Unexpected system message" error previously used `${content}`
 * inside a plain double-quoted string, so the placeholder was emitted
 * literally; it is now a real template literal.
 */
function convertToLLMGatewayCompletionPrompt({
  prompt,
  inputFormat,
  user = "user",
  assistant = "assistant"
}) {
  // Fast path: a bare single-text user prompt is forwarded unchanged.
  if (inputFormat === "prompt" && prompt.length === 1 && prompt[0] && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0] && prompt[0].content[0].type === "text") {
    return { prompt: prompt[0].content[0].text };
  }
  let text = "";
  // A leading system message becomes an untagged preamble.
  if (prompt[0] && prompt[0].role === "system") {
    text += `${prompt[0].content}\n\n`;
    prompt = prompt.slice(1);
  }
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        // System messages are only valid in the leading position (handled above).
        throw new import_provider2.InvalidPromptError({
          message: `Unexpected system message in prompt: ${content}`,
          prompt
        });
      }
      case "user": {
        const userMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "image": {
              throw new import_provider2.UnsupportedFunctionalityError({
                functionality: "images"
              });
            }
            case "file": {
              throw new import_provider2.UnsupportedFunctionalityError({
                functionality: "file attachments"
              });
            }
            default: {
              const _exhaustiveCheck = part;
              throw new Error(
                `Unsupported content type: ${_exhaustiveCheck}`
              );
            }
          }
        }).join("");
        text += `${user}:\n${userMessage}\n\n`;
        break;
      }
      case "assistant": {
        const assistantMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "tool-call": {
              throw new import_provider2.UnsupportedFunctionalityError({
                functionality: "tool-call messages"
              });
            }
            case "reasoning": {
              throw new import_provider2.UnsupportedFunctionalityError({
                functionality: "reasoning messages"
              });
            }
            case "redacted-reasoning": {
              throw new import_provider2.UnsupportedFunctionalityError({
                functionality: "redacted reasoning messages"
              });
            }
            case "file": {
              throw new import_provider2.UnsupportedFunctionalityError({
                functionality: "file attachments"
              });
            }
            default: {
              const _exhaustiveCheck = part;
              throw new Error(
                `Unsupported content type: ${_exhaustiveCheck}`
              );
            }
          }
        }).join("");
        text += `${assistant}:\n${assistantMessage}\n\n`;
        break;
      }
      case "tool": {
        throw new import_provider2.UnsupportedFunctionalityError({
          functionality: "tool messages"
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  // Trailing assistant tag cues the model to continue as the assistant.
  text += `${assistant}:\n`;
  return {
    prompt: text
  };
}
|
|
1038
|
+
|
|
1039
|
+
// src/map-llmgateway-completion-logprobs.ts
|
|
1040
|
+
/**
 * Converts OpenAI-legacy-completion logprobs ({ tokens, token_logprobs,
 * top_logprobs }) into the AI SDK shape ([{ token, logprob, topLogprobs }]).
 * Returns undefined when no logprobs were provided. Missing per-token values
 * default to logprob 0 and an empty topLogprobs list.
 */
function mapLLMGatewayCompletionLogprobs(logprobs) {
  if (logprobs == null) {
    return void 0;
  }
  return logprobs.tokens.map((token, i) => {
    const rawLogprob = logprobs.token_logprobs[i];
    let topLogprobs = [];
    if (logprobs.top_logprobs) {
      // Each top_logprobs entry is a { token: logprob } record.
      const entry = logprobs.top_logprobs[i] != null ? logprobs.top_logprobs[i] : {};
      topLogprobs = Object.entries(entry).map(([candidate, candidateLogprob]) => ({
        token: candidate,
        logprob: candidateLogprob
      }));
    }
    return {
      token,
      logprob: rawLogprob != null ? rawLogprob : 0,
      topLogprobs
    };
  });
}
|
|
1055
|
+
|
|
1056
|
+
// src/llmgateway-completion-language-model.ts
|
|
1057
|
+
/**
 * LanguageModelV1 implementation for LLMGateway's legacy `/completions`
 * (text-completion) endpoint. Converts AI SDK prompts to plain text via
 * convertToLLMGatewayCompletionPrompt and maps responses/streams back to the
 * AI SDK shapes.
 *
 * Fix: doStream previously called `this.getArgs(options)` a second time when
 * building the request body even though `args` had already been computed;
 * the precomputed `args` is now reused.
 */
var LLMGatewayCompletionLanguageModel = class {
  constructor(modelId, settings, config) {
    // LanguageModelV1 contract version.
    this.specificationVersion = "v1";
    // Object generation is not supported by the completion endpoint.
    this.defaultObjectGenerationMode = void 0;
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  /**
   * Builds the JSON request body for the /completions endpoint from the
   * standardized AI SDK call options. Throws UnsupportedFunctionalityError
   * for tools, toolChoice, and object-generation modes.
   */
  getArgs({
    mode,
    inputFormat,
    prompt,
    maxTokens,
    temperature,
    topP,
    frequencyPenalty,
    presencePenalty,
    seed,
    responseFormat,
    topK,
    stopSequences,
    providerMetadata
  }) {
    var _a, _b;
    const type = mode.type;
    // Caller-supplied extra body fields under providerMetadata.llmgateway
    // are merged last and therefore win over config/settings extras.
    const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
    const { prompt: completionPrompt } = convertToLLMGatewayCompletionPrompt({
      prompt,
      inputFormat
    });
    const baseArgs = __spreadValues(__spreadValues(__spreadValues({
      // model id:
      model: this.modelId,
      models: this.settings.models,
      // model specific settings:
      logit_bias: this.settings.logitBias,
      // logprobs may be a count or a boolean; `true` maps to 0 ("top token only").
      logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
      suffix: this.settings.suffix,
      user: this.settings.user,
      // standardized settings:
      max_tokens: maxTokens,
      temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
      seed,
      stop: stopSequences,
      response_format: responseFormat,
      top_k: topK,
      // prompt:
      prompt: completionPrompt
    }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
    switch (type) {
      case "regular": {
        // The legacy completion endpoint has no tool support.
        if ((_b = mode.tools) == null ? void 0 : _b.length) {
          throw new import_provider3.UnsupportedFunctionalityError({
            functionality: "tools"
          });
        }
        if (mode.toolChoice) {
          throw new import_provider3.UnsupportedFunctionalityError({
            functionality: "toolChoice"
          });
        }
        return baseArgs;
      }
      case "object-json": {
        throw new import_provider3.UnsupportedFunctionalityError({
          functionality: "object-json mode"
        });
      }
      case "object-tool": {
        throw new import_provider3.UnsupportedFunctionalityError({
          functionality: "object-tool mode"
        });
      }
      // Handle all non-text types with a single default case
      default: {
        const _exhaustiveCheck = type;
        throw new import_provider3.UnsupportedFunctionalityError({
          functionality: `${_exhaustiveCheck} mode`
        });
      }
    }
  }
  /**
   * Non-streaming generation: POSTs the args and maps the single choice to
   * the AI SDK result (text, usage, finishReason, logprobs).
   */
  async doGenerate(options) {
    var _b, _c, _d, _e, _f;
    const args = this.getArgs(options);
    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: llmgatewayFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
        LLMGatewayCompletionChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Split the prompt out of the args for rawCall reporting.
    const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
    // The union schema also admits gateway error payloads.
    if ("error" in response) {
      throw new Error(`${response.error.message}`);
    }
    const choice = response.choices[0];
    if (!choice) {
      throw new Error("No choice in LLMGateway completion response");
    }
    return {
      response: {
        id: response.id,
        modelId: response.model
      },
      text: (_b = choice.text) != null ? _b : "",
      reasoning: choice.reasoning || void 0,
      usage: {
        promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : 0,
        completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : 0
      },
      finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
      logprobs: mapLLMGatewayCompletionLogprobs(choice.logprobs),
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings: []
    };
  }
  /**
   * Streaming generation: POSTs with `stream: true` and transforms the SSE
   * chunk stream into AI SDK text-delta/error/finish parts, accumulating
   * usage and logprobs along the way.
   */
  async doStream(options) {
    const args = this.getArgs(options);
    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
      // Reuse the args computed above instead of recomputing getArgs(options).
      body: __spreadProps(__spreadValues({}, args), {
        stream: true,
        // only include stream_options when in strict compatibility mode:
        stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
      }),
      failedResponseHandler: llmgatewayFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
        LLMGatewayCompletionChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
    let finishReason = "other";
    let usage = {
      promptTokens: Number.NaN,
      completionTokens: Number.NaN
    };
    let logprobs;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          transform(chunk, controller) {
            // Schema-validation failures become error stream parts.
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            // Gateway-reported errors also become error stream parts.
            if ("error" in value) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: value.error });
              return;
            }
            // Usage typically arrives on the final chunk; keep the latest.
            if (value.usage != null) {
              usage = {
                promptTokens: value.usage.prompt_tokens,
                completionTokens: value.usage.completion_tokens
              };
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapLLMGatewayFinishReason(choice.finish_reason);
            }
            if ((choice == null ? void 0 : choice.text) != null) {
              controller.enqueue({
                type: "text-delta",
                textDelta: choice.text
              });
            }
            const mappedLogprobs = mapLLMGatewayCompletionLogprobs(
              choice == null ? void 0 : choice.logprobs
            );
            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
              if (logprobs === void 0) {
                logprobs = [];
              }
              logprobs.push(...mappedLogprobs);
            }
          },
          flush(controller) {
            // Emit the terminal part with accumulated state.
            controller.enqueue({
              type: "finish",
              finishReason,
              logprobs,
              usage
            });
          }
        })
      ),
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings: []
    };
  }
};
|
|
1272
|
+
// Schema for legacy /completions responses and stream chunks. A payload is
// either a completion body (first union member) or a gateway error payload
// (LLMGatewayErrorResponseSchema); callers branch on `"error" in response`.
var LLMGatewayCompletionChunkSchema = import_zod4.z.union([
  import_zod4.z.object({
    id: import_zod4.z.string().optional(),
    model: import_zod4.z.string().optional(),
    choices: import_zod4.z.array(
      import_zod4.z.object({
        text: import_zod4.z.string(),
        reasoning: import_zod4.z.string().nullish().optional(),
        reasoning_details: ReasoningDetailArraySchema.nullish(),
        finish_reason: import_zod4.z.string().nullish(),
        index: import_zod4.z.number(),
        // Legacy completion logprobs shape: parallel arrays plus an optional
        // per-token record of top alternatives.
        logprobs: import_zod4.z.object({
          tokens: import_zod4.z.array(import_zod4.z.string()),
          token_logprobs: import_zod4.z.array(import_zod4.z.number()),
          top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
        }).nullable().optional()
      })
    ),
    // Usage is only present on the final stream chunk / full response.
    usage: import_zod4.z.object({
      prompt_tokens: import_zod4.z.number(),
      completion_tokens: import_zod4.z.number()
    }).optional().nullable()
  }),
  LLMGatewayErrorResponseSchema
]);
|
|
1297
|
+
|
|
1298
|
+
// src/llmgateway-facade.ts
|
|
1299
|
+
/**
 * Legacy class-based facade over the LLMGateway provider. Prefer
 * createLLMGateway for new code; this class always uses "strict"
 * compatibility.
 */
var LLMGateway = class {
  /**
   * Creates a new LLMGateway provider instance.
   *
   * Options: `baseURL` (default "https://api.llmgateway.io/v1"), `apiKey`
   * (falls back to the LLMGATEWAY_API_KEY env var at request time), and
   * extra `headers`.
   */
  constructor(options = {}) {
    var _a;
    // Simplified from a redundant `options.baseURL ?? options.baseURL`
    // fallback (both branches yielded the same value). NOTE(review):
    // presumably a deprecated `baseUrl` alias was intended — confirm
    // against the TypeScript source before adding one.
    this.baseURL = (_a = (0, import_provider_utils5.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.llmgateway.io/v1";
    this.apiKey = options.apiKey;
    this.headers = options.headers;
  }
  // Shared config (base URL + lazy auth headers) for all models created here.
  // The API key is resolved lazily so env-var lookup happens per request.
  get baseConfig() {
    return {
      baseURL: this.baseURL,
      headers: () => __spreadValues({
        Authorization: `Bearer ${(0, import_provider_utils5.loadApiKey)({
          apiKey: this.apiKey,
          environmentVariableName: "LLMGATEWAY_API_KEY",
          description: "LLMGateway"
        })}`
      }, this.headers)
    };
  }
  /** Creates a chat language model for the /chat/completions endpoint. */
  chat(modelId, settings = {}) {
    return new LLMGatewayChatLanguageModel(modelId, settings, __spreadProps(__spreadValues({
      provider: "llmgateway.chat"
    }, this.baseConfig), {
      compatibility: "strict",
      url: ({ path }) => `${this.baseURL}${path}`
    }));
  }
  /** Creates a text-completion language model for the /completions endpoint. */
  completion(modelId, settings = {}) {
    return new LLMGatewayCompletionLanguageModel(modelId, settings, __spreadProps(__spreadValues({
      provider: "llmgateway.completion"
    }, this.baseConfig), {
      compatibility: "strict",
      url: ({ path }) => `${this.baseURL}${path}`
    }));
  }
};
|
|
1338
|
+
|
|
1339
|
+
// src/llmgateway-provider.ts
|
|
1340
|
+
var import_provider_utils6 = require("@ai-sdk/provider-utils");
|
|
1341
|
+
/**
 * Creates an LLMGateway provider. The returned value is a callable
 * (modelId, settings) => LanguageModelV1 with `.languageModel`, `.chat`
 * and `.completion` factory methods attached.
 *
 * Options: `baseURL` (default "https://api.llmgateway.io/v1"), `apiKey`
 * (falls back to the LLMGATEWAY_API_KEY env var at request time),
 * `compatibility` ("strict" | "compatible", default "compatible"),
 * `headers`, `fetch`, and `extraBody`.
 */
function createLLMGateway(options = {}) {
  var _a, _b;
  // Simplified from a redundant `options.baseURL ?? options.baseURL`
  // fallback (both branches yielded the same value). NOTE(review):
  // presumably a deprecated `baseUrl` alias was intended — confirm
  // against the TypeScript source before adding one.
  const baseURL = (_a = (0, import_provider_utils6.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.llmgateway.io/v1";
  const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
  // Lazy header construction so the env-var API key is read per request.
  const getHeaders = () => __spreadValues({
    Authorization: `Bearer ${(0, import_provider_utils6.loadApiKey)({
      apiKey: options.apiKey,
      environmentVariableName: "LLMGATEWAY_API_KEY",
      description: "LLMGateway"
    })}`
  }, options.headers);
  const createChatModel = (modelId, settings = {}) => new LLMGatewayChatLanguageModel(modelId, settings, {
    provider: "llmgateway.chat",
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    compatibility,
    fetch: options.fetch,
    extraBody: options.extraBody
  });
  const createCompletionModel = (modelId, settings = {}) => new LLMGatewayCompletionLanguageModel(modelId, settings, {
    provider: "llmgateway.completion",
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    compatibility,
    fetch: options.fetch,
    extraBody: options.extraBody
  });
  const createLanguageModel = (modelId, settings) => {
    // Guard against `new llmgateway(...)` misuse of the callable provider.
    if (new.target) {
      throw new Error(
        "The LLMGateway model function cannot be called with the new keyword."
      );
    }
    // The instruct model is the only one routed to the legacy endpoint.
    if (modelId === "openai/gpt-3.5-turbo-instruct") {
      return createCompletionModel(
        modelId,
        settings
      );
    }
    return createChatModel(modelId, settings);
  };
  const provider = (modelId, settings) => createLanguageModel(modelId, settings);
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
  provider.completion = createCompletionModel;
  return provider;
}
|
|
1388
|
+
// Default provider instance. Uses "strict" compatibility so streaming
// requests include `stream_options.include_usage`.
var llmgateway = createLLMGateway({
  compatibility: "strict"
  // strict for LLMGateway API
});
// Annotate the CommonJS export names for ESM import in node:
// (esbuild idiom: `0 && (...)` is dead code at runtime, but Node's CJS/ESM
// interop scanner statically detects the named exports from it.)
0 && (module.exports = {
  LLMGateway,
  createLLMGateway,
  llmgateway
});
//# sourceMappingURL=index.cjs.map
|