@llmgateway/ai-sdk-provider 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/README.md +187 -0
- package/dist/index.cjs +1398 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +263 -0
- package/dist/index.d.ts +263 -0
- package/dist/index.js +1390 -0
- package/dist/index.js.map +1 -0
- package/dist/internal/index.cjs +1298 -0
- package/dist/internal/index.cjs.map +1 -0
- package/dist/internal/index.d.cts +184 -0
- package/dist/internal/index.d.ts +184 -0
- package/dist/internal/index.js +1291 -0
- package/dist/internal/index.js.map +1 -0
- package/package.json +82 -0
package/dist/index.js
ADDED
@@ -0,0 +1,1390 @@
var __defProp = Object.defineProperty;
var __defProps = Object.defineProperties;
var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
var __getOwnPropSymbols = Object.getOwnPropertySymbols;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __propIsEnum = Object.prototype.propertyIsEnumerable;
var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
var __spreadValues = (a, b) => {
  for (var prop in b || (b = {}))
    if (__hasOwnProp.call(b, prop))
      __defNormalProp(a, prop, b[prop]);
  if (__getOwnPropSymbols)
    for (var prop of __getOwnPropSymbols(b)) {
      if (__propIsEnum.call(b, prop))
        __defNormalProp(a, prop, b[prop]);
    }
  return a;
};
var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
var __objRest = (source, exclude) => {
  var target = {};
  for (var prop in source)
    if (__hasOwnProp.call(source, prop) && exclude.indexOf(prop) < 0)
      target[prop] = source[prop];
  if (source != null && __getOwnPropSymbols)
    for (var prop of __getOwnPropSymbols(source)) {
      if (exclude.indexOf(prop) < 0 && __propIsEnum.call(source, prop))
        target[prop] = source[prop];
    }
  return target;
};

// src/llmgateway-facade.ts
import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils";

// src/schemas/reasoning-details.ts
import { z } from "zod";
var ReasoningDetailSummarySchema = z.object({
  type: z.literal("reasoning.summary" /* Summary */),
  summary: z.string()
});
var ReasoningDetailEncryptedSchema = z.object({
  type: z.literal("reasoning.encrypted" /* Encrypted */),
  data: z.string()
});
var ReasoningDetailTextSchema = z.object({
  type: z.literal("reasoning.text" /* Text */),
  text: z.string().nullish(),
  signature: z.string().nullish()
});
var ReasoningDetailUnionSchema = z.union([
  ReasoningDetailSummarySchema,
  ReasoningDetailEncryptedSchema,
  ReasoningDetailTextSchema
]);
var ReasoningDetailsWithUnknownSchema = z.union([
  ReasoningDetailUnionSchema,
  z.unknown().transform(() => null)
]);
var ReasoningDetailArraySchema = z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
// src/llmgateway-chat-language-model.ts
import {
  InvalidResponseDataError,
  UnsupportedFunctionalityError
} from "@ai-sdk/provider";
import {
  combineHeaders,
  createEventSourceResponseHandler,
  createJsonResponseHandler,
  generateId,
  isParsableJson,
  postJsonToApi
} from "@ai-sdk/provider-utils";
import { z as z3 } from "zod";

// src/convert-to-llmgateway-chat-messages.ts
import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
function getCacheControl(providerMetadata) {
  var _a, _b, _c;
  const anthropic = providerMetadata == null ? void 0 : providerMetadata.anthropic;
  const llmgateway2 = providerMetadata == null ? void 0 : providerMetadata.llmgateway;
  return (_c = (_b = (_a = llmgateway2 == null ? void 0 : llmgateway2.cacheControl) != null ? _a : llmgateway2 == null ? void 0 : llmgateway2.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
}
function convertToLLMGatewayChatMessages(prompt) {
  var _a, _b, _c;
  const messages = [];
  for (const { role, content, providerMetadata } of prompt) {
    switch (role) {
      case "system": {
        messages.push({
          role: "system",
          content,
          cache_control: getCacheControl(providerMetadata)
        });
        break;
      }
      case "user": {
        if (content.length === 1 && ((_a = content[0]) == null ? void 0 : _a.type) === "text") {
          messages.push({
            role: "user",
            content: content[0].text,
            cache_control: (_b = getCacheControl(providerMetadata)) != null ? _b : getCacheControl(content[0].providerMetadata)
          });
          break;
        }
        const messageCacheControl = getCacheControl(providerMetadata);
        const contentParts = content.map(
          (part) => {
            var _a2, _b2, _c2, _d;
            const cacheControl = (_a2 = getCacheControl(part.providerMetadata)) != null ? _a2 : messageCacheControl;
            switch (part.type) {
              case "text":
                return {
                  type: "text",
                  text: part.text,
                  // For text parts, only use part-specific cache control
                  cache_control: cacheControl
                };
              case "image":
                return {
                  type: "image_url",
                  image_url: {
                    url: part.image instanceof URL ? part.image.toString() : `data:${(_b2 = part.mimeType) != null ? _b2 : "image/jpeg"};base64,${convertUint8ArrayToBase64(
                      part.image
                    )}`
                  },
                  // For image parts, use part-specific or message-level cache control
                  cache_control: cacheControl
                };
              case "file":
                return {
                  type: "file",
                  file: {
                    filename: String(
                      (_d = (_c2 = part.providerMetadata) == null ? void 0 : _c2.llmgateway) == null ? void 0 : _d.filename
                    ),
                    file_data: part.data instanceof Uint8Array ? `data:${part.mimeType};base64,${convertUint8ArrayToBase64(part.data)}` : `data:${part.mimeType};base64,${part.data}`
                  },
                  cache_control: cacheControl
                };
              default: {
                const _exhaustiveCheck = part;
                throw new Error(
                  `Unsupported content part type: ${_exhaustiveCheck}`
                );
              }
            }
          }
        );
        messages.push({
          role: "user",
          content: contentParts
        });
        break;
      }
      case "assistant": {
        let text = "";
        let reasoning = "";
        const reasoningDetails = [];
        const toolCalls = [];
        for (const part of content) {
          switch (part.type) {
            case "text": {
              text += part.text;
              break;
            }
            case "tool-call": {
              toolCalls.push({
                id: part.toolCallId,
                type: "function",
                function: {
                  name: part.toolName,
                  arguments: JSON.stringify(part.args)
                }
              });
              break;
            }
            case "reasoning": {
              reasoning += part.text;
              reasoningDetails.push({
                type: "reasoning.text" /* Text */,
                text: part.text,
                signature: part.signature
              });
              break;
            }
            case "redacted-reasoning": {
              reasoningDetails.push({
                type: "reasoning.encrypted" /* Encrypted */,
                data: part.data
              });
              break;
            }
            case "file":
              break;
            default: {
              const _exhaustiveCheck = part;
              throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
            }
          }
        }
        messages.push({
          role: "assistant",
          content: text,
          tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
          reasoning: reasoning || void 0,
          reasoning_details: reasoningDetails.length > 0 ? reasoningDetails : void 0,
          cache_control: getCacheControl(providerMetadata)
        });
        break;
      }
      case "tool": {
        for (const toolResponse of content) {
          messages.push({
            role: "tool",
            tool_call_id: toolResponse.toolCallId,
            content: JSON.stringify(toolResponse.result),
            cache_control: (_c = getCacheControl(providerMetadata)) != null ? _c : getCacheControl(toolResponse.providerMetadata)
          });
        }
        break;
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  return messages;
}
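Note: `getCacheControl` above accepts the cache hint from either the `llmgateway` or the `anthropic` provider-metadata namespace, in camelCase or snake_case. A minimal sketch of the round trip through `convertToLLMGatewayChatMessages` (the `ephemeral` hint value is an illustrative assumption, not defined in this file):

// Sketch: a system message carrying an assumed cache hint.
const gatewayMessages = convertToLLMGatewayChatMessages([
  {
    role: "system",
    content: "You are a helpful assistant.",
    providerMetadata: {
      llmgateway: { cacheControl: { type: "ephemeral" } } // assumed hint shape
    }
  }
]);
// gatewayMessages[0] =>
// { role: "system", content: "You are a helpful assistant.",
//   cache_control: { type: "ephemeral" } }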
// src/map-llmgateway-chat-logprobs.ts
function mapLLMGatewayChatLogProbsOutput(logprobs) {
  var _a, _b;
  return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
    token,
    logprob,
    topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
      token: token2,
      logprob: logprob2
    })) : []
  }))) != null ? _b : void 0;
}

// src/map-llmgateway-finish-reason.ts
function mapLLMGatewayFinishReason(finishReason) {
  switch (finishReason) {
    case "stop":
      return "stop";
    case "length":
      return "length";
    case "content_filter":
      return "content-filter";
    case "function_call":
    case "tool_calls":
      return "tool-calls";
    default:
      return "unknown";
  }
}

// src/llmgateway-error.ts
import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
import { z as z2 } from "zod";
var LLMGatewayErrorResponseSchema = z2.object({
  error: z2.object({
    message: z2.string(),
    type: z2.string(),
    param: z2.any().nullable(),
    code: z2.string().nullable()
  })
});
var llmgatewayFailedResponseHandler = createJsonErrorResponseHandler({
  errorSchema: LLMGatewayErrorResponseSchema,
  errorToMessage: (data) => data.error.message
});
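For orientation, `llmgatewayFailedResponseHandler` parses OpenAI-style error envelopes: a body shaped like the sketch below (field values are illustrative assumptions) satisfies `LLMGatewayErrorResponseSchema`, and `errorToMessage` surfaces its `error.message` as the thrown error text.

// Illustrative error envelope; only the shape is guaranteed by the schema above.
const errorBody = {
  error: {
    message: "Invalid API key", // assumed wording
    type: "authentication_error", // assumed type string
    param: null,
    code: null
  }
};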
// src/llmgateway-chat-language-model.ts
function isFunctionTool(tool) {
  return "parameters" in tool;
}
var LLMGatewayChatLanguageModel = class {
  constructor(modelId, settings, config) {
    this.specificationVersion = "v1";
    this.defaultObjectGenerationMode = "tool";
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  getArgs({
    mode,
    prompt,
    maxTokens,
    temperature,
    topP,
    frequencyPenalty,
    presencePenalty,
    seed,
    stopSequences,
    responseFormat,
    topK,
    providerMetadata
  }) {
    var _a;
    const type = mode.type;
    const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
    const baseArgs = __spreadValues(__spreadValues(__spreadValues({
      // model id:
      model: this.modelId,
      models: this.settings.models,
      // model specific settings:
      logit_bias: this.settings.logitBias,
      logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
      top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
      user: this.settings.user,
      parallel_tool_calls: this.settings.parallelToolCalls,
      // standardized settings:
      max_tokens: maxTokens,
      temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
      seed,
      stop: stopSequences,
      response_format: responseFormat,
      top_k: topK,
      // messages:
      messages: convertToLLMGatewayChatMessages(prompt),
      // LLMGateway specific settings:
      include_reasoning: this.settings.includeReasoning,
      reasoning: this.settings.reasoning,
      usage: this.settings.usage
    }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
    switch (type) {
      case "regular": {
        return __spreadValues(__spreadValues({}, baseArgs), prepareToolsAndToolChoice(mode));
      }
      case "object-json": {
        return __spreadProps(__spreadValues({}, baseArgs), {
          response_format: { type: "json_object" }
        });
      }
      case "object-tool": {
        return __spreadProps(__spreadValues({}, baseArgs), {
          tool_choice: { type: "function", function: { name: mode.tool.name } },
          tools: [
            {
              type: "function",
              function: {
                name: mode.tool.name,
                description: mode.tool.description,
                parameters: mode.tool.parameters
              }
            }
          ]
        });
      }
      // Handle all non-text types with a single default case
      default: {
        const _exhaustiveCheck = type;
        throw new UnsupportedFunctionalityError({
          functionality: `${_exhaustiveCheck} mode`
        });
      }
    }
  }
  async doGenerate(options) {
    var _b, _c, _d, _e, _f, _g, _h, _i, _j;
    const args = this.getArgs(options);
    const { responseHeaders, value: response } = await postJsonToApi({
      url: this.config.url({
        path: "/chat/completions",
        modelId: this.modelId
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: llmgatewayFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        LLMGatewayNonStreamChatCompletionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const _a = args, { messages: rawPrompt } = _a, rawSettings = __objRest(_a, ["messages"]);
    const choice = response.choices[0];
    if (!choice) {
      throw new Error("No choice in response");
    }
    const usageInfo = response.usage ? {
      promptTokens: (_b = response.usage.prompt_tokens) != null ? _b : 0,
      completionTokens: (_c = response.usage.completion_tokens) != null ? _c : 0
    } : {
      promptTokens: 0,
      completionTokens: 0
    };
    const providerMetadata = {};
    if (response.usage && ((_d = this.settings.usage) == null ? void 0 : _d.include)) {
      providerMetadata.llmgateway = {
        usage: {
          promptTokens: response.usage.prompt_tokens,
          promptTokensDetails: response.usage.prompt_tokens_details ? {
            cachedTokens: (_e = response.usage.prompt_tokens_details.cached_tokens) != null ? _e : 0
          } : void 0,
          completionTokens: response.usage.completion_tokens,
          completionTokensDetails: response.usage.completion_tokens_details ? {
            reasoningTokens: (_f = response.usage.completion_tokens_details.reasoning_tokens) != null ? _f : 0
          } : void 0,
          cost: response.usage.cost,
          totalTokens: (_g = response.usage.total_tokens) != null ? _g : 0
        }
      };
    }
    const hasProviderMetadata = Object.keys(providerMetadata).length > 0;
    const reasoningDetails = (_h = choice.message.reasoning_details) != null ? _h : [];
    const reasoning = reasoningDetails.length > 0 ? reasoningDetails.map((detail) => {
      var _a2;
      switch (detail.type) {
        case "reasoning.text" /* Text */: {
          if (detail.text) {
            return {
              type: "text",
              text: detail.text,
              signature: (_a2 = detail.signature) != null ? _a2 : void 0
            };
          }
          break;
        }
        case "reasoning.summary" /* Summary */: {
          if (detail.summary) {
            return {
              type: "text",
              text: detail.summary
            };
          }
          break;
        }
        case "reasoning.encrypted" /* Encrypted */: {
          if (detail.data) {
            return {
              type: "redacted",
              data: detail.data
            };
          }
          break;
        }
        default: {
          detail;
        }
      }
      return null;
    }).filter((p) => p !== null) : choice.message.reasoning ? [
      {
        type: "text",
        text: choice.message.reasoning
      }
    ] : [];
    return __spreadValues({
      response: {
        id: response.id,
        modelId: response.model
      },
      text: (_i = choice.message.content) != null ? _i : void 0,
      reasoning,
      toolCalls: (_j = choice.message.tool_calls) == null ? void 0 : _j.map((toolCall) => {
        var _a2;
        return {
          toolCallType: "function",
          toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
          toolName: toolCall.function.name,
          args: toolCall.function.arguments
        };
      }),
      finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
      usage: usageInfo,
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings: [],
      logprobs: mapLLMGatewayChatLogProbsOutput(choice.logprobs)
    }, hasProviderMetadata ? { providerMetadata } : {});
  }
  async doStream(options) {
    var _a, _c;
    const args = this.getArgs(options);
    const { responseHeaders, value: response } = await postJsonToApi({
      url: this.config.url({
        path: "/chat/completions",
        modelId: this.modelId
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body: __spreadProps(__spreadValues({}, args), {
        stream: true,
        // only include stream_options when in strict compatibility mode:
        stream_options: this.config.compatibility === "strict" ? __spreadValues({
          include_usage: true
        }, ((_a = this.settings.usage) == null ? void 0 : _a.include) ? { include_usage: true } : {}) : void 0
      }),
      failedResponseHandler: llmgatewayFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(
        LLMGatewayStreamChatCompletionChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const _b = args, { messages: rawPrompt } = _b, rawSettings = __objRest(_b, ["messages"]);
    const toolCalls = [];
    let finishReason = "other";
    let usage = {
      promptTokens: Number.NaN,
      completionTokens: Number.NaN
    };
    let logprobs;
    const llmgatewayUsage = {};
    const shouldIncludeUsageAccounting = !!((_c = this.settings.usage) == null ? void 0 : _c.include);
    return {
      stream: response.pipeThrough(
        new TransformStream({
          transform(chunk, controller) {
            var _a2, _b2, _c2, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            if ("error" in value) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: value.error });
              return;
            }
            if (value.id) {
              controller.enqueue({
                type: "response-metadata",
                id: value.id
              });
            }
            if (value.model) {
              controller.enqueue({
                type: "response-metadata",
                modelId: value.model
              });
            }
            if (value.usage != null) {
              usage = {
                promptTokens: value.usage.prompt_tokens,
                completionTokens: value.usage.completion_tokens
              };
              llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
              if (value.usage.prompt_tokens_details) {
                llmgatewayUsage.promptTokensDetails = {
                  cachedTokens: (_a2 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a2 : 0
                };
              }
              llmgatewayUsage.completionTokens = value.usage.completion_tokens;
              if (value.usage.completion_tokens_details) {
                llmgatewayUsage.completionTokensDetails = {
                  reasoningTokens: (_b2 = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b2 : 0
                };
              }
              llmgatewayUsage.cost = value.usage.cost;
              llmgatewayUsage.totalTokens = value.usage.total_tokens;
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapLLMGatewayFinishReason(choice.finish_reason);
            }
            if ((choice == null ? void 0 : choice.delta) == null) {
              return;
            }
            const delta = choice.delta;
            if (delta.content != null) {
              controller.enqueue({
                type: "text-delta",
                textDelta: delta.content
              });
            }
            if (delta.reasoning != null) {
              controller.enqueue({
                type: "reasoning",
                textDelta: delta.reasoning
              });
            }
            if (delta.reasoning_details && delta.reasoning_details.length > 0) {
              for (const detail of delta.reasoning_details) {
                switch (detail.type) {
                  case "reasoning.text" /* Text */: {
                    if (detail.text) {
                      controller.enqueue({
                        type: "reasoning",
                        textDelta: detail.text
                      });
                    }
                    if (detail.signature) {
                      controller.enqueue({
                        type: "reasoning-signature",
                        signature: detail.signature
                      });
                    }
                    break;
                  }
                  case "reasoning.encrypted" /* Encrypted */: {
                    if (detail.data) {
                      controller.enqueue({
                        type: "redacted-reasoning",
                        data: detail.data
                      });
                    }
                    break;
                  }
                  case "reasoning.summary" /* Summary */: {
                    if (detail.summary) {
                      controller.enqueue({
                        type: "reasoning",
                        textDelta: detail.summary
                      });
                    }
                    break;
                  }
                  default: {
                    detail;
                    break;
                  }
                }
              }
            }
            const mappedLogprobs = mapLLMGatewayChatLogProbsOutput(
              choice == null ? void 0 : choice.logprobs
            );
            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
              if (logprobs === void 0) {
                logprobs = [];
              }
              logprobs.push(...mappedLogprobs);
            }
            if (delta.tool_calls != null) {
              for (const toolCallDelta of delta.tool_calls) {
                const index = toolCallDelta.index;
                if (toolCalls[index] == null) {
                  if (toolCallDelta.type !== "function") {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function' type.`
                    });
                  }
                  if (toolCallDelta.id == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'id' to be a string.`
                    });
                  }
                  if (((_c2 = toolCallDelta.function) == null ? void 0 : _c2.name) == null) {
                    throw new InvalidResponseDataError({
                      data: toolCallDelta,
                      message: `Expected 'function.name' to be a string.`
                    });
                  }
                  toolCalls[index] = {
                    id: toolCallDelta.id,
                    type: "function",
                    function: {
                      name: toolCallDelta.function.name,
                      arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
                    },
                    sent: false
                  };
                  const toolCall2 = toolCalls[index];
                  if (toolCall2 == null) {
                    throw new Error("Tool call is missing");
                  }
                  if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
                    controller.enqueue({
                      type: "tool-call-delta",
                      toolCallType: "function",
                      toolCallId: toolCall2.id,
                      toolName: toolCall2.function.name,
                      argsTextDelta: toolCall2.function.arguments
                    });
                    controller.enqueue({
                      type: "tool-call",
                      toolCallType: "function",
                      toolCallId: (_g = toolCall2.id) != null ? _g : generateId(),
                      toolName: toolCall2.function.name,
                      args: toolCall2.function.arguments
                    });
                    toolCall2.sent = true;
                  }
                  continue;
                }
                const toolCall = toolCalls[index];
                if (toolCall == null) {
                  throw new Error("Tool call is missing");
                }
                if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
                  toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
                }
                controller.enqueue({
                  type: "tool-call-delta",
                  toolCallType: "function",
                  toolCallId: toolCall.id,
                  toolName: toolCall.function.name,
                  argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
                });
                if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                  controller.enqueue({
                    type: "tool-call",
                    toolCallType: "function",
                    toolCallId: (_n = toolCall.id) != null ? _n : generateId(),
                    toolName: toolCall.function.name,
                    args: toolCall.function.arguments
                  });
                  toolCall.sent = true;
                }
              }
            }
          },
          flush(controller) {
            var _a2;
            if (finishReason === "tool-calls") {
              for (const toolCall of toolCalls) {
                if (!toolCall.sent) {
                  controller.enqueue({
                    type: "tool-call",
                    toolCallType: "function",
                    toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
                    toolName: toolCall.function.name,
                    // Coerce invalid arguments to an empty JSON object
                    args: isParsableJson(toolCall.function.arguments) ? toolCall.function.arguments : "{}"
                  });
                  toolCall.sent = true;
                }
              }
            }
            const providerMetadata = {};
            if (shouldIncludeUsageAccounting && (llmgatewayUsage.totalTokens !== void 0 || llmgatewayUsage.cost !== void 0 || llmgatewayUsage.promptTokensDetails !== void 0 || llmgatewayUsage.completionTokensDetails !== void 0)) {
              providerMetadata.llmgateway = {
                usage: llmgatewayUsage
              };
            }
            const hasProviderMetadata = Object.keys(providerMetadata).length > 0 && shouldIncludeUsageAccounting;
            controller.enqueue(__spreadValues({
              type: "finish",
              finishReason,
              logprobs,
              usage
            }, hasProviderMetadata ? { providerMetadata } : {}));
          }
        })
      ),
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings: []
    };
  }
};
var LLMGatewayChatCompletionBaseResponseSchema = z3.object({
  id: z3.string().optional(),
  model: z3.string().optional(),
  usage: z3.object({
    prompt_tokens: z3.number(),
    prompt_tokens_details: z3.object({
      cached_tokens: z3.number()
    }).nullish(),
    completion_tokens: z3.number(),
    completion_tokens_details: z3.object({
      reasoning_tokens: z3.number()
    }).nullish(),
    total_tokens: z3.number(),
    cost: z3.number().optional()
  }).nullish()
});
var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
  choices: z3.array(
    z3.object({
      message: z3.object({
        role: z3.literal("assistant"),
        content: z3.string().nullable().optional(),
        reasoning: z3.string().nullable().optional(),
        reasoning_details: ReasoningDetailArraySchema.nullish(),
        tool_calls: z3.array(
          z3.object({
            id: z3.string().optional().nullable(),
            type: z3.literal("function"),
            function: z3.object({
              name: z3.string(),
              arguments: z3.string()
            })
          })
        ).optional()
      }),
      index: z3.number(),
      logprobs: z3.object({
        content: z3.array(
          z3.object({
            token: z3.string(),
            logprob: z3.number(),
            top_logprobs: z3.array(
              z3.object({
                token: z3.string(),
                logprob: z3.number()
              })
            )
          })
        ).nullable()
      }).nullable().optional(),
      finish_reason: z3.string().optional().nullable()
    })
  )
});
var LLMGatewayStreamChatCompletionChunkSchema = z3.union([
  LLMGatewayChatCompletionBaseResponseSchema.extend({
    choices: z3.array(
      z3.object({
        delta: z3.object({
          role: z3.enum(["assistant"]).optional(),
          content: z3.string().nullish(),
          reasoning: z3.string().nullish().optional(),
          reasoning_details: ReasoningDetailArraySchema.nullish(),
          tool_calls: z3.array(
            z3.object({
              index: z3.number(),
              id: z3.string().nullish(),
              type: z3.literal("function").optional(),
              function: z3.object({
                name: z3.string().nullish(),
                arguments: z3.string().nullish()
              })
            })
          ).nullish()
        }).nullish(),
        logprobs: z3.object({
          content: z3.array(
            z3.object({
              token: z3.string(),
              logprob: z3.number(),
              top_logprobs: z3.array(
                z3.object({
                  token: z3.string(),
                  logprob: z3.number()
                })
              )
            })
          ).nullable()
        }).nullish(),
        finish_reason: z3.string().nullable().optional(),
        index: z3.number()
      })
    )
  }),
  LLMGatewayErrorResponseSchema
]);
function prepareToolsAndToolChoice(mode) {
  var _a;
  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
  if (tools == null) {
    return { tools: void 0, tool_choice: void 0 };
  }
  const mappedTools = tools.map((tool) => {
    if (isFunctionTool(tool)) {
      return {
        type: "function",
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.parameters
        }
      };
    }
    return {
      type: "function",
      function: {
        name: tool.name
      }
    };
  });
  const toolChoice = mode.toolChoice;
  if (toolChoice == null) {
    return { tools: mappedTools, tool_choice: void 0 };
  }
  const type = toolChoice.type;
  switch (type) {
    case "auto":
    case "none":
    case "required":
      return { tools: mappedTools, tool_choice: type };
    case "tool":
      return {
        tools: mappedTools,
        tool_choice: {
          type: "function",
          function: {
            name: toolChoice.toolName
          }
        }
      };
    default: {
      const _exhaustiveCheck = type;
      throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`);
    }
  }
}
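A short sketch of the tool mapping implemented by `prepareToolsAndToolChoice` above; the tool name and JSON schema are hypothetical:

// Forcing a specific function tool (hypothetical "getWeather").
const { tools, tool_choice } = prepareToolsAndToolChoice({
  type: "regular",
  tools: [
    {
      name: "getWeather",
      description: "Look up the current weather",
      parameters: { type: "object", properties: { city: { type: "string" } } }
    }
  ],
  toolChoice: { type: "tool", toolName: "getWeather" }
});
// tools => [{ type: "function",
//             function: { name: "getWeather", description: "...", parameters: {...} } }]
// tool_choice => { type: "function", function: { name: "getWeather" } }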
// src/llmgateway-completion-language-model.ts
import { UnsupportedFunctionalityError as UnsupportedFunctionalityError3 } from "@ai-sdk/provider";
import {
  combineHeaders as combineHeaders2,
  createEventSourceResponseHandler as createEventSourceResponseHandler2,
  createJsonResponseHandler as createJsonResponseHandler2,
  postJsonToApi as postJsonToApi2
} from "@ai-sdk/provider-utils";
import { z as z4 } from "zod";

// src/convert-to-llmgateway-completion-prompt.ts
import {
  InvalidPromptError,
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
} from "@ai-sdk/provider";
function convertToLLMGatewayCompletionPrompt({
  prompt,
  inputFormat,
  user = "user",
  assistant = "assistant"
}) {
  if (inputFormat === "prompt" && prompt.length === 1 && prompt[0] && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0] && prompt[0].content[0].type === "text") {
    return { prompt: prompt[0].content[0].text };
  }
  let text = "";
  if (prompt[0] && prompt[0].role === "system") {
    text += `${prompt[0].content}

`;
    prompt = prompt.slice(1);
  }
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        throw new InvalidPromptError({
          message: `Unexpected system message in prompt: ${content}`,
          prompt
        });
      }
      case "user": {
        const userMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "image": {
              throw new UnsupportedFunctionalityError2({
                functionality: "images"
              });
            }
            case "file": {
              throw new UnsupportedFunctionalityError2({
                functionality: "file attachments"
              });
            }
            default: {
              const _exhaustiveCheck = part;
              throw new Error(
                `Unsupported content type: ${_exhaustiveCheck}`
              );
            }
          }
        }).join("");
        text += `${user}:
${userMessage}

`;
        break;
      }
      case "assistant": {
        const assistantMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "tool-call": {
              throw new UnsupportedFunctionalityError2({
                functionality: "tool-call messages"
              });
            }
            case "reasoning": {
              throw new UnsupportedFunctionalityError2({
                functionality: "reasoning messages"
              });
            }
            case "redacted-reasoning": {
              throw new UnsupportedFunctionalityError2({
                functionality: "redacted reasoning messages"
              });
            }
            case "file": {
              throw new UnsupportedFunctionalityError2({
                functionality: "file attachments"
              });
            }
            default: {
              const _exhaustiveCheck = part;
              throw new Error(
                `Unsupported content type: ${_exhaustiveCheck}`
              );
            }
          }
        }).join("");
        text += `${assistant}:
${assistantMessage}

`;
        break;
      }
      case "tool": {
        throw new UnsupportedFunctionalityError2({
          functionality: "tool messages"
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  text += `${assistant}:
`;
  return {
    prompt: text
  };
}

// src/map-llmgateway-completion-logprobs.ts
function mapLLMGatewayCompletionLogprobs(logprobs) {
  return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => {
    var _a, _b;
    return {
      token,
      logprob: (_a = logprobs.token_logprobs[index]) != null ? _a : 0,
      topLogprobs: logprobs.top_logprobs ? Object.entries((_b = logprobs.top_logprobs[index]) != null ? _b : {}).map(
        ([token2, logprob]) => ({
          token: token2,
          logprob
        })
      ) : []
    };
  });
}
// src/llmgateway-completion-language-model.ts
var LLMGatewayCompletionLanguageModel = class {
  constructor(modelId, settings, config) {
    this.specificationVersion = "v1";
    this.defaultObjectGenerationMode = void 0;
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  getArgs({
    mode,
    inputFormat,
    prompt,
    maxTokens,
    temperature,
    topP,
    frequencyPenalty,
    presencePenalty,
    seed,
    responseFormat,
    topK,
    stopSequences,
    providerMetadata
  }) {
    var _a, _b;
    const type = mode.type;
    const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
    const { prompt: completionPrompt } = convertToLLMGatewayCompletionPrompt({
      prompt,
      inputFormat
    });
    const baseArgs = __spreadValues(__spreadValues(__spreadValues({
      // model id:
      model: this.modelId,
      models: this.settings.models,
      // model specific settings:
      logit_bias: this.settings.logitBias,
      logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
      suffix: this.settings.suffix,
      user: this.settings.user,
      // standardized settings:
      max_tokens: maxTokens,
      temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
      seed,
      stop: stopSequences,
      response_format: responseFormat,
      top_k: topK,
      // prompt:
      prompt: completionPrompt
    }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
    switch (type) {
      case "regular": {
        if ((_b = mode.tools) == null ? void 0 : _b.length) {
          throw new UnsupportedFunctionalityError3({
            functionality: "tools"
          });
        }
        if (mode.toolChoice) {
          throw new UnsupportedFunctionalityError3({
            functionality: "toolChoice"
          });
        }
        return baseArgs;
      }
      case "object-json": {
        throw new UnsupportedFunctionalityError3({
          functionality: "object-json mode"
        });
      }
      case "object-tool": {
        throw new UnsupportedFunctionalityError3({
          functionality: "object-tool mode"
        });
      }
      // Handle all non-text types with a single default case
      default: {
        const _exhaustiveCheck = type;
        throw new UnsupportedFunctionalityError3({
          functionality: `${_exhaustiveCheck} mode`
        });
      }
    }
  }
  async doGenerate(options) {
    var _b, _c, _d, _e, _f;
    const args = this.getArgs(options);
    const { responseHeaders, value: response } = await postJsonToApi2({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: combineHeaders2(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: llmgatewayFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler2(
        LLMGatewayCompletionChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
    if ("error" in response) {
      throw new Error(`${response.error.message}`);
    }
    const choice = response.choices[0];
    if (!choice) {
      throw new Error("No choice in LLMGateway completion response");
    }
    return {
      response: {
        id: response.id,
        modelId: response.model
      },
      text: (_b = choice.text) != null ? _b : "",
      reasoning: choice.reasoning || void 0,
      usage: {
        promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : 0,
        completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : 0
      },
      finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
      logprobs: mapLLMGatewayCompletionLogprobs(choice.logprobs),
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings: []
    };
  }
  async doStream(options) {
    const args = this.getArgs(options);
    const { responseHeaders, value: response } = await postJsonToApi2({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: combineHeaders2(this.config.headers(), options.headers),
      body: __spreadProps(__spreadValues({}, this.getArgs(options)), {
        stream: true,
        // only include stream_options when in strict compatibility mode:
        stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
      }),
      failedResponseHandler: llmgatewayFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler2(
        LLMGatewayCompletionChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
    let finishReason = "other";
    let usage = {
      promptTokens: Number.NaN,
      completionTokens: Number.NaN
    };
    let logprobs;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          transform(chunk, controller) {
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            if ("error" in value) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: value.error });
              return;
            }
            if (value.usage != null) {
              usage = {
                promptTokens: value.usage.prompt_tokens,
                completionTokens: value.usage.completion_tokens
              };
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapLLMGatewayFinishReason(choice.finish_reason);
            }
            if ((choice == null ? void 0 : choice.text) != null) {
              controller.enqueue({
                type: "text-delta",
                textDelta: choice.text
              });
            }
            const mappedLogprobs = mapLLMGatewayCompletionLogprobs(
              choice == null ? void 0 : choice.logprobs
            );
            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
              if (logprobs === void 0) {
                logprobs = [];
              }
              logprobs.push(...mappedLogprobs);
            }
          },
          flush(controller) {
            controller.enqueue({
              type: "finish",
              finishReason,
              logprobs,
              usage
            });
          }
        })
      ),
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings: []
    };
  }
};
var LLMGatewayCompletionChunkSchema = z4.union([
  z4.object({
    id: z4.string().optional(),
    model: z4.string().optional(),
    choices: z4.array(
      z4.object({
        text: z4.string(),
        reasoning: z4.string().nullish().optional(),
        reasoning_details: ReasoningDetailArraySchema.nullish(),
        finish_reason: z4.string().nullish(),
        index: z4.number(),
        logprobs: z4.object({
          tokens: z4.array(z4.string()),
          token_logprobs: z4.array(z4.number()),
          top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
        }).nullable().optional()
      })
    ),
    usage: z4.object({
      prompt_tokens: z4.number(),
      completion_tokens: z4.number()
    }).optional().nullable()
  }),
  LLMGatewayErrorResponseSchema
]);

// src/llmgateway-facade.ts
var LLMGateway = class {
  /**
   * Creates a new LLMGateway provider instance.
   */
  constructor(options = {}) {
    var _a, _b;
    this.baseURL = (_b = withoutTrailingSlash((_a = options.baseURL) != null ? _a : options.baseURL)) != null ? _b : "https://api.llmgateway.io/v1";
    this.apiKey = options.apiKey;
    this.headers = options.headers;
  }
  get baseConfig() {
    return {
      baseURL: this.baseURL,
      headers: () => __spreadValues({
        Authorization: `Bearer ${loadApiKey({
          apiKey: this.apiKey,
          environmentVariableName: "LLMGATEWAY_API_KEY",
          description: "LLMGateway"
        })}`
      }, this.headers)
    };
  }
  chat(modelId, settings = {}) {
    return new LLMGatewayChatLanguageModel(modelId, settings, __spreadProps(__spreadValues({
      provider: "llmgateway.chat"
    }, this.baseConfig), {
      compatibility: "strict",
      url: ({ path }) => `${this.baseURL}${path}`
    }));
  }
  completion(modelId, settings = {}) {
    return new LLMGatewayCompletionLanguageModel(modelId, settings, __spreadProps(__spreadValues({
      provider: "llmgateway.completion"
    }, this.baseConfig), {
      compatibility: "strict",
      url: ({ path }) => `${this.baseURL}${path}`
    }));
  }
};

// src/llmgateway-provider.ts
import { loadApiKey as loadApiKey2, withoutTrailingSlash as withoutTrailingSlash2 } from "@ai-sdk/provider-utils";
function createLLMGateway(options = {}) {
  var _a, _b, _c;
  const baseURL = (_b = withoutTrailingSlash2((_a = options.baseURL) != null ? _a : options.baseURL)) != null ? _b : "https://api.llmgateway.io/v1";
  const compatibility = (_c = options.compatibility) != null ? _c : "compatible";
  const getHeaders = () => __spreadValues({
    Authorization: `Bearer ${loadApiKey2({
      apiKey: options.apiKey,
      environmentVariableName: "LLMGATEWAY_API_KEY",
      description: "LLMGateway"
    })}`
  }, options.headers);
  const createChatModel = (modelId, settings = {}) => new LLMGatewayChatLanguageModel(modelId, settings, {
    provider: "llmgateway.chat",
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    compatibility,
    fetch: options.fetch,
    extraBody: options.extraBody
  });
  const createCompletionModel = (modelId, settings = {}) => new LLMGatewayCompletionLanguageModel(modelId, settings, {
    provider: "llmgateway.completion",
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    compatibility,
    fetch: options.fetch,
    extraBody: options.extraBody
  });
  const createLanguageModel = (modelId, settings) => {
    if (new.target) {
      throw new Error(
        "The LLMGateway model function cannot be called with the new keyword."
      );
    }
    if (modelId === "openai/gpt-3.5-turbo-instruct") {
      return createCompletionModel(
        modelId,
        settings
      );
    }
    return createChatModel(modelId, settings);
  };
  const provider = (modelId, settings) => createLanguageModel(modelId, settings);
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
  provider.completion = createCompletionModel;
  return provider;
}
var llmgateway = createLLMGateway({
  compatibility: "strict"
  // strict for LLMGateway API
});
export {
  LLMGateway,
  createLLMGateway,
  llmgateway
};
//# sourceMappingURL=index.js.map
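Typical consumption of these exports, sketched below: `generateText` comes from the `ai` package, which consumes the v1 language-model specification these classes implement; the model id and prompt are examples only.

import { generateText } from "ai";
import { createLLMGateway } from "@llmgateway/ai-sdk-provider";

// apiKey falls back to the LLMGATEWAY_API_KEY environment variable when omitted.
const llmgateway = createLLMGateway({ apiKey: process.env.LLMGATEWAY_API_KEY });

const { text } = await generateText({
  model: llmgateway("openai/gpt-4o"), // example model id
  prompt: "Say hello in one sentence."
});
console.log(text);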