@llmgateway/ai-sdk-provider 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1291 @@
+ var __defProp = Object.defineProperty;
+ var __defProps = Object.defineProperties;
+ var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
+ var __getOwnPropSymbols = Object.getOwnPropertySymbols;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __propIsEnum = Object.prototype.propertyIsEnumerable;
+ var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
+ var __spreadValues = (a, b) => {
+   for (var prop in b || (b = {}))
+     if (__hasOwnProp.call(b, prop))
+       __defNormalProp(a, prop, b[prop]);
+   if (__getOwnPropSymbols)
+     for (var prop of __getOwnPropSymbols(b)) {
+       if (__propIsEnum.call(b, prop))
+         __defNormalProp(a, prop, b[prop]);
+     }
+   return a;
+ };
+ var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
+ var __objRest = (source, exclude) => {
+   var target = {};
+   for (var prop in source)
+     if (__hasOwnProp.call(source, prop) && exclude.indexOf(prop) < 0)
+       target[prop] = source[prop];
+   if (source != null && __getOwnPropSymbols)
+     for (var prop of __getOwnPropSymbols(source)) {
+       if (exclude.indexOf(prop) < 0 && __propIsEnum.call(source, prop))
+         target[prop] = source[prop];
+     }
+   return target;
+ };
+
+ // src/schemas/reasoning-details.ts
+ import { z } from "zod";
+ var ReasoningDetailSummarySchema = z.object({
+   type: z.literal("reasoning.summary" /* Summary */),
+   summary: z.string()
+ });
+ var ReasoningDetailEncryptedSchema = z.object({
+   type: z.literal("reasoning.encrypted" /* Encrypted */),
+   data: z.string()
+ });
+ var ReasoningDetailTextSchema = z.object({
+   type: z.literal("reasoning.text" /* Text */),
+   text: z.string().nullish(),
+   signature: z.string().nullish()
+ });
+ var ReasoningDetailUnionSchema = z.union([
+   ReasoningDetailSummarySchema,
+   ReasoningDetailEncryptedSchema,
+   ReasoningDetailTextSchema
+ ]);
+ var ReasoningDetailsWithUnknownSchema = z.union([
+   ReasoningDetailUnionSchema,
+   z.unknown().transform(() => null)
+ ]);
+ var ReasoningDetailArraySchema = z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
+
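For context on the schemas above: `ReasoningDetailArraySchema` tolerates unknown detail variants by mapping them to `null` and filtering them out, so new detail types from upstream providers degrade gracefully instead of failing validation. A minimal sketch of that behavior (standard zod API; the sample payload is invented):

import { z } from "zod";

const Detail = z.union([
  z.object({ type: z.literal("reasoning.text"), text: z.string().nullish() }),
  z.unknown().transform(() => null) // anything unrecognized becomes null
]);
const Details = z.array(Detail).transform((d) => d.filter((x) => !!x));

Details.parse([
  { type: "reasoning.text", text: "thinking..." },
  { type: "reasoning.someFutureVariant", payload: 1 } // dropped, not rejected
]);
// => [{ type: "reasoning.text", text: "thinking..." }]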
+ // src/llmgateway-chat-language-model.ts
+ import {
+   InvalidResponseDataError,
+   UnsupportedFunctionalityError
+ } from "@ai-sdk/provider";
+ import {
+   combineHeaders,
+   createEventSourceResponseHandler,
+   createJsonResponseHandler,
+   generateId,
+   isParsableJson,
+   postJsonToApi
+ } from "@ai-sdk/provider-utils";
+ import { z as z3 } from "zod";
+
+ // src/convert-to-llmgateway-chat-messages.ts
+ import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+ function getCacheControl(providerMetadata) {
+   var _a, _b, _c;
+   const anthropic = providerMetadata == null ? void 0 : providerMetadata.anthropic;
+   const llmgateway = providerMetadata == null ? void 0 : providerMetadata.llmgateway;
+   return (_c = (_b = (_a = llmgateway == null ? void 0 : llmgateway.cacheControl) != null ? _a : llmgateway == null ? void 0 : llmgateway.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
+ }
+ function convertToLLMGatewayChatMessages(prompt) {
+   var _a, _b, _c;
+   const messages = [];
+   for (const { role, content, providerMetadata } of prompt) {
+     switch (role) {
+       case "system": {
+         messages.push({
+           role: "system",
+           content,
+           cache_control: getCacheControl(providerMetadata)
+         });
+         break;
+       }
+       case "user": {
+         if (content.length === 1 && ((_a = content[0]) == null ? void 0 : _a.type) === "text") {
+           messages.push({
+             role: "user",
+             content: content[0].text,
+             cache_control: (_b = getCacheControl(providerMetadata)) != null ? _b : getCacheControl(content[0].providerMetadata)
+           });
+           break;
+         }
+         const messageCacheControl = getCacheControl(providerMetadata);
+         const contentParts = content.map(
+           (part) => {
+             var _a2, _b2, _c2, _d;
+             const cacheControl = (_a2 = getCacheControl(part.providerMetadata)) != null ? _a2 : messageCacheControl;
+             switch (part.type) {
+               case "text":
+                 return {
+                   type: "text",
+                   text: part.text,
+                   // For text parts, only use part-specific cache control
+                   cache_control: cacheControl
+                 };
+               case "image":
+                 return {
+                   type: "image_url",
+                   image_url: {
+                     url: part.image instanceof URL ? part.image.toString() : `data:${(_b2 = part.mimeType) != null ? _b2 : "image/jpeg"};base64,${convertUint8ArrayToBase64(
+                       part.image
+                     )}`
+                   },
+                   // For image parts, use part-specific or message-level cache control
+                   cache_control: cacheControl
+                 };
+               case "file":
+                 return {
+                   type: "file",
+                   file: {
+                     filename: String(
+                       (_d = (_c2 = part.providerMetadata) == null ? void 0 : _c2.llmgateway) == null ? void 0 : _d.filename
+                     ),
+                     file_data: part.data instanceof Uint8Array ? `data:${part.mimeType};base64,${convertUint8ArrayToBase64(part.data)}` : `data:${part.mimeType};base64,${part.data}`
+                   },
+                   cache_control: cacheControl
+                 };
+               default: {
+                 const _exhaustiveCheck = part;
+                 throw new Error(
+                   `Unsupported content part type: ${_exhaustiveCheck}`
+                 );
+               }
+             }
+           }
+         );
+         messages.push({
+           role: "user",
+           content: contentParts
+         });
+         break;
+       }
+       case "assistant": {
+         let text = "";
+         let reasoning = "";
+         const reasoningDetails = [];
+         const toolCalls = [];
+         for (const part of content) {
+           switch (part.type) {
+             case "text": {
+               text += part.text;
+               break;
+             }
+             case "tool-call": {
+               toolCalls.push({
+                 id: part.toolCallId,
+                 type: "function",
+                 function: {
+                   name: part.toolName,
+                   arguments: JSON.stringify(part.args)
+                 }
+               });
+               break;
+             }
+             case "reasoning": {
+               reasoning += part.text;
+               reasoningDetails.push({
+                 type: "reasoning.text" /* Text */,
+                 text: part.text,
+                 signature: part.signature
+               });
+               break;
+             }
+             case "redacted-reasoning": {
+               reasoningDetails.push({
+                 type: "reasoning.encrypted" /* Encrypted */,
+                 data: part.data
+               });
+               break;
+             }
+             case "file":
+               break;
+             default: {
+               const _exhaustiveCheck = part;
+               throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+             }
+           }
+         }
+         messages.push({
+           role: "assistant",
+           content: text,
+           tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
+           reasoning: reasoning || void 0,
+           reasoning_details: reasoningDetails.length > 0 ? reasoningDetails : void 0,
+           cache_control: getCacheControl(providerMetadata)
+         });
+         break;
+       }
+       case "tool": {
+         for (const toolResponse of content) {
+           messages.push({
+             role: "tool",
+             tool_call_id: toolResponse.toolCallId,
+             content: JSON.stringify(toolResponse.result),
+             cache_control: (_c = getCacheControl(providerMetadata)) != null ? _c : getCacheControl(toolResponse.providerMetadata)
+           });
+         }
+         break;
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   return messages;
+ }
+
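To make the conversion concrete, a sketch of the mapping for a small prompt (the input follows the AI SDK v1 prompt shape the function destructures; values are illustrative):

convertToLLMGatewayChatMessages([
  { role: "system", content: "You are terse." },
  { role: "user", content: [{ type: "text", text: "Hi" }] }
]);
// => [
//   { role: "system", content: "You are terse.", cache_control: undefined },
//   { role: "user", content: "Hi", cache_control: undefined }
// ]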
+ // src/map-llmgateway-chat-logprobs.ts
+ function mapLLMGatewayChatLogProbsOutput(logprobs) {
+   var _a, _b;
+   return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
+     token,
+     logprob,
+     topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
+       token: token2,
+       logprob: logprob2
+     })) : []
+   }))) != null ? _b : void 0;
+ }
+
+ // src/map-llmgateway-finish-reason.ts
+ function mapLLMGatewayFinishReason(finishReason) {
+   switch (finishReason) {
+     case "stop":
+       return "stop";
+     case "length":
+       return "length";
+     case "content_filter":
+       return "content-filter";
+     case "function_call":
+     case "tool_calls":
+       return "tool-calls";
+     default:
+       return "unknown";
+   }
+ }
+
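The mapper normalizes the gateway's OpenAI-style snake_case reasons into the AI SDK's kebab-case vocabulary, with a catch-all for anything unrecognized:

mapLLMGatewayFinishReason("tool_calls");     // "tool-calls"
mapLLMGatewayFinishReason("content_filter"); // "content-filter"
mapLLMGatewayFinishReason("max_tokens");     // "unknown" (any unrecognized reason)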
+ // src/llmgateway-error.ts
+ import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
+ import { z as z2 } from "zod";
+ var LLMGatewayErrorResponseSchema = z2.object({
+   error: z2.object({
+     message: z2.string(),
+     type: z2.string(),
+     param: z2.any().nullable(),
+     code: z2.string().nullable()
+   })
+ });
+ var llmgatewayFailedResponseHandler = createJsonErrorResponseHandler({
+   errorSchema: LLMGatewayErrorResponseSchema,
+   errorToMessage: (data) => data.error.message
+ });
+
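The error handler expects the OpenAI-compatible error envelope; a failed HTTP response with a body like the following (sample values, not an actual gateway response) is surfaced using `error.message`:

{
  "error": {
    "message": "Invalid API key",
    "type": "authentication_error",
    "param": null,
    "code": "invalid_api_key"
  }
}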
+ // src/llmgateway-chat-language-model.ts
+ function isFunctionTool(tool) {
+   return "parameters" in tool;
+ }
+ var LLMGatewayChatLanguageModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.defaultObjectGenerationMode = "tool";
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     mode,
+     prompt,
+     maxTokens,
+     temperature,
+     topP,
+     frequencyPenalty,
+     presencePenalty,
+     seed,
+     stopSequences,
+     responseFormat,
+     topK,
+     providerMetadata
+   }) {
+     var _a;
+     const type = mode.type;
+     const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
+     const baseArgs = __spreadValues(__spreadValues(__spreadValues({
+       // model id:
+       model: this.modelId,
+       models: this.settings.models,
+       // model specific settings:
+       logit_bias: this.settings.logitBias,
+       logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
+       top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
+       user: this.settings.user,
+       parallel_tool_calls: this.settings.parallelToolCalls,
+       // standardized settings:
+       max_tokens: maxTokens,
+       temperature,
+       top_p: topP,
+       frequency_penalty: frequencyPenalty,
+       presence_penalty: presencePenalty,
+       seed,
+       stop: stopSequences,
+       response_format: responseFormat,
+       top_k: topK,
+       // messages:
+       messages: convertToLLMGatewayChatMessages(prompt),
+       // LLMGateway specific settings:
+       include_reasoning: this.settings.includeReasoning,
+       reasoning: this.settings.reasoning,
+       usage: this.settings.usage
+     }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
+     switch (type) {
+       case "regular": {
+         return __spreadValues(__spreadValues({}, baseArgs), prepareToolsAndToolChoice(mode));
+       }
+       case "object-json": {
+         return __spreadProps(__spreadValues({}, baseArgs), {
+           response_format: { type: "json_object" }
+         });
+       }
+       case "object-tool": {
+         return __spreadProps(__spreadValues({}, baseArgs), {
+           tool_choice: { type: "function", function: { name: mode.tool.name } },
+           tools: [
+             {
+               type: "function",
+               function: {
+                 name: mode.tool.name,
+                 description: mode.tool.description,
+                 parameters: mode.tool.parameters
+               }
+             }
+           ]
+         });
+       }
+       // Handle all non-text types with a single default case
+       default: {
+         const _exhaustiveCheck = type;
+         throw new UnsupportedFunctionalityError({
+           functionality: `${_exhaustiveCheck} mode`
+         });
+       }
+     }
+   }
+   async doGenerate(options) {
+     var _b, _c, _d, _e, _f, _g, _h, _i, _j;
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await postJsonToApi({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders(this.config.headers(), options.headers),
+       body: args,
+       failedResponseHandler: llmgatewayFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler(
+         LLMGatewayNonStreamChatCompletionResponseSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const _a = args, { messages: rawPrompt } = _a, rawSettings = __objRest(_a, ["messages"]);
+     const choice = response.choices[0];
+     if (!choice) {
+       throw new Error("No choice in response");
+     }
+     const usageInfo = response.usage ? {
+       promptTokens: (_b = response.usage.prompt_tokens) != null ? _b : 0,
+       completionTokens: (_c = response.usage.completion_tokens) != null ? _c : 0
+     } : {
+       promptTokens: 0,
+       completionTokens: 0
+     };
+     const providerMetadata = {};
+     if (response.usage && ((_d = this.settings.usage) == null ? void 0 : _d.include)) {
+       providerMetadata.llmgateway = {
+         usage: {
+           promptTokens: response.usage.prompt_tokens,
+           promptTokensDetails: response.usage.prompt_tokens_details ? {
+             cachedTokens: (_e = response.usage.prompt_tokens_details.cached_tokens) != null ? _e : 0
+           } : void 0,
+           completionTokens: response.usage.completion_tokens,
+           completionTokensDetails: response.usage.completion_tokens_details ? {
+             reasoningTokens: (_f = response.usage.completion_tokens_details.reasoning_tokens) != null ? _f : 0
+           } : void 0,
+           cost: response.usage.cost,
+           totalTokens: (_g = response.usage.total_tokens) != null ? _g : 0
+         }
+       };
+     }
+     const hasProviderMetadata = Object.keys(providerMetadata).length > 0;
+     const reasoningDetails = (_h = choice.message.reasoning_details) != null ? _h : [];
+     const reasoning = reasoningDetails.length > 0 ? reasoningDetails.map((detail) => {
+       var _a2;
+       switch (detail.type) {
+         case "reasoning.text" /* Text */: {
+           if (detail.text) {
+             return {
+               type: "text",
+               text: detail.text,
+               signature: (_a2 = detail.signature) != null ? _a2 : void 0
+             };
+           }
+           break;
+         }
+         case "reasoning.summary" /* Summary */: {
+           if (detail.summary) {
+             return {
+               type: "text",
+               text: detail.summary
+             };
+           }
+           break;
+         }
+         case "reasoning.encrypted" /* Encrypted */: {
+           if (detail.data) {
+             return {
+               type: "redacted",
+               data: detail.data
+             };
+           }
+           break;
+         }
+         default: {
+           detail;
+         }
+       }
+       return null;
+     }).filter((p) => p !== null) : choice.message.reasoning ? [
+       {
+         type: "text",
+         text: choice.message.reasoning
+       }
+     ] : [];
+     return __spreadValues({
+       response: {
+         id: response.id,
+         modelId: response.model
+       },
+       text: (_i = choice.message.content) != null ? _i : void 0,
+       reasoning,
+       toolCalls: (_j = choice.message.tool_calls) == null ? void 0 : _j.map((toolCall) => {
+         var _a2;
+         return {
+           toolCallType: "function",
+           toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
+           toolName: toolCall.function.name,
+           args: toolCall.function.arguments
+         };
+       }),
+       finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
+       usage: usageInfo,
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: [],
+       logprobs: mapLLMGatewayChatLogProbsOutput(choice.logprobs)
+     }, hasProviderMetadata ? { providerMetadata } : {});
+   }
+   async doStream(options) {
+     var _a, _c;
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await postJsonToApi({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders(this.config.headers(), options.headers),
+       body: __spreadProps(__spreadValues({}, args), {
+         stream: true,
+         // only include stream_options when in strict compatibility mode:
+         stream_options: this.config.compatibility === "strict" ? __spreadValues({
+           include_usage: true
+         }, ((_a = this.settings.usage) == null ? void 0 : _a.include) ? { include_usage: true } : {}) : void 0
+       }),
+       failedResponseHandler: llmgatewayFailedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler(
+         LLMGatewayStreamChatCompletionChunkSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const _b = args, { messages: rawPrompt } = _b, rawSettings = __objRest(_b, ["messages"]);
+     const toolCalls = [];
+     let finishReason = "other";
+     let usage = {
+       promptTokens: Number.NaN,
+       completionTokens: Number.NaN
+     };
+     let logprobs;
+     const llmgatewayUsage = {};
+     const shouldIncludeUsageAccounting = !!((_c = this.settings.usage) == null ? void 0 : _c.include);
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           transform(chunk, controller) {
+             var _a2, _b2, _c2, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: value.error });
+               return;
+             }
+             if (value.id) {
+               controller.enqueue({
+                 type: "response-metadata",
+                 id: value.id
+               });
+             }
+             if (value.model) {
+               controller.enqueue({
+                 type: "response-metadata",
+                 modelId: value.model
+               });
+             }
+             if (value.usage != null) {
+               usage = {
+                 promptTokens: value.usage.prompt_tokens,
+                 completionTokens: value.usage.completion_tokens
+               };
+               llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
+               if (value.usage.prompt_tokens_details) {
+                 llmgatewayUsage.promptTokensDetails = {
+                   cachedTokens: (_a2 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a2 : 0
+                 };
+               }
+               llmgatewayUsage.completionTokens = value.usage.completion_tokens;
+               if (value.usage.completion_tokens_details) {
+                 llmgatewayUsage.completionTokensDetails = {
+                   reasoningTokens: (_b2 = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b2 : 0
+                 };
+               }
+               llmgatewayUsage.cost = value.usage.cost;
+               llmgatewayUsage.totalTokens = value.usage.total_tokens;
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapLLMGatewayFinishReason(choice.finish_reason);
+             }
+             if ((choice == null ? void 0 : choice.delta) == null) {
+               return;
+             }
+             const delta = choice.delta;
+             if (delta.content != null) {
+               controller.enqueue({
+                 type: "text-delta",
+                 textDelta: delta.content
+               });
+             }
+             if (delta.reasoning != null) {
+               controller.enqueue({
+                 type: "reasoning",
+                 textDelta: delta.reasoning
+               });
+             }
+             if (delta.reasoning_details && delta.reasoning_details.length > 0) {
+               for (const detail of delta.reasoning_details) {
+                 switch (detail.type) {
+                   case "reasoning.text" /* Text */: {
+                     if (detail.text) {
+                       controller.enqueue({
+                         type: "reasoning",
+                         textDelta: detail.text
+                       });
+                     }
+                     if (detail.signature) {
+                       controller.enqueue({
+                         type: "reasoning-signature",
+                         signature: detail.signature
+                       });
+                     }
+                     break;
+                   }
+                   case "reasoning.encrypted" /* Encrypted */: {
+                     if (detail.data) {
+                       controller.enqueue({
+                         type: "redacted-reasoning",
+                         data: detail.data
+                       });
+                     }
+                     break;
+                   }
+                   case "reasoning.summary" /* Summary */: {
+                     if (detail.summary) {
+                       controller.enqueue({
+                         type: "reasoning",
+                         textDelta: detail.summary
+                       });
+                     }
+                     break;
+                   }
+                   default: {
+                     detail;
+                     break;
+                   }
+                 }
+               }
+             }
+             const mappedLogprobs = mapLLMGatewayChatLogProbsOutput(
+               choice == null ? void 0 : choice.logprobs
+             );
+             if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
+               if (logprobs === void 0) {
+                 logprobs = [];
+               }
+               logprobs.push(...mappedLogprobs);
+             }
+             if (delta.tool_calls != null) {
+               for (const toolCallDelta of delta.tool_calls) {
+                 const index = toolCallDelta.index;
+                 if (toolCalls[index] == null) {
+                   if (toolCallDelta.type !== "function") {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function' type.`
+                     });
+                   }
+                   if (toolCallDelta.id == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'id' to be a string.`
+                     });
+                   }
+                   if (((_c2 = toolCallDelta.function) == null ? void 0 : _c2.name) == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function.name' to be a string.`
+                     });
+                   }
+                   toolCalls[index] = {
+                     id: toolCallDelta.id,
+                     type: "function",
+                     function: {
+                       name: toolCallDelta.function.name,
+                       arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
+                     },
+                     sent: false
+                   };
+                   const toolCall2 = toolCalls[index];
+                   if (toolCall2 == null) {
+                     throw new Error("Tool call is missing");
+                   }
+                   if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
+                     controller.enqueue({
+                       type: "tool-call-delta",
+                       toolCallType: "function",
+                       toolCallId: toolCall2.id,
+                       toolName: toolCall2.function.name,
+                       argsTextDelta: toolCall2.function.arguments
+                     });
+                     controller.enqueue({
+                       type: "tool-call",
+                       toolCallType: "function",
+                       toolCallId: (_g = toolCall2.id) != null ? _g : generateId(),
+                       toolName: toolCall2.function.name,
+                       args: toolCall2.function.arguments
+                     });
+                     toolCall2.sent = true;
+                   }
+                   continue;
+                 }
+                 const toolCall = toolCalls[index];
+                 if (toolCall == null) {
+                   throw new Error("Tool call is missing");
+                 }
+                 if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
+                   toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
+                 }
+                 controller.enqueue({
+                   type: "tool-call-delta",
+                   toolCallType: "function",
+                   toolCallId: toolCall.id,
+                   toolName: toolCall.function.name,
+                   argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
+                 });
+                 if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+                   controller.enqueue({
+                     type: "tool-call",
+                     toolCallType: "function",
+                     toolCallId: (_n = toolCall.id) != null ? _n : generateId(),
+                     toolName: toolCall.function.name,
+                     args: toolCall.function.arguments
+                   });
+                   toolCall.sent = true;
+                 }
+               }
+             }
+           },
+           flush(controller) {
+             var _a2;
+             if (finishReason === "tool-calls") {
+               for (const toolCall of toolCalls) {
+                 if (!toolCall.sent) {
+                   controller.enqueue({
+                     type: "tool-call",
+                     toolCallType: "function",
+                     toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
+                     toolName: toolCall.function.name,
+                     // Coerce invalid arguments to an empty JSON object
+                     args: isParsableJson(toolCall.function.arguments) ? toolCall.function.arguments : "{}"
+                   });
+                   toolCall.sent = true;
+                 }
+               }
+             }
+             const providerMetadata = {};
+             if (shouldIncludeUsageAccounting && (llmgatewayUsage.totalTokens !== void 0 || llmgatewayUsage.cost !== void 0 || llmgatewayUsage.promptTokensDetails !== void 0 || llmgatewayUsage.completionTokensDetails !== void 0)) {
+               providerMetadata.llmgateway = {
+                 usage: llmgatewayUsage
+               };
+             }
+             const hasProviderMetadata = Object.keys(providerMetadata).length > 0 && shouldIncludeUsageAccounting;
+             controller.enqueue(__spreadValues({
+               type: "finish",
+               finishReason,
+               logprobs,
+               usage
+             }, hasProviderMetadata ? { providerMetadata } : {}));
+           }
+         })
+       ),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: []
+     };
+   }
+ };
+ var LLMGatewayChatCompletionBaseResponseSchema = z3.object({
+   id: z3.string().optional(),
+   model: z3.string().optional(),
+   usage: z3.object({
+     prompt_tokens: z3.number(),
+     prompt_tokens_details: z3.object({
+       cached_tokens: z3.number()
+     }).nullish(),
+     completion_tokens: z3.number(),
+     completion_tokens_details: z3.object({
+       reasoning_tokens: z3.number()
+     }).nullish(),
+     total_tokens: z3.number(),
+     cost: z3.number().optional()
+   }).nullish()
+ });
+ var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
+   choices: z3.array(
+     z3.object({
+       message: z3.object({
+         role: z3.literal("assistant"),
+         content: z3.string().nullable().optional(),
+         reasoning: z3.string().nullable().optional(),
+         reasoning_details: ReasoningDetailArraySchema.nullish(),
+         tool_calls: z3.array(
+           z3.object({
+             id: z3.string().optional().nullable(),
+             type: z3.literal("function"),
+             function: z3.object({
+               name: z3.string(),
+               arguments: z3.string()
+             })
+           })
+         ).optional()
+       }),
+       index: z3.number(),
+       logprobs: z3.object({
+         content: z3.array(
+           z3.object({
+             token: z3.string(),
+             logprob: z3.number(),
+             top_logprobs: z3.array(
+               z3.object({
+                 token: z3.string(),
+                 logprob: z3.number()
+               })
+             )
+           })
+         ).nullable()
+       }).nullable().optional(),
+       finish_reason: z3.string().optional().nullable()
+     })
+   )
+ });
+ var LLMGatewayStreamChatCompletionChunkSchema = z3.union([
+   LLMGatewayChatCompletionBaseResponseSchema.extend({
+     choices: z3.array(
+       z3.object({
+         delta: z3.object({
+           role: z3.enum(["assistant"]).optional(),
+           content: z3.string().nullish(),
+           reasoning: z3.string().nullish().optional(),
+           reasoning_details: ReasoningDetailArraySchema.nullish(),
+           tool_calls: z3.array(
+             z3.object({
+               index: z3.number(),
+               id: z3.string().nullish(),
+               type: z3.literal("function").optional(),
+               function: z3.object({
+                 name: z3.string().nullish(),
+                 arguments: z3.string().nullish()
+               })
+             })
+           ).nullish()
+         }).nullish(),
+         logprobs: z3.object({
+           content: z3.array(
+             z3.object({
+               token: z3.string(),
+               logprob: z3.number(),
+               top_logprobs: z3.array(
+                 z3.object({
+                   token: z3.string(),
+                   logprob: z3.number()
+                 })
+               )
+             })
+           ).nullable()
+         }).nullish(),
+         finish_reason: z3.string().nullable().optional(),
+         index: z3.number()
+       })
+     )
+   }),
+   LLMGatewayErrorResponseSchema
+ ]);
+ function prepareToolsAndToolChoice(mode) {
+   var _a;
+   const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+   if (tools == null) {
+     return { tools: void 0, tool_choice: void 0 };
+   }
+   const mappedTools = tools.map((tool) => {
+     if (isFunctionTool(tool)) {
+       return {
+         type: "function",
+         function: {
+           name: tool.name,
+           description: tool.description,
+           parameters: tool.parameters
+         }
+       };
+     }
+     return {
+       type: "function",
+       function: {
+         name: tool.name
+       }
+     };
+   });
+   const toolChoice = mode.toolChoice;
+   if (toolChoice == null) {
+     return { tools: mappedTools, tool_choice: void 0 };
+   }
+   const type = toolChoice.type;
+   switch (type) {
+     case "auto":
+     case "none":
+     case "required":
+       return { tools: mappedTools, tool_choice: type };
+     case "tool":
+       return {
+         tools: mappedTools,
+         tool_choice: {
+           type: "function",
+           function: {
+             name: toolChoice.toolName
+           }
+         }
+       };
+     default: {
+       const _exhaustiveCheck = type;
+       throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`);
+     }
+   }
+ }
+
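A sketch of `prepareToolsAndToolChoice` for one function tool with a forced tool choice (the tool definition is illustrative):

prepareToolsAndToolChoice({
  type: "regular",
  tools: [{ name: "getWeather", description: "Look up weather", parameters: { type: "object", properties: {} } }],
  toolChoice: { type: "tool", toolName: "getWeather" }
});
// => {
//   tools: [{ type: "function", function: { name: "getWeather", description: "Look up weather", parameters: { ... } } }],
//   tool_choice: { type: "function", function: { name: "getWeather" } }
// }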
+ // src/llmgateway-completion-language-model.ts
+ import { UnsupportedFunctionalityError as UnsupportedFunctionalityError3 } from "@ai-sdk/provider";
+ import {
+   combineHeaders as combineHeaders2,
+   createEventSourceResponseHandler as createEventSourceResponseHandler2,
+   createJsonResponseHandler as createJsonResponseHandler2,
+   postJsonToApi as postJsonToApi2
+ } from "@ai-sdk/provider-utils";
+ import { z as z4 } from "zod";
+
+ // src/convert-to-llmgateway-completion-prompt.ts
+ import {
+   InvalidPromptError,
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
+ } from "@ai-sdk/provider";
+ function convertToLLMGatewayCompletionPrompt({
+   prompt,
+   inputFormat,
+   user = "user",
+   assistant = "assistant"
+ }) {
+   if (inputFormat === "prompt" && prompt.length === 1 && prompt[0] && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0] && prompt[0].content[0].type === "text") {
+     return { prompt: prompt[0].content[0].text };
+   }
+   let text = "";
+   if (prompt[0] && prompt[0].role === "system") {
+     text += `${prompt[0].content}
+
+ `;
+     prompt = prompt.slice(1);
+   }
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         throw new InvalidPromptError({
+           message: `Unexpected system message in prompt: ${content}`,
+           prompt
+         });
+       }
+       case "user": {
+         const userMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+             case "image": {
+               throw new UnsupportedFunctionalityError2({
+                 functionality: "images"
+               });
+             }
+             case "file": {
+               throw new UnsupportedFunctionalityError2({
+                 functionality: "file attachments"
+               });
+             }
+             default: {
+               const _exhaustiveCheck = part;
+               throw new Error(
+                 `Unsupported content type: ${_exhaustiveCheck}`
+               );
+             }
+           }
+         }).join("");
+         text += `${user}:
+ ${userMessage}
+
+ `;
+         break;
+       }
+       case "assistant": {
+         const assistantMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+             case "tool-call": {
+               throw new UnsupportedFunctionalityError2({
+                 functionality: "tool-call messages"
+               });
+             }
+             case "reasoning": {
+               throw new UnsupportedFunctionalityError2({
+                 functionality: "reasoning messages"
+               });
+             }
+             case "redacted-reasoning": {
+               throw new UnsupportedFunctionalityError2({
+                 functionality: "redacted reasoning messages"
+               });
+             }
+             case "file": {
+               throw new UnsupportedFunctionalityError2({
+                 functionality: "file attachments"
+               });
+             }
+             default: {
+               const _exhaustiveCheck = part;
+               throw new Error(
+                 `Unsupported content type: ${_exhaustiveCheck}`
+               );
+             }
+           }
+         }).join("");
+         text += `${assistant}:
+ ${assistantMessage}
+
+ `;
+         break;
+       }
+       case "tool": {
+         throw new UnsupportedFunctionalityError2({
+           functionality: "tool messages"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   text += `${assistant}:
+ `;
+   return {
+     prompt: text
+   };
+ }
+
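Given the prefixes above, a multi-turn prompt flattens into a plain-text transcript that always ends with an open assistant turn; a sketch (values illustrative):

convertToLLMGatewayCompletionPrompt({
  inputFormat: "messages",
  prompt: [
    { role: "system", content: "Be brief." },
    { role: "user", content: [{ type: "text", text: "Hi" }] }
  ]
});
// => { prompt: "Be brief.\n\nuser:\nHi\n\nassistant:\n" }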
+ // src/map-llmgateway-completion-logprobs.ts
+ function mapLLMGatewayCompletionLogprobs(logprobs) {
+   return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => {
+     var _a, _b;
+     return {
+       token,
+       logprob: (_a = logprobs.token_logprobs[index]) != null ? _a : 0,
+       topLogprobs: logprobs.top_logprobs ? Object.entries((_b = logprobs.top_logprobs[index]) != null ? _b : {}).map(
+         ([token2, logprob]) => ({
+           token: token2,
+           logprob
+         })
+       ) : []
+     };
+   });
+ }
+
+ // src/llmgateway-completion-language-model.ts
+ var LLMGatewayCompletionLanguageModel = class {
+   constructor(modelId, settings, config) {
+     this.specificationVersion = "v1";
+     this.defaultObjectGenerationMode = void 0;
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   getArgs({
+     mode,
+     inputFormat,
+     prompt,
+     maxTokens,
+     temperature,
+     topP,
+     frequencyPenalty,
+     presencePenalty,
+     seed,
+     responseFormat,
+     topK,
+     stopSequences,
+     providerMetadata
+   }) {
+     var _a, _b;
+     const type = mode.type;
+     const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
+     const { prompt: completionPrompt } = convertToLLMGatewayCompletionPrompt({
+       prompt,
+       inputFormat
+     });
+     const baseArgs = __spreadValues(__spreadValues(__spreadValues({
+       // model id:
+       model: this.modelId,
+       models: this.settings.models,
+       // model specific settings:
+       logit_bias: this.settings.logitBias,
+       logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
+       suffix: this.settings.suffix,
+       user: this.settings.user,
+       // standardized settings:
+       max_tokens: maxTokens,
+       temperature,
+       top_p: topP,
+       frequency_penalty: frequencyPenalty,
+       presence_penalty: presencePenalty,
+       seed,
+       stop: stopSequences,
+       response_format: responseFormat,
+       top_k: topK,
+       // prompt:
+       prompt: completionPrompt
+     }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
+     switch (type) {
+       case "regular": {
+         if ((_b = mode.tools) == null ? void 0 : _b.length) {
+           throw new UnsupportedFunctionalityError3({
+             functionality: "tools"
+           });
+         }
+         if (mode.toolChoice) {
+           throw new UnsupportedFunctionalityError3({
+             functionality: "toolChoice"
+           });
+         }
+         return baseArgs;
+       }
+       case "object-json": {
+         throw new UnsupportedFunctionalityError3({
+           functionality: "object-json mode"
+         });
+       }
+       case "object-tool": {
+         throw new UnsupportedFunctionalityError3({
+           functionality: "object-tool mode"
+         });
+       }
+       // Handle all non-text types with a single default case
+       default: {
+         const _exhaustiveCheck = type;
+         throw new UnsupportedFunctionalityError3({
+           functionality: `${_exhaustiveCheck} mode`
+         });
+       }
+     }
+   }
+   async doGenerate(options) {
+     var _b, _c, _d, _e, _f;
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await postJsonToApi2({
+       url: this.config.url({
+         path: "/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders2(this.config.headers(), options.headers),
+       body: args,
+       failedResponseHandler: llmgatewayFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler2(
+         LLMGatewayCompletionChunkSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
+     if ("error" in response) {
+       throw new Error(`${response.error.message}`);
+     }
+     const choice = response.choices[0];
+     if (!choice) {
+       throw new Error("No choice in LLMGateway completion response");
+     }
+     return {
+       response: {
+         id: response.id,
+         modelId: response.model
+       },
+       text: (_b = choice.text) != null ? _b : "",
+       reasoning: choice.reasoning || void 0,
+       usage: {
+         promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : 0,
+         completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : 0
+       },
+       finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
+       logprobs: mapLLMGatewayCompletionLogprobs(choice.logprobs),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: []
+     };
+   }
+   async doStream(options) {
+     const args = this.getArgs(options);
+     const { responseHeaders, value: response } = await postJsonToApi2({
+       url: this.config.url({
+         path: "/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders2(this.config.headers(), options.headers),
+       body: __spreadProps(__spreadValues({}, args), {
+         stream: true,
+         // only include stream_options when in strict compatibility mode:
+         stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+       }),
+       failedResponseHandler: llmgatewayFailedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler2(
+         LLMGatewayCompletionChunkSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
+     let finishReason = "other";
+     let usage = {
+       promptTokens: Number.NaN,
+       completionTokens: Number.NaN
+     };
+     let logprobs;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           transform(chunk, controller) {
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: value.error });
+               return;
+             }
+             if (value.usage != null) {
+               usage = {
+                 promptTokens: value.usage.prompt_tokens,
+                 completionTokens: value.usage.completion_tokens
+               };
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapLLMGatewayFinishReason(choice.finish_reason);
+             }
+             if ((choice == null ? void 0 : choice.text) != null) {
+               controller.enqueue({
+                 type: "text-delta",
+                 textDelta: choice.text
+               });
+             }
+             const mappedLogprobs = mapLLMGatewayCompletionLogprobs(
+               choice == null ? void 0 : choice.logprobs
+             );
+             if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
+               if (logprobs === void 0) {
+                 logprobs = [];
+               }
+               logprobs.push(...mappedLogprobs);
+             }
+           },
+           flush(controller) {
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               logprobs,
+               usage
+             });
+           }
+         })
+       ),
+       rawCall: { rawPrompt, rawSettings },
+       rawResponse: { headers: responseHeaders },
+       warnings: []
+     };
+   }
+ };
+ var LLMGatewayCompletionChunkSchema = z4.union([
+   z4.object({
+     id: z4.string().optional(),
+     model: z4.string().optional(),
+     choices: z4.array(
+       z4.object({
+         text: z4.string(),
+         reasoning: z4.string().nullish().optional(),
+         reasoning_details: ReasoningDetailArraySchema.nullish(),
+         finish_reason: z4.string().nullish(),
+         index: z4.number(),
+         logprobs: z4.object({
+           tokens: z4.array(z4.string()),
+           token_logprobs: z4.array(z4.number()),
+           top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
+         }).nullable().optional()
+       })
+     ),
+     usage: z4.object({
+       prompt_tokens: z4.number(),
+       completion_tokens: z4.number()
+     }).optional().nullable()
+   }),
+   LLMGatewayErrorResponseSchema
+ ]);
+ export {
+   LLMGatewayChatLanguageModel,
+   LLMGatewayCompletionLanguageModel
+ };
+ //# sourceMappingURL=index.js.map
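Taken together, a minimal sketch of driving the chat model directly. The `config` fields mirror what the class reads (`provider`, `url`, `headers`, `compatibility`, `fetch`); the base URL, model id, and API key below are placeholder assumptions, and in normal use a provider factory from this package would construct the model instead:

import { LLMGatewayChatLanguageModel } from "@llmgateway/ai-sdk-provider";

const model = new LLMGatewayChatLanguageModel(
  "openai/gpt-4o",              // modelId: placeholder
  { usage: { include: true } }, // settings: request usage accounting in providerMetadata
  {
    provider: "llmgateway",
    compatibility: "strict",
    url: ({ path }) => `https://api.llmgateway.io/v1${path}`, // assumed base URL
    headers: () => ({ Authorization: `Bearer ${process.env.LLM_GATEWAY_API_KEY}` })
  }
);

const result = await model.doGenerate({
  inputFormat: "prompt",
  mode: { type: "regular" },
  prompt: [{ role: "user", content: [{ type: "text", text: "Hello!" }] }]
});
console.log(result.text, result.providerMetadata?.llmgateway?.usage);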