@llmgateway/ai-sdk-provider 1.0.0

This diff shows the full published contents of this package version as released to its public registry, and is provided for informational purposes only.
@@ -0,0 +1,1298 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __defProps = Object.defineProperties;
4
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
+ var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
6
+ var __getOwnPropNames = Object.getOwnPropertyNames;
7
+ var __getOwnPropSymbols = Object.getOwnPropertySymbols;
8
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
9
+ var __propIsEnum = Object.prototype.propertyIsEnumerable;
10
+ var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
11
+ var __spreadValues = (a, b) => {
12
+ for (var prop in b || (b = {}))
13
+ if (__hasOwnProp.call(b, prop))
14
+ __defNormalProp(a, prop, b[prop]);
15
+ if (__getOwnPropSymbols)
16
+ for (var prop of __getOwnPropSymbols(b)) {
17
+ if (__propIsEnum.call(b, prop))
18
+ __defNormalProp(a, prop, b[prop]);
19
+ }
20
+ return a;
21
+ };
22
+ var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
23
+ var __objRest = (source, exclude) => {
24
+ var target = {};
25
+ for (var prop in source)
26
+ if (__hasOwnProp.call(source, prop) && exclude.indexOf(prop) < 0)
27
+ target[prop] = source[prop];
28
+ if (source != null && __getOwnPropSymbols)
29
+ for (var prop of __getOwnPropSymbols(source)) {
30
+ if (exclude.indexOf(prop) < 0 && __propIsEnum.call(source, prop))
31
+ target[prop] = source[prop];
32
+ }
33
+ return target;
34
+ };
35
+ var __export = (target, all) => {
36
+ for (var name in all)
37
+ __defProp(target, name, { get: all[name], enumerable: true });
38
+ };
39
+ var __copyProps = (to, from, except, desc) => {
40
+ if (from && typeof from === "object" || typeof from === "function") {
41
+ for (let key of __getOwnPropNames(from))
42
+ if (!__hasOwnProp.call(to, key) && key !== except)
43
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
44
+ }
45
+ return to;
46
+ };
47
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
48
+
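Everything above this point is esbuild's CommonJS interop preamble rather than hand-written package code. As a rough sketch of what these helpers compile from (the TypeScript equivalents below are an assumption about the original source, which is not part of this diff):

```ts
// Source-level constructs that lower to the helpers above (illustrative only):
const base = { model: "gpt-4o", temperature: 0 };

// `{ ...base, stream: true }` compiles to __spreadValues/__spreadProps:
const merged = { ...base, stream: true };

// `const { model, ...rest } = merged` compiles to __objRest(merged, ["model"]):
const { model, ...rest } = merged;

// `export { Foo }` from an ESM entry point compiles to __export/__toCommonJS,
// so that `module.exports` carries live getters for each export.
console.log(model, rest);
```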
49
+ // src/internal/index.ts
50
+ var index_exports = {};
51
+ __export(index_exports, {
52
+ LLMGatewayChatLanguageModel: () => LLMGatewayChatLanguageModel,
53
+ LLMGatewayCompletionLanguageModel: () => LLMGatewayCompletionLanguageModel
54
+ });
55
+ module.exports = __toCommonJS(index_exports);
56
+
57
+ // src/schemas/reasoning-details.ts
58
+ var import_zod = require("zod");
59
+ var ReasoningDetailSummarySchema = import_zod.z.object({
60
+ type: import_zod.z.literal("reasoning.summary" /* Summary */),
61
+ summary: import_zod.z.string()
62
+ });
63
+ var ReasoningDetailEncryptedSchema = import_zod.z.object({
64
+ type: import_zod.z.literal("reasoning.encrypted" /* Encrypted */),
65
+ data: import_zod.z.string()
66
+ });
67
+ var ReasoningDetailTextSchema = import_zod.z.object({
68
+ type: import_zod.z.literal("reasoning.text" /* Text */),
69
+ text: import_zod.z.string().nullish(),
70
+ signature: import_zod.z.string().nullish()
71
+ });
72
+ var ReasoningDetailUnionSchema = import_zod.z.union([
73
+ ReasoningDetailSummarySchema,
74
+ ReasoningDetailEncryptedSchema,
75
+ ReasoningDetailTextSchema
76
+ ]);
77
+ var ReasoningDetailsWithUnknownSchema = import_zod.z.union([
78
+ ReasoningDetailUnionSchema,
79
+ import_zod.z.unknown().transform(() => null)
80
+ ]);
81
+ var ReasoningDetailArraySchema = import_zod.z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
82
+
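Note the tolerant parsing strategy above: the `z.unknown().transform(() => null)` branch lets `ReasoningDetailArraySchema` accept reasoning-detail variants this version does not know about, mapping them to `null` and filtering them out rather than failing the whole parse. A minimal standalone sketch of the same pattern (using zod directly; the names here are illustrative):

```ts
import { z } from "zod";

const Known = z.object({
  type: z.literal("reasoning.text"),
  text: z.string().nullish(),
});

// Unknown entries become null, then get filtered out:
const TolerantArray = z
  .array(z.union([Known, z.unknown().transform(() => null)]))
  .transform((items) => items.filter((item) => !!item));

const parsed = TolerantArray.parse([
  { type: "reasoning.text", text: "thinking..." },
  { type: "reasoning.future-variant", payload: 123 }, // silently dropped
]);
console.log(parsed.length); // 1
```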
83
+ // src/llmgateway-chat-language-model.ts
84
+ var import_provider = require("@ai-sdk/provider");
85
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
86
+ var import_zod3 = require("zod");
87
+
88
+ // src/convert-to-llmgateway-chat-messages.ts
89
+ var import_provider_utils = require("@ai-sdk/provider-utils");
90
+ function getCacheControl(providerMetadata) {
91
+ var _a, _b, _c;
92
+ const anthropic = providerMetadata == null ? void 0 : providerMetadata.anthropic;
93
+ const llmgateway = providerMetadata == null ? void 0 : providerMetadata.llmgateway;
94
+ return (_c = (_b = (_a = llmgateway == null ? void 0 : llmgateway.cacheControl) != null ? _a : llmgateway == null ? void 0 : llmgateway.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
95
+ }
96
+ function convertToLLMGatewayChatMessages(prompt) {
97
+ var _a, _b, _c;
98
+ const messages = [];
99
+ for (const { role, content, providerMetadata } of prompt) {
100
+ switch (role) {
101
+ case "system": {
102
+ messages.push({
103
+ role: "system",
104
+ content,
105
+ cache_control: getCacheControl(providerMetadata)
106
+ });
107
+ break;
108
+ }
109
+ case "user": {
110
+ if (content.length === 1 && ((_a = content[0]) == null ? void 0 : _a.type) === "text") {
111
+ messages.push({
112
+ role: "user",
113
+ content: content[0].text,
114
+ cache_control: (_b = getCacheControl(providerMetadata)) != null ? _b : getCacheControl(content[0].providerMetadata)
115
+ });
116
+ break;
117
+ }
118
+ const messageCacheControl = getCacheControl(providerMetadata);
119
+ const contentParts = content.map(
120
+ (part) => {
121
+ var _a2, _b2, _c2, _d;
122
+ const cacheControl = (_a2 = getCacheControl(part.providerMetadata)) != null ? _a2 : messageCacheControl;
123
+ switch (part.type) {
124
+ case "text":
125
+ return {
126
+ type: "text",
127
+ text: part.text,
128
+ // For text parts, only use part-specific cache control
129
+ cache_control: cacheControl
130
+ };
131
+ case "image":
132
+ return {
133
+ type: "image_url",
134
+ image_url: {
135
+ url: part.image instanceof URL ? part.image.toString() : `data:${(_b2 = part.mimeType) != null ? _b2 : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(
136
+ part.image
137
+ )}`
138
+ },
139
+ // For image parts, use part-specific or message-level cache control
140
+ cache_control: cacheControl
141
+ };
142
+ case "file":
143
+ return {
144
+ type: "file",
145
+ file: {
146
+ filename: String(
147
+ (_d = (_c2 = part.providerMetadata) == null ? void 0 : _c2.llmgateway) == null ? void 0 : _d.filename
148
+ ),
149
+ file_data: part.data instanceof Uint8Array ? `data:${part.mimeType};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.data)}` : `data:${part.mimeType};base64,${part.data}`
150
+ },
151
+ cache_control: cacheControl
152
+ };
153
+ default: {
154
+ const _exhaustiveCheck = part;
155
+ throw new Error(
156
+ `Unsupported content part type: ${_exhaustiveCheck}`
157
+ );
158
+ }
159
+ }
160
+ }
161
+ );
162
+ messages.push({
163
+ role: "user",
164
+ content: contentParts
165
+ });
166
+ break;
167
+ }
168
+ case "assistant": {
169
+ let text = "";
170
+ let reasoning = "";
171
+ const reasoningDetails = [];
172
+ const toolCalls = [];
173
+ for (const part of content) {
174
+ switch (part.type) {
175
+ case "text": {
176
+ text += part.text;
177
+ break;
178
+ }
179
+ case "tool-call": {
180
+ toolCalls.push({
181
+ id: part.toolCallId,
182
+ type: "function",
183
+ function: {
184
+ name: part.toolName,
185
+ arguments: JSON.stringify(part.args)
186
+ }
187
+ });
188
+ break;
189
+ }
190
+ case "reasoning": {
191
+ reasoning += part.text;
192
+ reasoningDetails.push({
193
+ type: "reasoning.text" /* Text */,
194
+ text: part.text,
195
+ signature: part.signature
196
+ });
197
+ break;
198
+ }
199
+ case "redacted-reasoning": {
200
+ reasoningDetails.push({
201
+ type: "reasoning.encrypted" /* Encrypted */,
202
+ data: part.data
203
+ });
204
+ break;
205
+ }
206
+ case "file":
207
+ break;
208
+ default: {
209
+ const _exhaustiveCheck = part;
210
+ throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
211
+ }
212
+ }
213
+ }
214
+ messages.push({
215
+ role: "assistant",
216
+ content: text,
217
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
218
+ reasoning: reasoning || void 0,
219
+ reasoning_details: reasoningDetails.length > 0 ? reasoningDetails : void 0,
220
+ cache_control: getCacheControl(providerMetadata)
221
+ });
222
+ break;
223
+ }
224
+ case "tool": {
225
+ for (const toolResponse of content) {
226
+ messages.push({
227
+ role: "tool",
228
+ tool_call_id: toolResponse.toolCallId,
229
+ content: JSON.stringify(toolResponse.result),
230
+ cache_control: (_c = getCacheControl(providerMetadata)) != null ? _c : getCacheControl(toolResponse.providerMetadata)
231
+ });
232
+ }
233
+ break;
234
+ }
235
+ default: {
236
+ const _exhaustiveCheck = role;
237
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
238
+ }
239
+ }
240
+ }
241
+ return messages;
242
+ }
243
+
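`convertToLLMGatewayChatMessages` flattens the AI SDK prompt into OpenAI-style chat messages, threading Anthropic-style `cache_control` through from either `llmgateway` or `anthropic` provider metadata (camelCase or snake_case). A sketch of the mapping with a hypothetical prompt (the shapes follow the code above; the concrete values are invented):

```ts
const prompt = [
  {
    role: "system",
    content: "You are terse.",
    providerMetadata: { anthropic: { cacheControl: { type: "ephemeral" } } },
  },
  { role: "user", content: [{ type: "text", text: "Hi" }] },
];

// Expected result of convertToLLMGatewayChatMessages(prompt):
// [
//   { role: "system", content: "You are terse.",
//     cache_control: { type: "ephemeral" } },   // picked up via getCacheControl
//   { role: "user", content: "Hi",
//     cache_control: undefined },               // a single text part collapses to a plain string
// ]
```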
244
+ // src/map-llmgateway-chat-logprobs.ts
245
+ function mapLLMGatewayChatLogProbsOutput(logprobs) {
246
+ var _a, _b;
247
+ return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
248
+ token,
249
+ logprob,
250
+ topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
251
+ token: token2,
252
+ logprob: logprob2
253
+ })) : []
254
+ }))) != null ? _b : void 0;
255
+ }
256
+
257
+ // src/map-llmgateway-finish-reason.ts
258
+ function mapLLMGatewayFinishReason(finishReason) {
259
+ switch (finishReason) {
260
+ case "stop":
261
+ return "stop";
262
+ case "length":
263
+ return "length";
264
+ case "content_filter":
265
+ return "content-filter";
266
+ case "function_call":
267
+ case "tool_calls":
268
+ return "tool-calls";
269
+ default:
270
+ return "unknown";
271
+ }
272
+ }
273
+
274
+ // src/llmgateway-error.ts
275
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
276
+ var import_zod2 = require("zod");
277
+ var LLMGatewayErrorResponseSchema = import_zod2.z.object({
278
+ error: import_zod2.z.object({
279
+ message: import_zod2.z.string(),
280
+ type: import_zod2.z.string(),
281
+ param: import_zod2.z.any().nullable(),
282
+ code: import_zod2.z.string().nullable()
283
+ })
284
+ });
285
+ var llmgatewayFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
286
+ errorSchema: LLMGatewayErrorResponseSchema,
287
+ errorToMessage: (data) => data.error.message
288
+ });
289
+
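The failed-response handler above expects OpenAI-style error envelopes; when a non-2xx response matches the schema, `error.message` becomes the thrown error's message. For reference, a body this handler would accept (values invented):

```ts
const exampleErrorBody = {
  error: {
    message: "Invalid API key",
    type: "authentication_error",
    param: null,
    code: "invalid_api_key",
  },
};
```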
290
+ // src/llmgateway-chat-language-model.ts
291
+ function isFunctionTool(tool) {
292
+ return "parameters" in tool;
293
+ }
294
+ var LLMGatewayChatLanguageModel = class {
295
+ constructor(modelId, settings, config) {
296
+ this.specificationVersion = "v1";
297
+ this.defaultObjectGenerationMode = "tool";
298
+ this.modelId = modelId;
299
+ this.settings = settings;
300
+ this.config = config;
301
+ }
302
+ get provider() {
303
+ return this.config.provider;
304
+ }
305
+ getArgs({
306
+ mode,
307
+ prompt,
308
+ maxTokens,
309
+ temperature,
310
+ topP,
311
+ frequencyPenalty,
312
+ presencePenalty,
313
+ seed,
314
+ stopSequences,
315
+ responseFormat,
316
+ topK,
317
+ providerMetadata
318
+ }) {
319
+ var _a;
320
+ const type = mode.type;
321
+ const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
322
+ const baseArgs = __spreadValues(__spreadValues(__spreadValues({
323
+ // model id:
324
+ model: this.modelId,
325
+ models: this.settings.models,
326
+ // model specific settings:
327
+ logit_bias: this.settings.logitBias,
328
+ logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
329
+ top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
330
+ user: this.settings.user,
331
+ parallel_tool_calls: this.settings.parallelToolCalls,
332
+ // standardized settings:
333
+ max_tokens: maxTokens,
334
+ temperature,
335
+ top_p: topP,
336
+ frequency_penalty: frequencyPenalty,
337
+ presence_penalty: presencePenalty,
338
+ seed,
339
+ stop: stopSequences,
340
+ response_format: responseFormat,
341
+ top_k: topK,
342
+ // messages:
343
+ messages: convertToLLMGatewayChatMessages(prompt),
344
+ // LLMGateway specific settings:
345
+ include_reasoning: this.settings.includeReasoning,
346
+ reasoning: this.settings.reasoning,
347
+ usage: this.settings.usage
348
+ }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
349
+ switch (type) {
350
+ case "regular": {
351
+ return __spreadValues(__spreadValues({}, baseArgs), prepareToolsAndToolChoice(mode));
352
+ }
353
+ case "object-json": {
354
+ return __spreadProps(__spreadValues({}, baseArgs), {
355
+ response_format: { type: "json_object" }
356
+ });
357
+ }
358
+ case "object-tool": {
359
+ return __spreadProps(__spreadValues({}, baseArgs), {
360
+ tool_choice: { type: "function", function: { name: mode.tool.name } },
361
+ tools: [
362
+ {
363
+ type: "function",
364
+ function: {
365
+ name: mode.tool.name,
366
+ description: mode.tool.description,
367
+ parameters: mode.tool.parameters
368
+ }
369
+ }
370
+ ]
371
+ });
372
+ }
373
+ // Handle all non-text types with a single default case
374
+ default: {
375
+ const _exhaustiveCheck = type;
376
+ throw new import_provider.UnsupportedFunctionalityError({
377
+ functionality: `${_exhaustiveCheck} mode`
378
+ });
379
+ }
380
+ }
381
+ }
382
+ async doGenerate(options) {
383
+ var _b, _c, _d, _e, _f, _g, _h, _i, _j;
384
+ const args = this.getArgs(options);
385
+ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
386
+ url: this.config.url({
387
+ path: "/chat/completions",
388
+ modelId: this.modelId
389
+ }),
390
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
391
+ body: args,
392
+ failedResponseHandler: llmgatewayFailedResponseHandler,
393
+ successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
394
+ LLMGatewayNonStreamChatCompletionResponseSchema
395
+ ),
396
+ abortSignal: options.abortSignal,
397
+ fetch: this.config.fetch
398
+ });
399
+ const _a = args, { messages: rawPrompt } = _a, rawSettings = __objRest(_a, ["messages"]);
400
+ const choice = response.choices[0];
401
+ if (!choice) {
402
+ throw new Error("No choice in response");
403
+ }
404
+ const usageInfo = response.usage ? {
405
+ promptTokens: (_b = response.usage.prompt_tokens) != null ? _b : 0,
406
+ completionTokens: (_c = response.usage.completion_tokens) != null ? _c : 0
407
+ } : {
408
+ promptTokens: 0,
409
+ completionTokens: 0
410
+ };
411
+ const providerMetadata = {};
412
+ if (response.usage && ((_d = this.settings.usage) == null ? void 0 : _d.include)) {
413
+ providerMetadata.llmgateway = {
414
+ usage: {
415
+ promptTokens: response.usage.prompt_tokens,
416
+ promptTokensDetails: response.usage.prompt_tokens_details ? {
417
+ cachedTokens: (_e = response.usage.prompt_tokens_details.cached_tokens) != null ? _e : 0
418
+ } : void 0,
419
+ completionTokens: response.usage.completion_tokens,
420
+ completionTokensDetails: response.usage.completion_tokens_details ? {
421
+ reasoningTokens: (_f = response.usage.completion_tokens_details.reasoning_tokens) != null ? _f : 0
422
+ } : void 0,
423
+ cost: response.usage.cost,
424
+ totalTokens: (_g = response.usage.total_tokens) != null ? _g : 0
425
+ }
426
+ };
427
+ }
428
+ const hasProviderMetadata = Object.keys(providerMetadata).length > 0;
429
+ const reasoningDetails = (_h = choice.message.reasoning_details) != null ? _h : [];
430
+ const reasoning = reasoningDetails.length > 0 ? reasoningDetails.map((detail) => {
431
+ var _a2;
432
+ switch (detail.type) {
433
+ case "reasoning.text" /* Text */: {
434
+ if (detail.text) {
435
+ return {
436
+ type: "text",
437
+ text: detail.text,
438
+ signature: (_a2 = detail.signature) != null ? _a2 : void 0
439
+ };
440
+ }
441
+ break;
442
+ }
443
+ case "reasoning.summary" /* Summary */: {
444
+ if (detail.summary) {
445
+ return {
446
+ type: "text",
447
+ text: detail.summary
448
+ };
449
+ }
450
+ break;
451
+ }
452
+ case "reasoning.encrypted" /* Encrypted */: {
453
+ if (detail.data) {
454
+ return {
455
+ type: "redacted",
456
+ data: detail.data
457
+ };
458
+ }
459
+ break;
460
+ }
461
+ default: {
462
+ detail;
463
+ }
464
+ }
465
+ return null;
466
+ }).filter((p) => p !== null) : choice.message.reasoning ? [
467
+ {
468
+ type: "text",
469
+ text: choice.message.reasoning
470
+ }
471
+ ] : [];
472
+ return __spreadValues({
473
+ response: {
474
+ id: response.id,
475
+ modelId: response.model
476
+ },
477
+ text: (_i = choice.message.content) != null ? _i : void 0,
478
+ reasoning,
479
+ toolCalls: (_j = choice.message.tool_calls) == null ? void 0 : _j.map((toolCall) => {
480
+ var _a2;
481
+ return {
482
+ toolCallType: "function",
483
+ toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
484
+ toolName: toolCall.function.name,
485
+ args: toolCall.function.arguments
486
+ };
487
+ }),
488
+ finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
489
+ usage: usageInfo,
490
+ rawCall: { rawPrompt, rawSettings },
491
+ rawResponse: { headers: responseHeaders },
492
+ warnings: [],
493
+ logprobs: mapLLMGatewayChatLogProbsOutput(choice.logprobs)
494
+ }, hasProviderMetadata ? { providerMetadata } : {});
495
+ }
496
+ async doStream(options) {
497
+ var _a, _c;
498
+ const args = this.getArgs(options);
499
+ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
500
+ url: this.config.url({
501
+ path: "/chat/completions",
502
+ modelId: this.modelId
503
+ }),
504
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
505
+ body: __spreadProps(__spreadValues({}, args), {
506
+ stream: true,
507
+ // only include stream_options when in strict compatibility mode:
508
+ stream_options: this.config.compatibility === "strict" ? __spreadValues({
509
+ include_usage: true
510
+ }, ((_a = this.settings.usage) == null ? void 0 : _a.include) ? { include_usage: true } : {}) : void 0
511
+ }),
512
+ failedResponseHandler: llmgatewayFailedResponseHandler,
513
+ successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
514
+ LLMGatewayStreamChatCompletionChunkSchema
515
+ ),
516
+ abortSignal: options.abortSignal,
517
+ fetch: this.config.fetch
518
+ });
519
+ const _b = args, { messages: rawPrompt } = _b, rawSettings = __objRest(_b, ["messages"]);
520
+ const toolCalls = [];
521
+ let finishReason = "other";
522
+ let usage = {
523
+ promptTokens: Number.NaN,
524
+ completionTokens: Number.NaN
525
+ };
526
+ let logprobs;
527
+ const llmgatewayUsage = {};
528
+ const shouldIncludeUsageAccounting = !!((_c = this.settings.usage) == null ? void 0 : _c.include);
529
+ return {
530
+ stream: response.pipeThrough(
531
+ new TransformStream({
532
+ transform(chunk, controller) {
533
+ var _a2, _b2, _c2, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
534
+ if (!chunk.success) {
535
+ finishReason = "error";
536
+ controller.enqueue({ type: "error", error: chunk.error });
537
+ return;
538
+ }
539
+ const value = chunk.value;
540
+ if ("error" in value) {
541
+ finishReason = "error";
542
+ controller.enqueue({ type: "error", error: value.error });
543
+ return;
544
+ }
545
+ if (value.id) {
546
+ controller.enqueue({
547
+ type: "response-metadata",
548
+ id: value.id
549
+ });
550
+ }
551
+ if (value.model) {
552
+ controller.enqueue({
553
+ type: "response-metadata",
554
+ modelId: value.model
555
+ });
556
+ }
557
+ if (value.usage != null) {
558
+ usage = {
559
+ promptTokens: value.usage.prompt_tokens,
560
+ completionTokens: value.usage.completion_tokens
561
+ };
562
+ llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
563
+ if (value.usage.prompt_tokens_details) {
564
+ llmgatewayUsage.promptTokensDetails = {
565
+ cachedTokens: (_a2 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a2 : 0
566
+ };
567
+ }
568
+ llmgatewayUsage.completionTokens = value.usage.completion_tokens;
569
+ if (value.usage.completion_tokens_details) {
570
+ llmgatewayUsage.completionTokensDetails = {
571
+ reasoningTokens: (_b2 = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b2 : 0
572
+ };
573
+ }
574
+ llmgatewayUsage.cost = value.usage.cost;
575
+ llmgatewayUsage.totalTokens = value.usage.total_tokens;
576
+ }
577
+ const choice = value.choices[0];
578
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
579
+ finishReason = mapLLMGatewayFinishReason(choice.finish_reason);
580
+ }
581
+ if ((choice == null ? void 0 : choice.delta) == null) {
582
+ return;
583
+ }
584
+ const delta = choice.delta;
585
+ if (delta.content != null) {
586
+ controller.enqueue({
587
+ type: "text-delta",
588
+ textDelta: delta.content
589
+ });
590
+ }
591
+ if (delta.reasoning != null) {
592
+ controller.enqueue({
593
+ type: "reasoning",
594
+ textDelta: delta.reasoning
595
+ });
596
+ }
597
+ if (delta.reasoning_details && delta.reasoning_details.length > 0) {
598
+ for (const detail of delta.reasoning_details) {
599
+ switch (detail.type) {
600
+ case "reasoning.text" /* Text */: {
601
+ if (detail.text) {
602
+ controller.enqueue({
603
+ type: "reasoning",
604
+ textDelta: detail.text
605
+ });
606
+ }
607
+ if (detail.signature) {
608
+ controller.enqueue({
609
+ type: "reasoning-signature",
610
+ signature: detail.signature
611
+ });
612
+ }
613
+ break;
614
+ }
615
+ case "reasoning.encrypted" /* Encrypted */: {
616
+ if (detail.data) {
617
+ controller.enqueue({
618
+ type: "redacted-reasoning",
619
+ data: detail.data
620
+ });
621
+ }
622
+ break;
623
+ }
624
+ case "reasoning.summary" /* Summary */: {
625
+ if (detail.summary) {
626
+ controller.enqueue({
627
+ type: "reasoning",
628
+ textDelta: detail.summary
629
+ });
630
+ }
631
+ break;
632
+ }
633
+ default: {
634
+ detail;
635
+ break;
636
+ }
637
+ }
638
+ }
639
+ }
640
+ const mappedLogprobs = mapLLMGatewayChatLogProbsOutput(
641
+ choice == null ? void 0 : choice.logprobs
642
+ );
643
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
644
+ if (logprobs === void 0) {
645
+ logprobs = [];
646
+ }
647
+ logprobs.push(...mappedLogprobs);
648
+ }
649
+ if (delta.tool_calls != null) {
650
+ for (const toolCallDelta of delta.tool_calls) {
651
+ const index = toolCallDelta.index;
652
+ if (toolCalls[index] == null) {
653
+ if (toolCallDelta.type !== "function") {
654
+ throw new import_provider.InvalidResponseDataError({
655
+ data: toolCallDelta,
656
+ message: `Expected 'function' type.`
657
+ });
658
+ }
659
+ if (toolCallDelta.id == null) {
660
+ throw new import_provider.InvalidResponseDataError({
661
+ data: toolCallDelta,
662
+ message: `Expected 'id' to be a string.`
663
+ });
664
+ }
665
+ if (((_c2 = toolCallDelta.function) == null ? void 0 : _c2.name) == null) {
666
+ throw new import_provider.InvalidResponseDataError({
667
+ data: toolCallDelta,
668
+ message: `Expected 'function.name' to be a string.`
669
+ });
670
+ }
671
+ toolCalls[index] = {
672
+ id: toolCallDelta.id,
673
+ type: "function",
674
+ function: {
675
+ name: toolCallDelta.function.name,
676
+ arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
677
+ },
678
+ sent: false
679
+ };
680
+ const toolCall2 = toolCalls[index];
681
+ if (toolCall2 == null) {
682
+ throw new Error("Tool call is missing");
683
+ }
684
+ if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
685
+ controller.enqueue({
686
+ type: "tool-call-delta",
687
+ toolCallType: "function",
688
+ toolCallId: toolCall2.id,
689
+ toolName: toolCall2.function.name,
690
+ argsTextDelta: toolCall2.function.arguments
691
+ });
692
+ controller.enqueue({
693
+ type: "tool-call",
694
+ toolCallType: "function",
695
+ toolCallId: (_g = toolCall2.id) != null ? _g : (0, import_provider_utils3.generateId)(),
696
+ toolName: toolCall2.function.name,
697
+ args: toolCall2.function.arguments
698
+ });
699
+ toolCall2.sent = true;
700
+ }
701
+ continue;
702
+ }
703
+ const toolCall = toolCalls[index];
704
+ if (toolCall == null) {
705
+ throw new Error("Tool call is missing");
706
+ }
707
+ if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
708
+ toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
709
+ }
710
+ controller.enqueue({
711
+ type: "tool-call-delta",
712
+ toolCallType: "function",
713
+ toolCallId: toolCall.id,
714
+ toolName: toolCall.function.name,
715
+ argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
716
+ });
717
+ if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
718
+ controller.enqueue({
719
+ type: "tool-call",
720
+ toolCallType: "function",
721
+ toolCallId: (_n = toolCall.id) != null ? _n : (0, import_provider_utils3.generateId)(),
722
+ toolName: toolCall.function.name,
723
+ args: toolCall.function.arguments
724
+ });
725
+ toolCall.sent = true;
726
+ }
727
+ }
728
+ }
729
+ },
730
+ flush(controller) {
731
+ var _a2;
732
+ if (finishReason === "tool-calls") {
733
+ for (const toolCall of toolCalls) {
734
+ if (!toolCall.sent) {
735
+ controller.enqueue({
736
+ type: "tool-call",
737
+ toolCallType: "function",
738
+ toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
739
+ toolName: toolCall.function.name,
740
+ // Coerce invalid arguments to an empty JSON object
741
+ args: (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments) ? toolCall.function.arguments : "{}"
742
+ });
743
+ toolCall.sent = true;
744
+ }
745
+ }
746
+ }
747
+ const providerMetadata = {};
748
+ if (shouldIncludeUsageAccounting && (llmgatewayUsage.totalTokens !== void 0 || llmgatewayUsage.cost !== void 0 || llmgatewayUsage.promptTokensDetails !== void 0 || llmgatewayUsage.completionTokensDetails !== void 0)) {
749
+ providerMetadata.llmgateway = {
750
+ usage: llmgatewayUsage
751
+ };
752
+ }
753
+ const hasProviderMetadata = Object.keys(providerMetadata).length > 0 && shouldIncludeUsageAccounting;
754
+ controller.enqueue(__spreadValues({
755
+ type: "finish",
756
+ finishReason,
757
+ logprobs,
758
+ usage
759
+ }, hasProviderMetadata ? { providerMetadata } : {}));
760
+ }
761
+ })
762
+ ),
763
+ rawCall: { rawPrompt, rawSettings },
764
+ rawResponse: { headers: responseHeaders },
765
+ warnings: []
766
+ };
767
+ }
768
+ };
769
+ var LLMGatewayChatCompletionBaseResponseSchema = import_zod3.z.object({
770
+ id: import_zod3.z.string().optional(),
771
+ model: import_zod3.z.string().optional(),
772
+ usage: import_zod3.z.object({
773
+ prompt_tokens: import_zod3.z.number(),
774
+ prompt_tokens_details: import_zod3.z.object({
775
+ cached_tokens: import_zod3.z.number()
776
+ }).nullish(),
777
+ completion_tokens: import_zod3.z.number(),
778
+ completion_tokens_details: import_zod3.z.object({
779
+ reasoning_tokens: import_zod3.z.number()
780
+ }).nullish(),
781
+ total_tokens: import_zod3.z.number(),
782
+ cost: import_zod3.z.number().optional()
783
+ }).nullish()
784
+ });
785
+ var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
786
+ choices: import_zod3.z.array(
787
+ import_zod3.z.object({
788
+ message: import_zod3.z.object({
789
+ role: import_zod3.z.literal("assistant"),
790
+ content: import_zod3.z.string().nullable().optional(),
791
+ reasoning: import_zod3.z.string().nullable().optional(),
792
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
793
+ tool_calls: import_zod3.z.array(
794
+ import_zod3.z.object({
795
+ id: import_zod3.z.string().optional().nullable(),
796
+ type: import_zod3.z.literal("function"),
797
+ function: import_zod3.z.object({
798
+ name: import_zod3.z.string(),
799
+ arguments: import_zod3.z.string()
800
+ })
801
+ })
802
+ ).optional()
803
+ }),
804
+ index: import_zod3.z.number(),
805
+ logprobs: import_zod3.z.object({
806
+ content: import_zod3.z.array(
807
+ import_zod3.z.object({
808
+ token: import_zod3.z.string(),
809
+ logprob: import_zod3.z.number(),
810
+ top_logprobs: import_zod3.z.array(
811
+ import_zod3.z.object({
812
+ token: import_zod3.z.string(),
813
+ logprob: import_zod3.z.number()
814
+ })
815
+ )
816
+ })
817
+ ).nullable()
818
+ }).nullable().optional(),
819
+ finish_reason: import_zod3.z.string().optional().nullable()
820
+ })
821
+ )
822
+ });
823
+ var LLMGatewayStreamChatCompletionChunkSchema = import_zod3.z.union([
824
+ LLMGatewayChatCompletionBaseResponseSchema.extend({
825
+ choices: import_zod3.z.array(
826
+ import_zod3.z.object({
827
+ delta: import_zod3.z.object({
828
+ role: import_zod3.z.enum(["assistant"]).optional(),
829
+ content: import_zod3.z.string().nullish(),
830
+ reasoning: import_zod3.z.string().nullish().optional(),
831
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
832
+ tool_calls: import_zod3.z.array(
833
+ import_zod3.z.object({
834
+ index: import_zod3.z.number(),
835
+ id: import_zod3.z.string().nullish(),
836
+ type: import_zod3.z.literal("function").optional(),
837
+ function: import_zod3.z.object({
838
+ name: import_zod3.z.string().nullish(),
839
+ arguments: import_zod3.z.string().nullish()
840
+ })
841
+ })
842
+ ).nullish()
843
+ }).nullish(),
844
+ logprobs: import_zod3.z.object({
845
+ content: import_zod3.z.array(
846
+ import_zod3.z.object({
847
+ token: import_zod3.z.string(),
848
+ logprob: import_zod3.z.number(),
849
+ top_logprobs: import_zod3.z.array(
850
+ import_zod3.z.object({
851
+ token: import_zod3.z.string(),
852
+ logprob: import_zod3.z.number()
853
+ })
854
+ )
855
+ })
856
+ ).nullable()
857
+ }).nullish(),
858
+ finish_reason: import_zod3.z.string().nullable().optional(),
859
+ index: import_zod3.z.number()
860
+ })
861
+ )
862
+ }),
863
+ LLMGatewayErrorResponseSchema
864
+ ]);
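The stream schema above accepts either an error envelope or an SSE data chunk shaped like the sketch below; `doStream` stitches partial tool calls together by their `index`, emitting a `tool-call` event once the accumulated `arguments` string parses as JSON. An illustrative chunk (values invented):

```ts
const exampleChunk = {
  id: "chatcmpl-123",
  model: "openai/gpt-4o",
  choices: [
    {
      index: 0,
      delta: {
        role: "assistant",
        content: null,
        // Partial tool call: later chunks with the same index append
        // to `function.arguments` until it is parsable JSON.
        tool_calls: [
          {
            index: 0,
            id: "call_abc",
            type: "function",
            function: { name: "get_weather", arguments: '{"city":' },
          },
        ],
      },
      finish_reason: null,
    },
  ],
};
```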
865
+ function prepareToolsAndToolChoice(mode) {
866
+ var _a;
867
+ const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
868
+ if (tools == null) {
869
+ return { tools: void 0, tool_choice: void 0 };
870
+ }
871
+ const mappedTools = tools.map((tool) => {
872
+ if (isFunctionTool(tool)) {
873
+ return {
874
+ type: "function",
875
+ function: {
876
+ name: tool.name,
877
+ description: tool.description,
878
+ parameters: tool.parameters
879
+ }
880
+ };
881
+ }
882
+ return {
883
+ type: "function",
884
+ function: {
885
+ name: tool.name
886
+ }
887
+ };
888
+ });
889
+ const toolChoice = mode.toolChoice;
890
+ if (toolChoice == null) {
891
+ return { tools: mappedTools, tool_choice: void 0 };
892
+ }
893
+ const type = toolChoice.type;
894
+ switch (type) {
895
+ case "auto":
896
+ case "none":
897
+ case "required":
898
+ return { tools: mappedTools, tool_choice: type };
899
+ case "tool":
900
+ return {
901
+ tools: mappedTools,
902
+ tool_choice: {
903
+ type: "function",
904
+ function: {
905
+ name: toolChoice.toolName
906
+ }
907
+ }
908
+ };
909
+ default: {
910
+ const _exhaustiveCheck = type;
911
+ throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`);
912
+ }
913
+ }
914
+ }
915
+
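These internal classes are normally constructed by the package's public provider factory, which is not part of this file. A minimal usage sketch with a hand-built config; the base URL, header name, and environment variable here are assumptions for illustration, not values taken from this diff:

```ts
const model = new LLMGatewayChatLanguageModel(
  "openai/gpt-4o",              // modelId
  { usage: { include: true } }, // settings: ask for usage accounting in providerMetadata
  {
    provider: "llmgateway.chat",
    compatibility: "strict",    // enables stream_options.include_usage in doStream
    headers: () => ({
      Authorization: `Bearer ${process.env.LLM_GATEWAY_API_KEY}`, // assumed env var
    }),
    url: ({ path }) => `https://api.llmgateway.io/v1${path}`,     // assumed base URL
  },
);

// Inside an async context:
const result = await model.doGenerate({
  inputFormat: "messages",
  mode: { type: "regular" },
  prompt: [{ role: "user", content: [{ type: "text", text: "Hello!" }] }],
});
console.log(result.text, result.providerMetadata?.llmgateway?.usage);
```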
916
+ // src/llmgateway-completion-language-model.ts
917
+ var import_provider3 = require("@ai-sdk/provider");
918
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
919
+ var import_zod4 = require("zod");
920
+
921
+ // src/convert-to-llmgateway-completion-prompt.ts
922
+ var import_provider2 = require("@ai-sdk/provider");
923
+ function convertToLLMGatewayCompletionPrompt({
924
+ prompt,
925
+ inputFormat,
926
+ user = "user",
927
+ assistant = "assistant"
928
+ }) {
929
+ if (inputFormat === "prompt" && prompt.length === 1 && prompt[0] && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0] && prompt[0].content[0].type === "text") {
930
+ return { prompt: prompt[0].content[0].text };
931
+ }
932
+ let text = "";
933
+ if (prompt[0] && prompt[0].role === "system") {
934
+ text += `${prompt[0].content}
935
+
936
+ `;
937
+ prompt = prompt.slice(1);
938
+ }
939
+ for (const { role, content } of prompt) {
940
+ switch (role) {
941
+ case "system": {
942
+ throw new import_provider2.InvalidPromptError({
943
+ message: "Unexpected system message in prompt: ${content}",
944
+ prompt
945
+ });
946
+ }
947
+ case "user": {
948
+ const userMessage = content.map((part) => {
949
+ switch (part.type) {
950
+ case "text": {
951
+ return part.text;
952
+ }
953
+ case "image": {
954
+ throw new import_provider2.UnsupportedFunctionalityError({
955
+ functionality: "images"
956
+ });
957
+ }
958
+ case "file": {
959
+ throw new import_provider2.UnsupportedFunctionalityError({
960
+ functionality: "file attachments"
961
+ });
962
+ }
963
+ default: {
964
+ const _exhaustiveCheck = part;
965
+ throw new Error(
966
+ `Unsupported content type: ${_exhaustiveCheck}`
967
+ );
968
+ }
969
+ }
970
+ }).join("");
971
+ text += `${user}:
972
+ ${userMessage}
973
+
974
+ `;
975
+ break;
976
+ }
977
+ case "assistant": {
978
+ const assistantMessage = content.map((part) => {
979
+ switch (part.type) {
980
+ case "text": {
981
+ return part.text;
982
+ }
983
+ case "tool-call": {
984
+ throw new import_provider2.UnsupportedFunctionalityError({
985
+ functionality: "tool-call messages"
986
+ });
987
+ }
988
+ case "reasoning": {
989
+ throw new import_provider2.UnsupportedFunctionalityError({
990
+ functionality: "reasoning messages"
991
+ });
992
+ }
993
+ case "redacted-reasoning": {
994
+ throw new import_provider2.UnsupportedFunctionalityError({
995
+ functionality: "redacted reasoning messages"
996
+ });
997
+ }
998
+ case "file": {
999
+ throw new import_provider2.UnsupportedFunctionalityError({
1000
+ functionality: "file attachments"
1001
+ });
1002
+ }
1003
+ default: {
1004
+ const _exhaustiveCheck = part;
1005
+ throw new Error(
1006
+ `Unsupported content type: ${_exhaustiveCheck}`
1007
+ );
1008
+ }
1009
+ }
1010
+ }).join("");
1011
+ text += `${assistant}:
1012
+ ${assistantMessage}
1013
+
1014
+ `;
1015
+ break;
1016
+ }
1017
+ case "tool": {
1018
+ throw new import_provider2.UnsupportedFunctionalityError({
1019
+ functionality: "tool messages"
1020
+ });
1021
+ }
1022
+ default: {
1023
+ const _exhaustiveCheck = role;
1024
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
1025
+ }
1026
+ }
1027
+ }
1028
+ text += `${assistant}:
1029
+ `;
1030
+ return {
1031
+ prompt: text
1032
+ };
1033
+ }
1034
+
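`convertToLLMGatewayCompletionPrompt` renders a chat transcript as a single completion prompt: an optional leading system block, then `user:`/`assistant:` turns separated by blank lines, ending with a trailing `assistant:` cue. A worked example based on the code above:

```ts
const { prompt } = convertToLLMGatewayCompletionPrompt({
  inputFormat: "messages",
  prompt: [
    { role: "system", content: "You are terse." },
    { role: "user", content: [{ type: "text", text: "Hi" }] },
  ],
});
// prompt === "You are terse.\n\nuser:\nHi\n\nassistant:\n"
```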
1035
+ // src/map-llmgateway-completion-logprobs.ts
1036
+ function mapLLMGatewayCompletionLogprobs(logprobs) {
1037
+ return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => {
1038
+ var _a, _b;
1039
+ return {
1040
+ token,
1041
+ logprob: (_a = logprobs.token_logprobs[index]) != null ? _a : 0,
1042
+ topLogprobs: logprobs.top_logprobs ? Object.entries((_b = logprobs.top_logprobs[index]) != null ? _b : {}).map(
1043
+ ([token2, logprob]) => ({
1044
+ token: token2,
1045
+ logprob
1046
+ })
1047
+ ) : []
1048
+ };
1049
+ });
1050
+ }
1051
+
1052
+ // src/llmgateway-completion-language-model.ts
1053
+ var LLMGatewayCompletionLanguageModel = class {
1054
+ constructor(modelId, settings, config) {
1055
+ this.specificationVersion = "v1";
1056
+ this.defaultObjectGenerationMode = void 0;
1057
+ this.modelId = modelId;
1058
+ this.settings = settings;
1059
+ this.config = config;
1060
+ }
1061
+ get provider() {
1062
+ return this.config.provider;
1063
+ }
1064
+ getArgs({
1065
+ mode,
1066
+ inputFormat,
1067
+ prompt,
1068
+ maxTokens,
1069
+ temperature,
1070
+ topP,
1071
+ frequencyPenalty,
1072
+ presencePenalty,
1073
+ seed,
1074
+ responseFormat,
1075
+ topK,
1076
+ stopSequences,
1077
+ providerMetadata
1078
+ }) {
1079
+ var _a, _b;
1080
+ const type = mode.type;
1081
+ const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
1082
+ const { prompt: completionPrompt } = convertToLLMGatewayCompletionPrompt({
1083
+ prompt,
1084
+ inputFormat
1085
+ });
1086
+ const baseArgs = __spreadValues(__spreadValues(__spreadValues({
1087
+ // model id:
1088
+ model: this.modelId,
1089
+ models: this.settings.models,
1090
+ // model specific settings:
1091
+ logit_bias: this.settings.logitBias,
1092
+ logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
1093
+ suffix: this.settings.suffix,
1094
+ user: this.settings.user,
1095
+ // standardized settings:
1096
+ max_tokens: maxTokens,
1097
+ temperature,
1098
+ top_p: topP,
1099
+ frequency_penalty: frequencyPenalty,
1100
+ presence_penalty: presencePenalty,
1101
+ seed,
1102
+ stop: stopSequences,
1103
+ response_format: responseFormat,
1104
+ top_k: topK,
1105
+ // prompt:
1106
+ prompt: completionPrompt
1107
+ }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
1108
+ switch (type) {
1109
+ case "regular": {
1110
+ if ((_b = mode.tools) == null ? void 0 : _b.length) {
1111
+ throw new import_provider3.UnsupportedFunctionalityError({
1112
+ functionality: "tools"
1113
+ });
1114
+ }
1115
+ if (mode.toolChoice) {
1116
+ throw new import_provider3.UnsupportedFunctionalityError({
1117
+ functionality: "toolChoice"
1118
+ });
1119
+ }
1120
+ return baseArgs;
1121
+ }
1122
+ case "object-json": {
1123
+ throw new import_provider3.UnsupportedFunctionalityError({
1124
+ functionality: "object-json mode"
1125
+ });
1126
+ }
1127
+ case "object-tool": {
1128
+ throw new import_provider3.UnsupportedFunctionalityError({
1129
+ functionality: "object-tool mode"
1130
+ });
1131
+ }
1132
+       // Handle any remaining (unsupported) mode types with a single default case
1133
+ default: {
1134
+ const _exhaustiveCheck = type;
1135
+ throw new import_provider3.UnsupportedFunctionalityError({
1136
+ functionality: `${_exhaustiveCheck} mode`
1137
+ });
1138
+ }
1139
+ }
1140
+ }
1141
+ async doGenerate(options) {
1142
+ var _b, _c, _d, _e, _f;
1143
+ const args = this.getArgs(options);
1144
+ const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
1145
+ url: this.config.url({
1146
+ path: "/completions",
1147
+ modelId: this.modelId
1148
+ }),
1149
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
1150
+ body: args,
1151
+ failedResponseHandler: llmgatewayFailedResponseHandler,
1152
+ successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
1153
+ LLMGatewayCompletionChunkSchema
1154
+ ),
1155
+ abortSignal: options.abortSignal,
1156
+ fetch: this.config.fetch
1157
+ });
1158
+ const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
1159
+ if ("error" in response) {
1160
+ throw new Error(`${response.error.message}`);
1161
+ }
1162
+ const choice = response.choices[0];
1163
+ if (!choice) {
1164
+ throw new Error("No choice in LLMGateway completion response");
1165
+ }
1166
+ return {
1167
+ response: {
1168
+ id: response.id,
1169
+ modelId: response.model
1170
+ },
1171
+ text: (_b = choice.text) != null ? _b : "",
1172
+ reasoning: choice.reasoning || void 0,
1173
+ usage: {
1174
+ promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : 0,
1175
+ completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : 0
1176
+ },
1177
+ finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
1178
+ logprobs: mapLLMGatewayCompletionLogprobs(choice.logprobs),
1179
+ rawCall: { rawPrompt, rawSettings },
1180
+ rawResponse: { headers: responseHeaders },
1181
+ warnings: []
1182
+ };
1183
+ }
1184
+ async doStream(options) {
1185
+ const args = this.getArgs(options);
1186
+ const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
1187
+ url: this.config.url({
1188
+ path: "/completions",
1189
+ modelId: this.modelId
1190
+ }),
1191
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
1192
+       body: __spreadProps(__spreadValues({}, args), {
1193
+ stream: true,
1194
+ // only include stream_options when in strict compatibility mode:
1195
+ stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
1196
+ }),
1197
+ failedResponseHandler: llmgatewayFailedResponseHandler,
1198
+ successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
1199
+ LLMGatewayCompletionChunkSchema
1200
+ ),
1201
+ abortSignal: options.abortSignal,
1202
+ fetch: this.config.fetch
1203
+ });
1204
+ const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
1205
+ let finishReason = "other";
1206
+ let usage = {
1207
+ promptTokens: Number.NaN,
1208
+ completionTokens: Number.NaN
1209
+ };
1210
+ let logprobs;
1211
+ return {
1212
+ stream: response.pipeThrough(
1213
+ new TransformStream({
1214
+ transform(chunk, controller) {
1215
+ if (!chunk.success) {
1216
+ finishReason = "error";
1217
+ controller.enqueue({ type: "error", error: chunk.error });
1218
+ return;
1219
+ }
1220
+ const value = chunk.value;
1221
+ if ("error" in value) {
1222
+ finishReason = "error";
1223
+ controller.enqueue({ type: "error", error: value.error });
1224
+ return;
1225
+ }
1226
+ if (value.usage != null) {
1227
+ usage = {
1228
+ promptTokens: value.usage.prompt_tokens,
1229
+ completionTokens: value.usage.completion_tokens
1230
+ };
1231
+ }
1232
+ const choice = value.choices[0];
1233
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
1234
+ finishReason = mapLLMGatewayFinishReason(choice.finish_reason);
1235
+ }
1236
+ if ((choice == null ? void 0 : choice.text) != null) {
1237
+ controller.enqueue({
1238
+ type: "text-delta",
1239
+ textDelta: choice.text
1240
+ });
1241
+ }
1242
+ const mappedLogprobs = mapLLMGatewayCompletionLogprobs(
1243
+ choice == null ? void 0 : choice.logprobs
1244
+ );
1245
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
1246
+ if (logprobs === void 0) {
1247
+ logprobs = [];
1248
+ }
1249
+ logprobs.push(...mappedLogprobs);
1250
+ }
1251
+ },
1252
+ flush(controller) {
1253
+ controller.enqueue({
1254
+ type: "finish",
1255
+ finishReason,
1256
+ logprobs,
1257
+ usage
1258
+ });
1259
+ }
1260
+ })
1261
+ ),
1262
+ rawCall: { rawPrompt, rawSettings },
1263
+ rawResponse: { headers: responseHeaders },
1264
+ warnings: []
1265
+ };
1266
+ }
1267
+ };
1268
+ var LLMGatewayCompletionChunkSchema = import_zod4.z.union([
1269
+ import_zod4.z.object({
1270
+ id: import_zod4.z.string().optional(),
1271
+ model: import_zod4.z.string().optional(),
1272
+ choices: import_zod4.z.array(
1273
+ import_zod4.z.object({
1274
+ text: import_zod4.z.string(),
1275
+ reasoning: import_zod4.z.string().nullish().optional(),
1276
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
1277
+ finish_reason: import_zod4.z.string().nullish(),
1278
+ index: import_zod4.z.number(),
1279
+ logprobs: import_zod4.z.object({
1280
+ tokens: import_zod4.z.array(import_zod4.z.string()),
1281
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
1282
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
1283
+ }).nullable().optional()
1284
+ })
1285
+ ),
1286
+ usage: import_zod4.z.object({
1287
+ prompt_tokens: import_zod4.z.number(),
1288
+ completion_tokens: import_zod4.z.number()
1289
+ }).optional().nullable()
1290
+ }),
1291
+ LLMGatewayErrorResponseSchema
1292
+ ]);
1293
+ // Annotate the CommonJS export names for ESM import in node:
1294
+ 0 && (module.exports = {
1295
+ LLMGatewayChatLanguageModel,
1296
+ LLMGatewayCompletionLanguageModel
1297
+ });
1298
+ //# sourceMappingURL=index.cjs.map
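For completeness, the completion model follows the same construction pattern as the chat model; a sketch under the same assumed config (base URL, header name, and environment variable are illustrative):

```ts
const completion = new LLMGatewayCompletionLanguageModel(
  "openai/gpt-3.5-turbo-instruct",
  {},
  {
    provider: "llmgateway.completion",
    compatibility: "strict",
    headers: () => ({
      Authorization: `Bearer ${process.env.LLM_GATEWAY_API_KEY}`, // assumed env var
    }),
    url: ({ path }) => `https://api.llmgateway.io/v1${path}`,     // assumed base URL
  },
);

// Inside an async context:
const { text } = await completion.doGenerate({
  inputFormat: "prompt",
  mode: { type: "regular" },
  prompt: [{ role: "user", content: [{ type: "text", text: "Write a haiku." }] }],
});
```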