@ai-sdk/openai-compatible 2.0.0-beta.45 → 2.0.0-beta.46

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -14,6 +14,63 @@ import {
  } from "@ai-sdk/provider-utils";
  import { z as z3 } from "zod/v4";

+ // src/openai-compatible-error.ts
+ import { z } from "zod/v4";
+ var openaiCompatibleErrorDataSchema = z.object({
+   error: z.object({
+     message: z.string(),
+     // The additional information below is handled loosely to support
+     // OpenAI-compatible providers that have slightly different error
+     // responses:
+     type: z.string().nullish(),
+     param: z.any().nullish(),
+     code: z.union([z.string(), z.number()]).nullish()
+   })
+ });
+ var defaultOpenAICompatibleErrorStructure = {
+   errorSchema: openaiCompatibleErrorDataSchema,
+   errorToMessage: (data) => data.error.message
+ };
+
+ // src/chat/convert-openai-compatible-chat-usage.ts
+ function convertOpenAICompatibleChatUsage(usage) {
+   var _a, _b, _c, _d, _e, _f;
+   if (usage == null) {
+     return {
+       inputTokens: {
+         total: void 0,
+         noCache: void 0,
+         cacheRead: void 0,
+         cacheWrite: void 0
+       },
+       outputTokens: {
+         total: void 0,
+         text: void 0,
+         reasoning: void 0
+       },
+       raw: void 0
+     };
+   }
+   const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+   const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+   const cacheReadTokens = (_d = (_c = usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : 0;
+   const reasoningTokens = (_f = (_e = usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0;
+   return {
+     inputTokens: {
+       total: promptTokens,
+       noCache: promptTokens - cacheReadTokens,
+       cacheRead: cacheReadTokens,
+       cacheWrite: void 0
+     },
+     outputTokens: {
+       total: completionTokens,
+       text: completionTokens - reasoningTokens,
+       reasoning: reasoningTokens
+     },
+     raw: usage
+   };
+ }
+
  // src/chat/convert-to-openai-compatible-chat-messages.ts
  import {
    UnsupportedFunctionalityError
@@ -172,41 +229,23 @@ function mapOpenAICompatibleFinishReason(finishReason) {
  }

  // src/chat/openai-compatible-chat-options.ts
- import { z } from "zod/v4";
- var openaiCompatibleProviderOptions = z.object({
+ import { z as z2 } from "zod/v4";
+ var openaiCompatibleProviderOptions = z2.object({
    /**
     * A unique identifier representing your end-user, which can help the provider to
     * monitor and detect abuse.
     */
-   user: z.string().optional(),
+   user: z2.string().optional(),
    /**
     * Reasoning effort for reasoning models. Defaults to `medium`.
     */
-   reasoningEffort: z.string().optional(),
+   reasoningEffort: z2.string().optional(),
    /**
     * Controls the verbosity of the generated text. Defaults to `medium`.
     */
-   textVerbosity: z.string().optional()
+   textVerbosity: z2.string().optional()
  });

- // src/openai-compatible-error.ts
- import { z as z2 } from "zod/v4";
- var openaiCompatibleErrorDataSchema = z2.object({
-   error: z2.object({
-     message: z2.string(),
-     // The additional information below is handled loosely to support
-     // OpenAI-compatible providers that have slightly different error
-     // responses:
-     type: z2.string().nullish(),
-     param: z2.any().nullish(),
-     code: z2.union([z2.string(), z2.number()]).nullish()
-   })
- });
- var defaultOpenAICompatibleErrorStructure = {
-   errorSchema: openaiCompatibleErrorDataSchema,
-   errorToMessage: (data) => data.error.message
- };
-
  // src/chat/openai-compatible-prepare-tools.ts
  import {
    UnsupportedFunctionalityError as UnsupportedFunctionalityError2
@@ -379,7 +418,7 @@ var OpenAICompatibleChatLanguageModel = class {
      };
    }
    async doGenerate(options) {
-     var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
+     var _a, _b, _c, _d, _e;
      const { args, warnings } = await this.getArgs({ ...options });
      const body = JSON.stringify(args);
      const {
@@ -439,13 +478,7 @@ var OpenAICompatibleChatLanguageModel = class {
      return {
        content,
        finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
-       usage: {
-         inputTokens: (_g = (_f = responseBody.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-         outputTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
-         totalTokens: (_k = (_j = responseBody.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
-         reasoningTokens: (_n = (_m = (_l = responseBody.usage) == null ? void 0 : _l.completion_tokens_details) == null ? void 0 : _m.reasoning_tokens) != null ? _n : void 0,
-         cachedInputTokens: (_q = (_p = (_o = responseBody.usage) == null ? void 0 : _o.prompt_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
-       },
+       usage: convertOpenAICompatibleChatUsage(responseBody.usage),
        providerMetadata,
        request: { body },
        response: {
@@ -482,19 +515,7 @@ var OpenAICompatibleChatLanguageModel = class {
      });
      const toolCalls = [];
      let finishReason = "unknown";
-     const usage = {
-       completionTokens: void 0,
-       completionTokensDetails: {
-         reasoningTokens: void 0,
-         acceptedPredictionTokens: void 0,
-         rejectedPredictionTokens: void 0
-       },
-       promptTokens: void 0,
-       promptTokensDetails: {
-         cachedTokens: void 0
-       },
-       totalTokens: void 0
-     };
+     let usage = void 0;
      let isFirstChunk = true;
      const providerOptionsName = this.providerOptionsName;
      let isActiveReasoning = false;
@@ -505,7 +526,6 @@ var OpenAICompatibleChatLanguageModel = class {
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
-         // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
          transform(chunk, controller) {
            var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
            if (options.includeRawChunks) {
@@ -516,13 +536,16 @@ var OpenAICompatibleChatLanguageModel = class {
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
-           const value = chunk.value;
            metadataExtractor == null ? void 0 : metadataExtractor.processChunk(chunk.rawValue);
-           if ("error" in value) {
+           if ("error" in chunk.value) {
              finishReason = "error";
-             controller.enqueue({ type: "error", error: value.error.message });
+             controller.enqueue({
+               type: "error",
+               error: chunk.value.error.message
+             });
              return;
            }
+           const value = chunk.value;
            if (isFirstChunk) {
              isFirstChunk = false;
              controller.enqueue({
@@ -531,28 +554,7 @@ var OpenAICompatibleChatLanguageModel = class {
              });
            }
            if (value.usage != null) {
-             const {
-               prompt_tokens,
-               completion_tokens,
-               total_tokens,
-               prompt_tokens_details,
-               completion_tokens_details
-             } = value.usage;
-             usage.promptTokens = prompt_tokens != null ? prompt_tokens : void 0;
-             usage.completionTokens = completion_tokens != null ? completion_tokens : void 0;
-             usage.totalTokens = total_tokens != null ? total_tokens : void 0;
-             if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-               usage.completionTokensDetails.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-             }
-             if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-               usage.completionTokensDetails.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
-             }
-             if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-               usage.completionTokensDetails.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
-             }
-             if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
-               usage.promptTokensDetails.cachedTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
-             }
+             usage = value.usage;
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -674,7 +676,7 @@ var OpenAICompatibleChatLanguageModel = class {
            }
          },
          flush(controller) {
-           var _a2, _b, _c, _d, _e, _f;
+           var _a2, _b, _c, _d, _e;
            if (isActiveReasoning) {
              controller.enqueue({ type: "reasoning-end", id: "reasoning-0" });
            }
@@ -699,22 +701,16 @@ var OpenAICompatibleChatLanguageModel = class {
              [providerOptionsName]: {},
              ...metadataExtractor == null ? void 0 : metadataExtractor.buildMetadata()
            };
-           if (usage.completionTokensDetails.acceptedPredictionTokens != null) {
-             providerMetadata[providerOptionsName].acceptedPredictionTokens = usage.completionTokensDetails.acceptedPredictionTokens;
+           if (((_b = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _b.accepted_prediction_tokens) != null) {
+             providerMetadata[providerOptionsName].acceptedPredictionTokens = (_c = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _c.accepted_prediction_tokens;
            }
-           if (usage.completionTokensDetails.rejectedPredictionTokens != null) {
-             providerMetadata[providerOptionsName].rejectedPredictionTokens = usage.completionTokensDetails.rejectedPredictionTokens;
+           if (((_d = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _d.rejected_prediction_tokens) != null) {
+             providerMetadata[providerOptionsName].rejectedPredictionTokens = (_e = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _e.rejected_prediction_tokens;
            }
            controller.enqueue({
              type: "finish",
              finishReason,
-             usage: {
-               inputTokens: (_b = usage.promptTokens) != null ? _b : void 0,
-               outputTokens: (_c = usage.completionTokens) != null ? _c : void 0,
-               totalTokens: (_d = usage.totalTokens) != null ? _d : void 0,
-               reasoningTokens: (_e = usage.completionTokensDetails.reasoningTokens) != null ? _e : void 0,
-               cachedInputTokens: (_f = usage.promptTokensDetails.cachedTokens) != null ? _f : void 0
-             },
+             usage: convertOpenAICompatibleChatUsage(usage),
              providerMetadata
            });
          }
@@ -764,38 +760,36 @@ var OpenAICompatibleChatResponseSchema = z3.object({
    ),
    usage: openaiCompatibleTokenUsageSchema
  });
- var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
-   z3.object({
-     id: z3.string().nullish(),
-     created: z3.number().nullish(),
-     model: z3.string().nullish(),
-     choices: z3.array(
-       z3.object({
-         delta: z3.object({
-           role: z3.enum(["assistant"]).nullish(),
-           content: z3.string().nullish(),
-           // Most openai-compatible models set `reasoning_content`, but some
-           // providers serving `gpt-oss` set `reasoning`. See #7866
-           reasoning_content: z3.string().nullish(),
-           reasoning: z3.string().nullish(),
-           tool_calls: z3.array(
-             z3.object({
-               index: z3.number(),
-               id: z3.string().nullish(),
-               function: z3.object({
-                 name: z3.string().nullish(),
-                 arguments: z3.string().nullish()
-               })
+ var chunkBaseSchema = z3.object({
+   id: z3.string().nullish(),
+   created: z3.number().nullish(),
+   model: z3.string().nullish(),
+   choices: z3.array(
+     z3.object({
+       delta: z3.object({
+         role: z3.enum(["assistant"]).nullish(),
+         content: z3.string().nullish(),
+         // Most openai-compatible models set `reasoning_content`, but some
+         // providers serving `gpt-oss` set `reasoning`. See #7866
+         reasoning_content: z3.string().nullish(),
+         reasoning: z3.string().nullish(),
+         tool_calls: z3.array(
+           z3.object({
+             index: z3.number(),
+             id: z3.string().nullish(),
+             function: z3.object({
+               name: z3.string().nullish(),
+               arguments: z3.string().nullish()
              })
-           ).nullish()
-         }).nullish(),
-         finish_reason: z3.string().nullish()
-       })
-     ),
-     usage: openaiCompatibleTokenUsageSchema
-   }),
-   errorSchema
- ]);
+           })
+         ).nullish()
+       }).nullish(),
+       finish_reason: z3.string().nullish()
+     })
+   ),
+   usage: openaiCompatibleTokenUsageSchema
+ });
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([chunkBaseSchema, errorSchema]);

  // src/completion/openai-compatible-completion-language-model.ts
  import {
@@ -808,6 +802,43 @@ import {
  } from "@ai-sdk/provider-utils";
  import { z as z5 } from "zod/v4";

+ // src/completion/convert-openai-compatible-completion-usage.ts
+ function convertOpenAICompatibleCompletionUsage(usage) {
+   var _a, _b;
+   if (usage == null) {
+     return {
+       inputTokens: {
+         total: void 0,
+         noCache: void 0,
+         cacheRead: void 0,
+         cacheWrite: void 0
+       },
+       outputTokens: {
+         total: void 0,
+         text: void 0,
+         reasoning: void 0
+       },
+       raw: void 0
+     };
+   }
+   const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+   const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+   return {
+     inputTokens: {
+       total: promptTokens,
+       noCache: promptTokens,
+       cacheRead: void 0,
+       cacheWrite: void 0
+     },
+     outputTokens: {
+       total: completionTokens,
+       text: completionTokens,
+       reasoning: void 0
+     },
+     raw: usage
+   };
+ }
+
  // src/completion/convert-to-openai-compatible-completion-prompt.ts
  import {
    InvalidPromptError,
@@ -1031,7 +1062,6 @@ var OpenAICompatibleCompletionLanguageModel = class {
      };
    }
    async doGenerate(options) {
-     var _a, _b, _c, _d, _e, _f;
      const { args, warnings } = await this.getArgs(options);
      const {
        responseHeaders,
@@ -1058,11 +1088,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
      }
      return {
        content,
-       usage: {
-         inputTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : void 0,
-         outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0,
-         totalTokens: (_f = (_e = response.usage) == null ? void 0 : _e.total_tokens) != null ? _f : void 0
-       },
+       usage: convertOpenAICompatibleCompletionUsage(response.usage),
        finishReason: mapOpenAICompatibleFinishReason2(choice.finish_reason),
        request: { body: args },
        response: {
@@ -1096,11 +1122,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
        fetch: this.config.fetch
      });
      let finishReason = "unknown";
-     const usage = {
-       inputTokens: void 0,
-       outputTokens: void 0,
-       totalTokens: void 0
-     };
+     let usage = void 0;
      let isFirstChunk = true;
      return {
        stream: response.pipeThrough(
@@ -1109,7 +1131,6 @@ var OpenAICompatibleCompletionLanguageModel = class {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
-           var _a, _b, _c;
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
@@ -1136,9 +1157,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
              });
            }
            if (value.usage != null) {
-             usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
-             usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
-             usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+             usage = value.usage;
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1161,7 +1180,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
            controller.enqueue({
              type: "finish",
              finishReason,
-             usage
+             usage: convertOpenAICompatibleCompletionUsage(usage)
            });
          }
        })
@@ -1408,7 +1427,7 @@ import {
  } from "@ai-sdk/provider-utils";

  // src/version.ts
- var VERSION = true ? "2.0.0-beta.45" : "0.0.0-test";
+ var VERSION = true ? "2.0.0-beta.46" : "0.0.0-test";

  // src/openai-compatible-provider.ts
  function createOpenAICompatible(options) {