@ai-sdk/openai-compatible 2.0.0-beta.44 → 2.0.0-beta.46

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,22 @@
  # @ai-sdk/openai-compatible
 
+ ## 2.0.0-beta.46
+
+ ### Patch Changes
+
+ - 3bd2689: feat: extended token usage
+ - Updated dependencies [3bd2689]
+   - @ai-sdk/provider@3.0.0-beta.26
+   - @ai-sdk/provider-utils@4.0.0-beta.45
+
+ ## 2.0.0-beta.45
+
+ ### Patch Changes
+
+ - Updated dependencies [53f3368]
+   - @ai-sdk/provider@3.0.0-beta.25
+   - @ai-sdk/provider-utils@4.0.0-beta.44
+
  ## 2.0.0-beta.44
 
  ### Patch Changes
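
The headline change is `3bd2689: feat: extended token usage`: the flat `inputTokens`/`outputTokens`/`totalTokens` numbers on results are replaced by nested objects that split out cache and reasoning tokens (see the new `convertOpenAICompatibleChatUsage` in `dist/index.js` below). A minimal sketch of what consumers now receive follows; the `ExtendedUsage` shape is reconstructed from this diff, not imported from `@ai-sdk/provider`, so treat the type itself as illustrative:

```ts
// Usage shape as produced by convertOpenAICompatibleChatUsage below.
// Reconstructed from the compiled output in this diff; the canonical type
// lives in @ai-sdk/provider@3.0.0-beta.26 and may differ in detail.
interface ExtendedUsage {
  inputTokens: {
    total: number | undefined;
    noCache: number | undefined; // total minus cached reads
    cacheRead: number | undefined;
    cacheWrite: number | undefined; // not reported by OpenAI-compatible APIs
  };
  outputTokens: {
    total: number | undefined;
    text: number | undefined; // total minus reasoning tokens
    reasoning: number | undefined;
  };
  raw: unknown; // the provider's original usage payload
}

// Example consumer: summarize billable tokens from the new shape.
function summarizeUsage(usage: ExtendedUsage): string {
  const input = usage.inputTokens.total ?? 0;
  const cached = usage.inputTokens.cacheRead ?? 0;
  const output = usage.outputTokens.total ?? 0;
  return `input=${input} (cached=${cached}), output=${output}`;
}
```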
package/dist/index.d.mts CHANGED
@@ -1,14 +1,6 @@
  import { SharedV3ProviderMetadata, LanguageModelV3, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z, ZodType } from 'zod/v4';
-
- type OpenAICompatibleChatModelId = string;
- declare const openaiCompatibleProviderOptions: z.ZodObject<{
-   user: z.ZodOptional<z.ZodString>;
-   reasoningEffort: z.ZodOptional<z.ZodString>;
-   textVerbosity: z.ZodOptional<z.ZodString>;
- }, z.core.$strip>;
- type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;
+ import { ZodType, z } from 'zod/v4';
 
  declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
    error: z.ZodObject<{
@@ -25,6 +17,14 @@ type ProviderErrorStructure<T> = {
    isRetryable?: (response: Response, error?: T) => boolean;
  };
 
+ type OpenAICompatibleChatModelId = string;
+ declare const openaiCompatibleProviderOptions: z.ZodObject<{
+   user: z.ZodOptional<z.ZodString>;
+   reasoningEffort: z.ZodOptional<z.ZodString>;
+   textVerbosity: z.ZodOptional<z.ZodString>;
+ }, z.core.$strip>;
+ type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;
+
  /**
  Extracts provider-specific metadata from API responses.
  Used to standardize metadata handling across different LLM providers while allowing
package/dist/index.d.ts CHANGED
@@ -1,14 +1,6 @@
  import { SharedV3ProviderMetadata, LanguageModelV3, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z, ZodType } from 'zod/v4';
-
- type OpenAICompatibleChatModelId = string;
- declare const openaiCompatibleProviderOptions: z.ZodObject<{
-   user: z.ZodOptional<z.ZodString>;
-   reasoningEffort: z.ZodOptional<z.ZodString>;
-   textVerbosity: z.ZodOptional<z.ZodString>;
- }, z.core.$strip>;
- type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;
+ import { ZodType, z } from 'zod/v4';
 
  declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
    error: z.ZodObject<{
@@ -25,6 +17,14 @@ type ProviderErrorStructure<T> = {
    isRetryable?: (response: Response, error?: T) => boolean;
  };
 
+ type OpenAICompatibleChatModelId = string;
+ declare const openaiCompatibleProviderOptions: z.ZodObject<{
+   user: z.ZodOptional<z.ZodString>;
+   reasoningEffort: z.ZodOptional<z.ZodString>;
+   textVerbosity: z.ZodOptional<z.ZodString>;
+ }, z.core.$strip>;
+ type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;
+
  /**
  Extracts provider-specific metadata from API responses.
  Used to standardize metadata handling across different LLM providers while allowing
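
Both declaration files get the same reshuffle: the `zod/v4` import order flips and the chat-options types move below `ProviderErrorStructure`. The exported surface is unchanged, so consumer code such as the following sketch should continue to type-check (option values are illustrative; the schema types all three fields as optional strings):

```ts
import type { OpenAICompatibleProviderOptions } from '@ai-sdk/openai-compatible';

// Illustrative values; all three fields are optional strings, so
// provider-specific values pass through unvalidated.
const providerOptions: OpenAICompatibleProviderOptions = {
  user: 'user-1234',
  reasoningEffort: 'medium',
  textVerbosity: 'low',
};
```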
package/dist/index.js CHANGED
@@ -34,6 +34,63 @@ var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
  var import_v43 = require("zod/v4");
 
+ // src/openai-compatible-error.ts
+ var import_v4 = require("zod/v4");
+ var openaiCompatibleErrorDataSchema = import_v4.z.object({
+   error: import_v4.z.object({
+     message: import_v4.z.string(),
+     // The additional information below is handled loosely to support
+     // OpenAI-compatible providers that have slightly different error
+     // responses:
+     type: import_v4.z.string().nullish(),
+     param: import_v4.z.any().nullish(),
+     code: import_v4.z.union([import_v4.z.string(), import_v4.z.number()]).nullish()
+   })
+ });
+ var defaultOpenAICompatibleErrorStructure = {
+   errorSchema: openaiCompatibleErrorDataSchema,
+   errorToMessage: (data) => data.error.message
+ };
+
+ // src/chat/convert-openai-compatible-chat-usage.ts
+ function convertOpenAICompatibleChatUsage(usage) {
+   var _a, _b, _c, _d, _e, _f;
+   if (usage == null) {
+     return {
+       inputTokens: {
+         total: void 0,
+         noCache: void 0,
+         cacheRead: void 0,
+         cacheWrite: void 0
+       },
+       outputTokens: {
+         total: void 0,
+         text: void 0,
+         reasoning: void 0
+       },
+       raw: void 0
+     };
+   }
+   const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+   const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+   const cacheReadTokens = (_d = (_c = usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : 0;
+   const reasoningTokens = (_f = (_e = usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0;
+   return {
+     inputTokens: {
+       total: promptTokens,
+       noCache: promptTokens - cacheReadTokens,
+       cacheRead: cacheReadTokens,
+       cacheWrite: void 0
+     },
+     outputTokens: {
+       total: completionTokens,
+       text: completionTokens - reasoningTokens,
+       reasoning: reasoningTokens
+     },
+     raw: usage
+   };
+ }
+
  // src/chat/convert-to-openai-compatible-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
@@ -190,41 +247,23 @@ function mapOpenAICompatibleFinishReason(finishReason) {
  }
 
  // src/chat/openai-compatible-chat-options.ts
- var import_v4 = require("zod/v4");
- var openaiCompatibleProviderOptions = import_v4.z.object({
+ var import_v42 = require("zod/v4");
+ var openaiCompatibleProviderOptions = import_v42.z.object({
    /**
     * A unique identifier representing your end-user, which can help the provider to
     * monitor and detect abuse.
     */
-   user: import_v4.z.string().optional(),
+   user: import_v42.z.string().optional(),
    /**
     * Reasoning effort for reasoning models. Defaults to `medium`.
     */
-   reasoningEffort: import_v4.z.string().optional(),
+   reasoningEffort: import_v42.z.string().optional(),
    /**
     * Controls the verbosity of the generated text. Defaults to `medium`.
     */
-   textVerbosity: import_v4.z.string().optional()
+   textVerbosity: import_v42.z.string().optional()
  });
 
- // src/openai-compatible-error.ts
- var import_v42 = require("zod/v4");
- var openaiCompatibleErrorDataSchema = import_v42.z.object({
-   error: import_v42.z.object({
-     message: import_v42.z.string(),
-     // The additional information below is handled loosely to support
-     // OpenAI-compatible providers that have slightly different error
-     // responses:
-     type: import_v42.z.string().nullish(),
-     param: import_v42.z.any().nullish(),
-     code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
-   })
- });
- var defaultOpenAICompatibleErrorStructure = {
-   errorSchema: openaiCompatibleErrorDataSchema,
-   errorToMessage: (data) => data.error.message
- };
-
  // src/chat/openai-compatible-prepare-tools.ts
  var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
@@ -395,7 +434,7 @@ var OpenAICompatibleChatLanguageModel = class {
      };
    }
    async doGenerate(options) {
-     var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
+     var _a, _b, _c, _d, _e;
      const { args, warnings } = await this.getArgs({ ...options });
      const body = JSON.stringify(args);
      const {
@@ -455,13 +494,7 @@ var OpenAICompatibleChatLanguageModel = class {
      return {
        content,
        finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
-       usage: {
-         inputTokens: (_g = (_f = responseBody.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-         outputTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
-         totalTokens: (_k = (_j = responseBody.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
-         reasoningTokens: (_n = (_m = (_l = responseBody.usage) == null ? void 0 : _l.completion_tokens_details) == null ? void 0 : _m.reasoning_tokens) != null ? _n : void 0,
-         cachedInputTokens: (_q = (_p = (_o = responseBody.usage) == null ? void 0 : _o.prompt_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
-       },
+       usage: convertOpenAICompatibleChatUsage(responseBody.usage),
        providerMetadata,
        request: { body },
        response: {
@@ -498,19 +531,7 @@ var OpenAICompatibleChatLanguageModel = class {
      });
      const toolCalls = [];
      let finishReason = "unknown";
-     const usage = {
-       completionTokens: void 0,
-       completionTokensDetails: {
-         reasoningTokens: void 0,
-         acceptedPredictionTokens: void 0,
-         rejectedPredictionTokens: void 0
-       },
-       promptTokens: void 0,
-       promptTokensDetails: {
-         cachedTokens: void 0
-       },
-       totalTokens: void 0
-     };
+     let usage = void 0;
      let isFirstChunk = true;
      const providerOptionsName = this.providerOptionsName;
      let isActiveReasoning = false;
@@ -521,7 +542,6 @@
            start(controller) {
              controller.enqueue({ type: "stream-start", warnings });
            },
-           // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
            transform(chunk, controller) {
              var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
              if (options.includeRawChunks) {
@@ -532,13 +552,16 @@
                controller.enqueue({ type: "error", error: chunk.error });
                return;
              }
-             const value = chunk.value;
              metadataExtractor == null ? void 0 : metadataExtractor.processChunk(chunk.rawValue);
-             if ("error" in value) {
+             if ("error" in chunk.value) {
                finishReason = "error";
-               controller.enqueue({ type: "error", error: value.error.message });
+               controller.enqueue({
+                 type: "error",
+                 error: chunk.value.error.message
+               });
                return;
              }
+             const value = chunk.value;
              if (isFirstChunk) {
                isFirstChunk = false;
                controller.enqueue({
@@ -547,28 +570,7 @@
                });
              }
              if (value.usage != null) {
-               const {
-                 prompt_tokens,
-                 completion_tokens,
-                 total_tokens,
-                 prompt_tokens_details,
-                 completion_tokens_details
-               } = value.usage;
-               usage.promptTokens = prompt_tokens != null ? prompt_tokens : void 0;
-               usage.completionTokens = completion_tokens != null ? completion_tokens : void 0;
-               usage.totalTokens = total_tokens != null ? total_tokens : void 0;
-               if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-                 usage.completionTokensDetails.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-               }
-               if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-                 usage.completionTokensDetails.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
-               }
-               if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-                 usage.completionTokensDetails.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
-               }
-               if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
-                 usage.promptTokensDetails.cachedTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
-               }
+               usage = value.usage;
              }
              const choice = value.choices[0];
              if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -690,7 +692,7 @@
              }
            },
            flush(controller) {
-             var _a2, _b, _c, _d, _e, _f;
+             var _a2, _b, _c, _d, _e;
              if (isActiveReasoning) {
                controller.enqueue({ type: "reasoning-end", id: "reasoning-0" });
              }
@@ -715,22 +717,16 @@
                [providerOptionsName]: {},
                ...metadataExtractor == null ? void 0 : metadataExtractor.buildMetadata()
              };
-             if (usage.completionTokensDetails.acceptedPredictionTokens != null) {
-               providerMetadata[providerOptionsName].acceptedPredictionTokens = usage.completionTokensDetails.acceptedPredictionTokens;
+             if (((_b = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _b.accepted_prediction_tokens) != null) {
+               providerMetadata[providerOptionsName].acceptedPredictionTokens = (_c = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _c.accepted_prediction_tokens;
              }
-             if (usage.completionTokensDetails.rejectedPredictionTokens != null) {
-               providerMetadata[providerOptionsName].rejectedPredictionTokens = usage.completionTokensDetails.rejectedPredictionTokens;
+             if (((_d = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _d.rejected_prediction_tokens) != null) {
+               providerMetadata[providerOptionsName].rejectedPredictionTokens = (_e = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _e.rejected_prediction_tokens;
              }
              controller.enqueue({
                type: "finish",
                finishReason,
-               usage: {
-                 inputTokens: (_b = usage.promptTokens) != null ? _b : void 0,
-                 outputTokens: (_c = usage.completionTokens) != null ? _c : void 0,
-                 totalTokens: (_d = usage.totalTokens) != null ? _d : void 0,
-                 reasoningTokens: (_e = usage.completionTokensDetails.reasoningTokens) != null ? _e : void 0,
-                 cachedInputTokens: (_f = usage.promptTokensDetails.cachedTokens) != null ? _f : void 0
-               },
+               usage: convertOpenAICompatibleChatUsage(usage),
                providerMetadata
              });
            }
@@ -780,43 +776,78 @@ var OpenAICompatibleChatResponseSchema = import_v43.z.object({
    ),
    usage: openaiCompatibleTokenUsageSchema
  });
- var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union([
-   import_v43.z.object({
-     id: import_v43.z.string().nullish(),
-     created: import_v43.z.number().nullish(),
-     model: import_v43.z.string().nullish(),
-     choices: import_v43.z.array(
-       import_v43.z.object({
-         delta: import_v43.z.object({
-           role: import_v43.z.enum(["assistant"]).nullish(),
-           content: import_v43.z.string().nullish(),
-           // Most openai-compatible models set `reasoning_content`, but some
-           // providers serving `gpt-oss` set `reasoning`. See #7866
-           reasoning_content: import_v43.z.string().nullish(),
-           reasoning: import_v43.z.string().nullish(),
-           tool_calls: import_v43.z.array(
-             import_v43.z.object({
-               index: import_v43.z.number(),
-               id: import_v43.z.string().nullish(),
-               function: import_v43.z.object({
-                 name: import_v43.z.string().nullish(),
-                 arguments: import_v43.z.string().nullish()
+ var chunkBaseSchema = import_v43.z.object({
+   id: import_v43.z.string().nullish(),
+   created: import_v43.z.number().nullish(),
+   model: import_v43.z.string().nullish(),
+   choices: import_v43.z.array(
+     import_v43.z.object({
+       delta: import_v43.z.object({
+         role: import_v43.z.enum(["assistant"]).nullish(),
+         content: import_v43.z.string().nullish(),
+         // Most openai-compatible models set `reasoning_content`, but some
+         // providers serving `gpt-oss` set `reasoning`. See #7866
+         reasoning_content: import_v43.z.string().nullish(),
+         reasoning: import_v43.z.string().nullish(),
+         tool_calls: import_v43.z.array(
+           import_v43.z.object({
+             index: import_v43.z.number(),
+             id: import_v43.z.string().nullish(),
+             function: import_v43.z.object({
+               name: import_v43.z.string().nullish(),
+               arguments: import_v43.z.string().nullish()
              })
-           ).nullish()
-         }).nullish(),
-         finish_reason: import_v43.z.string().nullish()
-       })
-     ),
-     usage: openaiCompatibleTokenUsageSchema
-   }),
-   errorSchema
- ]);
+           })
+         ).nullish()
+       }).nullish(),
+       finish_reason: import_v43.z.string().nullish()
+     })
+   ),
+   usage: openaiCompatibleTokenUsageSchema
+ });
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union([chunkBaseSchema, errorSchema]);
 
  // src/completion/openai-compatible-completion-language-model.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
  var import_v45 = require("zod/v4");
 
+ // src/completion/convert-openai-compatible-completion-usage.ts
+ function convertOpenAICompatibleCompletionUsage(usage) {
+   var _a, _b;
+   if (usage == null) {
+     return {
+       inputTokens: {
+         total: void 0,
+         noCache: void 0,
+         cacheRead: void 0,
+         cacheWrite: void 0
+       },
+       outputTokens: {
+         total: void 0,
+         text: void 0,
+         reasoning: void 0
+       },
+       raw: void 0
+     };
+   }
+   const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+   const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+   return {
+     inputTokens: {
+       total: promptTokens,
+       noCache: promptTokens,
+       cacheRead: void 0,
+       cacheWrite: void 0
+     },
+     outputTokens: {
+       total: completionTokens,
+       text: completionTokens,
+       reasoning: void 0
+     },
+     raw: usage
+   };
+ }
+
  // src/completion/convert-to-openai-compatible-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
  function convertToOpenAICompatibleCompletionPrompt({
@@ -1037,7 +1068,6 @@ var OpenAICompatibleCompletionLanguageModel = class {
      };
    }
    async doGenerate(options) {
-     var _a, _b, _c, _d, _e, _f;
      const { args, warnings } = await this.getArgs(options);
      const {
        responseHeaders,
@@ -1064,11 +1094,7 @@
      }
      return {
        content,
-       usage: {
-         inputTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : void 0,
-         outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0,
-         totalTokens: (_f = (_e = response.usage) == null ? void 0 : _e.total_tokens) != null ? _f : void 0
-       },
+       usage: convertOpenAICompatibleCompletionUsage(response.usage),
        finishReason: mapOpenAICompatibleFinishReason2(choice.finish_reason),
        request: { body: args },
        response: {
@@ -1102,11 +1128,7 @@
        fetch: this.config.fetch
      });
      let finishReason = "unknown";
-     const usage = {
-       inputTokens: void 0,
-       outputTokens: void 0,
-       totalTokens: void 0
-     };
+     let usage = void 0;
      let isFirstChunk = true;
      return {
        stream: response.pipeThrough(
@@ -1115,7 +1137,6 @@
              controller.enqueue({ type: "stream-start", warnings });
            },
            transform(chunk, controller) {
-             var _a, _b, _c;
              if (options.includeRawChunks) {
                controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
              }
@@ -1142,9 +1163,7 @@
                });
              }
              if (value.usage != null) {
-               usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
-               usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
-               usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+               usage = value.usage;
              }
              const choice = value.choices[0];
              if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1167,7 +1186,7 @@
              controller.enqueue({
                type: "finish",
                finishReason,
-               usage
+               usage: convertOpenAICompatibleCompletionUsage(usage)
              });
            }
          })
@@ -1306,6 +1325,7 @@ var OpenAICompatibleEmbeddingModel = class {
        fetch: this.config.fetch
      });
      return {
+       warnings: [],
        embeddings: response.data.map((item) => item.embedding),
        usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
        providerMetadata: response.providerMetadata,
@@ -1397,7 +1417,7 @@ var openaiCompatibleImageResponseSchema = import_v48.z.object({
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
 
  // src/version.ts
- var VERSION = true ? "2.0.0-beta.44" : "0.0.0-test";
+ var VERSION = true ? "2.0.0-beta.46" : "0.0.0-test";
 
  // src/openai-compatible-provider.ts
  function createOpenAICompatible(options) {
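
For reference, the minified `convertOpenAICompatibleChatUsage` added above de-sugars to roughly the following TypeScript. This is a readable reconstruction of the compiled `dist/index.js` output, not the package's actual `src/` file; the input type is inferred from the fields the compiled code reads:

```ts
// Reconstruction of convertOpenAICompatibleChatUsage from the compiled
// output above; field names match the diff, types are inferred.
type OpenAICompatibleUsage = {
  prompt_tokens?: number | null;
  completion_tokens?: number | null;
  prompt_tokens_details?: { cached_tokens?: number | null } | null;
  completion_tokens_details?: { reasoning_tokens?: number | null } | null;
};

function convertOpenAICompatibleChatUsage(usage: OpenAICompatibleUsage | undefined) {
  if (usage == null) {
    // No usage reported: every field stays undefined, raw payload included.
    return {
      inputTokens: { total: undefined, noCache: undefined, cacheRead: undefined, cacheWrite: undefined },
      outputTokens: { total: undefined, text: undefined, reasoning: undefined },
      raw: undefined,
    };
  }
  const promptTokens = usage.prompt_tokens ?? 0;
  const completionTokens = usage.completion_tokens ?? 0;
  const cacheReadTokens = usage.prompt_tokens_details?.cached_tokens ?? 0;
  const reasoningTokens = usage.completion_tokens_details?.reasoning_tokens ?? 0;
  return {
    inputTokens: {
      total: promptTokens,
      noCache: promptTokens - cacheReadTokens, // cached reads carved out of the prompt total
      cacheRead: cacheReadTokens,
      cacheWrite: undefined, // not reported by OpenAI-compatible APIs
    },
    outputTokens: {
      total: completionTokens,
      text: completionTokens - reasoningTokens, // visible text = total minus reasoning
      reasoning: reasoningTokens,
    },
    raw: usage,
  };
}
```

The completion-model variant (`convertOpenAICompatibleCompletionUsage`) is the same skeleton with the cache and reasoning fields pinned to `undefined`, since the legacy completions endpoint reports neither.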