@ai-sdk/openai-compatible 1.0.0-canary.13 → 1.0.0-canary.15

package/CHANGELOG.md CHANGED
@@ -1,5 +1,25 @@
 # @ai-sdk/openai-compatible
 
+## 1.0.0-canary.15
+
+### Patch Changes
+
+- Updated dependencies [a571d6e]
+- Updated dependencies [a8c8bd5]
+- Updated dependencies [7979f7f]
+- Updated dependencies [41fa418]
+  - @ai-sdk/provider-utils@3.0.0-canary.15
+  - @ai-sdk/provider@2.0.0-canary.14
+
+## 1.0.0-canary.14
+
+### Patch Changes
+
+- Updated dependencies [957b739]
+- Updated dependencies [9bd5ab5]
+  - @ai-sdk/provider-utils@3.0.0-canary.14
+  - @ai-sdk/provider@2.0.0-canary.13
+
 ## 1.0.0-canary.13
 
 ### Patch Changes
package/README.md CHANGED
@@ -97,4 +97,4 @@ const { text } = await generateText({
 });
 ```
 
-For more examples, see the [OpenAI Compatible Providers](https://sdk.vercel.ai/providers/openai-compatible-providers) documentation.
+For more examples, see the [OpenAI Compatible Providers](https://ai-sdk.dev/providers/openai-compatible-providers) documentation.
package/dist/index.js CHANGED
@@ -361,7 +361,7 @@ var OpenAICompatibleChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
     const { args, warnings } = await this.getArgs({ ...options });
     const body = JSON.stringify(args);
     const {
@@ -413,25 +413,21 @@ var OpenAICompatibleChatLanguageModel = class {
       }))
     };
     const completionTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.completion_tokens_details;
-    const promptTokenDetails = (_e = responseBody.usage) == null ? void 0 : _e.prompt_tokens_details;
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata[this.providerOptionsName].reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-    }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata[this.providerOptionsName].acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata[this.providerOptionsName].rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-      providerMetadata[this.providerOptionsName].cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
-    }
     return {
       content,
       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (_g = (_f = responseBody.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
+        inputTokens: (_f = (_e = responseBody.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+        outputTokens: (_h = (_g = responseBody.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0,
+        totalTokens: (_j = (_i = responseBody.usage) == null ? void 0 : _i.total_tokens) != null ? _j : void 0,
+        reasoningTokens: (_m = (_l = (_k = responseBody.usage) == null ? void 0 : _k.completion_tokens_details) == null ? void 0 : _l.reasoning_tokens) != null ? _m : void 0,
+        cachedInputTokens: (_p = (_o = (_n = responseBody.usage) == null ? void 0 : _n.prompt_tokens_details) == null ? void 0 : _o.cached_tokens) != null ? _p : void 0
       },
       providerMetadata,
       request: { body },
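
Note: the substantive change in `doGenerate` is that reasoning and cached-prompt token counts move off `providerMetadata` (formerly `reasoningTokens` and `cachedPromptTokens`) onto the standardized `usage` result, which also gains `totalTokens`. A minimal consumer-side sketch, assuming the AI SDK v5 canary `generateText` API; the provider `name`, `baseURL`, and model id below are placeholders:

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { generateText } from 'ai';

// Placeholder provider config; any OpenAI-compatible endpoint works the same way.
const provider = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
});

const { usage } = await generateText({
  model: provider('example-model'),
  prompt: 'Hello!',
});

// Previously surfaced as providerMetadata.example.reasoningTokens and
// providerMetadata.example.cachedPromptTokens; now on the usage object:
console.log(usage.totalTokens);       // mapped from usage.total_tokens
console.log(usage.reasoningTokens);   // from completion_tokens_details.reasoning_tokens
console.log(usage.cachedInputTokens); // from prompt_tokens_details.cached_tokens
```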
@@ -469,7 +465,7 @@ var OpenAICompatibleChatLanguageModel = class {
     });
     const toolCalls = [];
     let finishReason = "unknown";
-    let usage = {
+    const usage = {
       completionTokens: void 0,
       completionTokensDetails: {
         reasoningTokens: void 0,
@@ -479,10 +475,11 @@ var OpenAICompatibleChatLanguageModel = class {
       promptTokens: void 0,
       promptTokensDetails: {
         cachedTokens: void 0
-      }
+      },
+      totalTokens: void 0
     };
     let isFirstChunk = true;
-    let providerOptionsName = this.providerOptionsName;
+    const providerOptionsName = this.providerOptionsName;
     return {
       stream: response.pipeThrough(
         new TransformStream({
@@ -515,11 +512,13 @@ var OpenAICompatibleChatLanguageModel = class {
               const {
                 prompt_tokens,
                 completion_tokens,
+                total_tokens,
                 prompt_tokens_details,
                 completion_tokens_details
               } = value.usage;
               usage.promptTokens = prompt_tokens != null ? prompt_tokens : void 0;
               usage.completionTokens = completion_tokens != null ? completion_tokens : void 0;
+              usage.totalTokens = total_tokens != null ? total_tokens : void 0;
               if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
                 usage.completionTokensDetails.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
               }
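
In the streaming path, `total_tokens` is now destructured from whichever chunk carries a usage block. A sketch of the final chunk shape this transform expects from an OpenAI-style server (values are illustrative; OpenAI itself only attaches usage to a trailing chunk when the request opts in via `stream_options.include_usage`):

```ts
// Illustrative final streamed chunk payload; all counts are made up.
const finalChunk = {
  id: 'chatcmpl-123',
  choices: [],
  usage: {
    prompt_tokens: 12,
    completion_tokens: 34,
    total_tokens: 46, // newly picked up in canary.15
    prompt_tokens_details: { cached_tokens: 8 },
    completion_tokens_details: { reasoning_tokens: 20 },
  },
};
```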
@@ -638,29 +637,26 @@ var OpenAICompatibleChatLanguageModel = class {
             }
           },
           flush(controller) {
-            var _a2, _b;
+            var _a2, _b, _c, _d, _e;
             const providerMetadata = {
               [providerOptionsName]: {},
               ...metadataExtractor == null ? void 0 : metadataExtractor.buildMetadata()
             };
-            if (usage.completionTokensDetails.reasoningTokens != null) {
-              providerMetadata[providerOptionsName].reasoningTokens = usage.completionTokensDetails.reasoningTokens;
-            }
             if (usage.completionTokensDetails.acceptedPredictionTokens != null) {
               providerMetadata[providerOptionsName].acceptedPredictionTokens = usage.completionTokensDetails.acceptedPredictionTokens;
             }
             if (usage.completionTokensDetails.rejectedPredictionTokens != null) {
               providerMetadata[providerOptionsName].rejectedPredictionTokens = usage.completionTokensDetails.rejectedPredictionTokens;
             }
-            if (usage.promptTokensDetails.cachedTokens != null) {
-              providerMetadata[providerOptionsName].cachedPromptTokens = usage.promptTokensDetails.cachedTokens;
-            }
             controller.enqueue({
               type: "finish",
               finishReason,
               usage: {
                 inputTokens: (_a2 = usage.promptTokens) != null ? _a2 : void 0,
-                outputTokens: (_b = usage.completionTokens) != null ? _b : void 0
+                outputTokens: (_b = usage.completionTokens) != null ? _b : void 0,
+                totalTokens: (_c = usage.totalTokens) != null ? _c : void 0,
+                reasoningTokens: (_d = usage.completionTokensDetails.reasoningTokens) != null ? _d : void 0,
+                cachedInputTokens: (_e = usage.promptTokensDetails.cachedTokens) != null ? _e : void 0
               },
               providerMetadata
             });
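
Mirroring `doGenerate`, `flush` now forwards the totals on the `finish` stream part rather than via provider metadata. A minimal sketch of reading that part at the model level, assuming the `LanguageModelV2` `doStream` interface this package implements (prompt shape abbreviated):

```ts
// `model` is a LanguageModelV2 instance obtained from this provider package.
const { stream } = await model.doStream({
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hi' }] }],
});

const reader = stream.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  if (value.type === 'finish') {
    // New in this diff: totalTokens, reasoningTokens, and cachedInputTokens
    // sit alongside inputTokens and outputTokens on the finish part.
    console.log(value.usage);
  }
}
```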
@@ -675,6 +671,7 @@ var OpenAICompatibleChatLanguageModel = class {
 var openaiCompatibleTokenUsageSchema = import_zod3.z.object({
   prompt_tokens: import_zod3.z.number().nullish(),
   completion_tokens: import_zod3.z.number().nullish(),
+  total_tokens: import_zod3.z.number().nullish(),
   prompt_tokens_details: import_zod3.z.object({
     cached_tokens: import_zod3.z.number().nullish()
   }).nullish(),
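
The chat usage schema simply gains a `total_tokens` field. Everything stays `.nullish()`, so responses from servers that omit any of these counters still validate; a quick standalone check, recreated here with plain `zod` for illustration:

```ts
import { z } from 'zod';

// Standalone recreation of the extended chat token usage schema.
const tokenUsageSchema = z.object({
  prompt_tokens: z.number().nullish(),
  completion_tokens: z.number().nullish(),
  total_tokens: z.number().nullish(),
  prompt_tokens_details: z
    .object({ cached_tokens: z.number().nullish() })
    .nullish(),
});

// A server that predates total_tokens reporting still parses cleanly.
tokenUsageSchema.parse({ prompt_tokens: 1, completion_tokens: 2 });
```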
@@ -935,7 +932,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d;
+    var _a, _b, _c, _d, _e, _f;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -964,7 +961,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
       content,
       usage: {
         inputTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : void 0,
-        outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0
+        outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0,
+        totalTokens: (_f = (_e = response.usage) == null ? void 0 : _e.total_tokens) != null ? _f : void 0
       },
       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
       request: { body: args },
@@ -1001,7 +999,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     return {
@@ -1011,7 +1010,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
             controller.enqueue({ type: "stream-start", warnings });
           },
           transform(chunk, controller) {
-            var _a, _b;
+            var _a, _b, _c;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -1033,6 +1032,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
             if (value.usage != null) {
               usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
               usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+              usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1061,6 +1061,11 @@ var OpenAICompatibleCompletionLanguageModel = class {
       };
     }
 };
+var usageSchema = import_zod5.z.object({
+  prompt_tokens: import_zod5.z.number(),
+  completion_tokens: import_zod5.z.number(),
+  total_tokens: import_zod5.z.number()
+});
 var openaiCompatibleCompletionResponseSchema = import_zod5.z.object({
   id: import_zod5.z.string().nullish(),
   created: import_zod5.z.number().nullish(),
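
Extracting `usageSchema` deduplicates the two identical inline object schemas replaced below. One behavioral nuance worth noting: unlike the chat model's `openaiCompatibleTokenUsageSchema`, whose fields are all `.nullish()`, this completion-side schema requires `prompt_tokens`, `completion_tokens`, and `total_tokens` whenever a `usage` object is present at all; only the object as a whole is optional, via `usageSchema.nullish()`.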
@@ -1071,10 +1076,7 @@ var openaiCompatibleCompletionResponseSchema = import_zod5.z.object({
       finish_reason: import_zod5.z.string()
     })
   ),
-  usage: import_zod5.z.object({
-    prompt_tokens: import_zod5.z.number(),
-    completion_tokens: import_zod5.z.number()
-  }).nullish()
+  usage: usageSchema.nullish()
 });
 var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_zod5.z.union([
   import_zod5.z.object({
@@ -1088,10 +1090,7 @@ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_zod5.z
         index: import_zod5.z.number()
       })
     ),
-    usage: import_zod5.z.object({
-      prompt_tokens: import_zod5.z.number(),
-      completion_tokens: import_zod5.z.number()
-    }).nullish()
+    usage: usageSchema.nullish()
   }),
   errorSchema
 ]);