@ai-sdk/openai-compatible 0.2.4 → 0.2.6

package/CHANGELOG.md CHANGED
@@ -1,5 +1,18 @@
  # @ai-sdk/openai-compatible
  
+ ## 0.2.6
+
+ ### Patch Changes
+
+ - Updated dependencies [2c19b9a]
+   - @ai-sdk/provider-utils@2.2.4
+
+ ## 0.2.5
+
+ ### Patch Changes
+
+ - d186cca: feat (provider/openai-compatible): add additional token usage metrics
+
  ## 0.2.4
  
  ### Patch Changes
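
The 0.2.5 entry above is the substantive change in this release pair: the chat model now reports OpenAI-style token usage details (reasoning, accepted/rejected prediction, and cached prompt tokens) through provider metadata, keyed by the provider's name. A minimal consumer-side sketch, assuming a provider named `example` and an AI SDK version whose `generateText` result exposes `providerMetadata` (the base URL, model id, and env var here are hypothetical):

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { generateText } from 'ai';

// Hypothetical provider setup; `name` becomes the providerMetadata key.
const example = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
  apiKey: process.env.EXAMPLE_API_KEY ?? '',
});

async function main() {
  const result = await generateText({
    model: example('example-model'),
    prompt: 'Hello!',
  });

  // Per the diff below, each metric is only set when the upstream response
  // actually reports it, so every read should be optional.
  const meta = result.providerMetadata?.example;
  console.log(meta?.reasoningTokens);          // completion_tokens_details.reasoning_tokens
  console.log(meta?.acceptedPredictionTokens); // completion_tokens_details.accepted_prediction_tokens
  console.log(meta?.rejectedPredictionTokens); // completion_tokens_details.rejected_prediction_tokens
  console.log(meta?.cachedPromptTokens);       // prompt_tokens_details.cached_tokens
}

main().catch(console.error);
```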
package/dist/index.js CHANGED
@@ -381,7 +381,7 @@ var OpenAICompatibleChatLanguageModel = class {
  }
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
  const { args, warnings } = this.getArgs({ ...options });
  const body = JSON.stringify(args);
  const {
@@ -404,13 +404,30 @@ var OpenAICompatibleChatLanguageModel = class {
  });
  const { messages: rawPrompt, ...rawSettings } = args;
  const choice = responseBody.choices[0];
- const providerMetadata = (_b = (_a = this.config.metadataExtractor) == null ? void 0 : _a.extractMetadata) == null ? void 0 : _b.call(_a, {
- parsedBody: rawResponse
- });
+ const providerMetadata = {
+ [this.providerOptionsName]: {},
+ ...(_b = (_a = this.config.metadataExtractor) == null ? void 0 : _a.extractMetadata) == null ? void 0 : _b.call(_a, {
+ parsedBody: rawResponse
+ })
+ };
+ const completionTokenDetails = (_c = responseBody.usage) == null ? void 0 : _c.completion_tokens_details;
+ const promptTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.prompt_tokens_details;
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
+ providerMetadata[this.providerOptionsName].reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
+ }
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
+ providerMetadata[this.providerOptionsName].acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
+ }
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
+ providerMetadata[this.providerOptionsName].rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
+ }
+ if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
+ providerMetadata[this.providerOptionsName].cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+ }
  return {
- text: (_c = choice.message.content) != null ? _c : void 0,
- reasoning: (_d = choice.message.reasoning_content) != null ? _d : void 0,
- toolCalls: (_e = choice.message.tool_calls) == null ? void 0 : _e.map((toolCall) => {
+ text: (_e = choice.message.content) != null ? _e : void 0,
+ reasoning: (_f = choice.message.reasoning_content) != null ? _f : void 0,
+ toolCalls: (_g = choice.message.tool_calls) == null ? void 0 : _g.map((toolCall) => {
  var _a2;
  return {
  toolCallType: "function",
@@ -421,10 +438,10 @@ var OpenAICompatibleChatLanguageModel = class {
  }),
  finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_g = (_f = responseBody.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : NaN,
- completionTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : NaN
+ promptTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.prompt_tokens) != null ? _i : NaN,
+ completionTokens: (_k = (_j = responseBody.usage) == null ? void 0 : _j.completion_tokens) != null ? _k : NaN
  },
- ...providerMetadata && { providerMetadata },
+ providerMetadata,
  rawCall: { rawPrompt, rawSettings },
  rawResponse: { headers: responseHeaders, body: rawResponse },
  response: getResponseMetadata(responseBody),
@@ -511,16 +528,25 @@ var OpenAICompatibleChatLanguageModel = class {
  const toolCalls = [];
  let finishReason = "unknown";
  let usage = {
+ completionTokens: void 0,
+ completionTokensDetails: {
+ reasoningTokens: void 0,
+ acceptedPredictionTokens: void 0,
+ rejectedPredictionTokens: void 0
+ },
  promptTokens: void 0,
- completionTokens: void 0
+ promptTokensDetails: {
+ cachedTokens: void 0
+ }
  };
  let isFirstChunk = true;
+ let providerOptionsName = this.providerOptionsName;
  return {
  stream: response.pipeThrough(
  new TransformStream({
  // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
  transform(chunk, controller) {
- var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -541,10 +567,26 @@ var OpenAICompatibleChatLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: (_a2 = value.usage.prompt_tokens) != null ? _a2 : void 0,
- completionTokens: (_b = value.usage.completion_tokens) != null ? _b : void 0
- };
+ const {
+ prompt_tokens,
+ completion_tokens,
+ prompt_tokens_details,
+ completion_tokens_details
+ } = value.usage;
+ usage.promptTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.completionTokens = completion_tokens != null ? completion_tokens : void 0;
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
+ usage.completionTokensDetails.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+ }
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
+ usage.completionTokensDetails.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+ }
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
+ usage.completionTokensDetails.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+ }
+ if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
+ usage.promptTokensDetails.cachedTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ }
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -584,7 +626,7 @@ var OpenAICompatibleChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_c = toolCallDelta.function) == null ? void 0 : _c.name) == null) {
+ if (((_a2 = toolCallDelta.function) == null ? void 0 : _a2.name) == null) {
  throw new import_provider3.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -595,12 +637,12 @@ var OpenAICompatibleChatLanguageModel = class {
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
+ arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null) {
+ if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
  type: "tool-call-delta",
@@ -614,7 +656,7 @@ var OpenAICompatibleChatLanguageModel = class {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_g = toolCall2.id) != null ? _g : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils2.generateId)(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -627,21 +669,21 @@ var OpenAICompatibleChatLanguageModel = class {
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
- toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
+ if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
+ toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
  }
  controller.enqueue({
  type: "tool-call-delta",
  toolCallType: "function",
  toolCallId: toolCall.id,
  toolName: toolCall.function.name,
- argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
+ argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
  });
- if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_n = toolCall.id) != null ? _n : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils2.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
@@ -652,7 +694,22 @@ var OpenAICompatibleChatLanguageModel = class {
  },
  flush(controller) {
  var _a2, _b;
- const metadata = metadataExtractor == null ? void 0 : metadataExtractor.buildMetadata();
+ const providerMetadata = {
+ [providerOptionsName]: {},
+ ...metadataExtractor == null ? void 0 : metadataExtractor.buildMetadata()
+ };
+ if (usage.completionTokensDetails.reasoningTokens != null) {
+ providerMetadata[providerOptionsName].reasoningTokens = usage.completionTokensDetails.reasoningTokens;
+ }
+ if (usage.completionTokensDetails.acceptedPredictionTokens != null) {
+ providerMetadata[providerOptionsName].acceptedPredictionTokens = usage.completionTokensDetails.acceptedPredictionTokens;
+ }
+ if (usage.completionTokensDetails.rejectedPredictionTokens != null) {
+ providerMetadata[providerOptionsName].rejectedPredictionTokens = usage.completionTokensDetails.rejectedPredictionTokens;
+ }
+ if (usage.promptTokensDetails.cachedTokens != null) {
+ providerMetadata[providerOptionsName].cachedPromptTokens = usage.promptTokensDetails.cachedTokens;
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
@@ -660,7 +717,7 @@ var OpenAICompatibleChatLanguageModel = class {
  promptTokens: (_a2 = usage.promptTokens) != null ? _a2 : NaN,
  completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
  },
- ...metadata && { providerMetadata: metadata }
+ providerMetadata
  });
  }
  })
@@ -672,6 +729,18 @@ var OpenAICompatibleChatLanguageModel = class {
  };
  }
  };
+ var openaiCompatibleTokenUsageSchema = import_zod2.z.object({
+ prompt_tokens: import_zod2.z.number().nullish(),
+ completion_tokens: import_zod2.z.number().nullish(),
+ prompt_tokens_details: import_zod2.z.object({
+ cached_tokens: import_zod2.z.number().nullish()
+ }).nullish(),
+ completion_tokens_details: import_zod2.z.object({
+ reasoning_tokens: import_zod2.z.number().nullish(),
+ accepted_prediction_tokens: import_zod2.z.number().nullish(),
+ rejected_prediction_tokens: import_zod2.z.number().nullish()
+ }).nullish()
+ }).nullish();
  var OpenAICompatibleChatResponseSchema = import_zod2.z.object({
  id: import_zod2.z.string().nullish(),
  created: import_zod2.z.number().nullish(),
@@ -696,10 +765,7 @@ var OpenAICompatibleChatResponseSchema = import_zod2.z.object({
  finish_reason: import_zod2.z.string().nullish()
  })
  ),
- usage: import_zod2.z.object({
- prompt_tokens: import_zod2.z.number().nullish(),
- completion_tokens: import_zod2.z.number().nullish()
- }).nullish()
+ usage: openaiCompatibleTokenUsageSchema
  });
  var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_zod2.z.union([
  import_zod2.z.object({
@@ -727,10 +793,7 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_zod2.z.union
  finish_reason: import_zod2.z.string().nullish()
  })
  ),
- usage: import_zod2.z.object({
- prompt_tokens: import_zod2.z.number().nullish(),
- completion_tokens: import_zod2.z.number().nullish()
- }).nullish()
+ usage: openaiCompatibleTokenUsageSchema
  }),
  errorSchema
  ]);
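
For reference, the new shared usage schema accepts payloads where every field may be absent or null, so providers that omit the detail objects still parse. A standalone sketch of the same shape (restated here with plain `zod` imports for illustration; in the bundle it lives on `import_zod2` as shown above):

```ts
import { z } from 'zod';

// Same shape as openaiCompatibleTokenUsageSchema in the diff above.
const tokenUsageSchema = z
  .object({
    prompt_tokens: z.number().nullish(),
    completion_tokens: z.number().nullish(),
    prompt_tokens_details: z
      .object({ cached_tokens: z.number().nullish() })
      .nullish(),
    completion_tokens_details: z
      .object({
        reasoning_tokens: z.number().nullish(),
        accepted_prediction_tokens: z.number().nullish(),
        rejected_prediction_tokens: z.number().nullish(),
      })
      .nullish(),
  })
  .nullish();

// A full OpenAI-style usage block parses...
tokenUsageSchema.parse({
  prompt_tokens: 12,
  completion_tokens: 34,
  prompt_tokens_details: { cached_tokens: 8 },
  completion_tokens_details: { reasoning_tokens: 20 },
});

// ...and so does one that omits the detail objects entirely.
tokenUsageSchema.parse({ prompt_tokens: 12, completion_tokens: 34 });
```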