@ai-sdk/openai 1.0.12 → 1.0.14

This diff shows the changes between publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,23 @@
  # @ai-sdk/openai

+ ## 1.0.14
+
+ ### Patch Changes
+
+ - 19a2ce7: feat (ai/core): add aspectRatio and seed options to generateImage
+ - 6337688: feat: change image generation errors to warnings
+ - Updated dependencies [19a2ce7]
+ - Updated dependencies [19a2ce7]
+ - Updated dependencies [6337688]
+   - @ai-sdk/provider@1.0.4
+   - @ai-sdk/provider-utils@2.0.6
+
+ ## 1.0.13
+
+ ### Patch Changes
+
+ - b19aa82: feat (provider/openai): add predicted outputs token usage
+
  ## 1.0.12

  ### Patch Changes
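Read with the dist changes below, the 1.0.13 entry means the provider now forwards OpenAI's predicted-outputs token counts through `providerMetadata.openai`. A minimal sketch of reading them follows; the `prediction` call option is assumed from OpenAI's Predicted Outputs feature and is not part of this diff, only the `acceptedPredictionTokens` and `rejectedPredictionTokens` metadata keys are:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Hypothetical input: an existing snippet the model is expected to mostly reuse.
const existingCode = 'interface User { id: string; name: string; }';

const result = await generateText({
  model: openai('gpt-4o'),
  prompt: `Rename the 'name' field to 'email' in:\n${existingCode}`,
  // Assumed option shape, mirroring OpenAI's Predicted Outputs API.
  experimental_providerMetadata: {
    openai: { prediction: { type: 'content', content: existingCode } },
  },
});

// These keys are the ones added in this release.
const metadata = result.experimental_providerMetadata?.openai;
console.log(metadata?.acceptedPredictionTokens); // prediction tokens the model reused
console.log(metadata?.rejectedPredictionTokens); // prediction tokens it discarded
```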
package/dist/index.js CHANGED
@@ -524,7 +524,7 @@ var OpenAIChatLanguageModel = class {
  }
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const { args: body, warnings } = this.getArgs(options);
  const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
@@ -542,18 +542,23 @@ var OpenAIChatLanguageModel = class {
  });
  const { messages: rawPrompt, ...rawSettings } = body;
  const choice = response.choices[0];
- let providerMetadata;
- if (((_b = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null || ((_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens_details) == null ? void 0 : _d.cached_tokens) != null) {
- providerMetadata = { openai: {} };
- if (((_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens_details) == null ? void 0 : _h.reasoning_tokens;
- }
- if (((_j = (_i = response.usage) == null ? void 0 : _i.prompt_tokens_details) == null ? void 0 : _j.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = (_l = (_k = response.usage) == null ? void 0 : _k.prompt_tokens_details) == null ? void 0 : _l.cached_tokens;
- }
+ const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
+ const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+ const providerMetadata = { openai: {} };
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
+ providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
+ }
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
+ }
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
+ }
+ if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
+ providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: (_m = choice.message.content) != null ? _m : void 0,
+ text: (_c = choice.message.content) != null ? _c : void 0,
  toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
  {
  toolCallType: "function",
@@ -561,7 +566,7 @@ var OpenAIChatLanguageModel = class {
  toolName: choice.message.function_call.name,
  args: choice.message.function_call.arguments
  }
- ] : (_n = choice.message.tool_calls) == null ? void 0 : _n.map((toolCall) => {
+ ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
  var _a2;
  return {
  toolCallType: "function",
@@ -572,8 +577,8 @@ var OpenAIChatLanguageModel = class {
  }),
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_p = (_o = response.usage) == null ? void 0 : _o.prompt_tokens) != null ? _p : NaN,
- completionTokens: (_r = (_q = response.usage) == null ? void 0 : _q.completion_tokens) != null ? _r : NaN
+ promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
+ completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
  },
  rawCall: { rawPrompt, rawSettings },
  rawResponse: { headers: responseHeaders },
@@ -652,12 +657,12 @@ var OpenAIChatLanguageModel = class {
  let logprobs;
  let isFirstChunk = true;
  const { useLegacyFunctionCalling } = this.settings;
- let providerMetadata;
+ const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
  new TransformStream({
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -677,22 +682,27 @@ var OpenAIChatLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: (_a = value.usage.prompt_tokens) != null ? _a : void 0,
- completionTokens: (_b = value.usage.completion_tokens) != null ? _b : void 0
- };
  const {
- completion_tokens_details: completionTokenDetails,
- prompt_tokens_details: promptTokenDetails
+ prompt_tokens,
+ completion_tokens,
+ prompt_tokens_details,
+ completion_tokens_details
  } = value.usage;
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null || (promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata = { openai: {} };
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
- }
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
- }
+ usage = {
+ promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
+ completionTokens: completion_tokens != null ? completion_tokens : void 0
+ };
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
+ providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+ }
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+ }
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+ }
+ if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
+ providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
  }
  }
  const choice = value.choices[0];
@@ -740,7 +750,7 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_c = toolCallDelta.function) == null ? void 0 : _c.name) == null) {
+ if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
  throw new import_provider3.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -751,12 +761,12 @@ var OpenAIChatLanguageModel = class {
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
+ arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null) {
+ if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
  type: "tool-call-delta",
@@ -770,7 +780,7 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_g = toolCall2.id) != null ? _g : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -783,21 +793,21 @@ var OpenAIChatLanguageModel = class {
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
- toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
+ if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
+ toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
  }
  controller.enqueue({
  type: "tool-call-delta",
  toolCallType: "function",
  toolCallId: toolCall.id,
  toolName: toolCall.function.name,
- argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
+ argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
  });
- if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_n = toolCall.id) != null ? _n : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
@@ -835,7 +845,9 @@ var openaiTokenUsageSchema = import_zod2.z.object({
  cached_tokens: import_zod2.z.number().nullish()
  }).nullish(),
  completion_tokens_details: import_zod2.z.object({
- reasoning_tokens: import_zod2.z.number().nullish()
+ reasoning_tokens: import_zod2.z.number().nullish(),
+ accepted_prediction_tokens: import_zod2.z.number().nullish(),
+ rejected_prediction_tokens: import_zod2.z.number().nullish()
  }).nullish()
  }).nullish();
  var openaiChatResponseSchema = import_zod2.z.object({
@@ -1376,12 +1388,20 @@ var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
  // src/openai-image-model.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
  var import_zod5 = require("zod");
+ var modelMaxImagesPerCall = {
+ "dall-e-3": 1,
+ "dall-e-2": 10
+ };
  var OpenAIImageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v1";
  this.modelId = modelId;
  this.config = config;
  }
+ get maxImagesPerCall() {
+ var _a;
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
+ }
  get provider() {
  return this.config.provider;
  }
@@ -1389,11 +1409,24 @@ var OpenAIImageModel = class {
  prompt,
  n,
  size,
+ aspectRatio,
+ seed,
  providerOptions,
  headers,
  abortSignal
  }) {
  var _a;
+ const warnings = [];
+ if (aspectRatio != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "aspectRatio",
+ details: "This model does not support aspect ratio. Use `size` instead."
+ });
+ }
+ if (seed != null) {
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
+ }
  const { value: response } = await (0, import_provider_utils6.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
@@ -1416,7 +1449,8 @@ var OpenAIImageModel = class {
  fetch: this.config.fetch
  });
  return {
- images: response.data.map((item) => item.b64_json)
+ images: response.data.map((item) => item.b64_json),
+ warnings
  };
  }
  };
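The image-model changes above add per-model call limits (`maxImagesPerCall`: 1 for dall-e-3, 10 for dall-e-2) and downgrade unsupported settings from thrown errors to warnings on the result. A minimal sketch of the caller-side effect, assuming a companion `ai` 4.0.x release that accepts the new `aspectRatio` and `seed` options (named in the 1.0.14 changelog) and exposes the model's warnings on the result:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { image, warnings } = await generateImage({
  model: openai.image('dall-e-3'),
  prompt: 'A watercolor lighthouse at dawn',
  size: '1024x1792',
  aspectRatio: '16:9', // not supported by OpenAI: now reported as a warning, not an error
  seed: 42, // likewise unsupported; surfaces as an unsupported-setting warning
});

// e.g. [{ type: 'unsupported-setting', setting: 'aspectRatio', details: '...' },
//       { type: 'unsupported-setting', setting: 'seed' }]
console.log(warnings);
```

The new `maxImagesPerCall` getter lets the caller split a request for `n` images into appropriately sized batches instead of failing on models like dall-e-3 that accept only one image per call.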