@ai-sdk/openai 1.0.13 → 1.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,25 @@
  # @ai-sdk/openai

+ ## 1.0.15
+
+ ### Patch Changes
+
+ - f8c6acb: feat (provider/openai): automatically simulate streaming for reasoning models
+ - d0041f7: feat (provider/openai): improved system message support for reasoning models
+ - 4d2f97b: feat (provider/openai): improve automatic setting removal for reasoning models
+
+ ## 1.0.14
+
+ ### Patch Changes
+
+ - 19a2ce7: feat (ai/core): add aspectRatio and seed options to generateImage
+ - 6337688: feat: change image generation errors to warnings
+ - Updated dependencies [19a2ce7]
+ - Updated dependencies [19a2ce7]
+ - Updated dependencies [6337688]
+   - @ai-sdk/provider@1.0.4
+   - @ai-sdk/provider-utils@2.0.6
+
  ## 1.0.13

  ### Patch Changes
package/dist/index.js CHANGED
@@ -38,13 +38,32 @@ var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
  function convertToOpenAIChatMessages({
    prompt,
-   useLegacyFunctionCalling = false
+   useLegacyFunctionCalling = false,
+   systemMessageMode = "system"
  }) {
    const messages = [];
    for (const { role, content } of prompt) {
      switch (role) {
        case "system": {
-         messages.push({ role: "system", content });
+         switch (systemMessageMode) {
+           case "system": {
+             messages.push({ role: "system", content });
+             break;
+           }
+           case "developer": {
+             messages.push({ role: "developer", content });
+             break;
+           }
+           case "remove": {
+             break;
+           }
+           default: {
+             const _exhaustiveCheck = systemMessageMode;
+             throw new Error(
+               `Unsupported system message mode: ${_exhaustiveCheck}`
+             );
+           }
+         }
          break;
        }
        case "user": {
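The new `systemMessageMode` parameter decides whether a system prompt is passed through unchanged, re-tagged as an OpenAI `developer` message, or dropped entirely. A minimal usage sketch, assuming `o1` is recognized as a reasoning model and takes the `developer` default (only `o1-mini` and `o1-preview` are overridden in the table added later in this file):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// With systemMessageMode "developer", the system prompt below is forwarded
// as { role: "developer", content: ... } instead of a "system" message,
// which reasoning-model endpoints reject.
const { text } = await generateText({
  model: openai('o1'),
  system: 'Answer in exactly one sentence.',
  prompt: 'What does systemMessageMode control?',
});
```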
@@ -403,6 +422,12 @@ var OpenAIChatLanguageModel = class {
        functionality: "structuredOutputs with useLegacyFunctionCalling"
      });
    }
+   if (getSystemMessageMode(this.modelId) === "remove" && prompt.some((message) => message.role === "system")) {
+     warnings.push({
+       type: "other",
+       message: "system messages are removed for this model"
+     });
+   }
    const baseArgs = {
      // model id:
      model: this.modelId,
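When the mode resolves to `"remove"`, the system prompt is silently stripped from the request, so the check above surfaces that as a call warning. A sketch, assuming `generateText` from the matching `ai` release exposes the call warnings on its result:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// o1-mini maps to systemMessageMode: "remove", so the system prompt is
// dropped and reported rather than causing an API error.
const { warnings } = await generateText({
  model: openai('o1-mini'),
  system: 'You are a terse assistant.',
  prompt: 'One-line summary, please.',
});

console.log(warnings);
// [{ type: 'other', message: 'system messages are removed for this model' }]
```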
@@ -438,14 +463,64 @@ var OpenAIChatLanguageModel = class {
      // messages:
      messages: convertToOpenAIChatMessages({
        prompt,
-       useLegacyFunctionCalling
+       useLegacyFunctionCalling,
+       systemMessageMode: getSystemMessageMode(this.modelId)
      })
    };
    if (isReasoningModel(this.modelId)) {
-     baseArgs.temperature = void 0;
-     baseArgs.top_p = void 0;
-     baseArgs.frequency_penalty = void 0;
-     baseArgs.presence_penalty = void 0;
+     if (baseArgs.temperature != null) {
+       baseArgs.temperature = void 0;
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "temperature",
+         details: "temperature is not supported for reasoning models"
+       });
+     }
+     if (baseArgs.top_p != null) {
+       baseArgs.top_p = void 0;
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "topP",
+         details: "topP is not supported for reasoning models"
+       });
+     }
+     if (baseArgs.frequency_penalty != null) {
+       baseArgs.frequency_penalty = void 0;
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "frequencyPenalty",
+         details: "frequencyPenalty is not supported for reasoning models"
+       });
+     }
+     if (baseArgs.presence_penalty != null) {
+       baseArgs.presence_penalty = void 0;
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "presencePenalty",
+         details: "presencePenalty is not supported for reasoning models"
+       });
+     }
+     if (baseArgs.logit_bias != null) {
+       baseArgs.logit_bias = void 0;
+       warnings.push({
+         type: "other",
+         message: "logitBias is not supported for reasoning models"
+       });
+     }
+     if (baseArgs.logprobs != null) {
+       baseArgs.logprobs = void 0;
+       warnings.push({
+         type: "other",
+         message: "logprobs is not supported for reasoning models"
+       });
+     }
+     if (baseArgs.top_logprobs != null) {
+       baseArgs.top_logprobs = void 0;
+       warnings.push({
+         type: "other",
+         message: "topLogprobs is not supported for reasoning models"
+       });
+     }
    }
    switch (type) {
      case "regular": {
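Previously these sampling settings were cleared unconditionally; now each one is removed only when the caller actually set it, and every removal is reported. A hedged example of what a caller sees:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// temperature is stripped from the request before it is sent; instead of an
// API error, the call succeeds and carries an "unsupported-setting" warning.
const { warnings } = await generateText({
  model: openai('o1-mini'),
  prompt: 'Explain this diff briefly.',
  temperature: 0.7, // not supported for reasoning models
});

console.log(warnings);
// [{ type: 'unsupported-setting', setting: 'temperature',
//    details: 'temperature is not supported for reasoning models' }]
```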
@@ -590,7 +665,8 @@ var OpenAIChatLanguageModel = class {
      };
    }
    async doStream(options) {
-     if (this.settings.simulateStreaming) {
+     var _a;
+     if ((_a = this.settings.simulateStreaming) != null ? _a : isStreamingSimulatedByDefault(this.modelId)) {
        const result = await this.doGenerate(options);
        const simulatedStream = new ReadableStream({
          start(controller) {
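`simulateStreaming` was previously opt-in only; it now falls back to a per-model default when the setting is left undefined, so reasoning models without native streaming still work with `streamText` out of the box. A sketch of opting out explicitly (assuming `o1` takes the simulated-streaming default):

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// Passing simulateStreaming: false overrides the new per-model default and
// requests a real streaming response from the API.
const result = streamText({
  model: openai('o1', { simulateStreaming: false }),
  prompt: 'Stream a haiku about package diffs.',
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```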
@@ -662,7 +738,7 @@ var OpenAIChatLanguageModel = class {
      stream: response.pipeThrough(
        new TransformStream({
          transform(chunk, controller) {
-           var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+           var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
@@ -750,7 +826,7 @@ var OpenAIChatLanguageModel = class {
                message: `Expected 'id' to be a string.`
              });
            }
-           if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+           if (((_a2 = toolCallDelta.function) == null ? void 0 : _a2.name) == null) {
              throw new import_provider3.InvalidResponseDataError({
                data: toolCallDelta,
                message: `Expected 'function.name' to be a string.`
@@ -817,13 +893,13 @@ var OpenAIChatLanguageModel = class {
            }
          },
          flush(controller) {
-           var _a, _b;
+           var _a2, _b;
            controller.enqueue({
              type: "finish",
              finishReason,
              logprobs,
              usage: {
-               promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
+               promptTokens: (_a2 = usage.promptTokens) != null ? _a2 : NaN,
                completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
              },
              ...providerMetadata != null ? { providerMetadata } : {}
@@ -948,6 +1024,30 @@ function isReasoningModel(modelId) {
  function isAudioModel(modelId) {
    return modelId.startsWith("gpt-4o-audio-preview");
  }
+ function getSystemMessageMode(modelId) {
+   var _a, _b;
+   if (!isReasoningModel(modelId)) {
+     return "system";
+   }
+   return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
+ }
+ function isStreamingSimulatedByDefault(modelId) {
+   var _a, _b;
+   if (!isReasoningModel(modelId)) {
+     return false;
+   }
+   return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.simulateStreamingByDefault) != null ? _b : true;
+ }
+ var reasoningModels = {
+   "o1-mini": {
+     systemMessageMode: "remove",
+     simulateStreamingByDefault: false
+   },
+   "o1-preview": {
+     systemMessageMode: "remove",
+     simulateStreamingByDefault: false
+   }
+ };

  // src/openai-completion-language-model.ts
  var import_provider5 = require("@ai-sdk/provider");
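The two helpers encode the fallback rules: non-reasoning models keep plain `system` messages and never simulate streaming, while reasoning models default to `developer` mode with simulated streaming unless the table overrides them. A self-contained TypeScript sketch of the same logic (`isReasoningModel` is assumed here to match on the `o1` prefix, mirroring its use elsewhere in this file):

```ts
type SystemMessageMode = 'system' | 'developer' | 'remove';

const reasoningModels: Record<
  string,
  { systemMessageMode?: SystemMessageMode; simulateStreamingByDefault?: boolean }
> = {
  'o1-mini': { systemMessageMode: 'remove', simulateStreamingByDefault: false },
  'o1-preview': { systemMessageMode: 'remove', simulateStreamingByDefault: false },
};

// Assumption for this sketch; the real check lives earlier in index.js.
const isReasoningModel = (modelId: string) => modelId.startsWith('o1');

function getSystemMessageMode(modelId: string): SystemMessageMode {
  if (!isReasoningModel(modelId)) return 'system';
  return reasoningModels[modelId]?.systemMessageMode ?? 'developer';
}

console.log(getSystemMessageMode('gpt-4o'));  // "system"  (not a reasoning model)
console.log(getSystemMessageMode('o1'));      // "developer" (reasoning-model fallback)
console.log(getSystemMessageMode('o1-mini')); // "remove"  (explicit table entry)
```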
@@ -1388,12 +1488,20 @@ var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
  // src/openai-image-model.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
  var import_zod5 = require("zod");
+ var modelMaxImagesPerCall = {
+   "dall-e-3": 1,
+   "dall-e-2": 10
+ };
  var OpenAIImageModel = class {
    constructor(modelId, config) {
      this.specificationVersion = "v1";
      this.modelId = modelId;
      this.config = config;
    }
+   get maxImagesPerCall() {
+     var _a;
+     return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
+   }
    get provider() {
      return this.config.provider;
    }
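The new `maxImagesPerCall` getter advertises each model's per-request cap (DALL-E 3 accepts one image per call, DALL-E 2 up to ten; unknown models conservatively report one). A hedged usage sketch, assuming the matching `ai` core release uses this cap to split larger requests into batches:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

// dall-e-3 reports maxImagesPerCall = 1, so a request for three images
// would be fanned out as three single-image API calls by the core.
const { images } = await generateImage({
  model: openai.image('dall-e-3'),
  prompt: 'A watercolor fox reading a changelog',
  n: 3,
});

console.log(images.length); // 3
```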
@@ -1401,11 +1509,24 @@ var OpenAIImageModel = class {
    prompt,
    n,
    size,
+   aspectRatio,
+   seed,
    providerOptions,
    headers,
    abortSignal
  }) {
    var _a;
+   const warnings = [];
+   if (aspectRatio != null) {
+     warnings.push({
+       type: "unsupported-setting",
+       setting: "aspectRatio",
+       details: "This model does not support aspect ratio. Use `size` instead."
+     });
+   }
+   if (seed != null) {
+     warnings.push({ type: "unsupported-setting", setting: "seed" });
+   }
    const { value: response } = await (0, import_provider_utils6.postJsonToApi)({
      url: this.config.url({
        path: "/images/generations",
@@ -1428,7 +1549,8 @@ var OpenAIImageModel = class {
      fetch: this.config.fetch
    });
    return {
-     images: response.data.map((item) => item.b64_json)
+     images: response.data.map((item) => item.b64_json),
+     warnings
    };
  }
};
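Together with the changelog entry "change image generation errors to warnings", this means unsupported `aspectRatio` and `seed` options no longer fail the call. A sketch, assuming the matching `ai` release surfaces the model's warnings on the `generateImage` result:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

// Both options are accepted by the call signature but unsupported by the
// OpenAI image API, so they come back as warnings instead of throwing.
const { images, warnings } = await generateImage({
  model: openai.image('dall-e-2'),
  prompt: 'Pixel-art diff viewer',
  aspectRatio: '16:9', // unsupported-setting: use `size` instead
  seed: 42,            // unsupported-setting
});

console.log(warnings.length); // 2
```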