@ai-sdk/openai 1.0.14 → 1.0.16

package/CHANGELOG.md CHANGED
@@ -1,5 +1,22 @@
 # @ai-sdk/openai
 
+## 1.0.16
+
+### Patch Changes
+
+- Updated dependencies [90fb95a]
+- Updated dependencies [e6dfef4]
+- Updated dependencies [6636db6]
+  - @ai-sdk/provider-utils@2.0.7
+
+## 1.0.15
+
+### Patch Changes
+
+- f8c6acb: feat (provider/openai): automatically simulate streaming for reasoning models
+- d0041f7: feat (provider/openai): improved system message support for reasoning models
+- 4d2f97b: feat (provider/openai): improve automatic setting removal for reasoning models
+
 ## 1.0.14
 
 ### Patch Changes
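
The three 1.0.15 patches above all target OpenAI's reasoning models (the o1 family), and they surface to callers through the `warnings` mechanism rather than as errors. A minimal sketch, assuming the standard `generateText` entry point from the `ai` package (not part of this diff):

```js
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// For o1-mini the provider now drops the system message and strips
// temperature, pushing a warning for each instead of failing the call.
const { text, warnings } = await generateText({
  model: openai("o1-mini"),
  system: "You are a terse assistant.", // removed for o1-mini (see below)
  temperature: 0.7, // unsupported for reasoning models (see below)
  prompt: "Why is the sky blue?",
});

console.log(warnings); // the removals are reported here
```

The hunks below show where each of those behaviors lives in the compiled output.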
package/dist/index.js CHANGED
@@ -38,13 +38,32 @@ var import_provider = require("@ai-sdk/provider");
 var import_provider_utils = require("@ai-sdk/provider-utils");
 function convertToOpenAIChatMessages({
   prompt,
-  useLegacyFunctionCalling = false
+  useLegacyFunctionCalling = false,
+  systemMessageMode = "system"
 }) {
   const messages = [];
   for (const { role, content } of prompt) {
     switch (role) {
       case "system": {
-        messages.push({ role: "system", content });
+        switch (systemMessageMode) {
+          case "system": {
+            messages.push({ role: "system", content });
+            break;
+          }
+          case "developer": {
+            messages.push({ role: "developer", content });
+            break;
+          }
+          case "remove": {
+            break;
+          }
+          default: {
+            const _exhaustiveCheck = systemMessageMode;
+            throw new Error(
+              `Unsupported system message mode: ${_exhaustiveCheck}`
+            );
+          }
+        }
         break;
       }
       case "user": {
@@ -403,6 +422,12 @@ var OpenAIChatLanguageModel = class {
         functionality: "structuredOutputs with useLegacyFunctionCalling"
       });
     }
+    if (getSystemMessageMode(this.modelId) === "remove" && prompt.some((message) => message.role === "system")) {
+      warnings.push({
+        type: "other",
+        message: "system messages are removed for this model"
+      });
+    }
     const baseArgs = {
       // model id:
       model: this.modelId,
@@ -438,14 +463,64 @@ var OpenAIChatLanguageModel = class {
       // messages:
       messages: convertToOpenAIChatMessages({
         prompt,
-        useLegacyFunctionCalling
+        useLegacyFunctionCalling,
+        systemMessageMode: getSystemMessageMode(this.modelId)
       })
     };
     if (isReasoningModel(this.modelId)) {
-      baseArgs.temperature = void 0;
-      baseArgs.top_p = void 0;
-      baseArgs.frequency_penalty = void 0;
-      baseArgs.presence_penalty = void 0;
+      if (baseArgs.temperature != null) {
+        baseArgs.temperature = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "temperature",
+          details: "temperature is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.top_p != null) {
+        baseArgs.top_p = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "topP",
+          details: "topP is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.frequency_penalty != null) {
+        baseArgs.frequency_penalty = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "frequencyPenalty",
+          details: "frequencyPenalty is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.presence_penalty != null) {
+        baseArgs.presence_penalty = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "presencePenalty",
+          details: "presencePenalty is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.logit_bias != null) {
+        baseArgs.logit_bias = void 0;
+        warnings.push({
+          type: "other",
+          message: "logitBias is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.logprobs != null) {
+        baseArgs.logprobs = void 0;
+        warnings.push({
+          type: "other",
+          message: "logprobs is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.top_logprobs != null) {
+        baseArgs.top_logprobs = void 0;
+        warnings.push({
+          type: "other",
+          message: "topLogprobs is not supported for reasoning models"
+        });
+      }
     }
     switch (type) {
       case "regular": {
@@ -590,7 +665,8 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doStream(options) {
-    if (this.settings.simulateStreaming) {
+    var _a;
+    if ((_a = this.settings.simulateStreaming) != null ? _a : isStreamingSimulatedByDefault(this.modelId)) {
       const result = await this.doGenerate(options);
       const simulatedStream = new ReadableStream({
         start(controller) {
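
`doStream` previously simulated streaming only when `simulateStreaming: true` was set explicitly. The compiled ternary above is the nullish-coalescing pattern, so the setting now defaults per model while an explicit value (including `false`) still wins. In source form the check is roughly:

```js
// Equivalent of the compiled check: an explicit setting wins; otherwise
// isStreamingSimulatedByDefault(modelId) decides (see the last hunk).
if (this.settings.simulateStreaming ?? isStreamingSimulatedByDefault(this.modelId)) {
  // run doGenerate() once and replay the result as a simulated stream
}
```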
@@ -662,7 +738,7 @@ var OpenAIChatLanguageModel = class {
       stream: response.pipeThrough(
         new TransformStream({
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+            var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -750,7 +826,7 @@ var OpenAIChatLanguageModel = class {
                 message: `Expected 'id' to be a string.`
               });
             }
-            if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+            if (((_a2 = toolCallDelta.function) == null ? void 0 : _a2.name) == null) {
               throw new import_provider3.InvalidResponseDataError({
                 data: toolCallDelta,
                 message: `Expected 'function.name' to be a string.`
@@ -817,13 +893,13 @@ var OpenAIChatLanguageModel = class {
             }
           },
           flush(controller) {
-            var _a, _b;
+            var _a2, _b;
             controller.enqueue({
               type: "finish",
               finishReason,
               logprobs,
               usage: {
-                promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
+                promptTokens: (_a2 = usage.promptTokens) != null ? _a2 : NaN,
                 completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
               },
               ...providerMetadata != null ? { providerMetadata } : {}
@@ -948,6 +1024,30 @@ function isReasoningModel(modelId) {
 function isAudioModel(modelId) {
   return modelId.startsWith("gpt-4o-audio-preview");
 }
+function getSystemMessageMode(modelId) {
+  var _a, _b;
+  if (!isReasoningModel(modelId)) {
+    return "system";
+  }
+  return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
+}
+function isStreamingSimulatedByDefault(modelId) {
+  var _a, _b;
+  if (!isReasoningModel(modelId)) {
+    return false;
+  }
+  return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.simulateStreamingByDefault) != null ? _b : true;
+}
+var reasoningModels = {
+  "o1-mini": {
+    systemMessageMode: "remove",
+    simulateStreamingByDefault: false
+  },
+  "o1-preview": {
+    systemMessageMode: "remove",
+    simulateStreamingByDefault: false
+  }
+};
 
 // src/openai-completion-language-model.ts
 var import_provider5 = require("@ai-sdk/provider");
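
Both new helpers consult the `reasoningModels` table: reasoning models without an entry default to the `developer` system-message role and to simulated streaming, while `o1-mini` and `o1-preview` opt out of both. Assuming `isReasoningModel` matches the o1 family (its body is outside this diff), the helpers resolve like this:

```js
getSystemMessageMode("gpt-4o");           // "system" (not a reasoning model)
getSystemMessageMode("o1-mini");          // "remove" (per the table above)
isStreamingSimulatedByDefault("gpt-4o");  // false (not a reasoning model)
isStreamingSimulatedByDefault("o1-mini"); // false (opted out in the table)
// any reasoning model without a table entry falls back to
// systemMessageMode "developer" and simulateStreamingByDefault true
```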