@ai-sdk/openai 1.0.13 → 1.0.15

package/dist/index.mjs CHANGED
@@ -26,13 +26,32 @@ import {
 import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
 function convertToOpenAIChatMessages({
   prompt,
-  useLegacyFunctionCalling = false
+  useLegacyFunctionCalling = false,
+  systemMessageMode = "system"
 }) {
   const messages = [];
   for (const { role, content } of prompt) {
     switch (role) {
       case "system": {
-        messages.push({ role: "system", content });
+        switch (systemMessageMode) {
+          case "system": {
+            messages.push({ role: "system", content });
+            break;
+          }
+          case "developer": {
+            messages.push({ role: "developer", content });
+            break;
+          }
+          case "remove": {
+            break;
+          }
+          default: {
+            const _exhaustiveCheck = systemMessageMode;
+            throw new Error(
+              `Unsupported system message mode: ${_exhaustiveCheck}`
+            );
+          }
+        }
         break;
       }
       case "user": {
@@ -393,6 +412,12 @@ var OpenAIChatLanguageModel = class {
         functionality: "structuredOutputs with useLegacyFunctionCalling"
       });
     }
+    if (getSystemMessageMode(this.modelId) === "remove" && prompt.some((message) => message.role === "system")) {
+      warnings.push({
+        type: "other",
+        message: "system messages are removed for this model"
+      });
+    }
     const baseArgs = {
       // model id:
       model: this.modelId,
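
In practice, a system prompt sent to a "remove"-mode model (o1-mini and o1-preview in this release, per the reasoningModels table further down) is dropped and surfaced as a warning instead of triggering an API error. A hedged sketch, assuming the generateText helper from the companion ai package:

  import { generateText } from "ai";
  import { openai } from "@ai-sdk/openai";

  const { warnings } = await generateText({
    model: openai("o1-mini"),
    system: "You are concise.", // removed before the request is sent
    prompt: "Hello"
  });
  // warnings includes:
  // { type: "other", message: "system messages are removed for this model" }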
@@ -428,14 +453,64 @@ var OpenAIChatLanguageModel = class {
       // messages:
       messages: convertToOpenAIChatMessages({
         prompt,
-        useLegacyFunctionCalling
+        useLegacyFunctionCalling,
+        systemMessageMode: getSystemMessageMode(this.modelId)
       })
     };
     if (isReasoningModel(this.modelId)) {
-      baseArgs.temperature = void 0;
-      baseArgs.top_p = void 0;
-      baseArgs.frequency_penalty = void 0;
-      baseArgs.presence_penalty = void 0;
+      if (baseArgs.temperature != null) {
+        baseArgs.temperature = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "temperature",
+          details: "temperature is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.top_p != null) {
+        baseArgs.top_p = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "topP",
+          details: "topP is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.frequency_penalty != null) {
+        baseArgs.frequency_penalty = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "frequencyPenalty",
+          details: "frequencyPenalty is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.presence_penalty != null) {
+        baseArgs.presence_penalty = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "presencePenalty",
+          details: "presencePenalty is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.logit_bias != null) {
+        baseArgs.logit_bias = void 0;
+        warnings.push({
+          type: "other",
+          message: "logitBias is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.logprobs != null) {
+        baseArgs.logprobs = void 0;
+        warnings.push({
+          type: "other",
+          message: "logprobs is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.top_logprobs != null) {
+        baseArgs.top_logprobs = void 0;
+        warnings.push({
+          type: "other",
+          message: "topLogprobs is not supported for reasoning models"
+        });
+      }
     }
     switch (type) {
       case "regular": {
@@ -580,7 +655,8 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doStream(options) {
-    if (this.settings.simulateStreaming) {
+    var _a;
+    if ((_a = this.settings.simulateStreaming) != null ? _a : isStreamingSimulatedByDefault(this.modelId)) {
       const result = await this.doGenerate(options);
       const simulatedStream = new ReadableStream({
         start(controller) {
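
The (_a = x) != null ? _a : y pattern above is the bundler's down-leveling of nullish coalescing; in source terms, the new streaming decision is roughly:

  // An explicit simulateStreaming setting wins; otherwise the
  // per-model default (see isStreamingSimulatedByDefault below) decides.
  const simulate =
    this.settings.simulateStreaming ??
    isStreamingSimulatedByDefault(this.modelId);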
@@ -652,7 +728,7 @@ var OpenAIChatLanguageModel = class {
       stream: response.pipeThrough(
         new TransformStream({
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+            var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -740,7 +816,7 @@ var OpenAIChatLanguageModel = class {
                   message: `Expected 'id' to be a string.`
                 });
               }
-              if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+              if (((_a2 = toolCallDelta.function) == null ? void 0 : _a2.name) == null) {
                 throw new InvalidResponseDataError({
                   data: toolCallDelta,
                   message: `Expected 'function.name' to be a string.`
@@ -807,13 +883,13 @@ var OpenAIChatLanguageModel = class {
             }
           },
           flush(controller) {
-            var _a, _b;
+            var _a2, _b;
             controller.enqueue({
               type: "finish",
               finishReason,
               logprobs,
               usage: {
-                promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
+                promptTokens: (_a2 = usage.promptTokens) != null ? _a2 : NaN,
                 completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
               },
               ...providerMetadata != null ? { providerMetadata } : {}
@@ -938,6 +1014,30 @@ function isReasoningModel(modelId) {
 function isAudioModel(modelId) {
   return modelId.startsWith("gpt-4o-audio-preview");
 }
+function getSystemMessageMode(modelId) {
+  var _a, _b;
+  if (!isReasoningModel(modelId)) {
+    return "system";
+  }
+  return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
+}
+function isStreamingSimulatedByDefault(modelId) {
+  var _a, _b;
+  if (!isReasoningModel(modelId)) {
+    return false;
+  }
+  return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.simulateStreamingByDefault) != null ? _b : true;
+}
+var reasoningModels = {
+  "o1-mini": {
+    systemMessageMode: "remove",
+    simulateStreamingByDefault: false
+  },
+  "o1-preview": {
+    systemMessageMode: "remove",
+    simulateStreamingByDefault: false
+  }
+};
 
 // src/openai-completion-language-model.ts
 import {
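
To make the lookup behavior concrete, expected values under the table above (o1-mini and o1-preview are the only explicit entries; everything else takes the fallbacks):

  getSystemMessageMode("o1-mini");             // "remove"  (table entry)
  getSystemMessageMode("gpt-4o");              // "system"  (not a reasoning model)
  // any other reasoning-model id falls back to "developer"

  isStreamingSimulatedByDefault("o1-preview"); // false     (table entry)
  // any other reasoning-model id defaults to true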
@@ -1398,12 +1498,20 @@ import {
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
 import { z as z5 } from "zod";
+var modelMaxImagesPerCall = {
+  "dall-e-3": 1,
+  "dall-e-2": 10
+};
 var OpenAIImageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v1";
     this.modelId = modelId;
     this.config = config;
   }
+  get maxImagesPerCall() {
+    var _a;
+    return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
+  }
   get provider() {
     return this.config.provider;
   }
@@ -1411,11 +1519,24 @@ var OpenAIImageModel = class {
     prompt,
     n,
     size,
+    aspectRatio,
+    seed,
     providerOptions,
     headers,
     abortSignal
   }) {
     var _a;
+    const warnings = [];
+    if (aspectRatio != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "aspectRatio",
+        details: "This model does not support aspect ratio. Use `size` instead."
+      });
+    }
+    if (seed != null) {
+      warnings.push({ type: "unsupported-setting", setting: "seed" });
+    }
     const { value: response } = await postJsonToApi4({
       url: this.config.url({
         path: "/images/generations",
@@ -1438,7 +1559,8 @@ var OpenAIImageModel = class {
       fetch: this.config.fetch
     });
     return {
-      images: response.data.map((item) => item.b64_json)
+      images: response.data.map((item) => item.b64_json),
+      warnings
    };
  }
 };
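
Finally, a hedged end-to-end sketch of the image-model changes, assuming the experimental_generateImage helper from the ai package: maxImagesPerCall lets the SDK batch a larger n (dall-e-3 accepts one image per request, dall-e-2 up to ten), and unsupported aspectRatio/seed options now come back as warnings rather than errors:

  import { experimental_generateImage as generateImage } from "ai";
  import { openai } from "@ai-sdk/openai";

  const { images, warnings } = await generateImage({
    model: openai.image("dall-e-3"), // maxImagesPerCall = 1
    prompt: "A watercolor lighthouse at dusk",
    n: 2,    // the SDK can split this into two single-image requests
    seed: 42 // unsupported here; reported via warnings
  });
  // warnings includes { type: "unsupported-setting", setting: "seed" }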