@ai-sdk/openai 1.3.9 → 1.3.10

@@ -520,6 +520,15 @@ var OpenAIChatLanguageModel = class {
         }
         baseArgs.max_tokens = void 0;
       }
+    } else if (this.modelId.startsWith("gpt-4o-search-preview")) {
+      if (baseArgs.temperature != null) {
+        baseArgs.temperature = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "temperature",
+          details: "temperature is not supported for the gpt-4o-search-preview model and has been removed."
+        });
+      }
     }
     switch (type) {
       case "regular": {
@@ -1615,17 +1624,11 @@ import {
 } from "@ai-sdk/provider-utils";
 import { z as z6 } from "zod";
 var OpenAIProviderOptionsSchema = z6.object({
-  include: z6.array(z6.string()).optional().describe(
-    "Additional information to include in the transcription response."
-  ),
-  language: z6.string().optional().describe("The language of the input audio in ISO-639-1 format."),
-  prompt: z6.string().optional().describe(
-    "An optional text to guide the model's style or continue a previous audio segment."
-  ),
-  temperature: z6.number().min(0).max(1).optional().default(0).describe("The sampling temperature, between 0 and 1."),
-  timestampGranularities: z6.array(z6.enum(["word", "segment"])).optional().default(["segment"]).describe(
-    "The timestamp granularities to populate for this transcription."
-  )
+  include: z6.array(z6.string()).nullish(),
+  language: z6.string().nullish(),
+  prompt: z6.string().nullish(),
+  temperature: z6.number().min(0).max(1).nullish().default(0),
+  timestampGranularities: z6.array(z6.enum(["word", "segment"])).nullish().default(["segment"])
 });
 var languageMap = {
   afrikaans: "af",
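
The transcription provider-options schema drops the .describe() metadata and replaces .optional() with .nullish(), so an explicit null coming from JSON-serialized provider options now validates instead of failing the parse. A standalone zod sketch of that difference (plain z import instead of the bundled z6 alias):

import { z } from "zod";

const optionalOnly = z.object({ prompt: z.string().optional() });
const nullishToo = z.object({ prompt: z.string().nullish() });

optionalOnly.safeParse({ prompt: null }).success; // false: .optional() still rejects null
nullishToo.safeParse({ prompt: null }).success;   // true: .nullish() = .nullable().optional()
nullishToo.safeParse({}).success;                 // true: the key may also be omitted entirely
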
@@ -1700,6 +1703,7 @@ var OpenAITranscriptionModel = class {
     mediaType,
     providerOptions
   }) {
+    var _a, _b, _c, _d, _e;
     const warnings = [];
     const openAIOptions = parseProviderOptions({
       provider: "openai",
@@ -1712,16 +1716,16 @@ var OpenAITranscriptionModel = class {
     formData.append("file", new File([blob], "audio", { type: mediaType }));
     if (openAIOptions) {
       const transcriptionModelOptions = {
-        include: openAIOptions.include,
-        language: openAIOptions.language,
-        prompt: openAIOptions.prompt,
-        temperature: openAIOptions.temperature,
-        timestamp_granularities: openAIOptions.timestampGranularities
+        include: (_a = openAIOptions.include) != null ? _a : void 0,
+        language: (_b = openAIOptions.language) != null ? _b : void 0,
+        prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+        temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+        timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
       };
       for (const key in transcriptionModelOptions) {
         const value = transcriptionModelOptions[key];
         if (value !== void 0) {
-          formData.append(key, value);
+          formData.append(key, String(value));
         }
       }
     }
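
Since the schema now admits null, the request builder coalesces each option back to undefined before building the multipart body (the (_a = x) != null ? _a : void 0 pattern is the compiled form of x ?? undefined), and String(value) ensures non-string values such as the temperature number or the granularities array are appended to the FormData as strings. A rough usage sketch of the transcription path this touches, assuming the experimental_transcribe helper from the ai package and the openai.transcription() factory:

import { readFile } from "node:fs/promises";
import { openai } from "@ai-sdk/openai";
import { experimental_transcribe as transcribe } from "ai";

const result = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("./audio.mp3"),
  providerOptions: {
    openai: {
      language: null,                   // accepted by the nullish schema, coalesced to undefined and skipped
      temperature: 0.2,                 // appended to the form as the string "0.2"
      timestampGranularities: ["word"], // String() turns the array into "word" before formData.append
    },
  },
});

console.log(result.text);
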