@ai-sdk/openai 3.0.0-beta.102 → 3.0.0-beta.105

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,26 @@
  # @ai-sdk/openai
 
+ ## 3.0.0-beta.105
+
+ ### Patch Changes
+
+ - 88574c1: Change `isReasoningModel` detection from blocklist to allowlist and add override option
+
+ ## 3.0.0-beta.104
+
+ ### Patch Changes
+
+ - 61c52dc: feat (provider/openai): add gpt-image-1.5 model support
+
+ ## 3.0.0-beta.103
+
+ ### Patch Changes
+
+ - 366f50b: chore(provider): add deprecated textEmbeddingModel and textEmbedding aliases
+ - Updated dependencies [366f50b]
+   - @ai-sdk/provider@3.0.0-beta.27
+   - @ai-sdk/provider-utils@4.0.0-beta.53
+
  ## 3.0.0-beta.102
 
  ### Patch Changes
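
Note: the beta.105 entry corresponds to the `systemMessageMode` and `forceReasoning` provider options added throughout the typings and runtime below. A minimal usage sketch (the gateway URL and model id are hypothetical placeholders for an unrecognized reasoning model served through a custom `baseURL`):

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Hypothetical OpenAI-compatible gateway serving a reasoning model
// under an id that is not on the SDK's allowlist.
const openai = createOpenAI({ baseURL: 'https://my-gateway.example/v1' });

const { text } = await generateText({
  model: openai('my-gateway-reasoning-model'),
  prompt: 'Summarize the migration plan in three bullets.',
  providerOptions: {
    openai: {
      // Opt in to reasoning-model handling for the unrecognized id:
      // parameter stripping plus 'developer' system messages by default.
      forceReasoning: true,
      // Optionally pin the system message role explicitly.
      systemMessageMode: 'developer',
    },
  },
});
```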
package/dist/index.d.mts CHANGED
@@ -20,6 +20,8 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
  promptCacheKey?: string | undefined;
  promptCacheRetention?: "in_memory" | "24h" | undefined;
  safetyIdentifier?: string | undefined;
+ systemMessageMode?: "remove" | "system" | "developer" | undefined;
+ forceReasoning?: boolean | undefined;
  }>;
  type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
 
@@ -27,7 +29,7 @@ type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
 
- type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | (string & {});
+ type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | (string & {});
 
  declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
  /**
@@ -464,6 +466,8 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyS
  textVerbosity?: "low" | "medium" | "high" | null | undefined;
  truncation?: "auto" | "disabled" | null | undefined;
  user?: string | null | undefined;
+ systemMessageMode?: "remove" | "system" | "developer" | undefined;
+ forceReasoning?: boolean | undefined;
  }>;
  type OpenAIResponsesProviderOptions = InferSchema<typeof openaiResponsesProviderOptionsSchema>;
 
@@ -498,6 +502,14 @@ interface OpenAIProvider extends ProviderV3 {
  */
  embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
  /**
+ * @deprecated Use `embedding` instead.
+ */
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+ /**
+ * @deprecated Use `embeddingModel` instead.
+ */
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+ /**
  Creates a model for image generation.
  */
  image(modelId: OpenAIImageModelId): ImageModelV3;
package/dist/index.d.ts CHANGED
@@ -20,6 +20,8 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
  promptCacheKey?: string | undefined;
  promptCacheRetention?: "in_memory" | "24h" | undefined;
  safetyIdentifier?: string | undefined;
+ systemMessageMode?: "remove" | "system" | "developer" | undefined;
+ forceReasoning?: boolean | undefined;
  }>;
  type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
 
@@ -27,7 +29,7 @@ type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
 
- type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | (string & {});
+ type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | (string & {});
 
  declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
  /**
@@ -464,6 +466,8 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyS
  textVerbosity?: "low" | "medium" | "high" | null | undefined;
  truncation?: "auto" | "disabled" | null | undefined;
  user?: string | null | undefined;
+ systemMessageMode?: "remove" | "system" | "developer" | undefined;
+ forceReasoning?: boolean | undefined;
  }>;
  type OpenAIResponsesProviderOptions = InferSchema<typeof openaiResponsesProviderOptionsSchema>;
 
@@ -498,6 +502,14 @@ interface OpenAIProvider extends ProviderV3 {
  */
  embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
  /**
+ * @deprecated Use `embedding` instead.
+ */
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+ /**
+ * @deprecated Use `embeddingModel` instead.
+ */
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+ /**
  Creates a model for image generation.
  */
  image(modelId: OpenAIImageModelId): ImageModelV3;
package/dist/index.js CHANGED
@@ -56,7 +56,7 @@ var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
  function getOpenAILanguageModelCapabilities(modelId) {
  const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
  const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
- const isReasoningModel = !(modelId.startsWith("gpt-3") || modelId.startsWith("gpt-4") || modelId.startsWith("chatgpt-4o") || modelId.startsWith("gpt-5-chat"));
+ const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("codex-mini") || modelId.startsWith("computer-use-preview") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
  const systemMessageMode = isReasoningModel ? "developer" : "system";
  return {
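
The practical effect of the blocklist-to-allowlist switch above: unknown model ids now default to non-reasoning instead of reasoning. The predicate is internal (not exported), but restated here as a sketch for illustration:

```ts
// Restatement of the internal allowlist check, for illustration only.
const isReasoningModel = (modelId: string): boolean =>
  modelId.startsWith('o1') ||
  modelId.startsWith('o3') ||
  modelId.startsWith('o4-mini') ||
  modelId.startsWith('codex-mini') ||
  modelId.startsWith('computer-use-preview') ||
  (modelId.startsWith('gpt-5') && !modelId.startsWith('gpt-5-chat'));

isReasoningModel('gpt-5.1');        // true  ('gpt-5' prefix)
isReasoningModel('gpt-5-chat');     // false (explicitly excluded)
isReasoningModel('gpt-4o');         // false (unchanged from before)
isReasoningModel('my-proxy-model'); // false (was true under the old blocklist)
```

Unrecognized ids that do reason can opt back in with the `forceReasoning` provider option added below.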
@@ -556,7 +556,26 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
  * username or email address, in order to avoid sending us any identifying
  * information.
  */
- safetyIdentifier: import_v43.z.string().optional()
+ safetyIdentifier: import_v43.z.string().optional(),
+ /**
+ * Override the system message mode for this model.
+ * - 'system': Use the 'system' role for system messages (default for most models)
+ * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+ * - 'remove': Remove system messages entirely
+ *
+ * If not specified, the mode is automatically determined based on the model.
+ */
+ systemMessageMode: import_v43.z.enum(["system", "developer", "remove"]).optional(),
+ /**
+ * Force treating this model as a reasoning model.
+ *
+ * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+ * where the model ID is not recognized by the SDK's allowlist.
+ *
+ * When enabled, the SDK applies reasoning-model parameter compatibility rules
+ * and defaults `systemMessageMode` to `developer` unless overridden.
+ */
+ forceReasoning: import_v43.z.boolean().optional()
  })
  )
  );
@@ -651,7 +670,7 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c;
+ var _a, _b, _c, _d, _e;
  const warnings = [];
  const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
  provider: "openai",
@@ -659,17 +678,18 @@ var OpenAIChatLanguageModel = class {
  schema: openaiChatLanguageModelOptions
  })) != null ? _a : {};
  const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+ const isReasoningModel = (_b = openaiOptions.forceReasoning) != null ? _b : modelCapabilities.isReasoningModel;
  if (topK != null) {
  warnings.push({ type: "unsupported", feature: "topK" });
  }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- systemMessageMode: modelCapabilities.systemMessageMode
+ systemMessageMode: (_c = openaiOptions.systemMessageMode) != null ? _c : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode
  }
  );
  warnings.push(...messageWarnings);
- const strictJsonSchema = (_b = openaiOptions.strictJsonSchema) != null ? _b : true;
+ const strictJsonSchema = (_d = openaiOptions.strictJsonSchema) != null ? _d : true;
  const baseArgs = {
  // model id:
  model: this.modelId,
@@ -690,7 +710,7 @@ var OpenAIChatLanguageModel = class {
  json_schema: {
  schema: responseFormat.schema,
  strict: strictJsonSchema,
- name: (_c = responseFormat.name) != null ? _c : "response",
+ name: (_e = responseFormat.name) != null ? _e : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
@@ -711,7 +731,7 @@ var OpenAIChatLanguageModel = class {
  // messages:
  messages
  };
- if (modelCapabilities.isReasoningModel) {
+ if (isReasoningModel) {
  if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
  if (baseArgs.temperature != null) {
  baseArgs.temperature = void 0;
@@ -1706,11 +1726,13 @@ var modelMaxImagesPerCall = {
  "dall-e-3": 1,
  "dall-e-2": 10,
  "gpt-image-1": 10,
- "gpt-image-1-mini": 10
+ "gpt-image-1-mini": 10,
+ "gpt-image-1.5": 10
  };
  var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
  "gpt-image-1",
- "gpt-image-1-mini"
+ "gpt-image-1-mini",
+ "gpt-image-1.5"
  ]);
 
  // src/image/openai-image-model.ts
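
With `gpt-image-1.5` registered above (10 images per call, server-default response format, so no `response_format: "b64_json"` is sent), generation works like the other `gpt-image` models. A minimal sketch, assuming the `experimental_generateImage` helper exported by the `ai` package:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

const { image } = await generateImage({
  model: openai.image('gpt-image-1.5'),
  prompt: 'A watercolor lighthouse at dawn',
  n: 1,
  size: '1024x1024',
});

// Images come back base64-encoded (b64_json).
console.log(image.base64.slice(0, 32));
```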
@@ -1729,6 +1751,8 @@ var OpenAIImageModel = class {
  }
  async doGenerate({
  prompt,
+ files,
+ mask,
  n,
  size,
  aspectRatio,
@@ -1737,7 +1761,7 @@ var OpenAIImageModel = class {
  headers,
  abortSignal
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
  const warnings = [];
  if (aspectRatio != null) {
  warnings.push({
@@ -1750,6 +1774,72 @@ var OpenAIImageModel = class {
  warnings.push({ type: "unsupported", feature: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ if (files != null) {
+ const { value: response2, responseHeaders: responseHeaders2 } = await (0, import_provider_utils13.postFormDataToApi)({
+ url: this.config.url({
+ path: "/images/edits",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
+ formData: (0, import_provider_utils13.convertToFormData)({
+ model: this.modelId,
+ prompt,
+ image: await Promise.all(
+ files.map(
+ (file) => file.type === "file" ? new Blob(
+ [
+ file.data instanceof Uint8Array ? new Blob([file.data], {
+ type: file.mediaType
+ }) : new Blob([(0, import_provider_utils13.convertBase64ToUint8Array)(file.data)], {
+ type: file.mediaType
+ })
+ ],
+ { type: file.mediaType }
+ ) : (0, import_provider_utils13.downloadBlob)(file.url)
+ )
+ ),
+ mask: mask != null ? await fileToBlob(mask) : void 0,
+ n,
+ size,
+ ...(_d = providerOptions.openai) != null ? _d : {}
+ }),
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
+ openaiImageResponseSchema
+ ),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ images: response2.data.map((item) => item.b64_json),
+ warnings,
+ usage: response2.usage != null ? {
+ inputTokens: (_e = response2.usage.input_tokens) != null ? _e : void 0,
+ outputTokens: (_f = response2.usage.output_tokens) != null ? _f : void 0,
+ totalTokens: (_g = response2.usage.total_tokens) != null ? _g : void 0
+ } : void 0,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders2
+ },
+ providerMetadata: {
+ openai: {
+ images: response2.data.map((item) => {
+ var _a2, _b2, _c2, _d2, _e2;
+ return {
+ ...item.revised_prompt ? { revisedPrompt: item.revised_prompt } : {},
+ created: (_a2 = response2.created) != null ? _a2 : void 0,
+ size: (_b2 = response2.size) != null ? _b2 : void 0,
+ quality: (_c2 = response2.quality) != null ? _c2 : void 0,
+ background: (_d2 = response2.background) != null ? _d2 : void 0,
+ outputFormat: (_e2 = response2.output_format) != null ? _e2 : void 0
+ };
+ })
+ }
+ }
+ };
+ }
  const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
  url: this.config.url({
  path: "/images/generations",
@@ -1761,7 +1851,7 @@ var OpenAIImageModel = class {
  prompt,
  n,
  size,
- ...(_d = providerOptions.openai) != null ? _d : {},
+ ...(_h = providerOptions.openai) != null ? _h : {},
  ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
  },
  failedResponseHandler: openaiFailedResponseHandler,
@@ -1775,9 +1865,9 @@ var OpenAIImageModel = class {
  images: response.data.map((item) => item.b64_json),
  warnings,
  usage: response.usage != null ? {
- inputTokens: (_e = response.usage.input_tokens) != null ? _e : void 0,
- outputTokens: (_f = response.usage.output_tokens) != null ? _f : void 0,
- totalTokens: (_g = response.usage.total_tokens) != null ? _g : void 0
+ inputTokens: (_i = response.usage.input_tokens) != null ? _i : void 0,
+ outputTokens: (_j = response.usage.output_tokens) != null ? _j : void 0,
+ totalTokens: (_k = response.usage.total_tokens) != null ? _k : void 0
  } : void 0,
  response: {
  timestamp: currentDate,
@@ -1802,6 +1892,14 @@ var OpenAIImageModel = class {
  };
  }
  };
+ async function fileToBlob(file) {
+ if (!file) return void 0;
+ if (file.type === "url") {
+ return (0, import_provider_utils13.downloadBlob)(file.url);
+ }
+ const data = file.data instanceof Uint8Array ? file.data : (0, import_provider_utils13.convertBase64ToUint8Array)(file.data);
+ return new Blob([data], { type: file.mediaType });
+ }
 
  // src/tool/apply-patch.ts
  var import_provider_utils14 = require("@ai-sdk/provider-utils");
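
The new `/images/edits` branch fires when `files` is present on the image-model call options; `mask` is optional and converted by the `fileToBlob` helper. This diff does not show whether higher-level `ai` helpers forward `files`/`mask`, so the sketch below drives the branch by calling the model's `doGenerate` directly, using the file shapes visible in the code above (`{ type: 'file', data, mediaType }` or `{ type: 'url', url }`); the file path and mask URL are placeholders:

```ts
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const model = openai.image('gpt-image-1');

const result = await model.doGenerate({
  prompt: 'Replace the sky with a dramatic sunset',
  // Source image(s) to edit, as raw bytes plus a media type.
  files: [
    {
      type: 'file',
      data: new Uint8Array(await readFile('photo.png')), // placeholder path
      mediaType: 'image/png',
    },
  ],
  // Optional mask; the URL variant is fetched via downloadBlob.
  mask: { type: 'url', url: 'https://example.com/mask.png' }, // placeholder
  n: 1,
  size: '1024x1024',
  aspectRatio: undefined,
  seed: undefined,
  providerOptions: {},
  headers: undefined,
  abortSignal: undefined,
});

// result.images holds base64-encoded edited images (b64_json).
```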
@@ -3611,7 +3709,26 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils25.lazySchem
  * Defaults to `undefined`.
  * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
  */
- user: import_v420.z.string().nullish()
+ user: import_v420.z.string().nullish(),
+ /**
+ * Override the system message mode for this model.
+ * - 'system': Use the 'system' role for system messages (default for most models)
+ * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+ * - 'remove': Remove system messages entirely
+ *
+ * If not specified, the mode is automatically determined based on the model.
+ */
+ systemMessageMode: import_v420.z.enum(["system", "developer", "remove"]).optional(),
+ /**
+ * Force treating this model as a reasoning model.
+ *
+ * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+ * where the model ID is not recognized by the SDK's allowlist.
+ *
+ * When enabled, the SDK applies reasoning-model parameter compatibility rules
+ * and defaults `systemMessageMode` to `developer` unless overridden.
+ */
+ forceReasoning: import_v420.z.boolean().optional()
  })
  )
  );
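
The Responses API schema gains the same two options. One use of the override that differs from the chat example earlier: `systemMessageMode: 'remove'` drops system messages entirely before the request is built. A short sketch with the `openai.responses` factory registered later in this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.responses('gpt-5-chat-latest'),
  system: 'You are terse.', // stripped before sending
  prompt: 'Name three prime numbers.',
  providerOptions: {
    openai: { systemMessageMode: 'remove' },
  },
});
```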
@@ -3822,7 +3939,7 @@ var OpenAIResponsesLanguageModel = class {
  toolChoice,
  responseFormat
  }) {
- var _a, _b, _c, _d;
+ var _a, _b, _c, _d, _e, _f;
  const warnings = [];
  const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
  if (topK != null) {
@@ -3845,6 +3962,7 @@ var OpenAIResponsesLanguageModel = class {
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
+ const isReasoningModel = (_a = openaiOptions == null ? void 0 : openaiOptions.forceReasoning) != null ? _a : modelCapabilities.isReasoningModel;
  if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
  warnings.push({
  type: "unsupported",
@@ -3869,15 +3987,15 @@ var OpenAIResponsesLanguageModel = class {
  const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
  prompt,
  toolNameMapping,
- systemMessageMode: modelCapabilities.systemMessageMode,
+ systemMessageMode: (_b = openaiOptions == null ? void 0 : openaiOptions.systemMessageMode) != null ? _b : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode,
  fileIdPrefixes: this.config.fileIdPrefixes,
- store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true,
+ store: (_c = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _c : true,
  hasLocalShellTool: hasOpenAITool("openai.local_shell"),
  hasShellTool: hasOpenAITool("openai.shell"),
  hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
  });
  warnings.push(...inputWarnings);
- const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : true;
+ const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
  let include = openaiOptions == null ? void 0 : openaiOptions.include;
  function addInclude(key) {
  if (include == null) {
@@ -3893,9 +4011,9 @@ var OpenAIResponsesLanguageModel = class {
  if (topLogprobs) {
  addInclude("message.output_text.logprobs");
  }
- const webSearchToolName = (_c = tools == null ? void 0 : tools.find(
+ const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
  (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
- )) == null ? void 0 : _c.name;
+ )) == null ? void 0 : _e.name;
  if (webSearchToolName) {
  addInclude("web_search_call.action.sources");
  }
@@ -3903,7 +4021,7 @@ var OpenAIResponsesLanguageModel = class {
  addInclude("code_interpreter_call.outputs");
  }
  const store = openaiOptions == null ? void 0 : openaiOptions.store;
- if (store === false && modelCapabilities.isReasoningModel) {
+ if (store === false && isReasoningModel) {
  addInclude("reasoning.encrypted_content");
  }
  const baseArgs = {
@@ -3918,7 +4036,7 @@ var OpenAIResponsesLanguageModel = class {
  format: responseFormat.schema != null ? {
  type: "json_schema",
  strict: strictJsonSchema,
- name: (_d = responseFormat.name) != null ? _d : "response",
+ name: (_f = responseFormat.name) != null ? _f : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
  } : { type: "json_object" }
@@ -3945,7 +4063,7 @@ var OpenAIResponsesLanguageModel = class {
  top_logprobs: topLogprobs,
  truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
  // model-specific settings:
- ...modelCapabilities.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
  ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
  effort: openaiOptions.reasoningEffort
@@ -3956,7 +4074,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  }
  };
- if (modelCapabilities.isReasoningModel) {
+ if (isReasoningModel) {
  if (!((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
  if (baseArgs.temperature != null) {
  baseArgs.temperature = void 0;
@@ -5493,7 +5611,7 @@ var OpenAITranscriptionModel = class {
  };
 
  // src/version.ts
- var VERSION = true ? "3.0.0-beta.102" : "0.0.0-test";
+ var VERSION = true ? "3.0.0-beta.105" : "0.0.0-test";
 
  // src/openai-provider.ts
  function createOpenAI(options = {}) {
@@ -5581,6 +5699,8 @@ function createOpenAI(options = {}) {
  provider.responses = createResponsesModel;
  provider.embedding = createEmbeddingModel;
  provider.embeddingModel = createEmbeddingModel;
+ provider.textEmbedding = createEmbeddingModel;
+ provider.textEmbeddingModel = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
  provider.transcription = createTranscriptionModel;
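
The aliases registered above keep code written against the older `textEmbedding`/`textEmbeddingModel` naming working while it migrates. A short sketch:

```ts
import { openai } from '@ai-sdk/openai';
import { embed } from 'ai';

// Deprecated aliases, still functional as of beta.103:
const legacy = openai.textEmbeddingModel('text-embedding-3-small');

// Preferred replacements:
const current = openai.embeddingModel('text-embedding-3-small');

const { embedding } = await embed({
  model: current,
  value: 'sunny day at the beach',
});
```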