@ai-sdk/openai 1.1.14 → 1.2.0

This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # @ai-sdk/openai
 
+ ## 1.2.0
+
+ ### Minor Changes
+
+ - ede6d1b: feat (provider/azure): Add Azure image model support
+
+ ## 1.1.15
+
+ ### Patch Changes
+
+ - d8216f8: feat (provider/openai): add gpt-4.5-preview to model id set
+
  ## 1.1.14
 
  ### Patch Changes
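For reference, the `gpt-4.5-preview` addition from 1.1.15 can be exercised as below. This is a minimal sketch assuming the usual `createOpenAI` factory from this package and `generateText` from the `ai` package; neither call appears in this diff.

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

// The provider reads OPENAI_API_KEY from the environment by default.
const openai = createOpenAI();

const { text } = await generateText({
  // 'gpt-4.5-preview' (and its dated snapshot) now type-checks as a known id
  // instead of falling through to the `(string & {})` escape hatch.
  model: openai('gpt-4.5-preview'),
  prompt: 'Say hello in one short sentence.',
});

console.log(text);
```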
package/dist/index.d.mts CHANGED
@@ -1,7 +1,7 @@
  import { LanguageModelV1, ProviderV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
 
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
  interface OpenAIChatSettings {
    /**
    Modify the likelihood of specified tokens appearing in the completion.
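The trailing `(string & {})` member in this union is a deliberate pattern: it keeps the type open to arbitrary model ids while preserving autocompletion for the listed literals. A standalone illustration with hypothetical ids:

```ts
// Illustrative only: `(string & {})` is assignable from any string, yet it
// prevents TypeScript from widening the whole union to plain `string`, so
// the literal members still drive editor autocompletion.
type ModelId = 'gpt-4.5-preview' | 'gpt-4o' | (string & {});

const a: ModelId = 'gpt-4o';             // known literal, autocompleted
const b: ModelId = 'ft:gpt-4o:my-org-1'; // arbitrary id still allowed
```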
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { LanguageModelV1, ProviderV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
 
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
  interface OpenAIChatSettings {
    /**
    Modify the likelihood of specified tokens appearing in the completion.
@@ -1,7 +1,7 @@
- import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider';
+ import { LanguageModelV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
 
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
  interface OpenAIChatSettings {
    /**
    Modify the likelihood of specified tokens appearing in the completion.
@@ -214,4 +214,30 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV1<string> {
  doEmbed({ values, headers, abortSignal, }: Parameters<EmbeddingModelV1<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV1<string>['doEmbed']>>>;
  }
 
- export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings };
+ type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+ declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
+ interface OpenAIImageSettings {
+   /**
+   Override the maximum number of images per call (default is dependent on the
+   model, or 1 for an unknown model).
+   */
+   maxImagesPerCall?: number;
+ }
+
+ interface OpenAIImageModelConfig extends OpenAIConfig {
+   _internal?: {
+     currentDate?: () => Date;
+   };
+ }
+ declare class OpenAIImageModel implements ImageModelV1 {
+   readonly modelId: OpenAIImageModelId;
+   private readonly settings;
+   private readonly config;
+   readonly specificationVersion = "v1";
+   get maxImagesPerCall(): number;
+   get provider(): string;
+   constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
+   doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV1['doGenerate']>>>;
+ }
+
+ export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, modelMaxImagesPerCall };
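These declarations expose the image model on the internal entry point; typical callers would reach it through the provider instead. A hedged sketch, assuming the `openai.image(...)` factory (suggested by the main entry point's `ImageModelV1` import) and the experimental `generateImage` helper from the `ai` package:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

// Assumes the provider's image factory (openai.image) and the experimental
// generateImage helper from the `ai` package; neither appears in this diff.
const { images } = await generateImage({
  model: openai.image('dall-e-2'),
  prompt: 'A watercolor lighthouse at dusk',
  n: 4, // dall-e-2 allows up to 10 images per call (see modelMaxImagesPerCall)
});

console.log(images.length); // 4 base64-encoded images
```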
@@ -1,7 +1,7 @@
- import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider';
+ import { LanguageModelV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
 
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
  interface OpenAIChatSettings {
    /**
    Modify the likelihood of specified tokens appearing in the completion.
@@ -214,4 +214,30 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV1<string> {
  doEmbed({ values, headers, abortSignal, }: Parameters<EmbeddingModelV1<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV1<string>['doEmbed']>>>;
  }
 
- export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings };
+ type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+ declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
+ interface OpenAIImageSettings {
+   /**
+   Override the maximum number of images per call (default is dependent on the
+   model, or 1 for an unknown model).
+   */
+   maxImagesPerCall?: number;
+ }
+
+ interface OpenAIImageModelConfig extends OpenAIConfig {
+   _internal?: {
+     currentDate?: () => Date;
+   };
+ }
+ declare class OpenAIImageModel implements ImageModelV1 {
+   readonly modelId: OpenAIImageModelId;
+   private readonly settings;
+   private readonly config;
+   readonly specificationVersion = "v1";
+   get maxImagesPerCall(): number;
+   get provider(): string;
+   constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
+   doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV1['doGenerate']>>>;
+ }
+
+ export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, modelMaxImagesPerCall };
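The optional `maxImagesPerCall` setting declared above overrides the per-model batch cap, for instance when a proxy enforces a lower quota. A sketch, assuming the image factory forwards its second argument as `OpenAIImageSettings`:

```ts
import { openai } from '@ai-sdk/openai';

// Assumed: the second factory argument is the OpenAIImageSettings declared
// above. For an unknown/custom model id the getter falls back to 1 unless
// this setting overrides it.
const model = openai.image('dall-e-2', { maxImagesPerCall: 5 });

console.log(model.maxImagesPerCall); // 5 (default for dall-e-2 is 10)
```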
@@ -22,7 +22,9 @@ var internal_exports = {};
  __export(internal_exports, {
    OpenAIChatLanguageModel: () => OpenAIChatLanguageModel,
    OpenAICompletionLanguageModel: () => OpenAICompletionLanguageModel,
-   OpenAIEmbeddingModel: () => OpenAIEmbeddingModel
+   OpenAIEmbeddingModel: () => OpenAIEmbeddingModel,
+   OpenAIImageModel: () => OpenAIImageModel,
+   modelMaxImagesPerCall: () => modelMaxImagesPerCall
  });
  module.exports = __toCommonJS(internal_exports);
 
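The CommonJS internal bundle now exports the image model and its per-model cap table alongside the language models. If a consumer needed them directly, usage might look like this; the entry-point path is an assumption based on sibling AI SDK providers, not confirmed by this diff:

```ts
// Sketch only: application code normally never imports the internal bundle.
// The '@ai-sdk/openai/internal' path is an assumption.
const {
  OpenAIImageModel,
  modelMaxImagesPerCall,
} = require('@ai-sdk/openai/internal');

console.log(modelMaxImagesPerCall['dall-e-2']); // 10
console.log(typeof OpenAIImageModel);           // 'function'
```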
@@ -1502,10 +1504,96 @@ var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
  data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
  usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
  });
+
+ // src/openai-image-model.ts
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_zod5 = require("zod");
+
+ // src/openai-image-settings.ts
+ var modelMaxImagesPerCall = {
+   "dall-e-3": 1,
+   "dall-e-2": 10
+ };
+
+ // src/openai-image-model.ts
+ var OpenAIImageModel = class {
+   constructor(modelId, settings, config) {
+     this.modelId = modelId;
+     this.settings = settings;
+     this.config = config;
+     this.specificationVersion = "v1";
+   }
+   get maxImagesPerCall() {
+     var _a, _b;
+     return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   async doGenerate({
+     prompt,
+     n,
+     size,
+     aspectRatio,
+     seed,
+     providerOptions,
+     headers,
+     abortSignal
+   }) {
+     var _a, _b, _c, _d;
+     const warnings = [];
+     if (aspectRatio != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "aspectRatio",
+         details: "This model does not support aspect ratio. Use `size` instead."
+       });
+     }
+     if (seed != null) {
+       warnings.push({ type: "unsupported-setting", setting: "seed" });
+     }
+     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+     const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
+       url: this.config.url({
+         path: "/images/generations",
+         modelId: this.modelId
+       }),
+       headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
+       body: {
+         model: this.modelId,
+         prompt,
+         n,
+         size,
+         ...(_d = providerOptions.openai) != null ? _d : {},
+         response_format: "b64_json"
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
+         openaiImageResponseSchema
+       ),
+       abortSignal,
+       fetch: this.config.fetch
+     });
+     return {
+       images: response.data.map((item) => item.b64_json),
+       warnings,
+       response: {
+         timestamp: currentDate,
+         modelId: this.modelId,
+         headers: responseHeaders
+       }
+     };
+   }
+ };
+ var openaiImageResponseSchema = import_zod5.z.object({
+   data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
+ });
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
    OpenAIChatLanguageModel,
    OpenAICompletionLanguageModel,
-   OpenAIEmbeddingModel
+   OpenAIEmbeddingModel,
+   OpenAIImageModel,
+   modelMaxImagesPerCall
  });
  //# sourceMappingURL=index.js.map
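Note that `doGenerate` spreads `providerOptions.openai` into the request body before pinning `response_format` to `b64_json`, so provider-specific image parameters pass through untouched, and unsupported standard settings (`aspectRatio`, `seed`) produce warnings instead of errors. A sketch assuming dall-e-3's documented `quality` and `style` options and the experimental `generateImage` helper from the `ai` package:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

const { images, warnings } = await generateImage({
  model: openai.image('dall-e-3'),
  prompt: 'An isometric pixel-art data center',
  size: '1024x1024',
  // Spread verbatim into the POST /images/generations body; `quality` and
  // `style` are documented dall-e-3 parameters, assumed here for illustration.
  providerOptions: { openai: { quality: 'hd', style: 'natural' } },
});

// Passing aspectRatio or seed instead would surface "unsupported-setting"
// warnings rather than failing the call.
console.log(warnings);
console.log(images[0].base64.slice(0, 20));
```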