@ai-sdk/openai 1.1.15 → 1.2.1

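In short, 1.2 adds image-generation support to the provider — a new `OpenAIImageModel` implementing the `ImageModelV1` spec, with per-model defaults for `dall-e-2` and `dall-e-3` — and the chat and completion models now forward the raw, unparsed response body as `rawResponse.body`. The annotated hunks follow.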
@@ -1,4 +1,4 @@
- import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider';
+ import { LanguageModelV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';

  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | (string & {});
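The type-definition changes start by importing the `ImageModelV1` specification interface from `@ai-sdk/provider`; the new `OpenAIImageModel` declared below implements it.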
@@ -214,4 +214,30 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV1<string> {
  doEmbed({ values, headers, abortSignal, }: Parameters<EmbeddingModelV1<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV1<string>['doEmbed']>>>;
  }

- export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings };
+ type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+ declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
+ interface OpenAIImageSettings {
+ /**
+ Override the maximum number of images per call (default is dependent on the
+ model, or 1 for an unknown model).
+ */
+ maxImagesPerCall?: number;
+ }
+
+ interface OpenAIImageModelConfig extends OpenAIConfig {
+ _internal?: {
+ currentDate?: () => Date;
+ };
+ }
+ declare class OpenAIImageModel implements ImageModelV1 {
+ readonly modelId: OpenAIImageModelId;
+ private readonly settings;
+ private readonly config;
+ readonly specificationVersion = "v1";
+ get maxImagesPerCall(): number;
+ get provider(): string;
+ constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
+ doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV1['doGenerate']>>>;
+ }
+
+ export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, modelMaxImagesPerCall };
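The hunk above is the entire public surface of the image model. A minimal usage sketch, assuming the companion `ai` package (4.1+) with its `experimental_generateImage` helper and the provider's `openai.image(...)` factory — neither of which appears in this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

// openai.image(modelId, settings) constructs an OpenAIImageModel;
// the settings object matches the OpenAIImageSettings interface above.
const model = openai.image('dall-e-2', { maxImagesPerCall: 5 });

console.log(model.maxImagesPerCall); // 5 — the override beats the per-model default (10)

const { images } = await generateImage({
  model,
  prompt: 'A watercolor lighthouse at dawn',
  size: '512x512',
});

console.log(images[0].base64); // responses arrive base64-encoded (b64_json)
```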
@@ -22,7 +22,9 @@ var internal_exports = {};
  __export(internal_exports, {
  OpenAIChatLanguageModel: () => OpenAIChatLanguageModel,
  OpenAICompletionLanguageModel: () => OpenAICompletionLanguageModel,
- OpenAIEmbeddingModel: () => OpenAIEmbeddingModel
+ OpenAIEmbeddingModel: () => OpenAIEmbeddingModel,
+ OpenAIImageModel: () => OpenAIImageModel,
+ modelMaxImagesPerCall: () => modelMaxImagesPerCall
  });
  module.exports = __toCommonJS(internal_exports);

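The compiled-JS hunks mirror the declarations: the internal export map now includes the image model and its per-model limits. A sketch, assuming these exports are reachable through the package's `@ai-sdk/openai/internal` subpath (where this compiled file lives):

```ts
import {
  OpenAIImageModel,
  modelMaxImagesPerCall,
} from '@ai-sdk/openai/internal';

// Per-model defaults from src/openai-image-settings.ts:
console.log(modelMaxImagesPerCall['dall-e-3']); // 1
console.log(modelMaxImagesPerCall['dall-e-2']); // 10
console.log(typeof OpenAIImageModel); // "function"
```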
@@ -610,7 +612,11 @@ var OpenAIChatLanguageModel = class {
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
  const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
@@ -665,7 +671,7 @@ var OpenAIChatLanguageModel = class {
  completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
  },
  rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
+ rawResponse: { headers: responseHeaders, body: rawResponse },
  request: { body: JSON.stringify(body) },
  response: getResponseMetadata(response),
  warnings,
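Taken together, these two chat hunks read cleanly: `postJsonToApi` already exposed the unvalidated JSON body as `rawValue`, and the model now threads it through as `rawResponse.body` next to the headers (the next two hunks make the same change to the completion model). A sketch at the `LanguageModelV1` spec level, which is where `rawResponse` surfaces:

```ts
import { openai } from '@ai-sdk/openai';

const model = openai.chat('gpt-4o-mini');

const { text, rawResponse } = await model.doGenerate({
  inputFormat: 'prompt',
  mode: { type: 'regular' },
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'Say hi.' }] }],
});

console.log(text);
console.log(rawResponse?.headers); // already present in 1.1.15
console.log(rawResponse?.body);    // new in 1.2: the raw JSON body, pre-validation
```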
@@ -1267,7 +1273,11 @@ var OpenAICompletionLanguageModel = class {
  }
  async doGenerate(options) {
  const { args, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
@@ -1292,7 +1302,7 @@ var OpenAICompletionLanguageModel = class {
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
  rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
+ rawResponse: { headers: responseHeaders, body: rawResponse },
  response: getResponseMetadata(response),
  warnings,
  request: { body: JSON.stringify(args) }
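The completion model receives the identical `rawValue` → `rawResponse.body` passthrough, so the chat sketch above applies unchanged — construct the model with `openai.completion(...)` instead.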
@@ -1502,10 +1512,96 @@ var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
  data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
  usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
  });
+
+ // src/openai-image-model.ts
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_zod5 = require("zod");
+
+ // src/openai-image-settings.ts
+ var modelMaxImagesPerCall = {
+ "dall-e-3": 1,
+ "dall-e-2": 10
+ };
+
+ // src/openai-image-model.ts
+ var OpenAIImageModel = class {
+ constructor(modelId, settings, config) {
+ this.modelId = modelId;
+ this.settings = settings;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get maxImagesPerCall() {
+ var _a, _b;
+ return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ async doGenerate({
+ prompt,
+ n,
+ size,
+ aspectRatio,
+ seed,
+ providerOptions,
+ headers,
+ abortSignal
+ }) {
+ var _a, _b, _c, _d;
+ const warnings = [];
+ if (aspectRatio != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "aspectRatio",
+ details: "This model does not support aspect ratio. Use `size` instead."
+ });
+ }
+ if (seed != null) {
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
+ }
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
+ url: this.config.url({
+ path: "/images/generations",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
+ body: {
+ model: this.modelId,
+ prompt,
+ n,
+ size,
+ ...(_d = providerOptions.openai) != null ? _d : {},
+ response_format: "b64_json"
+ },
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
+ openaiImageResponseSchema
+ ),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ images: response.data.map((item) => item.b64_json),
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders
+ }
+ };
+ }
+ };
+ var openaiImageResponseSchema = import_zod5.z.object({
+ data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
+ });
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  OpenAIChatLanguageModel,
  OpenAICompletionLanguageModel,
- OpenAIEmbeddingModel
+ OpenAIEmbeddingModel,
+ OpenAIImageModel,
+ modelMaxImagesPerCall
  });
  //# sourceMappingURL=index.js.map
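Implementation notes from the final hunk: `maxImagesPerCall` resolves settings override → per-model default → 1; `aspectRatio` and `seed` produce `unsupported-setting` warnings rather than errors; and `providerOptions.openai` is spread verbatim into the request body ahead of the forced `response_format: "b64_json"`. A sketch of that passthrough, again assuming the `ai` package's `experimental_generateImage`; `quality` and `style` are OpenAI Images API parameters for dall-e-3, not part of this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

const { images, warnings } = await generateImage({
  model: openai.image('dall-e-3'), // per-model default: 1 image per call
  prompt: 'An isometric pixel-art city block at night',
  n: 2, // the ai package splits this into 2 calls of 1 image each
  seed: 42, // surfaces as a warning, not an error (see doGenerate above)
  providerOptions: {
    openai: { quality: 'hd', style: 'vivid' }, // spread into the request body
  },
});

console.log(warnings); // includes { type: 'unsupported-setting', setting: 'seed' }
console.log(images.length); // 2
```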