@ai-sdk/openai 2.0.0-canary.17 → 2.0.0-canary.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,47 @@
1
1
  # @ai-sdk/openai
2
2
 
3
+ ## 2.0.0-canary.18
4
+
5
+ ### Major Changes
6
+
7
+ - 516be5b: ### Move Image Model Settings into generate options
8
+
9
+ Image Models no longer have settings. Instead, `maxImagesPerCall` can be passed directly to `generateImage()`. All other image settings can be passed to `providerOptions[provider]`.
10
+
11
+ Before
12
+
13
+ ```js
14
+ await generateImage({
15
+ model: luma.image('photon-flash-1', {
16
+ maxImagesPerCall: 5,
17
+ pollIntervalMillis: 500,
18
+ }),
19
+ prompt,
20
+ n: 10,
21
+ });
22
+ ```
23
+
24
+ After
25
+
26
+ ```js
27
+ await generateImage({
28
+ model: luma.image('photon-flash-1'),
29
+ prompt,
30
+ n: 10,
31
+ maxImagesPerCall: 5,
32
+ providerOptions: {
33
+ luma: { pollIntervalMillis: 500 },
34
+ },
35
+ });
36
+ ```
37
+
38
+ Pull Request: https://github.com/vercel/ai/pull/6180
39
+
40
+ ### Patch Changes
41
+
42
+ - Updated dependencies [ea7a7c9]
43
+ - @ai-sdk/provider-utils@3.0.0-canary.17
44
+
3
45
  ## 2.0.0-canary.17
4
46
 
5
47
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -31,13 +31,6 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
31
31
  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
32
32
 
33
33
  type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
34
- interface OpenAIImageSettings {
35
- /**
36
- Override the maximum number of images per call (default is dependent on the
37
- model, or 1 for an unknown model).
38
- */
39
- maxImagesPerCall?: number;
40
- }
41
34
 
42
35
  declare const WebSearchPreviewParameters: z.ZodObject<{}, "strip", z.ZodTypeAny, {}, {}>;
43
36
  declare function webSearchPreviewTool({ searchContextSize, userLocation, }?: {
@@ -101,12 +94,13 @@ interface OpenAIProvider extends ProviderV2 {
101
94
  textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
102
95
  /**
103
96
  Creates a model for image generation.
97
+ @deprecated Use `imageModel` instead.
104
98
  */
105
- image(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV2;
99
+ image(modelId: OpenAIImageModelId): ImageModelV2;
106
100
  /**
107
101
  Creates a model for image generation.
108
102
  */
109
- imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV2;
103
+ imageModel(modelId: OpenAIImageModelId): ImageModelV2;
110
104
  /**
111
105
  Creates a model for transcription.
112
106
  */
package/dist/index.d.ts CHANGED
@@ -31,13 +31,6 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
31
31
  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
32
32
 
33
33
  type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
34
- interface OpenAIImageSettings {
35
- /**
36
- Override the maximum number of images per call (default is dependent on the
37
- model, or 1 for an unknown model).
38
- */
39
- maxImagesPerCall?: number;
40
- }
41
34
 
42
35
  declare const WebSearchPreviewParameters: z.ZodObject<{}, "strip", z.ZodTypeAny, {}, {}>;
43
36
  declare function webSearchPreviewTool({ searchContextSize, userLocation, }?: {
@@ -101,12 +94,13 @@ interface OpenAIProvider extends ProviderV2 {
101
94
  textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
102
95
  /**
103
96
  Creates a model for image generation.
97
+ @deprecated Use `imageModel` instead.
104
98
  */
105
- image(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV2;
99
+ image(modelId: OpenAIImageModelId): ImageModelV2;
106
100
  /**
107
101
  Creates a model for image generation.
108
102
  */
109
- imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV2;
103
+ imageModel(modelId: OpenAIImageModelId): ImageModelV2;
110
104
  /**
111
105
  Creates a model for transcription.
112
106
  */
package/dist/index.js CHANGED
@@ -1426,15 +1426,14 @@ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1426
1426
 
1427
1427
  // src/openai-image-model.ts
1428
1428
  var OpenAIImageModel = class {
1429
- constructor(modelId, settings, config) {
1429
+ constructor(modelId, config) {
1430
1430
  this.modelId = modelId;
1431
- this.settings = settings;
1432
1431
  this.config = config;
1433
1432
  this.specificationVersion = "v2";
1434
1433
  }
1435
1434
  get maxImagesPerCall() {
1436
- var _a, _b;
1437
- return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
1435
+ var _a;
1436
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
1438
1437
  }
1439
1438
  get provider() {
1440
1439
  return this.config.provider;
@@ -2619,7 +2618,7 @@ function createOpenAI(options = {}) {
2619
2618
  headers: getHeaders,
2620
2619
  fetch: options.fetch
2621
2620
  });
2622
- const createImageModel = (modelId, settings = {}) => new OpenAIImageModel(modelId, settings, {
2621
+ const createImageModel = (modelId) => new OpenAIImageModel(modelId, {
2623
2622
  provider: `${providerName}.image`,
2624
2623
  url: ({ path }) => `${baseURL}${path}`,
2625
2624
  headers: getHeaders,