@ai-sdk/openai-compatible 3.0.0-beta.2 → 3.0.0-beta.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,150 @@
1
1
  # @ai-sdk/openai-compatible
2
2
 
3
+ ## 3.0.0-beta.21
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [176466a]
8
+ - @ai-sdk/provider@4.0.0-beta.10
9
+ - @ai-sdk/provider-utils@5.0.0-beta.16
10
+
11
+ ## 3.0.0-beta.20
12
+
13
+ ### Patch Changes
14
+
15
+ - Updated dependencies [e311194]
16
+ - @ai-sdk/provider@4.0.0-beta.9
17
+ - @ai-sdk/provider-utils@5.0.0-beta.15
18
+
19
+ ## 3.0.0-beta.19
20
+
21
+ ### Patch Changes
22
+
23
+ - 008271d: feat(openai-compatible): emit warning when using kebab-case instead of camelCase
24
+ - Updated dependencies [34bd95d]
25
+ - Updated dependencies [008271d]
26
+ - @ai-sdk/provider@4.0.0-beta.8
27
+ - @ai-sdk/provider-utils@5.0.0-beta.14
28
+
29
+ ## 3.0.0-beta.18
30
+
31
+ ### Patch Changes
32
+
33
+ - Updated dependencies [b0c2869]
34
+ - Updated dependencies [7e26e81]
35
+ - @ai-sdk/provider-utils@5.0.0-beta.13
36
+
37
+ ## 3.0.0-beta.17
38
+
39
+ ### Patch Changes
40
+
41
+ - 816ff67: fix(openai-compatible): honor camelCase providerOptions key in chat and completion models
42
+
43
+ ## 3.0.0-beta.16
44
+
45
+ ### Patch Changes
46
+
47
+ - Updated dependencies [46d1149]
48
+ - @ai-sdk/provider-utils@5.0.0-beta.12
49
+
50
+ ## 3.0.0-beta.15
51
+
52
+ ### Patch Changes
53
+
54
+ - 6fd51c0: fix(provider): preserve error type prefix in getErrorMessage
55
+ - Updated dependencies [6fd51c0]
56
+ - @ai-sdk/provider-utils@5.0.0-beta.11
57
+ - @ai-sdk/provider@4.0.0-beta.7
58
+
59
+ ## 3.0.0-beta.14
60
+
61
+ ### Patch Changes
62
+
63
+ - c29a26f: feat(provider): add support for provider references and uploading files as supported per provider
64
+ - Updated dependencies [c29a26f]
65
+ - @ai-sdk/provider-utils@5.0.0-beta.10
66
+ - @ai-sdk/provider@4.0.0-beta.6
67
+
68
+ ## 3.0.0-beta.13
69
+
70
+ ### Patch Changes
71
+
72
+ - 38fc777: Add AI Gateway hint to provider READMEs
73
+
74
+ ## 3.0.0-beta.12
75
+
76
+ ### Patch Changes
77
+
78
+ - Updated dependencies [2e17091]
79
+ - @ai-sdk/provider-utils@5.0.0-beta.9
80
+
81
+ ## 3.0.0-beta.11
82
+
83
+ ### Patch Changes
84
+
85
+ - Updated dependencies [986c6fd]
86
+ - Updated dependencies [493295c]
87
+ - @ai-sdk/provider-utils@5.0.0-beta.8
88
+
89
+ ## 3.0.0-beta.10
90
+
91
+ ### Patch Changes
92
+
93
+ - Updated dependencies [1f509d4]
94
+ - @ai-sdk/provider-utils@5.0.0-beta.7
95
+ - @ai-sdk/provider@4.0.0-beta.5
96
+
97
+ ## 3.0.0-beta.9
98
+
99
+ ### Patch Changes
100
+
101
+ - 74d520f: feat: migrate providers to support new top-level `reasoning` parameter
102
+
103
+ ## 3.0.0-beta.8
104
+
105
+ ### Patch Changes
106
+
107
+ - Updated dependencies [3887c70]
108
+ - @ai-sdk/provider-utils@5.0.0-beta.6
109
+ - @ai-sdk/provider@4.0.0-beta.4
110
+
111
+ ## 3.0.0-beta.7
112
+
113
+ ### Patch Changes
114
+
115
+ - Updated dependencies [776b617]
116
+ - @ai-sdk/provider-utils@5.0.0-beta.5
117
+ - @ai-sdk/provider@4.0.0-beta.3
118
+
119
+ ## 3.0.0-beta.6
120
+
121
+ ### Patch Changes
122
+
123
+ - Updated dependencies [61753c3]
124
+ - @ai-sdk/provider-utils@5.0.0-beta.4
125
+
126
+ ## 3.0.0-beta.5
127
+
128
+ ### Patch Changes
129
+
130
+ - Updated dependencies [f7d4f01]
131
+ - @ai-sdk/provider-utils@5.0.0-beta.3
132
+ - @ai-sdk/provider@4.0.0-beta.2
133
+
134
+ ## 3.0.0-beta.4
135
+
136
+ ### Patch Changes
137
+
138
+ - Updated dependencies [5c2a5a2]
139
+ - @ai-sdk/provider@4.0.0-beta.1
140
+ - @ai-sdk/provider-utils@5.0.0-beta.2
141
+
142
+ ## 3.0.0-beta.3
143
+
144
+ ### Patch Changes
145
+
146
+ - 8f3e1da: chore(openai-compat): update v3 specs to v4
147
+
3
148
  ## 3.0.0-beta.2
4
149
 
5
150
  ### Patch Changes
@@ -287,13 +432,13 @@
287
432
  Before
288
433
 
289
434
  ```ts
290
- model.textEmbeddingModel('my-model-id');
435
+ model.textEmbeddingModel("my-model-id");
291
436
  ```
292
437
 
293
438
  After
294
439
 
295
440
  ```ts
296
- model.embeddingModel('my-model-id');
441
+ model.embeddingModel("my-model-id");
297
442
  ```
298
443
 
299
444
  - 2625a04: feat(openai): update spec for mcp approval
@@ -508,13 +653,13 @@
508
653
  Before
509
654
 
510
655
  ```ts
511
- model.textEmbeddingModel('my-model-id');
656
+ model.textEmbeddingModel("my-model-id");
512
657
  ```
513
658
 
514
659
  After
515
660
 
516
661
  ```ts
517
- model.embeddingModel('my-model-id');
662
+ model.embeddingModel("my-model-id");
518
663
  ```
519
664
 
520
665
  - Updated dependencies [8d9e8ad]
@@ -950,7 +1095,7 @@
950
1095
 
951
1096
  ```js
952
1097
  await generateImage({
953
- model: luma.image('photon-flash-1', {
1098
+ model: luma.image("photon-flash-1", {
954
1099
  maxImagesPerCall: 5,
955
1100
  pollIntervalMillis: 500,
956
1101
  }),
@@ -963,7 +1108,7 @@
963
1108
 
964
1109
  ```js
965
1110
  await generateImage({
966
- model: luma.image('photon-flash-1'),
1111
+ model: luma.image("photon-flash-1"),
967
1112
  prompt,
968
1113
  n: 10,
969
1114
  maxImagesPerCall: 5,
@@ -1232,7 +1377,7 @@
1232
1377
 
1233
1378
  ```js
1234
1379
  await generateImage({
1235
- model: luma.image('photon-flash-1', {
1380
+ model: luma.image("photon-flash-1", {
1236
1381
  maxImagesPerCall: 5,
1237
1382
  pollIntervalMillis: 500,
1238
1383
  }),
@@ -1245,7 +1390,7 @@
1245
1390
 
1246
1391
  ```js
1247
1392
  await generateImage({
1248
- model: luma.image('photon-flash-1'),
1393
+ model: luma.image("photon-flash-1"),
1249
1394
  prompt,
1250
1395
  n: 10,
1251
1396
  maxImagesPerCall: 5,
package/README.md CHANGED
@@ -4,6 +4,8 @@ This package provides a foundation for implementing providers that expose an Ope
4
4
 
5
5
  The primary [OpenAI provider](../openai/README.md) is more feature-rich, including OpenAI-specific experimental and legacy features. This package offers a lighter-weight alternative focused on core OpenAI-compatible functionality.
6
6
 
7
+ > **Deploying to Vercel?** With Vercel's AI Gateway you can access hundreds of models from any provider — no additional packages, API keys, or extra cost. [Get started with AI Gateway](https://vercel.com/ai-gateway).
8
+
7
9
  ## Setup
8
10
 
9
11
  The provider is available in the `@ai-sdk/openai-compatible` module. You can install it with
package/dist/index.d.mts CHANGED
@@ -1,4 +1,4 @@
1
- import { SharedV3ProviderMetadata, LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
1
+ import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
2
2
  import { FetchFunction } from '@ai-sdk/provider-utils';
3
3
  import { ZodType, z } from 'zod/v4';
4
4
 
@@ -42,7 +42,7 @@ type MetadataExtractor = {
42
42
  */
43
43
  extractMetadata: ({ parsedBody, }: {
44
44
  parsedBody: unknown;
45
- }) => Promise<SharedV3ProviderMetadata | undefined>;
45
+ }) => Promise<SharedV4ProviderMetadata | undefined>;
46
46
  /**
47
47
  * Creates an extractor for handling streaming responses. The returned object provides
48
48
  * methods to process individual chunks and build the final metadata from the accumulated
@@ -65,7 +65,7 @@ type MetadataExtractor = {
65
65
  * @returns Provider-specific metadata or undefined if no metadata is available.
66
66
  * The metadata should be under a key indicating the provider id.
67
67
  */
68
- buildMetadata(): SharedV3ProviderMetadata | undefined;
68
+ buildMetadata(): SharedV4ProviderMetadata | undefined;
69
69
  };
70
70
  };
71
71
 
@@ -87,7 +87,7 @@ type OpenAICompatibleChatConfig = {
87
87
  /**
88
88
  * The supported URLs for the model.
89
89
  */
90
- supportedUrls?: () => LanguageModelV3['supportedUrls'];
90
+ supportedUrls?: () => LanguageModelV4['supportedUrls'];
91
91
  /**
92
92
  * Optional function to transform the request body before sending it to the API.
93
93
  * This is useful for proxy providers that may require a different request format
@@ -95,8 +95,8 @@ type OpenAICompatibleChatConfig = {
95
95
  */
96
96
  transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
97
97
  };
98
- declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
99
- readonly specificationVersion = "v3";
98
+ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV4 {
99
+ readonly specificationVersion = "v4";
100
100
  readonly supportsStructuredOutputs: boolean;
101
101
  readonly modelId: OpenAICompatibleChatModelId;
102
102
  private readonly config;
@@ -108,8 +108,8 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
108
108
  get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
109
109
  private transformRequestBody;
110
110
  private getArgs;
111
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
112
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
111
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
112
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
113
113
  }
114
114
 
115
115
  type OpenAICompatibleCompletionModelId = string;
@@ -134,10 +134,10 @@ type OpenAICompatibleCompletionConfig = {
134
134
  /**
135
135
  * The supported URLs for the model.
136
136
  */
137
- supportedUrls?: () => LanguageModelV3['supportedUrls'];
137
+ supportedUrls?: () => LanguageModelV4['supportedUrls'];
138
138
  };
139
- declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV3 {
140
- readonly specificationVersion = "v3";
139
+ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV4 {
140
+ readonly specificationVersion = "v4";
141
141
  readonly modelId: OpenAICompatibleCompletionModelId;
142
142
  private readonly config;
143
143
  private readonly failedResponseHandler;
@@ -147,8 +147,8 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV3
147
147
  private get providerOptionsName();
148
148
  get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
149
149
  private getArgs;
150
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
151
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
150
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
151
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
152
152
  }
153
153
 
154
154
  type OpenAICompatibleEmbeddingModelId = string;
@@ -176,8 +176,8 @@ type OpenAICompatibleEmbeddingConfig = {
176
176
  fetch?: FetchFunction;
177
177
  errorStructure?: ProviderErrorStructure<any>;
178
178
  };
179
- declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV3 {
180
- readonly specificationVersion = "v3";
179
+ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV4 {
180
+ readonly specificationVersion = "v4";
181
181
  readonly modelId: OpenAICompatibleEmbeddingModelId;
182
182
  private readonly config;
183
183
  get provider(): string;
@@ -185,7 +185,7 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV3 {
185
185
  get supportsParallelCalls(): boolean;
186
186
  constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
187
187
  private get providerOptionsName();
188
- doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
188
+ doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
189
189
  }
190
190
 
191
191
  type OpenAICompatibleImageModelId = string;
@@ -203,10 +203,10 @@ type OpenAICompatibleImageModelConfig = {
203
203
  currentDate?: () => Date;
204
204
  };
205
205
  };
206
- declare class OpenAICompatibleImageModel implements ImageModelV3 {
206
+ declare class OpenAICompatibleImageModel implements ImageModelV4 {
207
207
  readonly modelId: OpenAICompatibleImageModelId;
208
208
  private readonly config;
209
- readonly specificationVersion = "v3";
209
+ readonly specificationVersion = "v4";
210
210
  readonly maxImagesPerCall = 10;
211
211
  get provider(): string;
212
212
  /**
@@ -215,20 +215,20 @@ declare class OpenAICompatibleImageModel implements ImageModelV3 {
215
215
  private get providerOptionsKey();
216
216
  constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
217
217
  private getArgs;
218
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
218
+ doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
219
219
  }
220
220
 
221
- interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV3, 'imageModel'> {
222
- (modelId: CHAT_MODEL_IDS): LanguageModelV3;
223
- languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV3;
224
- chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV3;
225
- completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV3;
226
- embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
221
+ interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV4, 'imageModel'> {
222
+ (modelId: CHAT_MODEL_IDS): LanguageModelV4;
223
+ languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV4;
224
+ chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV4;
225
+ completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV4;
226
+ embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
227
227
  /**
228
228
  * @deprecated Use `embeddingModel` instead.
229
229
  */
230
- textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
231
- imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV3;
230
+ textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
231
+ imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV4;
232
232
  }
233
233
  interface OpenAICompatibleProviderSettings {
234
234
  /**
package/dist/index.d.ts CHANGED
@@ -1,4 +1,4 @@
1
- import { SharedV3ProviderMetadata, LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
1
+ import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
2
2
  import { FetchFunction } from '@ai-sdk/provider-utils';
3
3
  import { ZodType, z } from 'zod/v4';
4
4
 
@@ -42,7 +42,7 @@ type MetadataExtractor = {
42
42
  */
43
43
  extractMetadata: ({ parsedBody, }: {
44
44
  parsedBody: unknown;
45
- }) => Promise<SharedV3ProviderMetadata | undefined>;
45
+ }) => Promise<SharedV4ProviderMetadata | undefined>;
46
46
  /**
47
47
  * Creates an extractor for handling streaming responses. The returned object provides
48
48
  * methods to process individual chunks and build the final metadata from the accumulated
@@ -65,7 +65,7 @@ type MetadataExtractor = {
65
65
  * @returns Provider-specific metadata or undefined if no metadata is available.
66
66
  * The metadata should be under a key indicating the provider id.
67
67
  */
68
- buildMetadata(): SharedV3ProviderMetadata | undefined;
68
+ buildMetadata(): SharedV4ProviderMetadata | undefined;
69
69
  };
70
70
  };
71
71
 
@@ -87,7 +87,7 @@ type OpenAICompatibleChatConfig = {
87
87
  /**
88
88
  * The supported URLs for the model.
89
89
  */
90
- supportedUrls?: () => LanguageModelV3['supportedUrls'];
90
+ supportedUrls?: () => LanguageModelV4['supportedUrls'];
91
91
  /**
92
92
  * Optional function to transform the request body before sending it to the API.
93
93
  * This is useful for proxy providers that may require a different request format
@@ -95,8 +95,8 @@ type OpenAICompatibleChatConfig = {
95
95
  */
96
96
  transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
97
97
  };
98
- declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
99
- readonly specificationVersion = "v3";
98
+ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV4 {
99
+ readonly specificationVersion = "v4";
100
100
  readonly supportsStructuredOutputs: boolean;
101
101
  readonly modelId: OpenAICompatibleChatModelId;
102
102
  private readonly config;
@@ -108,8 +108,8 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
108
108
  get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
109
109
  private transformRequestBody;
110
110
  private getArgs;
111
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
112
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
111
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
112
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
113
113
  }
114
114
 
115
115
  type OpenAICompatibleCompletionModelId = string;
@@ -134,10 +134,10 @@ type OpenAICompatibleCompletionConfig = {
134
134
  /**
135
135
  * The supported URLs for the model.
136
136
  */
137
- supportedUrls?: () => LanguageModelV3['supportedUrls'];
137
+ supportedUrls?: () => LanguageModelV4['supportedUrls'];
138
138
  };
139
- declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV3 {
140
- readonly specificationVersion = "v3";
139
+ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV4 {
140
+ readonly specificationVersion = "v4";
141
141
  readonly modelId: OpenAICompatibleCompletionModelId;
142
142
  private readonly config;
143
143
  private readonly failedResponseHandler;
@@ -147,8 +147,8 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV3
147
147
  private get providerOptionsName();
148
148
  get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
149
149
  private getArgs;
150
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
151
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
150
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
151
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
152
152
  }
153
153
 
154
154
  type OpenAICompatibleEmbeddingModelId = string;
@@ -176,8 +176,8 @@ type OpenAICompatibleEmbeddingConfig = {
176
176
  fetch?: FetchFunction;
177
177
  errorStructure?: ProviderErrorStructure<any>;
178
178
  };
179
- declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV3 {
180
- readonly specificationVersion = "v3";
179
+ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV4 {
180
+ readonly specificationVersion = "v4";
181
181
  readonly modelId: OpenAICompatibleEmbeddingModelId;
182
182
  private readonly config;
183
183
  get provider(): string;
@@ -185,7 +185,7 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV3 {
185
185
  get supportsParallelCalls(): boolean;
186
186
  constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
187
187
  private get providerOptionsName();
188
- doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
188
+ doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
189
189
  }
190
190
 
191
191
  type OpenAICompatibleImageModelId = string;
@@ -203,10 +203,10 @@ type OpenAICompatibleImageModelConfig = {
203
203
  currentDate?: () => Date;
204
204
  };
205
205
  };
206
- declare class OpenAICompatibleImageModel implements ImageModelV3 {
206
+ declare class OpenAICompatibleImageModel implements ImageModelV4 {
207
207
  readonly modelId: OpenAICompatibleImageModelId;
208
208
  private readonly config;
209
- readonly specificationVersion = "v3";
209
+ readonly specificationVersion = "v4";
210
210
  readonly maxImagesPerCall = 10;
211
211
  get provider(): string;
212
212
  /**
@@ -215,20 +215,20 @@ declare class OpenAICompatibleImageModel implements ImageModelV3 {
215
215
  private get providerOptionsKey();
216
216
  constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
217
217
  private getArgs;
218
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
218
+ doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
219
219
  }
220
220
 
221
- interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV3, 'imageModel'> {
222
- (modelId: CHAT_MODEL_IDS): LanguageModelV3;
223
- languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV3;
224
- chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV3;
225
- completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV3;
226
- embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
221
+ interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV4, 'imageModel'> {
222
+ (modelId: CHAT_MODEL_IDS): LanguageModelV4;
223
+ languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV4;
224
+ chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV4;
225
+ completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV4;
226
+ embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
227
227
  /**
228
228
  * @deprecated Use `embeddingModel` instead.
229
229
  */
230
- textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
231
- imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV3;
230
+ textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
231
+ imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV4;
232
232
  }
233
233
  interface OpenAICompatibleProviderSettings {
234
234
  /**