@ai-sdk/openai-compatible 3.0.0-beta.2 → 3.0.0-beta.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,142 @@
1
1
  # @ai-sdk/openai-compatible
2
2
 
3
+ ## 3.0.0-beta.20
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [e311194]
8
+ - @ai-sdk/provider@4.0.0-beta.9
9
+ - @ai-sdk/provider-utils@5.0.0-beta.15
10
+
11
+ ## 3.0.0-beta.19
12
+
13
+ ### Patch Changes
14
+
15
+ - 008271d: feat(openai-compatible): emit warning when using kebab-case instead of camelCase
16
+ - Updated dependencies [34bd95d]
17
+ - Updated dependencies [008271d]
18
+ - @ai-sdk/provider@4.0.0-beta.8
19
+ - @ai-sdk/provider-utils@5.0.0-beta.14
20
+
21
+ ## 3.0.0-beta.18
22
+
23
+ ### Patch Changes
24
+
25
+ - Updated dependencies [b0c2869]
26
+ - Updated dependencies [7e26e81]
27
+ - @ai-sdk/provider-utils@5.0.0-beta.13
28
+
29
+ ## 3.0.0-beta.17
30
+
31
+ ### Patch Changes
32
+
33
+ - 816ff67: fix(openai-compatible): honor camelCase providerOptions key in chat and completion models
34
+
35
+ ## 3.0.0-beta.16
36
+
37
+ ### Patch Changes
38
+
39
+ - Updated dependencies [46d1149]
40
+ - @ai-sdk/provider-utils@5.0.0-beta.12
41
+
42
+ ## 3.0.0-beta.15
43
+
44
+ ### Patch Changes
45
+
46
+ - 6fd51c0: fix(provider): preserve error type prefix in getErrorMessage
47
+ - Updated dependencies [6fd51c0]
48
+ - @ai-sdk/provider-utils@5.0.0-beta.11
49
+ - @ai-sdk/provider@4.0.0-beta.7
50
+
51
+ ## 3.0.0-beta.14
52
+
53
+ ### Patch Changes
54
+
55
+ - c29a26f: feat(provider): add support for provider references and uploading files as supported per provider
56
+ - Updated dependencies [c29a26f]
57
+ - @ai-sdk/provider-utils@5.0.0-beta.10
58
+ - @ai-sdk/provider@4.0.0-beta.6
59
+
60
+ ## 3.0.0-beta.13
61
+
62
+ ### Patch Changes
63
+
64
+ - 38fc777: Add AI Gateway hint to provider READMEs
65
+
66
+ ## 3.0.0-beta.12
67
+
68
+ ### Patch Changes
69
+
70
+ - Updated dependencies [2e17091]
71
+ - @ai-sdk/provider-utils@5.0.0-beta.9
72
+
73
+ ## 3.0.0-beta.11
74
+
75
+ ### Patch Changes
76
+
77
+ - Updated dependencies [986c6fd]
78
+ - Updated dependencies [493295c]
79
+ - @ai-sdk/provider-utils@5.0.0-beta.8
80
+
81
+ ## 3.0.0-beta.10
82
+
83
+ ### Patch Changes
84
+
85
+ - Updated dependencies [1f509d4]
86
+ - @ai-sdk/provider-utils@5.0.0-beta.7
87
+ - @ai-sdk/provider@4.0.0-beta.5
88
+
89
+ ## 3.0.0-beta.9
90
+
91
+ ### Patch Changes
92
+
93
+ - 74d520f: feat: migrate providers to support new top-level `reasoning` parameter
94
+
95
+ ## 3.0.0-beta.8
96
+
97
+ ### Patch Changes
98
+
99
+ - Updated dependencies [3887c70]
100
+ - @ai-sdk/provider-utils@5.0.0-beta.6
101
+ - @ai-sdk/provider@4.0.0-beta.4
102
+
103
+ ## 3.0.0-beta.7
104
+
105
+ ### Patch Changes
106
+
107
+ - Updated dependencies [776b617]
108
+ - @ai-sdk/provider-utils@5.0.0-beta.5
109
+ - @ai-sdk/provider@4.0.0-beta.3
110
+
111
+ ## 3.0.0-beta.6
112
+
113
+ ### Patch Changes
114
+
115
+ - Updated dependencies [61753c3]
116
+ - @ai-sdk/provider-utils@5.0.0-beta.4
117
+
118
+ ## 3.0.0-beta.5
119
+
120
+ ### Patch Changes
121
+
122
+ - Updated dependencies [f7d4f01]
123
+ - @ai-sdk/provider-utils@5.0.0-beta.3
124
+ - @ai-sdk/provider@4.0.0-beta.2
125
+
126
+ ## 3.0.0-beta.4
127
+
128
+ ### Patch Changes
129
+
130
+ - Updated dependencies [5c2a5a2]
131
+ - @ai-sdk/provider@4.0.0-beta.1
132
+ - @ai-sdk/provider-utils@5.0.0-beta.2
133
+
134
+ ## 3.0.0-beta.3
135
+
136
+ ### Patch Changes
137
+
138
+ - 8f3e1da: chore(openai-compat): update v3 specs to v4
139
+
3
140
  ## 3.0.0-beta.2
4
141
 
5
142
  ### Patch Changes
@@ -287,13 +424,13 @@
287
424
  Before
288
425
 
289
426
  ```ts
290
- model.textEmbeddingModel('my-model-id');
427
+ model.textEmbeddingModel("my-model-id");
291
428
  ```
292
429
 
293
430
  After
294
431
 
295
432
  ```ts
296
- model.embeddingModel('my-model-id');
433
+ model.embeddingModel("my-model-id");
297
434
  ```
298
435
 
299
436
  - 2625a04: feat(openai): update spec for mcp approval
@@ -508,13 +645,13 @@
508
645
  Before
509
646
 
510
647
  ```ts
511
- model.textEmbeddingModel('my-model-id');
648
+ model.textEmbeddingModel("my-model-id");
512
649
  ```
513
650
 
514
651
  After
515
652
 
516
653
  ```ts
517
- model.embeddingModel('my-model-id');
654
+ model.embeddingModel("my-model-id");
518
655
  ```
519
656
 
520
657
  - Updated dependencies [8d9e8ad]
@@ -950,7 +1087,7 @@
950
1087
 
951
1088
  ```js
952
1089
  await generateImage({
953
- model: luma.image('photon-flash-1', {
1090
+ model: luma.image("photon-flash-1", {
954
1091
  maxImagesPerCall: 5,
955
1092
  pollIntervalMillis: 500,
956
1093
  }),
@@ -963,7 +1100,7 @@
963
1100
 
964
1101
  ```js
965
1102
  await generateImage({
966
- model: luma.image('photon-flash-1'),
1103
+ model: luma.image("photon-flash-1"),
967
1104
  prompt,
968
1105
  n: 10,
969
1106
  maxImagesPerCall: 5,
@@ -1232,7 +1369,7 @@
1232
1369
 
1233
1370
  ```js
1234
1371
  await generateImage({
1235
- model: luma.image('photon-flash-1', {
1372
+ model: luma.image("photon-flash-1", {
1236
1373
  maxImagesPerCall: 5,
1237
1374
  pollIntervalMillis: 500,
1238
1375
  }),
@@ -1245,7 +1382,7 @@
1245
1382
 
1246
1383
  ```js
1247
1384
  await generateImage({
1248
- model: luma.image('photon-flash-1'),
1385
+ model: luma.image("photon-flash-1"),
1249
1386
  prompt,
1250
1387
  n: 10,
1251
1388
  maxImagesPerCall: 5,
package/README.md CHANGED
@@ -4,6 +4,8 @@ This package provides a foundation for implementing providers that expose an Ope
4
4
 
5
5
  The primary [OpenAI provider](../openai/README.md) is more feature-rich, including OpenAI-specific experimental and legacy features. This package offers a lighter-weight alternative focused on core OpenAI-compatible functionality.
6
6
 
7
+ > **Deploying to Vercel?** With Vercel's AI Gateway you can access hundreds of models from any provider — no additional packages, API keys, or extra cost. [Get started with AI Gateway](https://vercel.com/ai-gateway).
8
+
7
9
  ## Setup
8
10
 
9
11
  The provider is available in the `@ai-sdk/openai-compatible` module. You can install it with
package/dist/index.d.mts CHANGED
@@ -1,4 +1,4 @@
1
- import { SharedV3ProviderMetadata, LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
1
+ import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
2
2
  import { FetchFunction } from '@ai-sdk/provider-utils';
3
3
  import { ZodType, z } from 'zod/v4';
4
4
 
@@ -42,7 +42,7 @@ type MetadataExtractor = {
42
42
  */
43
43
  extractMetadata: ({ parsedBody, }: {
44
44
  parsedBody: unknown;
45
- }) => Promise<SharedV3ProviderMetadata | undefined>;
45
+ }) => Promise<SharedV4ProviderMetadata | undefined>;
46
46
  /**
47
47
  * Creates an extractor for handling streaming responses. The returned object provides
48
48
  * methods to process individual chunks and build the final metadata from the accumulated
@@ -65,7 +65,7 @@ type MetadataExtractor = {
65
65
  * @returns Provider-specific metadata or undefined if no metadata is available.
66
66
  * The metadata should be under a key indicating the provider id.
67
67
  */
68
- buildMetadata(): SharedV3ProviderMetadata | undefined;
68
+ buildMetadata(): SharedV4ProviderMetadata | undefined;
69
69
  };
70
70
  };
71
71
 
@@ -87,7 +87,7 @@ type OpenAICompatibleChatConfig = {
87
87
  /**
88
88
  * The supported URLs for the model.
89
89
  */
90
- supportedUrls?: () => LanguageModelV3['supportedUrls'];
90
+ supportedUrls?: () => LanguageModelV4['supportedUrls'];
91
91
  /**
92
92
  * Optional function to transform the request body before sending it to the API.
93
93
  * This is useful for proxy providers that may require a different request format
@@ -95,8 +95,8 @@ type OpenAICompatibleChatConfig = {
95
95
  */
96
96
  transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
97
97
  };
98
- declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
99
- readonly specificationVersion = "v3";
98
+ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV4 {
99
+ readonly specificationVersion = "v4";
100
100
  readonly supportsStructuredOutputs: boolean;
101
101
  readonly modelId: OpenAICompatibleChatModelId;
102
102
  private readonly config;
@@ -108,8 +108,8 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
108
108
  get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
109
109
  private transformRequestBody;
110
110
  private getArgs;
111
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
112
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
111
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
112
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
113
113
  }
114
114
 
115
115
  type OpenAICompatibleCompletionModelId = string;
@@ -134,10 +134,10 @@ type OpenAICompatibleCompletionConfig = {
134
134
  /**
135
135
  * The supported URLs for the model.
136
136
  */
137
- supportedUrls?: () => LanguageModelV3['supportedUrls'];
137
+ supportedUrls?: () => LanguageModelV4['supportedUrls'];
138
138
  };
139
- declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV3 {
140
- readonly specificationVersion = "v3";
139
+ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV4 {
140
+ readonly specificationVersion = "v4";
141
141
  readonly modelId: OpenAICompatibleCompletionModelId;
142
142
  private readonly config;
143
143
  private readonly failedResponseHandler;
@@ -147,8 +147,8 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV3
147
147
  private get providerOptionsName();
148
148
  get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
149
149
  private getArgs;
150
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
151
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
150
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
151
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
152
152
  }
153
153
 
154
154
  type OpenAICompatibleEmbeddingModelId = string;
@@ -176,8 +176,8 @@ type OpenAICompatibleEmbeddingConfig = {
176
176
  fetch?: FetchFunction;
177
177
  errorStructure?: ProviderErrorStructure<any>;
178
178
  };
179
- declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV3 {
180
- readonly specificationVersion = "v3";
179
+ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV4 {
180
+ readonly specificationVersion = "v4";
181
181
  readonly modelId: OpenAICompatibleEmbeddingModelId;
182
182
  private readonly config;
183
183
  get provider(): string;
@@ -185,7 +185,7 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV3 {
185
185
  get supportsParallelCalls(): boolean;
186
186
  constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
187
187
  private get providerOptionsName();
188
- doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
188
+ doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
189
189
  }
190
190
 
191
191
  type OpenAICompatibleImageModelId = string;
@@ -203,10 +203,10 @@ type OpenAICompatibleImageModelConfig = {
203
203
  currentDate?: () => Date;
204
204
  };
205
205
  };
206
- declare class OpenAICompatibleImageModel implements ImageModelV3 {
206
+ declare class OpenAICompatibleImageModel implements ImageModelV4 {
207
207
  readonly modelId: OpenAICompatibleImageModelId;
208
208
  private readonly config;
209
- readonly specificationVersion = "v3";
209
+ readonly specificationVersion = "v4";
210
210
  readonly maxImagesPerCall = 10;
211
211
  get provider(): string;
212
212
  /**
@@ -215,20 +215,20 @@ declare class OpenAICompatibleImageModel implements ImageModelV3 {
215
215
  private get providerOptionsKey();
216
216
  constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
217
217
  private getArgs;
218
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
218
+ doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
219
219
  }
220
220
 
221
- interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV3, 'imageModel'> {
222
- (modelId: CHAT_MODEL_IDS): LanguageModelV3;
223
- languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV3;
224
- chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV3;
225
- completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV3;
226
- embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
221
+ interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV4, 'imageModel'> {
222
+ (modelId: CHAT_MODEL_IDS): LanguageModelV4;
223
+ languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV4;
224
+ chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV4;
225
+ completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV4;
226
+ embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
227
227
  /**
228
228
  * @deprecated Use `embeddingModel` instead.
229
229
  */
230
- textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
231
- imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV3;
230
+ textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
231
+ imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV4;
232
232
  }
233
233
  interface OpenAICompatibleProviderSettings {
234
234
  /**
package/dist/index.d.ts CHANGED
@@ -1,4 +1,4 @@
1
- import { SharedV3ProviderMetadata, LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
1
+ import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
2
2
  import { FetchFunction } from '@ai-sdk/provider-utils';
3
3
  import { ZodType, z } from 'zod/v4';
4
4
 
@@ -42,7 +42,7 @@ type MetadataExtractor = {
42
42
  */
43
43
  extractMetadata: ({ parsedBody, }: {
44
44
  parsedBody: unknown;
45
- }) => Promise<SharedV3ProviderMetadata | undefined>;
45
+ }) => Promise<SharedV4ProviderMetadata | undefined>;
46
46
  /**
47
47
  * Creates an extractor for handling streaming responses. The returned object provides
48
48
  * methods to process individual chunks and build the final metadata from the accumulated
@@ -65,7 +65,7 @@ type MetadataExtractor = {
65
65
  * @returns Provider-specific metadata or undefined if no metadata is available.
66
66
  * The metadata should be under a key indicating the provider id.
67
67
  */
68
- buildMetadata(): SharedV3ProviderMetadata | undefined;
68
+ buildMetadata(): SharedV4ProviderMetadata | undefined;
69
69
  };
70
70
  };
71
71
 
@@ -87,7 +87,7 @@ type OpenAICompatibleChatConfig = {
87
87
  /**
88
88
  * The supported URLs for the model.
89
89
  */
90
- supportedUrls?: () => LanguageModelV3['supportedUrls'];
90
+ supportedUrls?: () => LanguageModelV4['supportedUrls'];
91
91
  /**
92
92
  * Optional function to transform the request body before sending it to the API.
93
93
  * This is useful for proxy providers that may require a different request format
@@ -95,8 +95,8 @@ type OpenAICompatibleChatConfig = {
95
95
  */
96
96
  transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
97
97
  };
98
- declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
99
- readonly specificationVersion = "v3";
98
+ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV4 {
99
+ readonly specificationVersion = "v4";
100
100
  readonly supportsStructuredOutputs: boolean;
101
101
  readonly modelId: OpenAICompatibleChatModelId;
102
102
  private readonly config;
@@ -108,8 +108,8 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
108
108
  get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
109
109
  private transformRequestBody;
110
110
  private getArgs;
111
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
112
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
111
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
112
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
113
113
  }
114
114
 
115
115
  type OpenAICompatibleCompletionModelId = string;
@@ -134,10 +134,10 @@ type OpenAICompatibleCompletionConfig = {
134
134
  /**
135
135
  * The supported URLs for the model.
136
136
  */
137
- supportedUrls?: () => LanguageModelV3['supportedUrls'];
137
+ supportedUrls?: () => LanguageModelV4['supportedUrls'];
138
138
  };
139
- declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV3 {
140
- readonly specificationVersion = "v3";
139
+ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV4 {
140
+ readonly specificationVersion = "v4";
141
141
  readonly modelId: OpenAICompatibleCompletionModelId;
142
142
  private readonly config;
143
143
  private readonly failedResponseHandler;
@@ -147,8 +147,8 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV3
147
147
  private get providerOptionsName();
148
148
  get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
149
149
  private getArgs;
150
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
151
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
150
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
151
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
152
152
  }
153
153
 
154
154
  type OpenAICompatibleEmbeddingModelId = string;
@@ -176,8 +176,8 @@ type OpenAICompatibleEmbeddingConfig = {
176
176
  fetch?: FetchFunction;
177
177
  errorStructure?: ProviderErrorStructure<any>;
178
178
  };
179
- declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV3 {
180
- readonly specificationVersion = "v3";
179
+ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV4 {
180
+ readonly specificationVersion = "v4";
181
181
  readonly modelId: OpenAICompatibleEmbeddingModelId;
182
182
  private readonly config;
183
183
  get provider(): string;
@@ -185,7 +185,7 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV3 {
185
185
  get supportsParallelCalls(): boolean;
186
186
  constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
187
187
  private get providerOptionsName();
188
- doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
188
+ doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
189
189
  }
190
190
 
191
191
  type OpenAICompatibleImageModelId = string;
@@ -203,10 +203,10 @@ type OpenAICompatibleImageModelConfig = {
203
203
  currentDate?: () => Date;
204
204
  };
205
205
  };
206
- declare class OpenAICompatibleImageModel implements ImageModelV3 {
206
+ declare class OpenAICompatibleImageModel implements ImageModelV4 {
207
207
  readonly modelId: OpenAICompatibleImageModelId;
208
208
  private readonly config;
209
- readonly specificationVersion = "v3";
209
+ readonly specificationVersion = "v4";
210
210
  readonly maxImagesPerCall = 10;
211
211
  get provider(): string;
212
212
  /**
@@ -215,20 +215,20 @@ declare class OpenAICompatibleImageModel implements ImageModelV3 {
215
215
  private get providerOptionsKey();
216
216
  constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
217
217
  private getArgs;
218
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
218
+ doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
219
219
  }
220
220
 
221
- interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV3, 'imageModel'> {
222
- (modelId: CHAT_MODEL_IDS): LanguageModelV3;
223
- languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV3;
224
- chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV3;
225
- completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV3;
226
- embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
221
+ interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV4, 'imageModel'> {
222
+ (modelId: CHAT_MODEL_IDS): LanguageModelV4;
223
+ languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV4;
224
+ chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV4;
225
+ completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV4;
226
+ embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
227
227
  /**
228
228
  * @deprecated Use `embeddingModel` instead.
229
229
  */
230
- textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV3;
231
- imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV3;
230
+ textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
231
+ imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV4;
232
232
  }
233
233
  interface OpenAICompatibleProviderSettings {
234
234
  /**