@ai-sdk/openai-compatible 1.0.0-canary.8 → 1.0.0

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1,19 +1,12 @@
- import { SharedV2ProviderMetadata, LanguageModelV2, LanguageModelV2ObjectGenerationMode, EmbeddingModelV2, ImageModelV1, ProviderV2 } from '@ai-sdk/provider';
+ import { SharedV2ProviderMetadata, LanguageModelV2, EmbeddingModelV2, ImageModelV2, ProviderV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z, ZodSchema } from 'zod';
+ import { z, ZodType } from 'zod/v4';

  type OpenAICompatibleChatModelId = string;
  declare const openaiCompatibleProviderOptions: z.ZodObject<{
- /**
- * A unique identifier representing your end-user, which can help the provider to
- * monitor and detect abuse.
- */
  user: z.ZodOptional<z.ZodString>;
- }, "strip", z.ZodTypeAny, {
- user?: string | undefined;
- }, {
- user?: string | undefined;
- }>;
+ reasoningEffort: z.ZodOptional<z.ZodString>;
+ }, z.core.$strip>;
  type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;

  declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
@@ -21,36 +14,12 @@ declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
  message: z.ZodString;
  type: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  param: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
- code: z.ZodOptional<z.ZodNullable<z.ZodUnion<[z.ZodString, z.ZodNumber]>>>;
- }, "strip", z.ZodTypeAny, {
- message: string;
- type?: string | null | undefined;
- code?: string | number | null | undefined;
- param?: any;
- }, {
- message: string;
- type?: string | null | undefined;
- code?: string | number | null | undefined;
- param?: any;
- }>;
- }, "strip", z.ZodTypeAny, {
- error: {
- message: string;
- type?: string | null | undefined;
- code?: string | number | null | undefined;
- param?: any;
- };
- }, {
- error: {
- message: string;
- type?: string | null | undefined;
- code?: string | number | null | undefined;
- param?: any;
- };
- }>;
+ code: z.ZodOptional<z.ZodNullable<z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>>>;
+ }, z.core.$strip>;
+ }, z.core.$strip>;
  type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>;
  type ProviderErrorStructure<T> = {
- errorSchema: ZodSchema<T>;
+ errorSchema: ZodType<T>;
  errorToMessage: (error: T) => string;
  isRetryable?: (response: Response, error?: T) => boolean;
  };
@@ -71,7 +40,7 @@ type MetadataExtractor = {
  */
  extractMetadata: ({ parsedBody, }: {
  parsedBody: unknown;
- }) => SharedV2ProviderMetadata | undefined;
+ }) => Promise<SharedV2ProviderMetadata | undefined>;
  /**
  * Creates an extractor for handling streaming responses. The returned object provides
  * methods to process individual chunks and build the final metadata from the accumulated
@@ -106,18 +75,17 @@ type OpenAICompatibleChatConfig = {
  path: string;
  }) => string;
  fetch?: FetchFunction;
+ includeUsage?: boolean;
  errorStructure?: ProviderErrorStructure<any>;
  metadataExtractor?: MetadataExtractor;
  /**
- Default object generation mode that should be used with this model when
- no mode is specified. Should be the mode with the best results for this
- model. `undefined` can be specified if object generation is not supported.
- */
- defaultObjectGenerationMode?: LanguageModelV2ObjectGenerationMode;
- /**
  * Whether the model supports structured outputs.
  */
  supportsStructuredOutputs?: boolean;
+ /**
+ * The supported URLs for the model.
+ */
+ supportedUrls?: () => LanguageModelV2['supportedUrls'];
  };
  declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
@@ -127,9 +95,9 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
  private readonly failedResponseHandler;
  private readonly chunkSchema;
  constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig);
- get defaultObjectGenerationMode(): 'json' | 'tool' | undefined;
  get provider(): string;
  private get providerOptionsName();
+ get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
@@ -137,41 +105,16 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {

  type OpenAICompatibleCompletionModelId = string;
  declare const openaiCompatibleCompletionProviderOptions: z.ZodObject<{
- /**
- * Echo back the prompt in addition to the completion.
- */
  echo: z.ZodOptional<z.ZodBoolean>;
- /**
- * Modify the likelihood of specified tokens appearing in the completion.
- *
- * Accepts a JSON object that maps tokens (specified by their token ID in
- * the GPT tokenizer) to an associated bias value from -100 to 100.
- */
- logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
- /**
- * The suffix that comes after a completion of inserted text.
- */
+ logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
  suffix: z.ZodOptional<z.ZodString>;
- /**
- * A unique identifier representing your end-user, which can help providers to
- * monitor and detect abuse.
- */
  user: z.ZodOptional<z.ZodString>;
- }, "strip", z.ZodTypeAny, {
- user?: string | undefined;
- echo?: boolean | undefined;
- logitBias?: Record<number, number> | undefined;
- suffix?: string | undefined;
- }, {
- user?: string | undefined;
- echo?: boolean | undefined;
- logitBias?: Record<number, number> | undefined;
- suffix?: string | undefined;
- }>;
+ }, z.core.$strip>;
  type OpenAICompatibleCompletionProviderOptions = z.infer<typeof openaiCompatibleCompletionProviderOptions>;

  type OpenAICompatibleCompletionConfig = {
  provider: string;
+ includeUsage?: boolean;
  headers: () => Record<string, string | undefined>;
  url: (options: {
  modelId: string;
@@ -179,10 +122,13 @@ type OpenAICompatibleCompletionConfig = {
  }) => string;
  fetch?: FetchFunction;
  errorStructure?: ProviderErrorStructure<any>;
+ /**
+ * The supported URLs for the model.
+ */
+ supportedUrls?: () => LanguageModelV2['supportedUrls'];
  };
  declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
- readonly defaultObjectGenerationMode: undefined;
  readonly modelId: OpenAICompatibleCompletionModelId;
  private readonly config;
  private readonly failedResponseHandler;
@@ -190,6 +136,7 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2
  constructor(modelId: OpenAICompatibleCompletionModelId, config: OpenAICompatibleCompletionConfig);
  get provider(): string;
  private get providerOptionsName();
+ get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
@@ -197,23 +144,9 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2

  type OpenAICompatibleEmbeddingModelId = string;
  declare const openaiCompatibleEmbeddingProviderOptions: z.ZodObject<{
- /**
- * The number of dimensions the resulting output embeddings should have.
- * Only supported in text-embedding-3 and later models.
- */
  dimensions: z.ZodOptional<z.ZodNumber>;
- /**
- * A unique identifier representing your end-user, which can help providers to
- * monitor and detect abuse.
- */
  user: z.ZodOptional<z.ZodString>;
- }, "strip", z.ZodTypeAny, {
- user?: string | undefined;
- dimensions?: number | undefined;
- }, {
- user?: string | undefined;
- dimensions?: number | undefined;
- }>;
+ }, z.core.$strip>;
  type OpenAICompatibleEmbeddingProviderOptions = z.infer<typeof openaiCompatibleEmbeddingProviderOptions>;

  type OpenAICompatibleEmbeddingConfig = {
@@ -247,17 +180,6 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV2<string>
  }

  type OpenAICompatibleImageModelId = string;
- interface OpenAICompatibleImageSettings {
- /**
- A unique identifier representing your end-user, which can help the provider to
- monitor and detect abuse.
- */
- user?: string;
- /**
- * The maximum number of images to generate.
- */
- maxImagesPerCall?: number;
- }

  type OpenAICompatibleImageModelConfig = {
  provider: string;
@@ -272,15 +194,14 @@ type OpenAICompatibleImageModelConfig = {
  currentDate?: () => Date;
  };
  };
- declare class OpenAICompatibleImageModel implements ImageModelV1 {
+ declare class OpenAICompatibleImageModel implements ImageModelV2 {
  readonly modelId: OpenAICompatibleImageModelId;
- private readonly settings;
  private readonly config;
- readonly specificationVersion = "v1";
- get maxImagesPerCall(): number;
+ readonly specificationVersion = "v2";
+ readonly maxImagesPerCall = 10;
  get provider(): string;
- constructor(modelId: OpenAICompatibleImageModelId, settings: OpenAICompatibleImageSettings, config: OpenAICompatibleImageModelConfig);
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV1['doGenerate']>>>;
+ constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
+ doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV2['doGenerate']>>>;
  }

  interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV2, 'imageModel'> {
@@ -289,7 +210,7 @@ interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPL
  chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV2;
  completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV2;
  textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV2<string>;
- imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV1;
+ imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV2;
  }
  interface OpenAICompatibleProviderSettings {
  /**
@@ -320,10 +241,14 @@ interface OpenAICompatibleProviderSettings {
  or to provide a custom fetch implementation for e.g. testing.
  */
  fetch?: FetchFunction;
+ /**
+ Include usage information in streaming responses.
+ */
+ includeUsage?: boolean;
  }
  /**
  Create an OpenAICompatible provider instance.
  */
  declare function createOpenAICompatible<CHAT_MODEL_IDS extends string, COMPLETION_MODEL_IDS extends string, EMBEDDING_MODEL_IDS extends string, IMAGE_MODEL_IDS extends string>(options: OpenAICompatibleProviderSettings): OpenAICompatibleProvider<CHAT_MODEL_IDS, COMPLETION_MODEL_IDS, EMBEDDING_MODEL_IDS, IMAGE_MODEL_IDS>;

- export { type MetadataExtractor, OpenAICompatibleChatLanguageModel, type OpenAICompatibleChatModelId, OpenAICompatibleCompletionLanguageModel, type OpenAICompatibleCompletionModelId, type OpenAICompatibleCompletionProviderOptions, OpenAICompatibleEmbeddingModel, type OpenAICompatibleEmbeddingModelId, type OpenAICompatibleEmbeddingProviderOptions, type OpenAICompatibleErrorData, OpenAICompatibleImageModel, type OpenAICompatibleImageSettings, type OpenAICompatibleProvider, type OpenAICompatibleProviderOptions, type OpenAICompatibleProviderSettings, type ProviderErrorStructure, createOpenAICompatible };
+ export { type MetadataExtractor, OpenAICompatibleChatLanguageModel, type OpenAICompatibleChatModelId, OpenAICompatibleCompletionLanguageModel, type OpenAICompatibleCompletionModelId, type OpenAICompatibleCompletionProviderOptions, OpenAICompatibleEmbeddingModel, type OpenAICompatibleEmbeddingModelId, type OpenAICompatibleEmbeddingProviderOptions, type OpenAICompatibleErrorData, OpenAICompatibleImageModel, type OpenAICompatibleProvider, type OpenAICompatibleProviderOptions, type OpenAICompatibleProviderSettings, type ProviderErrorStructure, createOpenAICompatible };
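
Taken together, the changes above add a `reasoningEffort` provider option next to `user` and make streaming usage reporting opt-in via `includeUsage`. The following is a minimal usage sketch, not part of the diff: it assumes the standard `name`/`baseURL`/`apiKey` fields of `OpenAICompatibleProviderSettings`, the `generateText` helper from the `ai` package, and that provider options are keyed by the configured provider name. Only `includeUsage` and `reasoningEffort` come from the declarations shown here.

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { generateText } from 'ai';

// Hypothetical provider configuration; endpoint and model IDs are placeholders.
const example = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
  apiKey: process.env.EXAMPLE_API_KEY,
  // New in 1.0.0: include usage information in streaming responses.
  includeUsage: true,
});

const { text } = await generateText({
  model: example.chatModel('example-chat-model'),
  prompt: 'Summarize the release in one sentence.',
  providerOptions: {
    // Assumed to be keyed by the provider name configured above.
    example: { reasoningEffort: 'low', user: 'user-123' },
  },
});

console.log(text);
```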
package/dist/index.d.ts CHANGED
@@ -1,19 +1,12 @@
- import { SharedV2ProviderMetadata, LanguageModelV2, LanguageModelV2ObjectGenerationMode, EmbeddingModelV2, ImageModelV1, ProviderV2 } from '@ai-sdk/provider';
+ import { SharedV2ProviderMetadata, LanguageModelV2, EmbeddingModelV2, ImageModelV2, ProviderV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z, ZodSchema } from 'zod';
+ import { z, ZodType } from 'zod/v4';

  type OpenAICompatibleChatModelId = string;
  declare const openaiCompatibleProviderOptions: z.ZodObject<{
- /**
- * A unique identifier representing your end-user, which can help the provider to
- * monitor and detect abuse.
- */
  user: z.ZodOptional<z.ZodString>;
- }, "strip", z.ZodTypeAny, {
- user?: string | undefined;
- }, {
- user?: string | undefined;
- }>;
+ reasoningEffort: z.ZodOptional<z.ZodString>;
+ }, z.core.$strip>;
  type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;

  declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
@@ -21,36 +14,12 @@ declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
  message: z.ZodString;
  type: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  param: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
- code: z.ZodOptional<z.ZodNullable<z.ZodUnion<[z.ZodString, z.ZodNumber]>>>;
- }, "strip", z.ZodTypeAny, {
- message: string;
- type?: string | null | undefined;
- code?: string | number | null | undefined;
- param?: any;
- }, {
- message: string;
- type?: string | null | undefined;
- code?: string | number | null | undefined;
- param?: any;
- }>;
- }, "strip", z.ZodTypeAny, {
- error: {
- message: string;
- type?: string | null | undefined;
- code?: string | number | null | undefined;
- param?: any;
- };
- }, {
- error: {
- message: string;
- type?: string | null | undefined;
- code?: string | number | null | undefined;
- param?: any;
- };
- }>;
+ code: z.ZodOptional<z.ZodNullable<z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>>>;
+ }, z.core.$strip>;
+ }, z.core.$strip>;
  type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>;
  type ProviderErrorStructure<T> = {
- errorSchema: ZodSchema<T>;
+ errorSchema: ZodType<T>;
  errorToMessage: (error: T) => string;
  isRetryable?: (response: Response, error?: T) => boolean;
  };
@@ -71,7 +40,7 @@ type MetadataExtractor = {
  */
  extractMetadata: ({ parsedBody, }: {
  parsedBody: unknown;
- }) => SharedV2ProviderMetadata | undefined;
+ }) => Promise<SharedV2ProviderMetadata | undefined>;
  /**
  * Creates an extractor for handling streaming responses. The returned object provides
  * methods to process individual chunks and build the final metadata from the accumulated
@@ -106,18 +75,17 @@ type OpenAICompatibleChatConfig = {
  path: string;
  }) => string;
  fetch?: FetchFunction;
+ includeUsage?: boolean;
  errorStructure?: ProviderErrorStructure<any>;
  metadataExtractor?: MetadataExtractor;
  /**
- Default object generation mode that should be used with this model when
- no mode is specified. Should be the mode with the best results for this
- model. `undefined` can be specified if object generation is not supported.
- */
- defaultObjectGenerationMode?: LanguageModelV2ObjectGenerationMode;
- /**
  * Whether the model supports structured outputs.
  */
  supportsStructuredOutputs?: boolean;
+ /**
+ * The supported URLs for the model.
+ */
+ supportedUrls?: () => LanguageModelV2['supportedUrls'];
  };
  declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
@@ -127,9 +95,9 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
  private readonly failedResponseHandler;
  private readonly chunkSchema;
  constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig);
- get defaultObjectGenerationMode(): 'json' | 'tool' | undefined;
  get provider(): string;
  private get providerOptionsName();
+ get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
@@ -137,41 +105,16 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {

  type OpenAICompatibleCompletionModelId = string;
  declare const openaiCompatibleCompletionProviderOptions: z.ZodObject<{
- /**
- * Echo back the prompt in addition to the completion.
- */
  echo: z.ZodOptional<z.ZodBoolean>;
- /**
- * Modify the likelihood of specified tokens appearing in the completion.
- *
- * Accepts a JSON object that maps tokens (specified by their token ID in
- * the GPT tokenizer) to an associated bias value from -100 to 100.
- */
- logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
- /**
- * The suffix that comes after a completion of inserted text.
- */
+ logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
  suffix: z.ZodOptional<z.ZodString>;
- /**
- * A unique identifier representing your end-user, which can help providers to
- * monitor and detect abuse.
- */
  user: z.ZodOptional<z.ZodString>;
- }, "strip", z.ZodTypeAny, {
- user?: string | undefined;
- echo?: boolean | undefined;
- logitBias?: Record<number, number> | undefined;
- suffix?: string | undefined;
- }, {
- user?: string | undefined;
- echo?: boolean | undefined;
- logitBias?: Record<number, number> | undefined;
- suffix?: string | undefined;
- }>;
+ }, z.core.$strip>;
  type OpenAICompatibleCompletionProviderOptions = z.infer<typeof openaiCompatibleCompletionProviderOptions>;

  type OpenAICompatibleCompletionConfig = {
  provider: string;
+ includeUsage?: boolean;
  headers: () => Record<string, string | undefined>;
  url: (options: {
  modelId: string;
@@ -179,10 +122,13 @@ type OpenAICompatibleCompletionConfig = {
  }) => string;
  fetch?: FetchFunction;
  errorStructure?: ProviderErrorStructure<any>;
+ /**
+ * The supported URLs for the model.
+ */
+ supportedUrls?: () => LanguageModelV2['supportedUrls'];
  };
  declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
- readonly defaultObjectGenerationMode: undefined;
  readonly modelId: OpenAICompatibleCompletionModelId;
  private readonly config;
  private readonly failedResponseHandler;
@@ -190,6 +136,7 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2
  constructor(modelId: OpenAICompatibleCompletionModelId, config: OpenAICompatibleCompletionConfig);
  get provider(): string;
  private get providerOptionsName();
+ get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
  private getArgs;
  doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
  doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
@@ -197,23 +144,9 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2

  type OpenAICompatibleEmbeddingModelId = string;
  declare const openaiCompatibleEmbeddingProviderOptions: z.ZodObject<{
- /**
- * The number of dimensions the resulting output embeddings should have.
- * Only supported in text-embedding-3 and later models.
- */
  dimensions: z.ZodOptional<z.ZodNumber>;
- /**
- * A unique identifier representing your end-user, which can help providers to
- * monitor and detect abuse.
- */
  user: z.ZodOptional<z.ZodString>;
- }, "strip", z.ZodTypeAny, {
- user?: string | undefined;
- dimensions?: number | undefined;
- }, {
- user?: string | undefined;
- dimensions?: number | undefined;
- }>;
+ }, z.core.$strip>;
  type OpenAICompatibleEmbeddingProviderOptions = z.infer<typeof openaiCompatibleEmbeddingProviderOptions>;

  type OpenAICompatibleEmbeddingConfig = {
@@ -247,17 +180,6 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV2<string>
  }

  type OpenAICompatibleImageModelId = string;
- interface OpenAICompatibleImageSettings {
- /**
- A unique identifier representing your end-user, which can help the provider to
- monitor and detect abuse.
- */
- user?: string;
- /**
- * The maximum number of images to generate.
- */
- maxImagesPerCall?: number;
- }

  type OpenAICompatibleImageModelConfig = {
  provider: string;
@@ -272,15 +194,14 @@ type OpenAICompatibleImageModelConfig = {
  currentDate?: () => Date;
  };
  };
- declare class OpenAICompatibleImageModel implements ImageModelV1 {
+ declare class OpenAICompatibleImageModel implements ImageModelV2 {
  readonly modelId: OpenAICompatibleImageModelId;
- private readonly settings;
  private readonly config;
- readonly specificationVersion = "v1";
- get maxImagesPerCall(): number;
+ readonly specificationVersion = "v2";
+ readonly maxImagesPerCall = 10;
  get provider(): string;
- constructor(modelId: OpenAICompatibleImageModelId, settings: OpenAICompatibleImageSettings, config: OpenAICompatibleImageModelConfig);
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV1['doGenerate']>>>;
+ constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
+ doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV2['doGenerate']>>>;
  }

  interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV2, 'imageModel'> {
@@ -289,7 +210,7 @@ interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPL
  chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV2;
  completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV2;
  textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV2<string>;
- imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV1;
+ imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV2;
  }
  interface OpenAICompatibleProviderSettings {
  /**
@@ -320,10 +241,14 @@ interface OpenAICompatibleProviderSettings {
  or to provide a custom fetch implementation for e.g. testing.
  */
  fetch?: FetchFunction;
+ /**
+ Include usage information in streaming responses.
+ */
+ includeUsage?: boolean;
  }
  /**
  Create an OpenAICompatible provider instance.
  */
  declare function createOpenAICompatible<CHAT_MODEL_IDS extends string, COMPLETION_MODEL_IDS extends string, EMBEDDING_MODEL_IDS extends string, IMAGE_MODEL_IDS extends string>(options: OpenAICompatibleProviderSettings): OpenAICompatibleProvider<CHAT_MODEL_IDS, COMPLETION_MODEL_IDS, EMBEDDING_MODEL_IDS, IMAGE_MODEL_IDS>;

- export { type MetadataExtractor, OpenAICompatibleChatLanguageModel, type OpenAICompatibleChatModelId, OpenAICompatibleCompletionLanguageModel, type OpenAICompatibleCompletionModelId, type OpenAICompatibleCompletionProviderOptions, OpenAICompatibleEmbeddingModel, type OpenAICompatibleEmbeddingModelId, type OpenAICompatibleEmbeddingProviderOptions, type OpenAICompatibleErrorData, OpenAICompatibleImageModel, type OpenAICompatibleImageSettings, type OpenAICompatibleProvider, type OpenAICompatibleProviderOptions, type OpenAICompatibleProviderSettings, type ProviderErrorStructure, createOpenAICompatible };
+ export { type MetadataExtractor, OpenAICompatibleChatLanguageModel, type OpenAICompatibleChatModelId, OpenAICompatibleCompletionLanguageModel, type OpenAICompatibleCompletionModelId, type OpenAICompatibleCompletionProviderOptions, OpenAICompatibleEmbeddingModel, type OpenAICompatibleEmbeddingModelId, type OpenAICompatibleEmbeddingProviderOptions, type OpenAICompatibleErrorData, OpenAICompatibleImageModel, type OpenAICompatibleProvider, type OpenAICompatibleProviderOptions, type OpenAICompatibleProviderSettings, type ProviderErrorStructure, createOpenAICompatible };
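
One practical consequence of the `zod` → `zod/v4` change in both declaration files: a custom `ProviderErrorStructure` now needs a schema that satisfies `ZodType` from the `zod/v4` entry point rather than `ZodSchema` from `zod`. Below is a hedged sketch with a hypothetical error payload shape; only the `ProviderErrorStructure` fields themselves come from the declarations above.

```ts
import { z } from 'zod/v4';
import type { ProviderErrorStructure } from '@ai-sdk/openai-compatible';

// Hypothetical error body returned by an OpenAI-compatible endpoint.
const exampleErrorSchema = z.object({
  error: z.object({
    message: z.string(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
});

type ExampleErrorData = z.infer<typeof exampleErrorSchema>;

// errorSchema is typed as ZodType<ExampleErrorData> (zod/v4), per the declarations above.
const exampleErrorStructure: ProviderErrorStructure<ExampleErrorData> = {
  errorSchema: exampleErrorSchema,
  errorToMessage: error => error.error.message,
  // Retry policy is an assumption for illustration, not part of the diff.
  isRetryable: response => response.status === 429 || response.status >= 500,
};
```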