@ai-sdk/openai-compatible 3.0.0-beta.1 → 3.0.0-beta.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +77 -8
- package/dist/index.d.mts +27 -27
- package/dist/index.d.ts +27 -27
- package/dist/index.js +12 -11
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +10 -8
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +10 -10
- package/dist/internal/index.d.ts +10 -10
- package/dist/internal/index.js +3 -3
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -5
- package/src/chat/convert-openai-compatible-chat-usage.ts +2 -2
- package/src/chat/convert-to-openai-compatible-chat-messages.ts +4 -4
- package/src/chat/map-openai-compatible-finish-reason.ts +2 -2
- package/src/chat/openai-compatible-api-types.ts +2 -4
- package/src/chat/openai-compatible-chat-language-model.ts +30 -24
- package/src/chat/openai-compatible-metadata-extractor.ts +3 -3
- package/src/chat/openai-compatible-prepare-tools.ts +6 -6
- package/src/completion/convert-openai-compatible-completion-usage.ts +2 -2
- package/src/completion/convert-to-openai-compatible-completion-prompt.ts +2 -2
- package/src/completion/map-openai-compatible-finish-reason.ts +2 -2
- package/src/completion/openai-compatible-completion-language-model.ts +20 -22
- package/src/embedding/openai-compatible-embedding-model.ts +7 -7
- package/src/image/openai-compatible-image-model.ts +11 -11
- package/src/openai-compatible-provider.ts +13 -13
package/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,74 @@
|
|
|
1
1
|
# @ai-sdk/openai-compatible
|
|
2
2
|
|
|
3
|
+
## 3.0.0-beta.10
|
|
4
|
+
|
|
5
|
+
### Patch Changes
|
|
6
|
+
|
|
7
|
+
- Updated dependencies [1f509d4]
|
|
8
|
+
- @ai-sdk/provider-utils@5.0.0-beta.7
|
|
9
|
+
- @ai-sdk/provider@4.0.0-beta.5
|
|
10
|
+
|
|
11
|
+
## 3.0.0-beta.9
|
|
12
|
+
|
|
13
|
+
### Patch Changes
|
|
14
|
+
|
|
15
|
+
- 74d520f: feat: migrate providers to support new top-level `reasoning` parameter
|
|
16
|
+
|
|
17
|
+
## 3.0.0-beta.8
|
|
18
|
+
|
|
19
|
+
### Patch Changes
|
|
20
|
+
|
|
21
|
+
- Updated dependencies [3887c70]
|
|
22
|
+
- @ai-sdk/provider-utils@5.0.0-beta.6
|
|
23
|
+
- @ai-sdk/provider@4.0.0-beta.4
|
|
24
|
+
|
|
25
|
+
## 3.0.0-beta.7
|
|
26
|
+
|
|
27
|
+
### Patch Changes
|
|
28
|
+
|
|
29
|
+
- Updated dependencies [776b617]
|
|
30
|
+
- @ai-sdk/provider-utils@5.0.0-beta.5
|
|
31
|
+
- @ai-sdk/provider@4.0.0-beta.3
|
|
32
|
+
|
|
33
|
+
## 3.0.0-beta.6
|
|
34
|
+
|
|
35
|
+
### Patch Changes
|
|
36
|
+
|
|
37
|
+
- Updated dependencies [61753c3]
|
|
38
|
+
- @ai-sdk/provider-utils@5.0.0-beta.4
|
|
39
|
+
|
|
40
|
+
## 3.0.0-beta.5
|
|
41
|
+
|
|
42
|
+
### Patch Changes
|
|
43
|
+
|
|
44
|
+
- Updated dependencies [f7d4f01]
|
|
45
|
+
- @ai-sdk/provider-utils@5.0.0-beta.3
|
|
46
|
+
- @ai-sdk/provider@4.0.0-beta.2
|
|
47
|
+
|
|
48
|
+
## 3.0.0-beta.4
|
|
49
|
+
|
|
50
|
+
### Patch Changes
|
|
51
|
+
|
|
52
|
+
- Updated dependencies [5c2a5a2]
|
|
53
|
+
- @ai-sdk/provider@4.0.0-beta.1
|
|
54
|
+
- @ai-sdk/provider-utils@5.0.0-beta.2
|
|
55
|
+
|
|
56
|
+
## 3.0.0-beta.3
|
|
57
|
+
|
|
58
|
+
### Patch Changes
|
|
59
|
+
|
|
60
|
+
- 8f3e1da: chore(openai-compat): update v3 specs to v4
|
|
61
|
+
|
|
62
|
+
## 3.0.0-beta.2
|
|
63
|
+
|
|
64
|
+
### Patch Changes
|
|
65
|
+
|
|
66
|
+
- 45b3d76: fix(security): prevent streaming tool calls from finalizing on parsable partial JSON
|
|
67
|
+
|
|
68
|
+
Streaming tool call arguments were finalized using `isParsableJson()` as a heuristic for completion. If partial accumulated JSON happened to be valid JSON before all chunks arrived, the tool call would be executed with incomplete arguments. Tool call finalization now only occurs in `flush()` after the stream is fully consumed.
|
|
69
|
+
|
|
70
|
+
- f7295cb: revert incorrect fix https://github.com/vercel/ai/pull/13172
|
|
71
|
+
|
|
3
72
|
## 3.0.0-beta.1
|
|
4
73
|
|
|
5
74
|
### Patch Changes
|
|
@@ -277,13 +346,13 @@
|
|
|
277
346
|
Before
|
|
278
347
|
|
|
279
348
|
```ts
|
|
280
|
-
model.textEmbeddingModel(
|
|
349
|
+
model.textEmbeddingModel("my-model-id");
|
|
281
350
|
```
|
|
282
351
|
|
|
283
352
|
After
|
|
284
353
|
|
|
285
354
|
```ts
|
|
286
|
-
model.embeddingModel(
|
|
355
|
+
model.embeddingModel("my-model-id");
|
|
287
356
|
```
|
|
288
357
|
|
|
289
358
|
- 2625a04: feat(openai): update spec for MCP approval
|
|
@@ -498,13 +567,13 @@
|
|
|
498
567
|
Before
|
|
499
568
|
|
|
500
569
|
```ts
|
|
501
|
-
model.textEmbeddingModel(
|
|
570
|
+
model.textEmbeddingModel("my-model-id");
|
|
502
571
|
```
|
|
503
572
|
|
|
504
573
|
After
|
|
505
574
|
|
|
506
575
|
```ts
|
|
507
|
-
model.embeddingModel(
|
|
576
|
+
model.embeddingModel("my-model-id");
|
|
508
577
|
```
|
|
509
578
|
|
|
510
579
|
- Updated dependencies [8d9e8ad]
|
|
@@ -940,7 +1009,7 @@
|
|
|
940
1009
|
|
|
941
1010
|
```js
|
|
942
1011
|
await generateImage({
|
|
943
|
-
model: luma.image(
|
|
1012
|
+
model: luma.image("photon-flash-1", {
|
|
944
1013
|
maxImagesPerCall: 5,
|
|
945
1014
|
pollIntervalMillis: 500,
|
|
946
1015
|
}),
|
|
@@ -953,7 +1022,7 @@
|
|
|
953
1022
|
|
|
954
1023
|
```js
|
|
955
1024
|
await generateImage({
|
|
956
|
-
model: luma.image(
|
|
1025
|
+
model: luma.image("photon-flash-1"),
|
|
957
1026
|
prompt,
|
|
958
1027
|
n: 10,
|
|
959
1028
|
maxImagesPerCall: 5,
|
|
@@ -1222,7 +1291,7 @@
|
|
|
1222
1291
|
|
|
1223
1292
|
```js
|
|
1224
1293
|
await generateImage({
|
|
1225
|
-
model: luma.image(
|
|
1294
|
+
model: luma.image("photon-flash-1", {
|
|
1226
1295
|
maxImagesPerCall: 5,
|
|
1227
1296
|
pollIntervalMillis: 500,
|
|
1228
1297
|
}),
|
|
@@ -1235,7 +1304,7 @@
|
|
|
1235
1304
|
|
|
1236
1305
|
```js
|
|
1237
1306
|
await generateImage({
|
|
1238
|
-
model: luma.image(
|
|
1307
|
+
model: luma.image("photon-flash-1"),
|
|
1239
1308
|
prompt,
|
|
1240
1309
|
n: 10,
|
|
1241
1310
|
maxImagesPerCall: 5,
|
package/dist/index.d.mts
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import {
|
|
1
|
+
import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
|
|
2
2
|
import { FetchFunction } from '@ai-sdk/provider-utils';
|
|
3
3
|
import { ZodType, z } from 'zod/v4';
|
|
4
4
|
|
|
@@ -42,7 +42,7 @@ type MetadataExtractor = {
|
|
|
42
42
|
*/
|
|
43
43
|
extractMetadata: ({ parsedBody, }: {
|
|
44
44
|
parsedBody: unknown;
|
|
45
|
-
}) => Promise<
|
|
45
|
+
}) => Promise<SharedV4ProviderMetadata | undefined>;
|
|
46
46
|
/**
|
|
47
47
|
* Creates an extractor for handling streaming responses. The returned object provides
|
|
48
48
|
* methods to process individual chunks and build the final metadata from the accumulated
|
|
@@ -65,7 +65,7 @@ type MetadataExtractor = {
|
|
|
65
65
|
* @returns Provider-specific metadata or undefined if no metadata is available.
|
|
66
66
|
* The metadata should be under a key indicating the provider id.
|
|
67
67
|
*/
|
|
68
|
-
buildMetadata():
|
|
68
|
+
buildMetadata(): SharedV4ProviderMetadata | undefined;
|
|
69
69
|
};
|
|
70
70
|
};
|
|
71
71
|
|
|
@@ -87,7 +87,7 @@ type OpenAICompatibleChatConfig = {
|
|
|
87
87
|
/**
|
|
88
88
|
* The supported URLs for the model.
|
|
89
89
|
*/
|
|
90
|
-
supportedUrls?: () =>
|
|
90
|
+
supportedUrls?: () => LanguageModelV4['supportedUrls'];
|
|
91
91
|
/**
|
|
92
92
|
* Optional function to transform the request body before sending it to the API.
|
|
93
93
|
* This is useful for proxy providers that may require a different request format
|
|
@@ -95,8 +95,8 @@ type OpenAICompatibleChatConfig = {
|
|
|
95
95
|
*/
|
|
96
96
|
transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
|
|
97
97
|
};
|
|
98
|
-
declare class OpenAICompatibleChatLanguageModel implements
|
|
99
|
-
readonly specificationVersion = "
|
|
98
|
+
declare class OpenAICompatibleChatLanguageModel implements LanguageModelV4 {
|
|
99
|
+
readonly specificationVersion = "v4";
|
|
100
100
|
readonly supportsStructuredOutputs: boolean;
|
|
101
101
|
readonly modelId: OpenAICompatibleChatModelId;
|
|
102
102
|
private readonly config;
|
|
@@ -108,8 +108,8 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
|
|
|
108
108
|
get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
|
|
109
109
|
private transformRequestBody;
|
|
110
110
|
private getArgs;
|
|
111
|
-
doGenerate(options:
|
|
112
|
-
doStream(options:
|
|
111
|
+
doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
|
|
112
|
+
doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
|
|
113
113
|
}
|
|
114
114
|
|
|
115
115
|
type OpenAICompatibleCompletionModelId = string;
|
|
@@ -134,10 +134,10 @@ type OpenAICompatibleCompletionConfig = {
|
|
|
134
134
|
/**
|
|
135
135
|
* The supported URLs for the model.
|
|
136
136
|
*/
|
|
137
|
-
supportedUrls?: () =>
|
|
137
|
+
supportedUrls?: () => LanguageModelV4['supportedUrls'];
|
|
138
138
|
};
|
|
139
|
-
declare class OpenAICompatibleCompletionLanguageModel implements
|
|
140
|
-
readonly specificationVersion = "
|
|
139
|
+
declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV4 {
|
|
140
|
+
readonly specificationVersion = "v4";
|
|
141
141
|
readonly modelId: OpenAICompatibleCompletionModelId;
|
|
142
142
|
private readonly config;
|
|
143
143
|
private readonly failedResponseHandler;
|
|
@@ -147,8 +147,8 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV3
|
|
|
147
147
|
private get providerOptionsName();
|
|
148
148
|
get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
|
|
149
149
|
private getArgs;
|
|
150
|
-
doGenerate(options:
|
|
151
|
-
doStream(options:
|
|
150
|
+
doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
|
|
151
|
+
doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
|
|
152
152
|
}
|
|
153
153
|
|
|
154
154
|
type OpenAICompatibleEmbeddingModelId = string;
|
|
@@ -176,8 +176,8 @@ type OpenAICompatibleEmbeddingConfig = {
|
|
|
176
176
|
fetch?: FetchFunction;
|
|
177
177
|
errorStructure?: ProviderErrorStructure<any>;
|
|
178
178
|
};
|
|
179
|
-
declare class OpenAICompatibleEmbeddingModel implements
|
|
180
|
-
readonly specificationVersion = "
|
|
179
|
+
declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV4 {
|
|
180
|
+
readonly specificationVersion = "v4";
|
|
181
181
|
readonly modelId: OpenAICompatibleEmbeddingModelId;
|
|
182
182
|
private readonly config;
|
|
183
183
|
get provider(): string;
|
|
@@ -185,7 +185,7 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV3 {
|
|
|
185
185
|
get supportsParallelCalls(): boolean;
|
|
186
186
|
constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
|
|
187
187
|
private get providerOptionsName();
|
|
188
|
-
doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<
|
|
188
|
+
doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
|
|
189
189
|
}
|
|
190
190
|
|
|
191
191
|
type OpenAICompatibleImageModelId = string;
|
|
@@ -203,10 +203,10 @@ type OpenAICompatibleImageModelConfig = {
|
|
|
203
203
|
currentDate?: () => Date;
|
|
204
204
|
};
|
|
205
205
|
};
|
|
206
|
-
declare class OpenAICompatibleImageModel implements
|
|
206
|
+
declare class OpenAICompatibleImageModel implements ImageModelV4 {
|
|
207
207
|
readonly modelId: OpenAICompatibleImageModelId;
|
|
208
208
|
private readonly config;
|
|
209
|
-
readonly specificationVersion = "
|
|
209
|
+
readonly specificationVersion = "v4";
|
|
210
210
|
readonly maxImagesPerCall = 10;
|
|
211
211
|
get provider(): string;
|
|
212
212
|
/**
|
|
@@ -215,20 +215,20 @@ declare class OpenAICompatibleImageModel implements ImageModelV3 {
|
|
|
215
215
|
private get providerOptionsKey();
|
|
216
216
|
constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
|
|
217
217
|
private getArgs;
|
|
218
|
-
doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<
|
|
218
|
+
doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
|
|
219
219
|
}
|
|
220
220
|
|
|
221
|
-
interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<
|
|
222
|
-
(modelId: CHAT_MODEL_IDS):
|
|
223
|
-
languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>):
|
|
224
|
-
chatModel(modelId: CHAT_MODEL_IDS):
|
|
225
|
-
completionModel(modelId: COMPLETION_MODEL_IDS):
|
|
226
|
-
embeddingModel(modelId: EMBEDDING_MODEL_IDS):
|
|
221
|
+
interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV4, 'imageModel'> {
|
|
222
|
+
(modelId: CHAT_MODEL_IDS): LanguageModelV4;
|
|
223
|
+
languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV4;
|
|
224
|
+
chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV4;
|
|
225
|
+
completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV4;
|
|
226
|
+
embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
|
|
227
227
|
/**
|
|
228
228
|
* @deprecated Use `embeddingModel` instead.
|
|
229
229
|
*/
|
|
230
|
-
textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS):
|
|
231
|
-
imageModel(modelId: IMAGE_MODEL_IDS):
|
|
230
|
+
textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
|
|
231
|
+
imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV4;
|
|
232
232
|
}
|
|
233
233
|
interface OpenAICompatibleProviderSettings {
|
|
234
234
|
/**
|
package/dist/index.d.ts
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import {
|
|
1
|
+
import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
|
|
2
2
|
import { FetchFunction } from '@ai-sdk/provider-utils';
|
|
3
3
|
import { ZodType, z } from 'zod/v4';
|
|
4
4
|
|
|
@@ -42,7 +42,7 @@ type MetadataExtractor = {
|
|
|
42
42
|
*/
|
|
43
43
|
extractMetadata: ({ parsedBody, }: {
|
|
44
44
|
parsedBody: unknown;
|
|
45
|
-
}) => Promise<
|
|
45
|
+
}) => Promise<SharedV4ProviderMetadata | undefined>;
|
|
46
46
|
/**
|
|
47
47
|
* Creates an extractor for handling streaming responses. The returned object provides
|
|
48
48
|
* methods to process individual chunks and build the final metadata from the accumulated
|
|
@@ -65,7 +65,7 @@ type MetadataExtractor = {
|
|
|
65
65
|
* @returns Provider-specific metadata or undefined if no metadata is available.
|
|
66
66
|
* The metadata should be under a key indicating the provider id.
|
|
67
67
|
*/
|
|
68
|
-
buildMetadata():
|
|
68
|
+
buildMetadata(): SharedV4ProviderMetadata | undefined;
|
|
69
69
|
};
|
|
70
70
|
};
|
|
71
71
|
|
|
@@ -87,7 +87,7 @@ type OpenAICompatibleChatConfig = {
|
|
|
87
87
|
/**
|
|
88
88
|
* The supported URLs for the model.
|
|
89
89
|
*/
|
|
90
|
-
supportedUrls?: () =>
|
|
90
|
+
supportedUrls?: () => LanguageModelV4['supportedUrls'];
|
|
91
91
|
/**
|
|
92
92
|
* Optional function to transform the request body before sending it to the API.
|
|
93
93
|
* This is useful for proxy providers that may require a different request format
|
|
@@ -95,8 +95,8 @@ type OpenAICompatibleChatConfig = {
|
|
|
95
95
|
*/
|
|
96
96
|
transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
|
|
97
97
|
};
|
|
98
|
-
declare class OpenAICompatibleChatLanguageModel implements
|
|
99
|
-
readonly specificationVersion = "
|
|
98
|
+
declare class OpenAICompatibleChatLanguageModel implements LanguageModelV4 {
|
|
99
|
+
readonly specificationVersion = "v4";
|
|
100
100
|
readonly supportsStructuredOutputs: boolean;
|
|
101
101
|
readonly modelId: OpenAICompatibleChatModelId;
|
|
102
102
|
private readonly config;
|
|
@@ -108,8 +108,8 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
|
|
|
108
108
|
get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
|
|
109
109
|
private transformRequestBody;
|
|
110
110
|
private getArgs;
|
|
111
|
-
doGenerate(options:
|
|
112
|
-
doStream(options:
|
|
111
|
+
doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
|
|
112
|
+
doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
|
|
113
113
|
}
|
|
114
114
|
|
|
115
115
|
type OpenAICompatibleCompletionModelId = string;
|
|
@@ -134,10 +134,10 @@ type OpenAICompatibleCompletionConfig = {
|
|
|
134
134
|
/**
|
|
135
135
|
* The supported URLs for the model.
|
|
136
136
|
*/
|
|
137
|
-
supportedUrls?: () =>
|
|
137
|
+
supportedUrls?: () => LanguageModelV4['supportedUrls'];
|
|
138
138
|
};
|
|
139
|
-
declare class OpenAICompatibleCompletionLanguageModel implements
|
|
140
|
-
readonly specificationVersion = "
|
|
139
|
+
declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV4 {
|
|
140
|
+
readonly specificationVersion = "v4";
|
|
141
141
|
readonly modelId: OpenAICompatibleCompletionModelId;
|
|
142
142
|
private readonly config;
|
|
143
143
|
private readonly failedResponseHandler;
|
|
@@ -147,8 +147,8 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV3
|
|
|
147
147
|
private get providerOptionsName();
|
|
148
148
|
get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
|
|
149
149
|
private getArgs;
|
|
150
|
-
doGenerate(options:
|
|
151
|
-
doStream(options:
|
|
150
|
+
doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
|
|
151
|
+
doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
|
|
152
152
|
}
|
|
153
153
|
|
|
154
154
|
type OpenAICompatibleEmbeddingModelId = string;
|
|
@@ -176,8 +176,8 @@ type OpenAICompatibleEmbeddingConfig = {
|
|
|
176
176
|
fetch?: FetchFunction;
|
|
177
177
|
errorStructure?: ProviderErrorStructure<any>;
|
|
178
178
|
};
|
|
179
|
-
declare class OpenAICompatibleEmbeddingModel implements
|
|
180
|
-
readonly specificationVersion = "
|
|
179
|
+
declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV4 {
|
|
180
|
+
readonly specificationVersion = "v4";
|
|
181
181
|
readonly modelId: OpenAICompatibleEmbeddingModelId;
|
|
182
182
|
private readonly config;
|
|
183
183
|
get provider(): string;
|
|
@@ -185,7 +185,7 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV3 {
|
|
|
185
185
|
get supportsParallelCalls(): boolean;
|
|
186
186
|
constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
|
|
187
187
|
private get providerOptionsName();
|
|
188
|
-
doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<
|
|
188
|
+
doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
|
|
189
189
|
}
|
|
190
190
|
|
|
191
191
|
type OpenAICompatibleImageModelId = string;
|
|
@@ -203,10 +203,10 @@ type OpenAICompatibleImageModelConfig = {
|
|
|
203
203
|
currentDate?: () => Date;
|
|
204
204
|
};
|
|
205
205
|
};
|
|
206
|
-
declare class OpenAICompatibleImageModel implements
|
|
206
|
+
declare class OpenAICompatibleImageModel implements ImageModelV4 {
|
|
207
207
|
readonly modelId: OpenAICompatibleImageModelId;
|
|
208
208
|
private readonly config;
|
|
209
|
-
readonly specificationVersion = "
|
|
209
|
+
readonly specificationVersion = "v4";
|
|
210
210
|
readonly maxImagesPerCall = 10;
|
|
211
211
|
get provider(): string;
|
|
212
212
|
/**
|
|
@@ -215,20 +215,20 @@ declare class OpenAICompatibleImageModel implements ImageModelV3 {
|
|
|
215
215
|
private get providerOptionsKey();
|
|
216
216
|
constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
|
|
217
217
|
private getArgs;
|
|
218
|
-
doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<
|
|
218
|
+
doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
|
|
219
219
|
}
|
|
220
220
|
|
|
221
|
-
interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<
|
|
222
|
-
(modelId: CHAT_MODEL_IDS):
|
|
223
|
-
languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>):
|
|
224
|
-
chatModel(modelId: CHAT_MODEL_IDS):
|
|
225
|
-
completionModel(modelId: COMPLETION_MODEL_IDS):
|
|
226
|
-
embeddingModel(modelId: EMBEDDING_MODEL_IDS):
|
|
221
|
+
interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV4, 'imageModel'> {
|
|
222
|
+
(modelId: CHAT_MODEL_IDS): LanguageModelV4;
|
|
223
|
+
languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV4;
|
|
224
|
+
chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV4;
|
|
225
|
+
completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV4;
|
|
226
|
+
embeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
|
|
227
227
|
/**
|
|
228
228
|
* @deprecated Use `embeddingModel` instead.
|
|
229
229
|
*/
|
|
230
|
-
textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS):
|
|
231
|
-
imageModel(modelId: IMAGE_MODEL_IDS):
|
|
230
|
+
textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV4;
|
|
231
|
+
imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV4;
|
|
232
232
|
}
|
|
233
233
|
interface OpenAICompatibleProviderSettings {
|
|
234
234
|
/**
|
package/dist/index.js
CHANGED
|
@@ -18,8 +18,8 @@ var __copyProps = (to, from, except, desc) => {
|
|
|
18
18
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
19
|
|
|
20
20
|
// src/index.ts
|
|
21
|
-
var
|
|
22
|
-
__export(
|
|
21
|
+
var index_exports = {};
|
|
22
|
+
__export(index_exports, {
|
|
23
23
|
OpenAICompatibleChatLanguageModel: () => OpenAICompatibleChatLanguageModel,
|
|
24
24
|
OpenAICompatibleCompletionLanguageModel: () => OpenAICompatibleCompletionLanguageModel,
|
|
25
25
|
OpenAICompatibleEmbeddingModel: () => OpenAICompatibleEmbeddingModel,
|
|
@@ -27,7 +27,7 @@ __export(src_exports, {
|
|
|
27
27
|
VERSION: () => VERSION,
|
|
28
28
|
createOpenAICompatible: () => createOpenAICompatible
|
|
29
29
|
});
|
|
30
|
-
module.exports = __toCommonJS(
|
|
30
|
+
module.exports = __toCommonJS(index_exports);
|
|
31
31
|
|
|
32
32
|
// src/chat/openai-compatible-chat-language-model.ts
|
|
33
33
|
var import_provider3 = require("@ai-sdk/provider");
|
|
@@ -408,7 +408,7 @@ function prepareTools({
|
|
|
408
408
|
var OpenAICompatibleChatLanguageModel = class {
|
|
409
409
|
// type inferred via constructor
|
|
410
410
|
constructor(modelId, config) {
|
|
411
|
-
this.specificationVersion = "
|
|
411
|
+
this.specificationVersion = "v4";
|
|
412
412
|
var _a, _b;
|
|
413
413
|
this.modelId = modelId;
|
|
414
414
|
this.config = config;
|
|
@@ -441,6 +441,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
|
|
441
441
|
topK,
|
|
442
442
|
frequencyPenalty,
|
|
443
443
|
presencePenalty,
|
|
444
|
+
reasoning,
|
|
444
445
|
providerOptions,
|
|
445
446
|
stopSequences,
|
|
446
447
|
responseFormat,
|
|
@@ -448,7 +449,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
|
|
448
449
|
toolChoice,
|
|
449
450
|
tools
|
|
450
451
|
}) {
|
|
451
|
-
var _a, _b, _c, _d, _e;
|
|
452
|
+
var _a, _b, _c, _d, _e, _f;
|
|
452
453
|
const warnings = [];
|
|
453
454
|
const deprecatedOptions = await (0, import_provider_utils2.parseProviderOptions)({
|
|
454
455
|
provider: "openai-compatible",
|
|
@@ -525,7 +526,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
|
|
525
526
|
).includes(key)
|
|
526
527
|
)
|
|
527
528
|
),
|
|
528
|
-
reasoning_effort: compatibleOptions.reasoningEffort,
|
|
529
|
+
reasoning_effort: (_f = compatibleOptions.reasoningEffort) != null ? _f : (0, import_provider_utils2.isCustomReasoning)(reasoning) && reasoning !== "none" ? reasoning : void 0,
|
|
529
530
|
verbosity: compatibleOptions.textVerbosity,
|
|
530
531
|
// messages:
|
|
531
532
|
messages: convertToOpenAICompatibleChatMessages(prompt),
|
|
@@ -1148,7 +1149,7 @@ var openaiCompatibleLanguageModelCompletionOptions = import_v44.z.object({
|
|
|
1148
1149
|
var OpenAICompatibleCompletionLanguageModel = class {
|
|
1149
1150
|
// type inferred via constructor
|
|
1150
1151
|
constructor(modelId, config) {
|
|
1151
|
-
this.specificationVersion = "
|
|
1152
|
+
this.specificationVersion = "v4";
|
|
1152
1153
|
var _a;
|
|
1153
1154
|
this.modelId = modelId;
|
|
1154
1155
|
this.config = config;
|
|
@@ -1427,7 +1428,7 @@ var openaiCompatibleEmbeddingModelOptions = import_v46.z.object({
|
|
|
1427
1428
|
// src/embedding/openai-compatible-embedding-model.ts
|
|
1428
1429
|
var OpenAICompatibleEmbeddingModel = class {
|
|
1429
1430
|
constructor(modelId, config) {
|
|
1430
|
-
this.specificationVersion = "
|
|
1431
|
+
this.specificationVersion = "v4";
|
|
1431
1432
|
this.modelId = modelId;
|
|
1432
1433
|
this.config = config;
|
|
1433
1434
|
}
|
|
@@ -1533,7 +1534,7 @@ var OpenAICompatibleImageModel = class {
|
|
|
1533
1534
|
constructor(modelId, config) {
|
|
1534
1535
|
this.modelId = modelId;
|
|
1535
1536
|
this.config = config;
|
|
1536
|
-
this.specificationVersion = "
|
|
1537
|
+
this.specificationVersion = "v4";
|
|
1537
1538
|
this.maxImagesPerCall = 10;
|
|
1538
1539
|
}
|
|
1539
1540
|
get provider() {
|
|
@@ -1665,7 +1666,7 @@ function toCamelCase(str) {
|
|
|
1665
1666
|
var import_provider_utils6 = require("@ai-sdk/provider-utils");
|
|
1666
1667
|
|
|
1667
1668
|
// src/version.ts
|
|
1668
|
-
var VERSION = true ? "3.0.0-beta.
|
|
1669
|
+
var VERSION = true ? "3.0.0-beta.10" : "0.0.0-test";
|
|
1669
1670
|
|
|
1670
1671
|
// src/openai-compatible-provider.ts
|
|
1671
1672
|
function createOpenAICompatible(options) {
|
|
@@ -1705,7 +1706,7 @@ function createOpenAICompatible(options) {
|
|
|
1705
1706
|
});
|
|
1706
1707
|
const createImageModel = (modelId) => new OpenAICompatibleImageModel(modelId, getCommonModelConfig("image"));
|
|
1707
1708
|
const provider = (modelId) => createLanguageModel(modelId);
|
|
1708
|
-
provider.specificationVersion = "
|
|
1709
|
+
provider.specificationVersion = "v4";
|
|
1709
1710
|
provider.languageModel = createLanguageModel;
|
|
1710
1711
|
provider.chatModel = createChatModel;
|
|
1711
1712
|
provider.completionModel = createCompletionModel;
|