@ai-sdk/openai-compatible 1.0.0-canary.5 → 1.0.0-canary.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +36 -0
- package/dist/index.d.mts +68 -62
- package/dist/index.d.ts +68 -62
- package/dist/index.js +236 -199
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +239 -199
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -3
- package/dist/internal/index.d.ts +3 -3
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,41 @@
 # @ai-sdk/openai-compatible
 
+## 1.0.0-canary.7
+
+### Patch Changes
+
+- fa49207: feat(providers/openai-compatible): convert to providerOptions
+- 26735b5: chore(embedding-model): add v2 interface
+- 443d8ec: feat(embedding-model-v2): add response body field
+- fd65bc6: chore(embedding-model-v2): rename rawResponse to response
+- Updated dependencies [26735b5]
+- Updated dependencies [443d8ec]
+- Updated dependencies [14c9410]
+- Updated dependencies [d9c98f4]
+- Updated dependencies [c4a2fec]
+- Updated dependencies [0054544]
+- Updated dependencies [9e9c809]
+- Updated dependencies [32831c6]
+- Updated dependencies [d0f9495]
+- Updated dependencies [fd65bc6]
+- Updated dependencies [393138b]
+- Updated dependencies [7182d14]
+  - @ai-sdk/provider@2.0.0-canary.6
+  - @ai-sdk/provider-utils@3.0.0-canary.7
+
+## 1.0.0-canary.6
+
+### Patch Changes
+
+- 6db02c9: chore(openai-compatible): remove simulateStreaming
+- Updated dependencies [411e483]
+- Updated dependencies [79457bd]
+- Updated dependencies [ad80501]
+- Updated dependencies [1766ede]
+- Updated dependencies [f10304b]
+  - @ai-sdk/provider@2.0.0-canary.5
+  - @ai-sdk/provider-utils@3.0.0-canary.6
+
 ## 1.0.0-canary.5
 
 ### Patch Changes
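The canary.7 entry above converts call-level settings to providerOptions (fa49207). Below is a minimal sketch of what that looks like from the calling side, assuming the AI SDK v5 canary `generateText` API and that the providerOptions key matches the `name` passed to `createOpenAICompatible`; the provider name, base URL, and model id are hypothetical placeholders.

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { generateText } from 'ai';

// Hypothetical provider named 'example'; baseURL/apiKey are placeholders.
const example = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
  apiKey: process.env.EXAMPLE_API_KEY,
});

const { text } = await generateText({
  model: example('example-chat-model'),
  prompt: 'Hello!',
  // Previously a model setting; now passed per call as provider options
  // (validated against openaiCompatibleProviderOptions, e.g. `user`).
  providerOptions: {
    example: { user: 'user-1234' },
  },
});
```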
package/dist/index.d.mts
CHANGED
@@ -1,22 +1,20 @@
-import {
+import { SharedV2ProviderMetadata, LanguageModelV2, LanguageModelV2ObjectGenerationMode, EmbeddingModelV2, ImageModelV1, ProviderV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z, ZodSchema } from 'zod';
 
 type OpenAICompatibleChatModelId = string;
-
+declare const openaiCompatibleProviderOptions: z.ZodObject<{
     /**
-
-
-     */
-    user?: string;
-    /**
-    Simulates streaming by using a normal generate call and returning it as a stream.
-    Enable this if the model that you are using does not support streaming.
-
-    Defaults to `false`.
+     * A unique identifier representing your end-user, which can help the provider to
+     * monitor and detect abuse.
      */
-
-}
+    user: z.ZodOptional<z.ZodString>;
+}, "strip", z.ZodTypeAny, {
+    user?: string | undefined;
+}, {
+    user?: string | undefined;
+}>;
+type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;
 
 declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
     error: z.ZodObject<{
@@ -73,7 +71,7 @@ type MetadataExtractor = {
     */
    extractMetadata: ({ parsedBody, }: {
        parsedBody: unknown;
-    }) =>
+    }) => SharedV2ProviderMetadata | undefined;
    /**
     * Creates an extractor for handling streaming responses. The returned object provides
     * methods to process individual chunks and build the final metadata from the accumulated
@@ -96,7 +94,7 @@ type MetadataExtractor = {
     * @returns Provider-specific metadata or undefined if no metadata is available.
     * The metadata should be under a key indicating the provider id.
     */
-        buildMetadata():
+        buildMetadata(): SharedV2ProviderMetadata | undefined;
    };
};
 
@@ -125,11 +123,10 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
    readonly specificationVersion = "v2";
    readonly supportsStructuredOutputs: boolean;
    readonly modelId: OpenAICompatibleChatModelId;
-    readonly settings: OpenAICompatibleChatSettings;
    private readonly config;
    private readonly failedResponseHandler;
    private readonly chunkSchema;
-    constructor(modelId: OpenAICompatibleChatModelId,
+    constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig);
    get defaultObjectGenerationMode(): 'json' | 'tool' | undefined;
    get provider(): string;
    private get providerOptionsName();
@@ -139,36 +136,39 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
 }
 
 type OpenAICompatibleCompletionModelId = string;
-
+declare const openaiCompatibleCompletionProviderOptions: z.ZodObject<{
     /**
-
+     * Echo back the prompt in addition to the completion.
      */
-    echo
+    echo: z.ZodOptional<z.ZodBoolean>;
     /**
-
-
-
-
-    can use this tokenizer tool to convert text to token IDs. Mathematically,
-    the bias is added to the logits generated by the model prior to sampling.
-    The exact effect will vary per model, but values between -1 and 1 should
-    decrease or increase likelihood of selection; values like -100 or 100
-    should result in a ban or exclusive selection of the relevant token.
-
-    As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
-    token from being generated.
+     * Modify the likelihood of specified tokens appearing in the completion.
+     *
+     * Accepts a JSON object that maps tokens (specified by their token ID in
+     * the GPT tokenizer) to an associated bias value from -100 to 100.
      */
-    logitBias
+    logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
     /**
-
+     * The suffix that comes after a completion of inserted text.
      */
-    suffix
+    suffix: z.ZodOptional<z.ZodString>;
     /**
-
-
+     * A unique identifier representing your end-user, which can help providers to
+     * monitor and detect abuse.
      */
-    user
-}
+    user: z.ZodOptional<z.ZodString>;
+}, "strip", z.ZodTypeAny, {
+    user?: string | undefined;
+    echo?: boolean | undefined;
+    logitBias?: Record<number, number> | undefined;
+    suffix?: string | undefined;
+}, {
+    user?: string | undefined;
+    echo?: boolean | undefined;
+    logitBias?: Record<number, number> | undefined;
+    suffix?: string | undefined;
+}>;
+type OpenAICompatibleCompletionProviderOptions = z.infer<typeof openaiCompatibleCompletionProviderOptions>;
 
 type OpenAICompatibleCompletionConfig = {
     provider: string;
@@ -184,11 +184,10 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2
    readonly specificationVersion = "v2";
    readonly defaultObjectGenerationMode: undefined;
    readonly modelId: OpenAICompatibleCompletionModelId;
-    readonly settings: OpenAICompatibleCompletionSettings;
    private readonly config;
    private readonly failedResponseHandler;
    private readonly chunkSchema;
-    constructor(modelId: OpenAICompatibleCompletionModelId,
+    constructor(modelId: OpenAICompatibleCompletionModelId, config: OpenAICompatibleCompletionConfig);
    get provider(): string;
    private get providerOptionsName();
    private getArgs;
@@ -197,18 +196,25 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2
 }
 
 type OpenAICompatibleEmbeddingModelId = string;
-
+declare const openaiCompatibleEmbeddingProviderOptions: z.ZodObject<{
     /**
-
-
+     * The number of dimensions the resulting output embeddings should have.
+     * Only supported in text-embedding-3 and later models.
      */
-    dimensions
+    dimensions: z.ZodOptional<z.ZodNumber>;
     /**
-
-
-
-    user
-}
+     * A unique identifier representing your end-user, which can help providers to
+     * monitor and detect abuse.
+     */
+    user: z.ZodOptional<z.ZodString>;
+}, "strip", z.ZodTypeAny, {
+    user?: string | undefined;
+    dimensions?: number | undefined;
+}, {
+    user?: string | undefined;
+    dimensions?: number | undefined;
+}>;
+type OpenAICompatibleEmbeddingProviderOptions = z.infer<typeof openaiCompatibleEmbeddingProviderOptions>;
 
 type OpenAICompatibleEmbeddingConfig = {
     /**
@@ -228,16 +234,16 @@ type OpenAICompatibleEmbeddingConfig = {
    fetch?: FetchFunction;
    errorStructure?: ProviderErrorStructure<any>;
 };
-declare class OpenAICompatibleEmbeddingModel implements
-    readonly specificationVersion = "
+declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV2<string> {
+    readonly specificationVersion = "v2";
    readonly modelId: OpenAICompatibleEmbeddingModelId;
    private readonly config;
-    private readonly settings;
    get provider(): string;
    get maxEmbeddingsPerCall(): number;
    get supportsParallelCalls(): boolean;
-    constructor(modelId: OpenAICompatibleEmbeddingModelId,
-
+    constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
+    private get providerOptionsName();
+    doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
 }
 
 type OpenAICompatibleImageModelId = string;
@@ -278,12 +284,12 @@ declare class OpenAICompatibleImageModel implements ImageModelV1 {
 }
 
 interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV2, 'imageModel'> {
-    (modelId: CHAT_MODEL_IDS
-    languageModel(modelId: CHAT_MODEL_IDS
-    chatModel(modelId: CHAT_MODEL_IDS
-    completionModel(modelId: COMPLETION_MODEL_IDS
-    textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS
-    imageModel(modelId: IMAGE_MODEL_IDS
+    (modelId: CHAT_MODEL_IDS): LanguageModelV2;
+    languageModel(modelId: CHAT_MODEL_IDS): LanguageModelV2;
+    chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV2;
+    completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV2;
+    textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV2<string>;
+    imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV1;
 }
 interface OpenAICompatibleProviderSettings {
    /**
@@ -320,4 +326,4 @@ Create an OpenAICompatible provider instance.
 */
 declare function createOpenAICompatible<CHAT_MODEL_IDS extends string, COMPLETION_MODEL_IDS extends string, EMBEDDING_MODEL_IDS extends string, IMAGE_MODEL_IDS extends string>(options: OpenAICompatibleProviderSettings): OpenAICompatibleProvider<CHAT_MODEL_IDS, COMPLETION_MODEL_IDS, EMBEDDING_MODEL_IDS, IMAGE_MODEL_IDS>;
 
-export { type MetadataExtractor, OpenAICompatibleChatLanguageModel, type
+export { type MetadataExtractor, OpenAICompatibleChatLanguageModel, type OpenAICompatibleChatModelId, OpenAICompatibleCompletionLanguageModel, type OpenAICompatibleCompletionModelId, type OpenAICompatibleCompletionProviderOptions, OpenAICompatibleEmbeddingModel, type OpenAICompatibleEmbeddingModelId, type OpenAICompatibleEmbeddingProviderOptions, type OpenAICompatibleErrorData, OpenAICompatibleImageModel, type OpenAICompatibleImageSettings, type OpenAICompatibleProvider, type OpenAICompatibleProviderOptions, type OpenAICompatibleProviderSettings, type ProviderErrorStructure, createOpenAICompatible };
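In the hunks above, `extractMetadata` and `buildMetadata` now return `SharedV2ProviderMetadata | undefined`. Below is a hedged sketch of a custom extractor written against that shape; the stream-extractor method names (`createStreamExtractor`, `processChunk`) are not fully visible in this excerpt and are assumed, and the `example` provider key and `usage_breakdown` response field are hypothetical.

```ts
import type { MetadataExtractor } from '@ai-sdk/openai-compatible';

// Pulls a hypothetical `usage_breakdown` field out of the parsed response body
// and surfaces it as provider metadata under the 'example' key.
const exampleMetadataExtractor: MetadataExtractor = {
  extractMetadata: ({ parsedBody }) => {
    const body = parsedBody as { usage_breakdown?: Record<string, number> };
    return body?.usage_breakdown
      ? { example: { usageBreakdown: body.usage_breakdown } }
      : undefined;
  },
  // Assumed shape for the streaming side: accumulate per-chunk data, then
  // build the final SharedV2ProviderMetadata (or undefined) when the stream ends.
  createStreamExtractor: () => {
    let usageBreakdown: Record<string, number> | undefined;
    return {
      processChunk: (chunk: unknown) => {
        const c = chunk as { usage_breakdown?: Record<string, number> };
        if (c?.usage_breakdown) usageBreakdown = c.usage_breakdown;
      },
      buildMetadata: () =>
        usageBreakdown ? { example: { usageBreakdown } } : undefined,
    };
  },
};
```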
package/dist/index.d.ts
CHANGED
@@ -1,22 +1,20 @@
-import {
+import { SharedV2ProviderMetadata, LanguageModelV2, LanguageModelV2ObjectGenerationMode, EmbeddingModelV2, ImageModelV1, ProviderV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z, ZodSchema } from 'zod';
 
 type OpenAICompatibleChatModelId = string;
-
+declare const openaiCompatibleProviderOptions: z.ZodObject<{
     /**
-
-
-     */
-    user?: string;
-    /**
-    Simulates streaming by using a normal generate call and returning it as a stream.
-    Enable this if the model that you are using does not support streaming.
-
-    Defaults to `false`.
+     * A unique identifier representing your end-user, which can help the provider to
+     * monitor and detect abuse.
      */
-
-}
+    user: z.ZodOptional<z.ZodString>;
+}, "strip", z.ZodTypeAny, {
+    user?: string | undefined;
+}, {
+    user?: string | undefined;
+}>;
+type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;
 
 declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
     error: z.ZodObject<{
@@ -73,7 +71,7 @@ type MetadataExtractor = {
     */
    extractMetadata: ({ parsedBody, }: {
        parsedBody: unknown;
-    }) =>
+    }) => SharedV2ProviderMetadata | undefined;
    /**
     * Creates an extractor for handling streaming responses. The returned object provides
     * methods to process individual chunks and build the final metadata from the accumulated
@@ -96,7 +94,7 @@ type MetadataExtractor = {
     * @returns Provider-specific metadata or undefined if no metadata is available.
     * The metadata should be under a key indicating the provider id.
     */
-        buildMetadata():
+        buildMetadata(): SharedV2ProviderMetadata | undefined;
    };
};
 
@@ -125,11 +123,10 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
    readonly specificationVersion = "v2";
    readonly supportsStructuredOutputs: boolean;
    readonly modelId: OpenAICompatibleChatModelId;
-    readonly settings: OpenAICompatibleChatSettings;
    private readonly config;
    private readonly failedResponseHandler;
    private readonly chunkSchema;
-    constructor(modelId: OpenAICompatibleChatModelId,
+    constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig);
    get defaultObjectGenerationMode(): 'json' | 'tool' | undefined;
    get provider(): string;
    private get providerOptionsName();
@@ -139,36 +136,39 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
 }
 
 type OpenAICompatibleCompletionModelId = string;
-
+declare const openaiCompatibleCompletionProviderOptions: z.ZodObject<{
     /**
-
+     * Echo back the prompt in addition to the completion.
      */
-    echo
+    echo: z.ZodOptional<z.ZodBoolean>;
     /**
-
-
-
-
-    can use this tokenizer tool to convert text to token IDs. Mathematically,
-    the bias is added to the logits generated by the model prior to sampling.
-    The exact effect will vary per model, but values between -1 and 1 should
-    decrease or increase likelihood of selection; values like -100 or 100
-    should result in a ban or exclusive selection of the relevant token.
-
-    As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
-    token from being generated.
+     * Modify the likelihood of specified tokens appearing in the completion.
+     *
+     * Accepts a JSON object that maps tokens (specified by their token ID in
+     * the GPT tokenizer) to an associated bias value from -100 to 100.
      */
-    logitBias
+    logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
     /**
-
+     * The suffix that comes after a completion of inserted text.
      */
-    suffix
+    suffix: z.ZodOptional<z.ZodString>;
     /**
-
-
+     * A unique identifier representing your end-user, which can help providers to
+     * monitor and detect abuse.
      */
-    user
-}
+    user: z.ZodOptional<z.ZodString>;
+}, "strip", z.ZodTypeAny, {
+    user?: string | undefined;
+    echo?: boolean | undefined;
+    logitBias?: Record<number, number> | undefined;
+    suffix?: string | undefined;
+}, {
+    user?: string | undefined;
+    echo?: boolean | undefined;
+    logitBias?: Record<number, number> | undefined;
+    suffix?: string | undefined;
+}>;
+type OpenAICompatibleCompletionProviderOptions = z.infer<typeof openaiCompatibleCompletionProviderOptions>;
 
 type OpenAICompatibleCompletionConfig = {
     provider: string;
@@ -184,11 +184,10 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2
    readonly specificationVersion = "v2";
    readonly defaultObjectGenerationMode: undefined;
    readonly modelId: OpenAICompatibleCompletionModelId;
-    readonly settings: OpenAICompatibleCompletionSettings;
    private readonly config;
    private readonly failedResponseHandler;
    private readonly chunkSchema;
-    constructor(modelId: OpenAICompatibleCompletionModelId,
+    constructor(modelId: OpenAICompatibleCompletionModelId, config: OpenAICompatibleCompletionConfig);
    get provider(): string;
    private get providerOptionsName();
    private getArgs;
@@ -197,18 +196,25 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV2
 }
 
 type OpenAICompatibleEmbeddingModelId = string;
-
+declare const openaiCompatibleEmbeddingProviderOptions: z.ZodObject<{
     /**
-
-
+     * The number of dimensions the resulting output embeddings should have.
+     * Only supported in text-embedding-3 and later models.
      */
-    dimensions
+    dimensions: z.ZodOptional<z.ZodNumber>;
     /**
-
-
-
-    user
-}
+     * A unique identifier representing your end-user, which can help providers to
+     * monitor and detect abuse.
+     */
+    user: z.ZodOptional<z.ZodString>;
+}, "strip", z.ZodTypeAny, {
+    user?: string | undefined;
+    dimensions?: number | undefined;
+}, {
+    user?: string | undefined;
+    dimensions?: number | undefined;
+}>;
+type OpenAICompatibleEmbeddingProviderOptions = z.infer<typeof openaiCompatibleEmbeddingProviderOptions>;
 
 type OpenAICompatibleEmbeddingConfig = {
     /**
@@ -228,16 +234,16 @@ type OpenAICompatibleEmbeddingConfig = {
    fetch?: FetchFunction;
    errorStructure?: ProviderErrorStructure<any>;
 };
-declare class OpenAICompatibleEmbeddingModel implements
-    readonly specificationVersion = "
+declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV2<string> {
+    readonly specificationVersion = "v2";
    readonly modelId: OpenAICompatibleEmbeddingModelId;
    private readonly config;
-    private readonly settings;
    get provider(): string;
    get maxEmbeddingsPerCall(): number;
    get supportsParallelCalls(): boolean;
-    constructor(modelId: OpenAICompatibleEmbeddingModelId,
-
+    constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
+    private get providerOptionsName();
+    doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
 }
 
 type OpenAICompatibleImageModelId = string;
@@ -278,12 +284,12 @@ declare class OpenAICompatibleImageModel implements ImageModelV1 {
 }
 
 interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV2, 'imageModel'> {
-    (modelId: CHAT_MODEL_IDS
-    languageModel(modelId: CHAT_MODEL_IDS
-    chatModel(modelId: CHAT_MODEL_IDS
-    completionModel(modelId: COMPLETION_MODEL_IDS
-    textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS
-    imageModel(modelId: IMAGE_MODEL_IDS
+    (modelId: CHAT_MODEL_IDS): LanguageModelV2;
+    languageModel(modelId: CHAT_MODEL_IDS): LanguageModelV2;
+    chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV2;
+    completionModel(modelId: COMPLETION_MODEL_IDS): LanguageModelV2;
+    textEmbeddingModel(modelId: EMBEDDING_MODEL_IDS): EmbeddingModelV2<string>;
+    imageModel(modelId: IMAGE_MODEL_IDS): ImageModelV1;
 }
 interface OpenAICompatibleProviderSettings {
    /**
@@ -320,4 +326,4 @@ Create an OpenAICompatible provider instance.
 */
 declare function createOpenAICompatible<CHAT_MODEL_IDS extends string, COMPLETION_MODEL_IDS extends string, EMBEDDING_MODEL_IDS extends string, IMAGE_MODEL_IDS extends string>(options: OpenAICompatibleProviderSettings): OpenAICompatibleProvider<CHAT_MODEL_IDS, COMPLETION_MODEL_IDS, EMBEDDING_MODEL_IDS, IMAGE_MODEL_IDS>;
 
-export { type MetadataExtractor, OpenAICompatibleChatLanguageModel, type
+export { type MetadataExtractor, OpenAICompatibleChatLanguageModel, type OpenAICompatibleChatModelId, OpenAICompatibleCompletionLanguageModel, type OpenAICompatibleCompletionModelId, type OpenAICompatibleCompletionProviderOptions, OpenAICompatibleEmbeddingModel, type OpenAICompatibleEmbeddingModelId, type OpenAICompatibleEmbeddingProviderOptions, type OpenAICompatibleErrorData, OpenAICompatibleImageModel, type OpenAICompatibleImageSettings, type OpenAICompatibleProvider, type OpenAICompatibleProviderOptions, type OpenAICompatibleProviderSettings, type ProviderErrorStructure, createOpenAICompatible };
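The embedding model now implements `EmbeddingModelV2<string>` and accepts `providerOptions` in `doEmbed`, validated against `openaiCompatibleEmbeddingProviderOptions` (`dimensions`, `user`). Below is a hedged sketch via the AI SDK `embed` helper, assuming the v5 canary API forwards providerOptions under the provider's name; the provider name, base URL, and model id are hypothetical placeholders.

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { embed } from 'ai';

// Hypothetical OpenAI-compatible endpoint named 'example'.
const example = createOpenAICompatible({
  name: 'example',
  baseURL: 'https://api.example.com/v1',
});

const { embedding } = await embed({
  model: example.textEmbeddingModel('example-embedding-model'),
  value: 'sunny day at the beach',
  // Forwarded to doEmbed and parsed as dimensions/user embedding options.
  providerOptions: {
    example: { dimensions: 256, user: 'user-1234' },
  },
});
```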