@ai-sdk/openai-compatible 2.0.43 → 2.0.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/dist/index.d.mts +29 -1
- package/dist/index.d.ts +29 -1
- package/dist/index.js +11 -4
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +11 -4
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +19 -1
- package/dist/internal/index.d.ts +19 -1
- package/package.json +2 -2
- package/src/chat/openai-compatible-chat-language-model.ts +23 -2
- package/src/openai-compatible-provider.ts +13 -0
package/dist/internal/index.d.ts
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
import { JSONValue, LanguageModelV3Prompt, LanguageModelV3Usage, LanguageModelV3FinishReason, LanguageModelV3CallOptions, SharedV3Warning, SharedV3ProviderMetadata, LanguageModelV3 } from '@ai-sdk/provider';
|
|
2
2
|
import { FetchFunction } from '@ai-sdk/provider-utils';
|
|
3
|
-
import { ZodType } from 'zod/v4';
|
|
3
|
+
import { ZodType, z } from 'zod/v4';
|
|
4
4
|
|
|
5
5
|
type OpenAICompatibleChatPrompt = Array<OpenAICompatibleMessage>;
|
|
6
6
|
type OpenAICompatibleMessage = OpenAICompatibleSystemMessage | OpenAICompatibleUserMessage | OpenAICompatibleAssistantMessage | OpenAICompatibleToolMessage;
|
|
@@ -188,6 +188,24 @@ type OpenAICompatibleChatConfig = {
|
|
|
188
188
|
* than the official OpenAI API.
|
|
189
189
|
*/
|
|
190
190
|
transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
|
|
191
|
+
/**
|
|
192
|
+
* Optional usage converter for OpenAI-compatible providers with different
|
|
193
|
+
* token accounting semantics.
|
|
194
|
+
*/
|
|
195
|
+
convertUsage?: (usage: z.infer<typeof openaiCompatibleTokenUsageSchema>) => LanguageModelV3Usage;
|
|
191
196
|
};
|
|
197
|
+
declare const openaiCompatibleTokenUsageSchema: z.ZodOptional<z.ZodNullable<z.ZodObject<{
|
|
198
|
+
prompt_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
199
|
+
completion_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
200
|
+
total_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
201
|
+
prompt_tokens_details: z.ZodOptional<z.ZodNullable<z.ZodObject<{
|
|
202
|
+
cached_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
203
|
+
}, z.core.$strip>>>;
|
|
204
|
+
completion_tokens_details: z.ZodOptional<z.ZodNullable<z.ZodObject<{
|
|
205
|
+
reasoning_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
206
|
+
accepted_prediction_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
207
|
+
rejected_prediction_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
208
|
+
}, z.core.$strip>>>;
|
|
209
|
+
}, z.core.$loose>>>;
|
|
192
210
|
|
|
193
211
|
export { type OpenAICompatibleChatConfig, convertOpenAICompatibleChatUsage, convertToOpenAICompatibleChatMessages, getResponseMetadata, mapOpenAICompatibleFinishReason, prepareTools };
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@ai-sdk/openai-compatible",
|
|
3
|
-
"version": "2.0.43",
|
|
3
|
+
"version": "2.0.45",
|
|
4
4
|
"license": "Apache-2.0",
|
|
5
5
|
"sideEffects": false,
|
|
6
6
|
"main": "./dist/index.js",
|
|
@@ -37,7 +37,7 @@
|
|
|
37
37
|
},
|
|
38
38
|
"dependencies": {
|
|
39
39
|
"@ai-sdk/provider": "3.0.10",
|
|
40
|
-
"@ai-sdk/provider-utils": "4.0.
|
|
40
|
+
"@ai-sdk/provider-utils": "4.0.26"
|
|
41
41
|
},
|
|
42
42
|
"devDependencies": {
|
|
43
43
|
"@types/node": "20.17.24",
|
|
@@ -8,6 +8,7 @@ import {
|
|
|
8
8
|
type LanguageModelV3GenerateResult,
|
|
9
9
|
type LanguageModelV3StreamPart,
|
|
10
10
|
type LanguageModelV3StreamResult,
|
|
11
|
+
type LanguageModelV3Usage,
|
|
11
12
|
type SharedV3ProviderMetadata,
|
|
12
13
|
type SharedV3Warning,
|
|
13
14
|
} from '@ai-sdk/provider';
|
|
@@ -66,6 +67,14 @@ export type OpenAICompatibleChatConfig = {
|
|
|
66
67
|
* than the official OpenAI API.
|
|
67
68
|
*/
|
|
68
69
|
transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
|
|
70
|
+
|
|
71
|
+
/**
|
|
72
|
+
* Optional usage converter for OpenAI-compatible providers with different
|
|
73
|
+
* token accounting semantics.
|
|
74
|
+
*/
|
|
75
|
+
convertUsage?: (
|
|
76
|
+
usage: z.infer<typeof openaiCompatibleTokenUsageSchema>,
|
|
77
|
+
) => LanguageModelV3Usage;
|
|
69
78
|
};
|
|
70
79
|
|
|
71
80
|
export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
|
|
@@ -112,6 +121,15 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
|
|
|
112
121
|
return this.config.transformRequestBody?.(args) ?? args;
|
|
113
122
|
}
|
|
114
123
|
|
|
124
|
+
private convertUsage(
|
|
125
|
+
usage: z.infer<typeof openaiCompatibleTokenUsageSchema>,
|
|
126
|
+
): LanguageModelV3Usage {
|
|
127
|
+
return (
|
|
128
|
+
this.config.convertUsage?.(usage) ??
|
|
129
|
+
convertOpenAICompatibleChatUsage(usage)
|
|
130
|
+
);
|
|
131
|
+
}
|
|
132
|
+
|
|
115
133
|
private async getArgs({
|
|
116
134
|
prompt,
|
|
117
135
|
maxOutputTokens,
|
|
@@ -345,7 +363,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
|
|
|
345
363
|
unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
|
|
346
364
|
raw: choice.finish_reason ?? undefined,
|
|
347
365
|
},
|
|
348
|
-
usage: convertOpenAICompatibleChatUsage(responseBody.usage),
|
|
366
|
+
usage: this.convertUsage(responseBody.usage),
|
|
349
367
|
providerMetadata,
|
|
350
368
|
request: { body },
|
|
351
369
|
response: {
|
|
@@ -411,6 +429,9 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
|
|
|
411
429
|
const providerOptionsName = metadataKey;
|
|
412
430
|
let isActiveReasoning = false;
|
|
413
431
|
let isActiveText = false;
|
|
432
|
+
const convertUsage = (
|
|
433
|
+
usage: z.infer<typeof openaiCompatibleTokenUsageSchema>,
|
|
434
|
+
) => this.convertUsage(usage);
|
|
414
435
|
|
|
415
436
|
return {
|
|
416
437
|
stream: response.pipeThrough(
|
|
@@ -719,7 +740,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
|
|
|
719
740
|
controller.enqueue({
|
|
720
741
|
type: 'finish',
|
|
721
742
|
finishReason,
|
|
722
|
-
usage: convertOpenAICompatibleChatUsage(usage),
|
|
743
|
+
usage: convertUsage(usage),
|
|
723
744
|
providerMetadata,
|
|
724
745
|
});
|
|
725
746
|
},
|
|
@@ -104,6 +104,17 @@ export interface OpenAICompatibleProviderSettings {
|
|
|
104
104
|
* or provider-specific metrics from both streaming and non-streaming responses.
|
|
105
105
|
*/
|
|
106
106
|
metadataExtractor?: MetadataExtractor;
|
|
107
|
+
|
|
108
|
+
/**
|
|
109
|
+
* The supported URLs for chat models.
|
|
110
|
+
*/
|
|
111
|
+
supportedUrls?: OpenAICompatibleChatConfig['supportedUrls'];
|
|
112
|
+
|
|
113
|
+
/**
|
|
114
|
+
* Optional usage converter for providers with token accounting semantics that
|
|
115
|
+
* differ from the default OpenAI-compatible shape.
|
|
116
|
+
*/
|
|
117
|
+
convertUsage?: OpenAICompatibleChatConfig['convertUsage'];
|
|
107
118
|
}
|
|
108
119
|
|
|
109
120
|
/**
|
|
@@ -161,8 +172,10 @@ export function createOpenAICompatible<
|
|
|
161
172
|
...getCommonModelConfig('chat'),
|
|
162
173
|
includeUsage: options.includeUsage,
|
|
163
174
|
supportsStructuredOutputs: options.supportsStructuredOutputs,
|
|
175
|
+
supportedUrls: options.supportedUrls,
|
|
164
176
|
transformRequestBody: options.transformRequestBody,
|
|
165
177
|
metadataExtractor: options.metadataExtractor,
|
|
178
|
+
convertUsage: options.convertUsage,
|
|
166
179
|
});
|
|
167
180
|
|
|
168
181
|
const createCompletionModel = (modelId: COMPLETION_MODEL_IDS) =>
|