@ai-sdk/openai-compatible 2.0.44 → 2.0.46
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +29 -1
- package/dist/index.d.ts +29 -1
- package/dist/index.js +12 -5
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +12 -5
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +19 -1
- package/dist/internal/index.d.ts +19 -1
- package/dist/internal/index.js +1 -1
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1 -1
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
- package/src/chat/convert-to-openai-compatible-chat-messages.ts +1 -1
- package/src/chat/openai-compatible-chat-language-model.ts +23 -2
- package/src/openai-compatible-provider.ts +13 -0
package/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,17 @@
|
|
|
1
1
|
# @ai-sdk/openai-compatible
|
|
2
2
|
|
|
3
|
+
## 2.0.46
|
|
4
|
+
|
|
5
|
+
### Patch Changes
|
|
6
|
+
|
|
7
|
+
- 38966ab: fix(openai, openai-compatible): only send null content for assistant messages with tool calls
|
|
8
|
+
|
|
9
|
+
## 2.0.45
|
|
10
|
+
|
|
11
|
+
### Patch Changes
|
|
12
|
+
|
|
13
|
+
- 6043d24: feat(vertex): add grok models to vertex provider
|
|
14
|
+
|
|
3
15
|
## 2.0.44
|
|
4
16
|
|
|
5
17
|
### Patch Changes
|
package/dist/index.d.mts
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { SharedV3ProviderMetadata, LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
|
|
1
|
+
import { SharedV3ProviderMetadata, LanguageModelV3, LanguageModelV3Usage, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
|
|
2
2
|
import { FetchFunction } from '@ai-sdk/provider-utils';
|
|
3
3
|
import { ZodType, z } from 'zod/v4';
|
|
4
4
|
|
|
@@ -94,6 +94,11 @@ type OpenAICompatibleChatConfig = {
|
|
|
94
94
|
* than the official OpenAI API.
|
|
95
95
|
*/
|
|
96
96
|
transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
|
|
97
|
+
/**
|
|
98
|
+
* Optional usage converter for OpenAI-compatible providers with different
|
|
99
|
+
* token accounting semantics.
|
|
100
|
+
*/
|
|
101
|
+
convertUsage?: (usage: z.infer<typeof openaiCompatibleTokenUsageSchema>) => LanguageModelV3Usage;
|
|
97
102
|
};
|
|
98
103
|
declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
|
|
99
104
|
readonly specificationVersion = "v3";
|
|
@@ -107,10 +112,24 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
|
|
|
107
112
|
private get providerOptionsName();
|
|
108
113
|
get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
|
|
109
114
|
private transformRequestBody;
|
|
115
|
+
private convertUsage;
|
|
110
116
|
private getArgs;
|
|
111
117
|
doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
|
|
112
118
|
doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
|
|
113
119
|
}
|
|
120
|
+
declare const openaiCompatibleTokenUsageSchema: z.ZodOptional<z.ZodNullable<z.ZodObject<{
|
|
121
|
+
prompt_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
122
|
+
completion_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
123
|
+
total_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
124
|
+
prompt_tokens_details: z.ZodOptional<z.ZodNullable<z.ZodObject<{
|
|
125
|
+
cached_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
126
|
+
}, z.core.$strip>>>;
|
|
127
|
+
completion_tokens_details: z.ZodOptional<z.ZodNullable<z.ZodObject<{
|
|
128
|
+
reasoning_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
129
|
+
accepted_prediction_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
130
|
+
rejected_prediction_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
131
|
+
}, z.core.$strip>>>;
|
|
132
|
+
}, z.core.$loose>>>;
|
|
114
133
|
|
|
115
134
|
type OpenAICompatibleCompletionModelId = string;
|
|
116
135
|
declare const openaiCompatibleLanguageModelCompletionOptions: z.ZodObject<{
|
|
@@ -279,6 +298,15 @@ interface OpenAICompatibleProviderSettings {
|
|
|
279
298
|
* or provider-specific metrics from both streaming and non-streaming responses.
|
|
280
299
|
*/
|
|
281
300
|
metadataExtractor?: MetadataExtractor;
|
|
301
|
+
/**
|
|
302
|
+
* The supported URLs for chat models.
|
|
303
|
+
*/
|
|
304
|
+
supportedUrls?: OpenAICompatibleChatConfig['supportedUrls'];
|
|
305
|
+
/**
|
|
306
|
+
* Optional usage converter for providers with token accounting semantics that
|
|
307
|
+
* differ from the default OpenAI-compatible shape.
|
|
308
|
+
*/
|
|
309
|
+
convertUsage?: OpenAICompatibleChatConfig['convertUsage'];
|
|
282
310
|
}
|
|
283
311
|
/**
|
|
284
312
|
* Create an OpenAICompatible provider instance.
|
package/dist/index.d.ts
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { SharedV3ProviderMetadata, LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
|
|
1
|
+
import { SharedV3ProviderMetadata, LanguageModelV3, LanguageModelV3Usage, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
|
|
2
2
|
import { FetchFunction } from '@ai-sdk/provider-utils';
|
|
3
3
|
import { ZodType, z } from 'zod/v4';
|
|
4
4
|
|
|
@@ -94,6 +94,11 @@ type OpenAICompatibleChatConfig = {
|
|
|
94
94
|
* than the official OpenAI API.
|
|
95
95
|
*/
|
|
96
96
|
transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
|
|
97
|
+
/**
|
|
98
|
+
* Optional usage converter for OpenAI-compatible providers with different
|
|
99
|
+
* token accounting semantics.
|
|
100
|
+
*/
|
|
101
|
+
convertUsage?: (usage: z.infer<typeof openaiCompatibleTokenUsageSchema>) => LanguageModelV3Usage;
|
|
97
102
|
};
|
|
98
103
|
declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
|
|
99
104
|
readonly specificationVersion = "v3";
|
|
@@ -107,10 +112,24 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
|
|
|
107
112
|
private get providerOptionsName();
|
|
108
113
|
get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
|
|
109
114
|
private transformRequestBody;
|
|
115
|
+
private convertUsage;
|
|
110
116
|
private getArgs;
|
|
111
117
|
doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
|
|
112
118
|
doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
|
|
113
119
|
}
|
|
120
|
+
declare const openaiCompatibleTokenUsageSchema: z.ZodOptional<z.ZodNullable<z.ZodObject<{
|
|
121
|
+
prompt_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
122
|
+
completion_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
123
|
+
total_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
124
|
+
prompt_tokens_details: z.ZodOptional<z.ZodNullable<z.ZodObject<{
|
|
125
|
+
cached_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
126
|
+
}, z.core.$strip>>>;
|
|
127
|
+
completion_tokens_details: z.ZodOptional<z.ZodNullable<z.ZodObject<{
|
|
128
|
+
reasoning_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
129
|
+
accepted_prediction_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
130
|
+
rejected_prediction_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
|
|
131
|
+
}, z.core.$strip>>>;
|
|
132
|
+
}, z.core.$loose>>>;
|
|
114
133
|
|
|
115
134
|
type OpenAICompatibleCompletionModelId = string;
|
|
116
135
|
declare const openaiCompatibleLanguageModelCompletionOptions: z.ZodObject<{
|
|
@@ -279,6 +298,15 @@ interface OpenAICompatibleProviderSettings {
|
|
|
279
298
|
* or provider-specific metrics from both streaming and non-streaming responses.
|
|
280
299
|
*/
|
|
281
300
|
metadataExtractor?: MetadataExtractor;
|
|
301
|
+
/**
|
|
302
|
+
* The supported URLs for chat models.
|
|
303
|
+
*/
|
|
304
|
+
supportedUrls?: OpenAICompatibleChatConfig['supportedUrls'];
|
|
305
|
+
/**
|
|
306
|
+
* Optional usage converter for providers with token accounting semantics that
|
|
307
|
+
* differ from the default OpenAI-compatible shape.
|
|
308
|
+
*/
|
|
309
|
+
convertUsage?: OpenAICompatibleChatConfig['convertUsage'];
|
|
282
310
|
}
|
|
283
311
|
/**
|
|
284
312
|
* Create an OpenAICompatible provider instance.
|
package/dist/index.js
CHANGED
|
@@ -256,7 +256,7 @@ function convertToOpenAICompatibleChatMessages(prompt) {
|
|
|
256
256
|
}
|
|
257
257
|
messages.push({
|
|
258
258
|
role: "assistant",
|
|
259
|
-
content: text || null,
|
|
259
|
+
content: toolCalls.length > 0 ? text || null : text,
|
|
260
260
|
...reasoning.length > 0 ? { reasoning_content: reasoning } : {},
|
|
261
261
|
tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
|
|
262
262
|
...metadata
|
|
@@ -445,6 +445,10 @@ var OpenAICompatibleChatLanguageModel = class {
|
|
|
445
445
|
var _a, _b, _c;
|
|
446
446
|
return (_c = (_b = (_a = this.config).transformRequestBody) == null ? void 0 : _b.call(_a, args)) != null ? _c : args;
|
|
447
447
|
}
|
|
448
|
+
convertUsage(usage) {
|
|
449
|
+
var _a, _b, _c;
|
|
450
|
+
return (_c = (_b = (_a = this.config).convertUsage) == null ? void 0 : _b.call(_a, usage)) != null ? _c : convertOpenAICompatibleChatUsage(usage);
|
|
451
|
+
}
|
|
448
452
|
async getArgs({
|
|
449
453
|
prompt,
|
|
450
454
|
maxOutputTokens,
|
|
@@ -630,7 +634,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
|
|
630
634
|
unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
|
|
631
635
|
raw: (_h = choice.finish_reason) != null ? _h : void 0
|
|
632
636
|
},
|
|
633
|
-
usage: convertOpenAICompatibleChatUsage(responseBody.usage),
|
|
637
|
+
usage: this.convertUsage(responseBody.usage),
|
|
634
638
|
providerMetadata,
|
|
635
639
|
request: { body },
|
|
636
640
|
response: {
|
|
@@ -675,6 +679,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
|
|
675
679
|
const providerOptionsName = metadataKey;
|
|
676
680
|
let isActiveReasoning = false;
|
|
677
681
|
let isActiveText = false;
|
|
682
|
+
const convertUsage = (usage2) => this.convertUsage(usage2);
|
|
678
683
|
return {
|
|
679
684
|
stream: response.pipeThrough(
|
|
680
685
|
new TransformStream({
|
|
@@ -902,7 +907,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
|
|
902
907
|
controller.enqueue({
|
|
903
908
|
type: "finish",
|
|
904
909
|
finishReason,
|
|
905
|
-
usage: convertOpenAICompatibleChatUsage(usage),
|
|
910
|
+
usage: convertUsage(usage),
|
|
906
911
|
providerMetadata
|
|
907
912
|
});
|
|
908
913
|
}
|
|
@@ -1693,7 +1698,7 @@ async function fileToBlob(file) {
|
|
|
1693
1698
|
var import_provider_utils6 = require("@ai-sdk/provider-utils");
|
|
1694
1699
|
|
|
1695
1700
|
// src/version.ts
|
|
1696
|
-
var VERSION = true ? "2.0.44" : "0.0.0-test";
|
|
1701
|
+
var VERSION = true ? "2.0.46" : "0.0.0-test";
|
|
1697
1702
|
|
|
1698
1703
|
// src/openai-compatible-provider.ts
|
|
1699
1704
|
function createOpenAICompatible(options) {
|
|
@@ -1721,8 +1726,10 @@ function createOpenAICompatible(options) {
|
|
|
1721
1726
|
...getCommonModelConfig("chat"),
|
|
1722
1727
|
includeUsage: options.includeUsage,
|
|
1723
1728
|
supportsStructuredOutputs: options.supportsStructuredOutputs,
|
|
1729
|
+
supportedUrls: options.supportedUrls,
|
|
1724
1730
|
transformRequestBody: options.transformRequestBody,
|
|
1725
|
-
metadataExtractor: options.metadataExtractor
|
|
1731
|
+
metadataExtractor: options.metadataExtractor,
|
|
1732
|
+
convertUsage: options.convertUsage
|
|
1726
1733
|
});
|
|
1727
1734
|
const createCompletionModel = (modelId) => new OpenAICompatibleCompletionLanguageModel(modelId, {
|
|
1728
1735
|
...getCommonModelConfig("completion"),
|