@ai-sdk/openai-compatible 3.0.0-beta.9 → 3.0.0-canary.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/CHANGELOG.md +251 -0
  2. package/README.md +2 -0
  3. package/dist/index.d.ts +69 -8
  4. package/dist/index.js +546 -427
  5. package/dist/index.js.map +1 -1
  6. package/dist/internal/index.d.ts +20 -2
  7. package/dist/internal/index.js +91 -91
  8. package/dist/internal/index.js.map +1 -1
  9. package/package.json +13 -12
  10. package/src/chat/convert-openai-compatible-chat-usage.ts +1 -1
  11. package/src/chat/convert-to-openai-compatible-chat-messages.ts +94 -73
  12. package/src/chat/map-openai-compatible-finish-reason.ts +1 -1
  13. package/src/chat/openai-compatible-api-types.ts +1 -1
  14. package/src/chat/openai-compatible-chat-language-model.ts +197 -189
  15. package/src/chat/openai-compatible-metadata-extractor.ts +1 -1
  16. package/src/chat/openai-compatible-prepare-tools.ts +2 -3
  17. package/src/completion/convert-openai-compatible-completion-usage.ts +1 -1
  18. package/src/completion/convert-to-openai-compatible-completion-prompt.ts +1 -2
  19. package/src/completion/map-openai-compatible-finish-reason.ts +1 -1
  20. package/src/completion/openai-compatible-completion-language-model.ts +51 -14
  21. package/src/embedding/openai-compatible-embedding-model.ts +36 -10
  22. package/src/image/openai-compatible-image-model.ts +35 -13
  23. package/src/index.ts +3 -3
  24. package/src/openai-compatible-error.ts +1 -2
  25. package/src/openai-compatible-provider.ts +18 -5
  26. package/src/utils/to-camel-case.ts +43 -0
  27. package/dist/index.d.mts +0 -290
  28. package/dist/index.mjs +0 -1744
  29. package/dist/index.mjs.map +0 -1
  30. package/dist/internal/index.d.mts +0 -193
  31. package/dist/internal/index.mjs +0 -340
  32. package/dist/internal/index.mjs.map +0 -1
  33. /package/src/chat/{openai-compatible-chat-options.ts → openai-compatible-chat-language-model-options.ts} +0 -0
  34. /package/src/completion/{openai-compatible-completion-options.ts → openai-compatible-completion-language-model-options.ts} +0 -0
  35. /package/src/embedding/{openai-compatible-embedding-options.ts → openai-compatible-embedding-model-options.ts} +0 -0
package/CHANGELOG.md CHANGED
@@ -1,5 +1,256 @@
1
1
  # @ai-sdk/openai-compatible
2
2
 
3
+ ## 3.0.0-canary.38
4
+
5
+ ### Patch Changes
6
+
7
+ - cd9c311: fix(openai, openai-compatible): only send null content for assistant messages with tool calls
8
+
9
+ ## 3.0.0-canary.37
10
+
11
+ ### Patch Changes
12
+
13
+ - 0c4c275: trigger initial canary release
14
+ - Updated dependencies [0c4c275]
15
+ - @ai-sdk/provider-utils@5.0.0-canary.31
16
+ - @ai-sdk/provider@4.0.0-canary.15
17
+
18
+ ## 3.0.0-beta.36
19
+
20
+ ### Patch Changes
21
+
22
+ - e59c955: feat(vertex): add grok models to vertex provider
23
+
24
+ ## 3.0.0-beta.35
25
+
26
+ ### Major Changes
27
+
28
+ - 04e9009: chore: make provider implementations code patterns more consistent, including renaming certain exported symbols
29
+
30
+ For all externally exported symbols that were renamed, the old names continue to work via deprecated aliases.
31
+
32
+ ### Patch Changes
33
+
34
+ - Updated dependencies [08d2129]
35
+ - @ai-sdk/provider-utils@5.0.0-beta.30
36
+
37
+ ## 3.0.0-beta.34
38
+
39
+ ### Patch Changes
40
+
41
+ - 9bd6512: feat(provider): change file part data property to be tagged with a type and remove the image part type
42
+ - 258c093: chore: ensure consistent import handling and avoid import duplicates or cycles
43
+ - Updated dependencies [9bd6512]
44
+ - Updated dependencies [258c093]
45
+ - Updated dependencies [b6783da]
46
+ - @ai-sdk/provider-utils@5.0.0-beta.29
47
+ - @ai-sdk/provider@4.0.0-beta.14
48
+
49
+ ## 3.0.0-beta.33
50
+
51
+ ### Patch Changes
52
+
53
+ - 9f0e36c: trigger release for all packages after provenance setup
54
+ - Updated dependencies [9f0e36c]
55
+ - @ai-sdk/provider@4.0.0-beta.13
56
+ - @ai-sdk/provider-utils@5.0.0-beta.28
57
+
58
+ ## 3.0.0-beta.32
59
+
60
+ ### Patch Changes
61
+
62
+ - ab81968: fix(openai-compatible): buffer tool call deltas until function.name arrives
63
+ - 58a2ad7: fix: more precise default message for tool execution denial
64
+ - Updated dependencies [785fe16]
65
+ - Updated dependencies [67df0a0]
66
+ - Updated dependencies [befb78c]
67
+ - Updated dependencies [0458559]
68
+ - Updated dependencies [5852c0a]
69
+ - Updated dependencies [fc92055]
70
+ - @ai-sdk/provider-utils@5.0.0-beta.27
71
+
72
+ ## 3.0.0-beta.31
73
+
74
+ ### Patch Changes
75
+
76
+ - bfb756d: patch - send content: null instead of empty string for tool-only assistant messages
77
+ - Updated dependencies [2e98477]
78
+ - @ai-sdk/provider-utils@5.0.0-beta.26
79
+
80
+ ## 3.0.0-beta.30
81
+
82
+ ### Patch Changes
83
+
84
+ - Updated dependencies [eea8d98]
85
+ - @ai-sdk/provider-utils@5.0.0-beta.25
86
+
87
+ ## 3.0.0-beta.29
88
+
89
+ ### Patch Changes
90
+
91
+ - f807e45: Extract shared `StreamingToolCallTracker` class into `@ai-sdk/provider-utils` to deduplicate streaming tool call handling across OpenAI-compatible providers. Also adds missing `generateId()` fallback for `toolCallId` in Alibaba's `doGenerate` path and ensures all providers finalize unfinished tool calls during stream flush.
92
+ - Updated dependencies [f807e45]
93
+ - @ai-sdk/provider-utils@5.0.0-beta.24
94
+
95
+ ## 3.0.0-beta.28
96
+
97
+ ### Patch Changes
98
+
99
+ - Updated dependencies [350ea38]
100
+ - @ai-sdk/provider-utils@5.0.0-beta.23
101
+
102
+ ## 3.0.0-beta.27
103
+
104
+ ### Patch Changes
105
+
106
+ - Updated dependencies [083947b]
107
+ - @ai-sdk/provider-utils@5.0.0-beta.22
108
+
109
+ ## 3.0.0-beta.26
110
+
111
+ ### Patch Changes
112
+
113
+ - Updated dependencies [add1126]
114
+ - @ai-sdk/provider-utils@5.0.0-beta.21
115
+
116
+ ## 3.0.0-beta.25
117
+
118
+ ### Patch Changes
119
+
120
+ - b3976a2: Add workflow serialization support to all provider models.
121
+
122
+ **`@ai-sdk/provider-utils`:** New `serializeModel()` helper that extracts only serializable properties from a model instance, filtering out functions and objects containing functions. Third-party provider authors can use this to add workflow support to their own models.
123
+
124
+ **All providers:** `headers` is now optional in provider config types. This is non-breaking — existing code that passes `headers` continues to work. Custom provider implementations that construct model configs manually can now omit `headers`, which is useful when models are deserialized from a workflow step boundary where auth is provided separately.
125
+
126
+ All provider model classes now include `WORKFLOW_SERIALIZE` and `WORKFLOW_DESERIALIZE` static methods, enabling them to cross workflow step boundaries without serialization errors.
127
+
128
+ - Updated dependencies [b3976a2]
129
+ - Updated dependencies [ff5eba1]
130
+ - @ai-sdk/provider-utils@5.0.0-beta.20
131
+ - @ai-sdk/provider@4.0.0-beta.12
132
+
133
+ ## 3.0.0-beta.24
134
+
135
+ ### Major Changes
136
+
137
+ - ef992f8: Remove CommonJS exports from all packages. All packages are now ESM-only (`"type": "module"`). Consumers using `require()` must switch to ESM `import` syntax.
138
+
139
+ ### Patch Changes
140
+
141
+ - Updated dependencies [ef992f8]
142
+ - @ai-sdk/provider@4.0.0-beta.11
143
+ - @ai-sdk/provider-utils@5.0.0-beta.19
144
+
145
+ ## 3.0.0-beta.23
146
+
147
+ ### Patch Changes
148
+
149
+ - 90e2d8a: chore: fix unused vars not being flagged by our lint tooling
150
+ - Updated dependencies [90e2d8a]
151
+ - @ai-sdk/provider-utils@5.0.0-beta.18
152
+
153
+ ## 3.0.0-beta.22
154
+
155
+ ### Patch Changes
156
+
157
+ - Updated dependencies [3ae1786]
158
+ - @ai-sdk/provider-utils@5.0.0-beta.17
159
+
160
+ ## 3.0.0-beta.21
161
+
162
+ ### Patch Changes
163
+
164
+ - Updated dependencies [176466a]
165
+ - @ai-sdk/provider@4.0.0-beta.10
166
+ - @ai-sdk/provider-utils@5.0.0-beta.16
167
+
168
+ ## 3.0.0-beta.20
169
+
170
+ ### Patch Changes
171
+
172
+ - Updated dependencies [e311194]
173
+ - @ai-sdk/provider@4.0.0-beta.9
174
+ - @ai-sdk/provider-utils@5.0.0-beta.15
175
+
176
+ ## 3.0.0-beta.19
177
+
178
+ ### Patch Changes
179
+
180
+ - 008271d: feat(openai-compatible): emit warning when using kebab-case instead of camelCase
181
+ - Updated dependencies [34bd95d]
182
+ - Updated dependencies [008271d]
183
+ - @ai-sdk/provider@4.0.0-beta.8
184
+ - @ai-sdk/provider-utils@5.0.0-beta.14
185
+
186
+ ## 3.0.0-beta.18
187
+
188
+ ### Patch Changes
189
+
190
+ - Updated dependencies [b0c2869]
191
+ - Updated dependencies [7e26e81]
192
+ - @ai-sdk/provider-utils@5.0.0-beta.13
193
+
194
+ ## 3.0.0-beta.17
195
+
196
+ ### Patch Changes
197
+
198
+ - 816ff67: fix(openai-compatible): honor camelCase providerOptions key in chat and completion models
199
+
200
+ ## 3.0.0-beta.16
201
+
202
+ ### Patch Changes
203
+
204
+ - Updated dependencies [46d1149]
205
+ - @ai-sdk/provider-utils@5.0.0-beta.12
206
+
207
+ ## 3.0.0-beta.15
208
+
209
+ ### Patch Changes
210
+
211
+ - 6fd51c0: fix(provider): preserve error type prefix in getErrorMessage
212
+ - Updated dependencies [6fd51c0]
213
+ - @ai-sdk/provider-utils@5.0.0-beta.11
214
+ - @ai-sdk/provider@4.0.0-beta.7
215
+
216
+ ## 3.0.0-beta.14
217
+
218
+ ### Patch Changes
219
+
220
+ - c29a26f: feat(provider): add support for provider references and uploading files as supported per provider
221
+ - Updated dependencies [c29a26f]
222
+ - @ai-sdk/provider-utils@5.0.0-beta.10
223
+ - @ai-sdk/provider@4.0.0-beta.6
224
+
225
+ ## 3.0.0-beta.13
226
+
227
+ ### Patch Changes
228
+
229
+ - 38fc777: Add AI Gateway hint to provider READMEs
230
+
231
+ ## 3.0.0-beta.12
232
+
233
+ ### Patch Changes
234
+
235
+ - Updated dependencies [2e17091]
236
+ - @ai-sdk/provider-utils@5.0.0-beta.9
237
+
238
+ ## 3.0.0-beta.11
239
+
240
+ ### Patch Changes
241
+
242
+ - Updated dependencies [986c6fd]
243
+ - Updated dependencies [493295c]
244
+ - @ai-sdk/provider-utils@5.0.0-beta.8
245
+
246
+ ## 3.0.0-beta.10
247
+
248
+ ### Patch Changes
249
+
250
+ - Updated dependencies [1f509d4]
251
+ - @ai-sdk/provider-utils@5.0.0-beta.7
252
+ - @ai-sdk/provider@4.0.0-beta.5
253
+
3
254
  ## 3.0.0-beta.9
4
255
 
5
256
  ### Patch Changes
package/README.md CHANGED
@@ -4,6 +4,8 @@ This package provides a foundation for implementing providers that expose an Ope
4
4
 
5
5
  The primary [OpenAI provider](../openai/README.md) is more feature-rich, including OpenAI-specific experimental and legacy features. This package offers a lighter-weight alternative focused on core OpenAI-compatible functionality.
6
6
 
7
+ > **Deploying to Vercel?** With Vercel's AI Gateway you can access hundreds of models from any provider — no additional packages, API keys, or extra cost. [Get started with AI Gateway](https://vercel.com/ai-gateway).
8
+
7
9
  ## Setup
8
10
 
9
11
  The provider is available in the `@ai-sdk/openai-compatible` module. You can install it with
package/dist/index.d.ts CHANGED
@@ -1,5 +1,6 @@
1
- import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
2
- import { FetchFunction } from '@ai-sdk/provider-utils';
1
+ import * as _ai_sdk_provider from '@ai-sdk/provider';
2
+ import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4Usage, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
3
+ import { FetchFunction, WORKFLOW_SERIALIZE, WORKFLOW_DESERIALIZE } from '@ai-sdk/provider-utils';
3
4
  import { ZodType, z } from 'zod/v4';
4
5
 
5
6
  declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
@@ -71,7 +72,7 @@ type MetadataExtractor = {
71
72
 
72
73
  type OpenAICompatibleChatConfig = {
73
74
  provider: string;
74
- headers: () => Record<string, string | undefined>;
75
+ headers?: () => Record<string, string | undefined>;
75
76
  url: (options: {
76
77
  modelId: string;
77
78
  path: string;
@@ -94,23 +95,50 @@ type OpenAICompatibleChatConfig = {
94
95
  * than the official OpenAI API.
95
96
  */
96
97
  transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
98
+ /**
99
+ * Optional usage converter for OpenAI-compatible providers with different
100
+ * token accounting semantics.
101
+ */
102
+ convertUsage?: (usage: z.infer<typeof openaiCompatibleTokenUsageSchema>) => LanguageModelV4Usage;
97
103
  };
98
104
  declare class OpenAICompatibleChatLanguageModel implements LanguageModelV4 {
99
105
  readonly specificationVersion = "v4";
100
106
  readonly supportsStructuredOutputs: boolean;
101
107
  readonly modelId: OpenAICompatibleChatModelId;
102
- private readonly config;
108
+ protected readonly config: OpenAICompatibleChatConfig;
103
109
  private readonly failedResponseHandler;
104
110
  private readonly chunkSchema;
111
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleChatLanguageModel): {
112
+ modelId: string;
113
+ config: _ai_sdk_provider.JSONObject;
114
+ };
115
+ static [WORKFLOW_DESERIALIZE](options: {
116
+ modelId: string;
117
+ config: OpenAICompatibleChatConfig;
118
+ }): OpenAICompatibleChatLanguageModel;
105
119
  constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig);
106
120
  get provider(): string;
107
121
  private get providerOptionsName();
108
122
  get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
109
123
  private transformRequestBody;
124
+ private convertUsage;
110
125
  private getArgs;
111
126
  doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
112
127
  doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
113
128
  }
129
+ declare const openaiCompatibleTokenUsageSchema: z.ZodOptional<z.ZodNullable<z.ZodObject<{
130
+ prompt_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
131
+ completion_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
132
+ total_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
133
+ prompt_tokens_details: z.ZodOptional<z.ZodNullable<z.ZodObject<{
134
+ cached_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
135
+ }, z.core.$strip>>>;
136
+ completion_tokens_details: z.ZodOptional<z.ZodNullable<z.ZodObject<{
137
+ reasoning_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
138
+ accepted_prediction_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
139
+ rejected_prediction_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
140
+ }, z.core.$strip>>>;
141
+ }, z.core.$loose>>>;
114
142
 
115
143
  type OpenAICompatibleCompletionModelId = string;
116
144
  declare const openaiCompatibleLanguageModelCompletionOptions: z.ZodObject<{
@@ -124,7 +152,7 @@ type OpenAICompatibleLanguageModelCompletionOptions = z.infer<typeof openaiCompa
124
152
  type OpenAICompatibleCompletionConfig = {
125
153
  provider: string;
126
154
  includeUsage?: boolean;
127
- headers: () => Record<string, string | undefined>;
155
+ headers?: () => Record<string, string | undefined>;
128
156
  url: (options: {
129
157
  modelId: string;
130
158
  path: string;
@@ -142,6 +170,14 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV4
142
170
  private readonly config;
143
171
  private readonly failedResponseHandler;
144
172
  private readonly chunkSchema;
173
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleCompletionLanguageModel): {
174
+ modelId: string;
175
+ config: _ai_sdk_provider.JSONObject;
176
+ };
177
+ static [WORKFLOW_DESERIALIZE](options: {
178
+ modelId: string;
179
+ config: OpenAICompatibleCompletionConfig;
180
+ }): OpenAICompatibleCompletionLanguageModel;
145
181
  constructor(modelId: OpenAICompatibleCompletionModelId, config: OpenAICompatibleCompletionConfig);
146
182
  get provider(): string;
147
183
  private get providerOptionsName();
@@ -172,7 +208,7 @@ type OpenAICompatibleEmbeddingConfig = {
172
208
  modelId: string;
173
209
  path: string;
174
210
  }) => string;
175
- headers: () => Record<string, string | undefined>;
211
+ headers?: () => Record<string, string | undefined>;
176
212
  fetch?: FetchFunction;
177
213
  errorStructure?: ProviderErrorStructure<any>;
178
214
  };
@@ -183,6 +219,14 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV4 {
183
219
  get provider(): string;
184
220
  get maxEmbeddingsPerCall(): number;
185
221
  get supportsParallelCalls(): boolean;
222
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleEmbeddingModel): {
223
+ modelId: string;
224
+ config: _ai_sdk_provider.JSONObject;
225
+ };
226
+ static [WORKFLOW_DESERIALIZE](options: {
227
+ modelId: string;
228
+ config: OpenAICompatibleEmbeddingConfig;
229
+ }): OpenAICompatibleEmbeddingModel;
186
230
  constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
187
231
  private get providerOptionsName();
188
232
  doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
@@ -192,7 +236,7 @@ type OpenAICompatibleImageModelId = string;
192
236
 
193
237
  type OpenAICompatibleImageModelConfig = {
194
238
  provider: string;
195
- headers: () => Record<string, string | undefined>;
239
+ headers?: () => Record<string, string | undefined>;
196
240
  url: (options: {
197
241
  modelId: string;
198
242
  path: string;
@@ -213,12 +257,20 @@ declare class OpenAICompatibleImageModel implements ImageModelV4 {
213
257
  * The provider options key used to extract provider-specific options.
214
258
  */
215
259
  private get providerOptionsKey();
260
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleImageModel): {
261
+ modelId: string;
262
+ config: _ai_sdk_provider.JSONObject;
263
+ };
264
+ static [WORKFLOW_DESERIALIZE](options: {
265
+ modelId: string;
266
+ config: OpenAICompatibleImageModelConfig;
267
+ }): OpenAICompatibleImageModel;
216
268
  constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
217
269
  private getArgs;
218
270
  doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
219
271
  }
220
272
 
221
- interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV4, 'imageModel'> {
273
+ interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends ProviderV4 {
222
274
  (modelId: CHAT_MODEL_IDS): LanguageModelV4;
223
275
  languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV4;
224
276
  chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV4;
@@ -279,6 +331,15 @@ interface OpenAICompatibleProviderSettings {
279
331
  * or provider-specific metrics from both streaming and non-streaming responses.
280
332
  */
281
333
  metadataExtractor?: MetadataExtractor;
334
+ /**
335
+ * The supported URLs for chat models.
336
+ */
337
+ supportedUrls?: OpenAICompatibleChatConfig['supportedUrls'];
338
+ /**
339
+ * Optional usage converter for providers with token accounting semantics that
340
+ * differ from the default OpenAI-compatible shape.
341
+ */
342
+ convertUsage?: OpenAICompatibleChatConfig['convertUsage'];
282
343
  }
283
344
  /**
284
345
  * Create an OpenAICompatible provider instance.