@ai-sdk/openai-compatible 3.0.0-beta.8 → 3.0.0-canary.37

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/CHANGELOG.md +251 -0
  2. package/README.md +2 -0
  3. package/dist/index.d.ts +69 -8
  4. package/dist/index.js +548 -428
  5. package/dist/index.js.map +1 -1
  6. package/dist/internal/index.d.ts +20 -2
  7. package/dist/internal/index.js +91 -91
  8. package/dist/internal/index.js.map +1 -1
  9. package/package.json +14 -13
  10. package/src/chat/convert-openai-compatible-chat-usage.ts +1 -1
  11. package/src/chat/convert-to-openai-compatible-chat-messages.ts +94 -73
  12. package/src/chat/map-openai-compatible-finish-reason.ts +1 -1
  13. package/src/chat/openai-compatible-api-types.ts +1 -1
  14. package/src/chat/openai-compatible-chat-language-model.ts +204 -190
  15. package/src/chat/openai-compatible-metadata-extractor.ts +1 -1
  16. package/src/chat/openai-compatible-prepare-tools.ts +2 -3
  17. package/src/completion/convert-openai-compatible-completion-usage.ts +1 -1
  18. package/src/completion/convert-to-openai-compatible-completion-prompt.ts +1 -2
  19. package/src/completion/map-openai-compatible-finish-reason.ts +1 -1
  20. package/src/completion/openai-compatible-completion-language-model.ts +51 -14
  21. package/src/embedding/openai-compatible-embedding-model.ts +36 -10
  22. package/src/image/openai-compatible-image-model.ts +35 -13
  23. package/src/index.ts +3 -3
  24. package/src/openai-compatible-error.ts +1 -2
  25. package/src/openai-compatible-provider.ts +18 -5
  26. package/src/utils/to-camel-case.ts +43 -0
  27. package/dist/index.d.mts +0 -290
  28. package/dist/index.mjs +0 -1742
  29. package/dist/index.mjs.map +0 -1
  30. package/dist/internal/index.d.mts +0 -193
  31. package/dist/internal/index.mjs +0 -340
  32. package/dist/internal/index.mjs.map +0 -1
  33. /package/src/chat/{openai-compatible-chat-options.ts → openai-compatible-chat-language-model-options.ts} +0 -0
  34. /package/src/completion/{openai-compatible-completion-options.ts → openai-compatible-completion-language-model-options.ts} +0 -0
  35. /package/src/embedding/{openai-compatible-embedding-options.ts → openai-compatible-embedding-model-options.ts} +0 -0
package/CHANGELOG.md CHANGED
@@ -1,5 +1,256 @@
1
1
  # @ai-sdk/openai-compatible
2
2
 
3
+ ## 3.0.0-canary.37
4
+
5
+ ### Patch Changes
6
+
7
+ - 0c4c275: trigger initial canary release
8
+ - Updated dependencies [0c4c275]
9
+ - @ai-sdk/provider-utils@5.0.0-canary.31
10
+ - @ai-sdk/provider@4.0.0-canary.15
11
+
12
+ ## 3.0.0-beta.36
13
+
14
+ ### Patch Changes
15
+
16
+ - e59c955: feat(vertex): add grok models to vertex provider
17
+
18
+ ## 3.0.0-beta.35
19
+
20
+ ### Major Changes
21
+
22
+ 04e9009: chore: make provider implementations' code patterns more consistent, including renaming certain exported symbols
23
+
24
+ For all externally exported symbols that were renamed, the old names continue to work via deprecated aliases.
25
+
26
+ ### Patch Changes
27
+
28
+ - Updated dependencies [08d2129]
29
+ - @ai-sdk/provider-utils@5.0.0-beta.30
30
+
31
+ ## 3.0.0-beta.34
32
+
33
+ ### Patch Changes
34
+
35
+ - 9bd6512: feat(provider): change file part data property to be tagged with a type and remove the image part type
36
+ - 258c093: chore: ensure consistent import handling and avoid import duplicates or cycles
37
+ - Updated dependencies [9bd6512]
38
+ - Updated dependencies [258c093]
39
+ - Updated dependencies [b6783da]
40
+ - @ai-sdk/provider-utils@5.0.0-beta.29
41
+ - @ai-sdk/provider@4.0.0-beta.14
42
+
43
+ ## 3.0.0-beta.33
44
+
45
+ ### Patch Changes
46
+
47
+ - 9f0e36c: trigger release for all packages after provenance setup
48
+ - Updated dependencies [9f0e36c]
49
+ - @ai-sdk/provider@4.0.0-beta.13
50
+ - @ai-sdk/provider-utils@5.0.0-beta.28
51
+
52
+ ## 3.0.0-beta.32
53
+
54
+ ### Patch Changes
55
+
56
+ - ab81968: fix(openai-compatible): buffer tool call deltas until function.name arrives
57
+ - 58a2ad7: fix: more precise default message for tool execution denial
58
+ - Updated dependencies [785fe16]
59
+ - Updated dependencies [67df0a0]
60
+ - Updated dependencies [befb78c]
61
+ - Updated dependencies [0458559]
62
+ - Updated dependencies [5852c0a]
63
+ - Updated dependencies [fc92055]
64
+ - @ai-sdk/provider-utils@5.0.0-beta.27
65
+
66
+ ## 3.0.0-beta.31
67
+
68
+ ### Patch Changes
69
+
70
+ - bfb756d: patch - send content: null instead of empty string for tool-only assistant messages
71
+ - Updated dependencies [2e98477]
72
+ - @ai-sdk/provider-utils@5.0.0-beta.26
73
+
74
+ ## 3.0.0-beta.30
75
+
76
+ ### Patch Changes
77
+
78
+ - Updated dependencies [eea8d98]
79
+ - @ai-sdk/provider-utils@5.0.0-beta.25
80
+
81
+ ## 3.0.0-beta.29
82
+
83
+ ### Patch Changes
84
+
85
+ - f807e45: Extract shared `StreamingToolCallTracker` class into `@ai-sdk/provider-utils` to deduplicate streaming tool call handling across OpenAI-compatible providers. Also adds missing `generateId()` fallback for `toolCallId` in Alibaba's `doGenerate` path and ensures all providers finalize unfinished tool calls during stream flush.
86
+ - Updated dependencies [f807e45]
87
+ - @ai-sdk/provider-utils@5.0.0-beta.24
88
+
89
+ ## 3.0.0-beta.28
90
+
91
+ ### Patch Changes
92
+
93
+ - Updated dependencies [350ea38]
94
+ - @ai-sdk/provider-utils@5.0.0-beta.23
95
+
96
+ ## 3.0.0-beta.27
97
+
98
+ ### Patch Changes
99
+
100
+ - Updated dependencies [083947b]
101
+ - @ai-sdk/provider-utils@5.0.0-beta.22
102
+
103
+ ## 3.0.0-beta.26
104
+
105
+ ### Patch Changes
106
+
107
+ - Updated dependencies [add1126]
108
+ - @ai-sdk/provider-utils@5.0.0-beta.21
109
+
110
+ ## 3.0.0-beta.25
111
+
112
+ ### Patch Changes
113
+
114
+ - b3976a2: Add workflow serialization support to all provider models.
115
+
116
+ **`@ai-sdk/provider-utils`:** New `serializeModel()` helper that extracts only serializable properties from a model instance, filtering out functions and objects containing functions. Third-party provider authors can use this to add workflow support to their own models.
117
+
118
+ **All providers:** `headers` is now optional in provider config types. This is non-breaking — existing code that passes `headers` continues to work. Custom provider implementations that construct model configs manually can now omit `headers`, which is useful when models are deserialized from a workflow step boundary where auth is provided separately.
119
+
120
+ All provider model classes now include `WORKFLOW_SERIALIZE` and `WORKFLOW_DESERIALIZE` static methods, enabling them to cross workflow step boundaries without serialization errors.
121
+
122
+ - Updated dependencies [b3976a2]
123
+ - Updated dependencies [ff5eba1]
124
+ - @ai-sdk/provider-utils@5.0.0-beta.20
125
+ - @ai-sdk/provider@4.0.0-beta.12
126
+
127
+ ## 3.0.0-beta.24
128
+
129
+ ### Major Changes
130
+
131
+ - ef992f8: Remove CommonJS exports from all packages. All packages are now ESM-only (`"type": "module"`). Consumers using `require()` must switch to ESM `import` syntax.
132
+
133
+ ### Patch Changes
134
+
135
+ - Updated dependencies [ef992f8]
136
+ - @ai-sdk/provider@4.0.0-beta.11
137
+ - @ai-sdk/provider-utils@5.0.0-beta.19
138
+
139
+ ## 3.0.0-beta.23
140
+
141
+ ### Patch Changes
142
+
143
+ - 90e2d8a: chore: fix unused vars not being flagged by our lint tooling
144
+ - Updated dependencies [90e2d8a]
145
+ - @ai-sdk/provider-utils@5.0.0-beta.18
146
+
147
+ ## 3.0.0-beta.22
148
+
149
+ ### Patch Changes
150
+
151
+ - Updated dependencies [3ae1786]
152
+ - @ai-sdk/provider-utils@5.0.0-beta.17
153
+
154
+ ## 3.0.0-beta.21
155
+
156
+ ### Patch Changes
157
+
158
+ - Updated dependencies [176466a]
159
+ - @ai-sdk/provider@4.0.0-beta.10
160
+ - @ai-sdk/provider-utils@5.0.0-beta.16
161
+
162
+ ## 3.0.0-beta.20
163
+
164
+ ### Patch Changes
165
+
166
+ - Updated dependencies [e311194]
167
+ - @ai-sdk/provider@4.0.0-beta.9
168
+ - @ai-sdk/provider-utils@5.0.0-beta.15
169
+
170
+ ## 3.0.0-beta.19
171
+
172
+ ### Patch Changes
173
+
174
+ - 008271d: feat(openai-compatible): emit warning when using kebab-case instead of camelCase
175
+ - Updated dependencies [34bd95d]
176
+ - Updated dependencies [008271d]
177
+ - @ai-sdk/provider@4.0.0-beta.8
178
+ - @ai-sdk/provider-utils@5.0.0-beta.14
179
+
180
+ ## 3.0.0-beta.18
181
+
182
+ ### Patch Changes
183
+
184
+ - Updated dependencies [b0c2869]
185
+ - Updated dependencies [7e26e81]
186
+ - @ai-sdk/provider-utils@5.0.0-beta.13
187
+
188
+ ## 3.0.0-beta.17
189
+
190
+ ### Patch Changes
191
+
192
+ - 816ff67: fix(openai-compatible): honor camelCase providerOptions key in chat and completion models
193
+
194
+ ## 3.0.0-beta.16
195
+
196
+ ### Patch Changes
197
+
198
+ - Updated dependencies [46d1149]
199
+ - @ai-sdk/provider-utils@5.0.0-beta.12
200
+
201
+ ## 3.0.0-beta.15
202
+
203
+ ### Patch Changes
204
+
205
+ - 6fd51c0: fix(provider): preserve error type prefix in getErrorMessage
206
+ - Updated dependencies [6fd51c0]
207
+ - @ai-sdk/provider-utils@5.0.0-beta.11
208
+ - @ai-sdk/provider@4.0.0-beta.7
209
+
210
+ ## 3.0.0-beta.14
211
+
212
+ ### Patch Changes
213
+
214
+ - c29a26f: feat(provider): add support for provider references and uploading files as supported per provider
215
+ - Updated dependencies [c29a26f]
216
+ - @ai-sdk/provider-utils@5.0.0-beta.10
217
+ - @ai-sdk/provider@4.0.0-beta.6
218
+
219
+ ## 3.0.0-beta.13
220
+
221
+ ### Patch Changes
222
+
223
+ - 38fc777: Add AI Gateway hint to provider READMEs
224
+
225
+ ## 3.0.0-beta.12
226
+
227
+ ### Patch Changes
228
+
229
+ - Updated dependencies [2e17091]
230
+ - @ai-sdk/provider-utils@5.0.0-beta.9
231
+
232
+ ## 3.0.0-beta.11
233
+
234
+ ### Patch Changes
235
+
236
+ - Updated dependencies [986c6fd]
237
+ - Updated dependencies [493295c]
238
+ - @ai-sdk/provider-utils@5.0.0-beta.8
239
+
240
+ ## 3.0.0-beta.10
241
+
242
+ ### Patch Changes
243
+
244
+ - Updated dependencies [1f509d4]
245
+ - @ai-sdk/provider-utils@5.0.0-beta.7
246
+ - @ai-sdk/provider@4.0.0-beta.5
247
+
248
+ ## 3.0.0-beta.9
249
+
250
+ ### Patch Changes
251
+
252
+ - 74d520f: feat: migrate providers to support new top-level `reasoning` parameter
253
+
3
254
  ## 3.0.0-beta.8
4
255
 
5
256
  ### Patch Changes
package/README.md CHANGED
@@ -4,6 +4,8 @@ This package provides a foundation for implementing providers that expose an Ope
4
4
 
5
5
  The primary [OpenAI provider](../openai/README.md) is more feature-rich, including OpenAI-specific experimental and legacy features. This package offers a lighter-weight alternative focused on core OpenAI-compatible functionality.
6
6
 
7
+ > **Deploying to Vercel?** With Vercel's AI Gateway you can access hundreds of models from any provider — no additional packages, API keys, or extra cost. [Get started with AI Gateway](https://vercel.com/ai-gateway).
8
+
7
9
  ## Setup
8
10
 
9
11
  The provider is available in the `@ai-sdk/openai-compatible` module. You can install it with
package/dist/index.d.ts CHANGED
@@ -1,5 +1,6 @@
1
- import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
2
- import { FetchFunction } from '@ai-sdk/provider-utils';
1
+ import * as _ai_sdk_provider from '@ai-sdk/provider';
2
+ import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4Usage, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
3
+ import { FetchFunction, WORKFLOW_SERIALIZE, WORKFLOW_DESERIALIZE } from '@ai-sdk/provider-utils';
3
4
  import { ZodType, z } from 'zod/v4';
4
5
 
5
6
  declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
@@ -71,7 +72,7 @@ type MetadataExtractor = {
71
72
 
72
73
  type OpenAICompatibleChatConfig = {
73
74
  provider: string;
74
- headers: () => Record<string, string | undefined>;
75
+ headers?: () => Record<string, string | undefined>;
75
76
  url: (options: {
76
77
  modelId: string;
77
78
  path: string;
@@ -94,23 +95,50 @@ type OpenAICompatibleChatConfig = {
94
95
  * than the official OpenAI API.
95
96
  */
96
97
  transformRequestBody?: (args: Record<string, any>) => Record<string, any>;
98
+ /**
99
+ * Optional usage converter for OpenAI-compatible providers with different
100
+ * token accounting semantics.
101
+ */
102
+ convertUsage?: (usage: z.infer<typeof openaiCompatibleTokenUsageSchema>) => LanguageModelV4Usage;
97
103
  };
98
104
  declare class OpenAICompatibleChatLanguageModel implements LanguageModelV4 {
99
105
  readonly specificationVersion = "v4";
100
106
  readonly supportsStructuredOutputs: boolean;
101
107
  readonly modelId: OpenAICompatibleChatModelId;
102
- private readonly config;
108
+ protected readonly config: OpenAICompatibleChatConfig;
103
109
  private readonly failedResponseHandler;
104
110
  private readonly chunkSchema;
111
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleChatLanguageModel): {
112
+ modelId: string;
113
+ config: _ai_sdk_provider.JSONObject;
114
+ };
115
+ static [WORKFLOW_DESERIALIZE](options: {
116
+ modelId: string;
117
+ config: OpenAICompatibleChatConfig;
118
+ }): OpenAICompatibleChatLanguageModel;
105
119
  constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig);
106
120
  get provider(): string;
107
121
  private get providerOptionsName();
108
122
  get supportedUrls(): Record<string, RegExp[]> | PromiseLike<Record<string, RegExp[]>>;
109
123
  private transformRequestBody;
124
+ private convertUsage;
110
125
  private getArgs;
111
126
  doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
112
127
  doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
113
128
  }
129
+ declare const openaiCompatibleTokenUsageSchema: z.ZodOptional<z.ZodNullable<z.ZodObject<{
130
+ prompt_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
131
+ completion_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
132
+ total_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
133
+ prompt_tokens_details: z.ZodOptional<z.ZodNullable<z.ZodObject<{
134
+ cached_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
135
+ }, z.core.$strip>>>;
136
+ completion_tokens_details: z.ZodOptional<z.ZodNullable<z.ZodObject<{
137
+ reasoning_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
138
+ accepted_prediction_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
139
+ rejected_prediction_tokens: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
140
+ }, z.core.$strip>>>;
141
+ }, z.core.$loose>>>;
114
142
 
115
143
  type OpenAICompatibleCompletionModelId = string;
116
144
  declare const openaiCompatibleLanguageModelCompletionOptions: z.ZodObject<{
@@ -124,7 +152,7 @@ type OpenAICompatibleLanguageModelCompletionOptions = z.infer<typeof openaiCompa
124
152
  type OpenAICompatibleCompletionConfig = {
125
153
  provider: string;
126
154
  includeUsage?: boolean;
127
- headers: () => Record<string, string | undefined>;
155
+ headers?: () => Record<string, string | undefined>;
128
156
  url: (options: {
129
157
  modelId: string;
130
158
  path: string;
@@ -142,6 +170,14 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV4
142
170
  private readonly config;
143
171
  private readonly failedResponseHandler;
144
172
  private readonly chunkSchema;
173
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleCompletionLanguageModel): {
174
+ modelId: string;
175
+ config: _ai_sdk_provider.JSONObject;
176
+ };
177
+ static [WORKFLOW_DESERIALIZE](options: {
178
+ modelId: string;
179
+ config: OpenAICompatibleCompletionConfig;
180
+ }): OpenAICompatibleCompletionLanguageModel;
145
181
  constructor(modelId: OpenAICompatibleCompletionModelId, config: OpenAICompatibleCompletionConfig);
146
182
  get provider(): string;
147
183
  private get providerOptionsName();
@@ -172,7 +208,7 @@ type OpenAICompatibleEmbeddingConfig = {
172
208
  modelId: string;
173
209
  path: string;
174
210
  }) => string;
175
- headers: () => Record<string, string | undefined>;
211
+ headers?: () => Record<string, string | undefined>;
176
212
  fetch?: FetchFunction;
177
213
  errorStructure?: ProviderErrorStructure<any>;
178
214
  };
@@ -183,6 +219,14 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV4 {
183
219
  get provider(): string;
184
220
  get maxEmbeddingsPerCall(): number;
185
221
  get supportsParallelCalls(): boolean;
222
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleEmbeddingModel): {
223
+ modelId: string;
224
+ config: _ai_sdk_provider.JSONObject;
225
+ };
226
+ static [WORKFLOW_DESERIALIZE](options: {
227
+ modelId: string;
228
+ config: OpenAICompatibleEmbeddingConfig;
229
+ }): OpenAICompatibleEmbeddingModel;
186
230
  constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
187
231
  private get providerOptionsName();
188
232
  doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
@@ -192,7 +236,7 @@ type OpenAICompatibleImageModelId = string;
192
236
 
193
237
  type OpenAICompatibleImageModelConfig = {
194
238
  provider: string;
195
- headers: () => Record<string, string | undefined>;
239
+ headers?: () => Record<string, string | undefined>;
196
240
  url: (options: {
197
241
  modelId: string;
198
242
  path: string;
@@ -213,12 +257,20 @@ declare class OpenAICompatibleImageModel implements ImageModelV4 {
213
257
  * The provider options key used to extract provider-specific options.
214
258
  */
215
259
  private get providerOptionsKey();
260
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleImageModel): {
261
+ modelId: string;
262
+ config: _ai_sdk_provider.JSONObject;
263
+ };
264
+ static [WORKFLOW_DESERIALIZE](options: {
265
+ modelId: string;
266
+ config: OpenAICompatibleImageModelConfig;
267
+ }): OpenAICompatibleImageModel;
216
268
  constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
217
269
  private getArgs;
218
270
  doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
219
271
  }
220
272
 
221
- interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends Omit<ProviderV4, 'imageModel'> {
273
+ interface OpenAICompatibleProvider<CHAT_MODEL_IDS extends string = string, COMPLETION_MODEL_IDS extends string = string, EMBEDDING_MODEL_IDS extends string = string, IMAGE_MODEL_IDS extends string = string> extends ProviderV4 {
222
274
  (modelId: CHAT_MODEL_IDS): LanguageModelV4;
223
275
  languageModel(modelId: CHAT_MODEL_IDS, config?: Partial<OpenAICompatibleChatConfig>): LanguageModelV4;
224
276
  chatModel(modelId: CHAT_MODEL_IDS): LanguageModelV4;
@@ -279,6 +331,15 @@ interface OpenAICompatibleProviderSettings {
279
331
  * or provider-specific metrics from both streaming and non-streaming responses.
280
332
  */
281
333
  metadataExtractor?: MetadataExtractor;
334
+ /**
335
+ * The supported URLs for chat models.
336
+ */
337
+ supportedUrls?: OpenAICompatibleChatConfig['supportedUrls'];
338
+ /**
339
+ * Optional usage converter for providers with token accounting semantics that
340
+ * differ from the default OpenAI-compatible shape.
341
+ */
342
+ convertUsage?: OpenAICompatibleChatConfig['convertUsage'];
282
343
  }
283
344
  /**
284
345
  * Create an OpenAICompatible provider instance.