@ai-sdk/openai-compatible 3.0.0-beta.3 → 3.0.0-beta.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,224 @@
1
1
  # @ai-sdk/openai-compatible
2
2
 
3
+ ## 3.0.0-beta.30
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [eea8d98]
8
+ - @ai-sdk/provider-utils@5.0.0-beta.25
9
+
10
+ ## 3.0.0-beta.29
11
+
12
+ ### Patch Changes
13
+
14
+ - f807e45: Extract shared `StreamingToolCallTracker` class into `@ai-sdk/provider-utils` to deduplicate streaming tool call handling across OpenAI-compatible providers. Also adds missing `generateId()` fallback for `toolCallId` in Alibaba's `doGenerate` path and ensures all providers finalize unfinished tool calls during stream flush.
15
+ - Updated dependencies [f807e45]
16
+ - @ai-sdk/provider-utils@5.0.0-beta.24
17
+
18
+ ## 3.0.0-beta.28
19
+
20
+ ### Patch Changes
21
+
22
+ - Updated dependencies [350ea38]
23
+ - @ai-sdk/provider-utils@5.0.0-beta.23
24
+
25
+ ## 3.0.0-beta.27
26
+
27
+ ### Patch Changes
28
+
29
+ - Updated dependencies [083947b]
30
+ - @ai-sdk/provider-utils@5.0.0-beta.22
31
+
32
+ ## 3.0.0-beta.26
33
+
34
+ ### Patch Changes
35
+
36
+ - Updated dependencies [add1126]
37
+ - @ai-sdk/provider-utils@5.0.0-beta.21
38
+
39
+ ## 3.0.0-beta.25
40
+
41
+ ### Patch Changes
42
+
43
+ - b3976a2: Add workflow serialization support to all provider models.
44
+
45
+ **`@ai-sdk/provider-utils`:** New `serializeModel()` helper that extracts only serializable properties from a model instance, filtering out functions and objects containing functions. Third-party provider authors can use this to add workflow support to their own models.
46
+
47
+ **All providers:** `headers` is now optional in provider config types. This is non-breaking — existing code that passes `headers` continues to work. Custom provider implementations that construct model configs manually can now omit `headers`, which is useful when models are deserialized from a workflow step boundary where auth is provided separately.
48
+
49
+ All provider model classes now include `WORKFLOW_SERIALIZE` and `WORKFLOW_DESERIALIZE` static methods, enabling them to cross workflow step boundaries without serialization errors.
50
+
51
+ - Updated dependencies [b3976a2]
52
+ - Updated dependencies [ff5eba1]
53
+ - @ai-sdk/provider-utils@5.0.0-beta.20
54
+ - @ai-sdk/provider@4.0.0-beta.12
55
+
56
+ ## 3.0.0-beta.24
57
+
58
+ ### Major Changes
59
+
60
+ - ef992f8: Remove CommonJS exports from all packages. All packages are now ESM-only (`"type": "module"`). Consumers using `require()` must switch to ESM `import` syntax.
61
+
62
+ ### Patch Changes
63
+
64
+ - Updated dependencies [ef992f8]
65
+ - @ai-sdk/provider@4.0.0-beta.11
66
+ - @ai-sdk/provider-utils@5.0.0-beta.19
67
+
68
+ ## 3.0.0-beta.23
69
+
70
+ ### Patch Changes
71
+
72
+ - 90e2d8a: chore: fix unused vars not being flagged by our lint tooling
73
+ - Updated dependencies [90e2d8a]
74
+ - @ai-sdk/provider-utils@5.0.0-beta.18
75
+
76
+ ## 3.0.0-beta.22
77
+
78
+ ### Patch Changes
79
+
80
+ - Updated dependencies [3ae1786]
81
+ - @ai-sdk/provider-utils@5.0.0-beta.17
82
+
83
+ ## 3.0.0-beta.21
84
+
85
+ ### Patch Changes
86
+
87
+ - Updated dependencies [176466a]
88
+ - @ai-sdk/provider@4.0.0-beta.10
89
+ - @ai-sdk/provider-utils@5.0.0-beta.16
90
+
91
+ ## 3.0.0-beta.20
92
+
93
+ ### Patch Changes
94
+
95
+ - Updated dependencies [e311194]
96
+ - @ai-sdk/provider@4.0.0-beta.9
97
+ - @ai-sdk/provider-utils@5.0.0-beta.15
98
+
99
+ ## 3.0.0-beta.19
100
+
101
+ ### Patch Changes
102
+
103
+ - 008271d: feat(openai-compatible): emit warning when using kebab-case instead of camelCase
104
+ - Updated dependencies [34bd95d]
105
+ - Updated dependencies [008271d]
106
+ - @ai-sdk/provider@4.0.0-beta.8
107
+ - @ai-sdk/provider-utils@5.0.0-beta.14
108
+
109
+ ## 3.0.0-beta.18
110
+
111
+ ### Patch Changes
112
+
113
+ - Updated dependencies [b0c2869]
114
+ - Updated dependencies [7e26e81]
115
+ - @ai-sdk/provider-utils@5.0.0-beta.13
116
+
117
+ ## 3.0.0-beta.17
118
+
119
+ ### Patch Changes
120
+
121
+ - 816ff67: fix(openai-compatible): honor camelCase providerOptions key in chat and completion models
122
+
123
+ ## 3.0.0-beta.16
124
+
125
+ ### Patch Changes
126
+
127
+ - Updated dependencies [46d1149]
128
+ - @ai-sdk/provider-utils@5.0.0-beta.12
129
+
130
+ ## 3.0.0-beta.15
131
+
132
+ ### Patch Changes
133
+
134
+ - 6fd51c0: fix(provider): preserve error type prefix in getErrorMessage
135
+ - Updated dependencies [6fd51c0]
136
+ - @ai-sdk/provider-utils@5.0.0-beta.11
137
+ - @ai-sdk/provider@4.0.0-beta.7
138
+
139
+ ## 3.0.0-beta.14
140
+
141
+ ### Patch Changes
142
+
143
+ - c29a26f: feat(provider): add support for provider references and uploading files as supported per provider
144
+ - Updated dependencies [c29a26f]
145
+ - @ai-sdk/provider-utils@5.0.0-beta.10
146
+ - @ai-sdk/provider@4.0.0-beta.6
147
+
148
+ ## 3.0.0-beta.13
149
+
150
+ ### Patch Changes
151
+
152
+ - 38fc777: Add AI Gateway hint to provider READMEs
153
+
154
+ ## 3.0.0-beta.12
155
+
156
+ ### Patch Changes
157
+
158
+ - Updated dependencies [2e17091]
159
+ - @ai-sdk/provider-utils@5.0.0-beta.9
160
+
161
+ ## 3.0.0-beta.11
162
+
163
+ ### Patch Changes
164
+
165
+ - Updated dependencies [986c6fd]
166
+ - Updated dependencies [493295c]
167
+ - @ai-sdk/provider-utils@5.0.0-beta.8
168
+
169
+ ## 3.0.0-beta.10
170
+
171
+ ### Patch Changes
172
+
173
+ - Updated dependencies [1f509d4]
174
+ - @ai-sdk/provider-utils@5.0.0-beta.7
175
+ - @ai-sdk/provider@4.0.0-beta.5
176
+
177
+ ## 3.0.0-beta.9
178
+
179
+ ### Patch Changes
180
+
181
+ - 74d520f: feat: migrate providers to support new top-level `reasoning` parameter
182
+
183
+ ## 3.0.0-beta.8
184
+
185
+ ### Patch Changes
186
+
187
+ - Updated dependencies [3887c70]
188
+ - @ai-sdk/provider-utils@5.0.0-beta.6
189
+ - @ai-sdk/provider@4.0.0-beta.4
190
+
191
+ ## 3.0.0-beta.7
192
+
193
+ ### Patch Changes
194
+
195
+ - Updated dependencies [776b617]
196
+ - @ai-sdk/provider-utils@5.0.0-beta.5
197
+ - @ai-sdk/provider@4.0.0-beta.3
198
+
199
+ ## 3.0.0-beta.6
200
+
201
+ ### Patch Changes
202
+
203
+ - Updated dependencies [61753c3]
204
+ - @ai-sdk/provider-utils@5.0.0-beta.4
205
+
206
+ ## 3.0.0-beta.5
207
+
208
+ ### Patch Changes
209
+
210
+ - Updated dependencies [f7d4f01]
211
+ - @ai-sdk/provider-utils@5.0.0-beta.3
212
+ - @ai-sdk/provider@4.0.0-beta.2
213
+
214
+ ## 3.0.0-beta.4
215
+
216
+ ### Patch Changes
217
+
218
+ - Updated dependencies [5c2a5a2]
219
+ - @ai-sdk/provider@4.0.0-beta.1
220
+ - @ai-sdk/provider-utils@5.0.0-beta.2
221
+
3
222
  ## 3.0.0-beta.3
4
223
 
5
224
  ### Patch Changes
@@ -293,13 +512,13 @@
293
512
  Before
294
513
 
295
514
  ```ts
296
- model.textEmbeddingModel('my-model-id');
515
+ model.textEmbeddingModel("my-model-id");
297
516
  ```
298
517
 
299
518
  After
300
519
 
301
520
  ```ts
302
- model.embeddingModel('my-model-id');
521
+ model.embeddingModel("my-model-id");
303
522
  ```
304
523
 
305
524
  - 2625a04: feat(openai): update spec for mcp approval
@@ -514,13 +733,13 @@
514
733
  Before
515
734
 
516
735
  ```ts
517
- model.textEmbeddingModel('my-model-id');
736
+ model.textEmbeddingModel("my-model-id");
518
737
  ```
519
738
 
520
739
  After
521
740
 
522
741
  ```ts
523
- model.embeddingModel('my-model-id');
742
+ model.embeddingModel("my-model-id");
524
743
  ```
525
744
 
526
745
  - Updated dependencies [8d9e8ad]
@@ -956,7 +1175,7 @@
956
1175
 
957
1176
  ```js
958
1177
  await generateImage({
959
- model: luma.image('photon-flash-1', {
1178
+ model: luma.image("photon-flash-1", {
960
1179
  maxImagesPerCall: 5,
961
1180
  pollIntervalMillis: 500,
962
1181
  }),
@@ -969,7 +1188,7 @@
969
1188
 
970
1189
  ```js
971
1190
  await generateImage({
972
- model: luma.image('photon-flash-1'),
1191
+ model: luma.image("photon-flash-1"),
973
1192
  prompt,
974
1193
  n: 10,
975
1194
  maxImagesPerCall: 5,
@@ -1238,7 +1457,7 @@
1238
1457
 
1239
1458
  ```js
1240
1459
  await generateImage({
1241
- model: luma.image('photon-flash-1', {
1460
+ model: luma.image("photon-flash-1", {
1242
1461
  maxImagesPerCall: 5,
1243
1462
  pollIntervalMillis: 500,
1244
1463
  }),
@@ -1251,7 +1470,7 @@
1251
1470
 
1252
1471
  ```js
1253
1472
  await generateImage({
1254
- model: luma.image('photon-flash-1'),
1473
+ model: luma.image("photon-flash-1"),
1255
1474
  prompt,
1256
1475
  n: 10,
1257
1476
  maxImagesPerCall: 5,
package/README.md CHANGED
@@ -4,6 +4,8 @@ This package provides a foundation for implementing providers that expose an Ope
4
4
 
5
5
  The primary [OpenAI provider](../openai/README.md) is more feature-rich, including OpenAI-specific experimental and legacy features. This package offers a lighter-weight alternative focused on core OpenAI-compatible functionality.
6
6
 
7
+ > **Deploying to Vercel?** With Vercel's AI Gateway you can access hundreds of models from any provider — no additional packages, API keys, or extra cost. [Get started with AI Gateway](https://vercel.com/ai-gateway).
8
+
7
9
  ## Setup
8
10
 
9
11
  The provider is available in the `@ai-sdk/openai-compatible` module. You can install it with
package/dist/index.d.ts CHANGED
@@ -1,5 +1,6 @@
1
+ import * as _ai_sdk_provider from '@ai-sdk/provider';
1
2
  import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
2
- import { FetchFunction } from '@ai-sdk/provider-utils';
3
+ import { FetchFunction, WORKFLOW_SERIALIZE, WORKFLOW_DESERIALIZE } from '@ai-sdk/provider-utils';
3
4
  import { ZodType, z } from 'zod/v4';
4
5
 
5
6
  declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
@@ -71,7 +72,7 @@ type MetadataExtractor = {
71
72
 
72
73
  type OpenAICompatibleChatConfig = {
73
74
  provider: string;
74
- headers: () => Record<string, string | undefined>;
75
+ headers?: () => Record<string, string | undefined>;
75
76
  url: (options: {
76
77
  modelId: string;
77
78
  path: string;
@@ -99,9 +100,17 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV4 {
99
100
  readonly specificationVersion = "v4";
100
101
  readonly supportsStructuredOutputs: boolean;
101
102
  readonly modelId: OpenAICompatibleChatModelId;
102
- private readonly config;
103
+ protected readonly config: OpenAICompatibleChatConfig;
103
104
  private readonly failedResponseHandler;
104
105
  private readonly chunkSchema;
106
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleChatLanguageModel): {
107
+ modelId: string;
108
+ config: _ai_sdk_provider.JSONObject;
109
+ };
110
+ static [WORKFLOW_DESERIALIZE](options: {
111
+ modelId: string;
112
+ config: OpenAICompatibleChatConfig;
113
+ }): OpenAICompatibleChatLanguageModel;
105
114
  constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig);
106
115
  get provider(): string;
107
116
  private get providerOptionsName();
@@ -124,7 +133,7 @@ type OpenAICompatibleLanguageModelCompletionOptions = z.infer<typeof openaiCompa
124
133
  type OpenAICompatibleCompletionConfig = {
125
134
  provider: string;
126
135
  includeUsage?: boolean;
127
- headers: () => Record<string, string | undefined>;
136
+ headers?: () => Record<string, string | undefined>;
128
137
  url: (options: {
129
138
  modelId: string;
130
139
  path: string;
@@ -142,6 +151,14 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV4
142
151
  private readonly config;
143
152
  private readonly failedResponseHandler;
144
153
  private readonly chunkSchema;
154
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleCompletionLanguageModel): {
155
+ modelId: string;
156
+ config: _ai_sdk_provider.JSONObject;
157
+ };
158
+ static [WORKFLOW_DESERIALIZE](options: {
159
+ modelId: string;
160
+ config: OpenAICompatibleCompletionConfig;
161
+ }): OpenAICompatibleCompletionLanguageModel;
145
162
  constructor(modelId: OpenAICompatibleCompletionModelId, config: OpenAICompatibleCompletionConfig);
146
163
  get provider(): string;
147
164
  private get providerOptionsName();
@@ -172,7 +189,7 @@ type OpenAICompatibleEmbeddingConfig = {
172
189
  modelId: string;
173
190
  path: string;
174
191
  }) => string;
175
- headers: () => Record<string, string | undefined>;
192
+ headers?: () => Record<string, string | undefined>;
176
193
  fetch?: FetchFunction;
177
194
  errorStructure?: ProviderErrorStructure<any>;
178
195
  };
@@ -183,6 +200,14 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV4 {
183
200
  get provider(): string;
184
201
  get maxEmbeddingsPerCall(): number;
185
202
  get supportsParallelCalls(): boolean;
203
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleEmbeddingModel): {
204
+ modelId: string;
205
+ config: _ai_sdk_provider.JSONObject;
206
+ };
207
+ static [WORKFLOW_DESERIALIZE](options: {
208
+ modelId: string;
209
+ config: OpenAICompatibleEmbeddingConfig;
210
+ }): OpenAICompatibleEmbeddingModel;
186
211
  constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
187
212
  private get providerOptionsName();
188
213
  doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
@@ -192,7 +217,7 @@ type OpenAICompatibleImageModelId = string;
192
217
 
193
218
  type OpenAICompatibleImageModelConfig = {
194
219
  provider: string;
195
- headers: () => Record<string, string | undefined>;
220
+ headers?: () => Record<string, string | undefined>;
196
221
  url: (options: {
197
222
  modelId: string;
198
223
  path: string;
@@ -213,6 +238,14 @@ declare class OpenAICompatibleImageModel implements ImageModelV4 {
213
238
  * The provider options key used to extract provider-specific options.
214
239
  */
215
240
  private get providerOptionsKey();
241
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleImageModel): {
242
+ modelId: string;
243
+ config: _ai_sdk_provider.JSONObject;
244
+ };
245
+ static [WORKFLOW_DESERIALIZE](options: {
246
+ modelId: string;
247
+ config: OpenAICompatibleImageModelConfig;
248
+ }): OpenAICompatibleImageModel;
216
249
  constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
217
250
  private getArgs;
218
251
  doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;