@ai-sdk/openai-compatible 3.0.0-beta.23 → 3.0.0-beta.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,34 @@
1
1
  # @ai-sdk/openai-compatible
2
2
 
3
+ ## 3.0.0-beta.25
4
+
5
+ ### Patch Changes
6
+
7
+ - b3976a2: Add workflow serialization support to all provider models.
8
+
9
+ **`@ai-sdk/provider-utils`:** New `serializeModel()` helper that extracts only serializable properties from a model instance, filtering out functions and objects containing functions. Third-party provider authors can use this to add workflow support to their own models.
10
+
11
+ **All providers:** `headers` is now optional in provider config types. This is non-breaking — existing code that passes `headers` continues to work. Custom provider implementations that construct model configs manually can now omit `headers`, which is useful when models are deserialized across a workflow step boundary where auth is provided separately.
12
+
13
+ All provider model classes now include `WORKFLOW_SERIALIZE` and `WORKFLOW_DESERIALIZE` static methods, enabling them to cross workflow step boundaries without serialization errors.
14
+
15
+ - Updated dependencies [b3976a2]
16
+ - Updated dependencies [ff5eba1]
17
+ - @ai-sdk/provider-utils@5.0.0-beta.20
18
+ - @ai-sdk/provider@4.0.0-beta.12
19
+
20
+ ## 3.0.0-beta.24
21
+
22
+ ### Major Changes
23
+
24
+ - ef992f8: Remove CommonJS exports from all packages. All packages are now ESM-only (`"type": "module"`). Consumers using `require()` must switch to ESM `import` syntax.
25
+
26
+ ### Patch Changes
27
+
28
+ - Updated dependencies [ef992f8]
29
+ - @ai-sdk/provider@4.0.0-beta.11
30
+ - @ai-sdk/provider-utils@5.0.0-beta.19
31
+
3
32
  ## 3.0.0-beta.23
4
33
 
5
34
  ### Patch Changes
package/dist/index.d.ts CHANGED
@@ -1,5 +1,6 @@
1
+ import * as _ai_sdk_provider from '@ai-sdk/provider';
1
2
  import { SharedV4ProviderMetadata, LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, ProviderV4 } from '@ai-sdk/provider';
2
- import { FetchFunction } from '@ai-sdk/provider-utils';
3
+ import { FetchFunction, WORKFLOW_SERIALIZE, WORKFLOW_DESERIALIZE } from '@ai-sdk/provider-utils';
3
4
  import { ZodType, z } from 'zod/v4';
4
5
 
5
6
  declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
@@ -71,7 +72,7 @@ type MetadataExtractor = {
71
72
 
72
73
  type OpenAICompatibleChatConfig = {
73
74
  provider: string;
74
- headers: () => Record<string, string | undefined>;
75
+ headers?: () => Record<string, string | undefined>;
75
76
  url: (options: {
76
77
  modelId: string;
77
78
  path: string;
@@ -99,9 +100,17 @@ declare class OpenAICompatibleChatLanguageModel implements LanguageModelV4 {
99
100
  readonly specificationVersion = "v4";
100
101
  readonly supportsStructuredOutputs: boolean;
101
102
  readonly modelId: OpenAICompatibleChatModelId;
102
- private readonly config;
103
+ protected readonly config: OpenAICompatibleChatConfig;
103
104
  private readonly failedResponseHandler;
104
105
  private readonly chunkSchema;
106
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleChatLanguageModel): {
107
+ modelId: string;
108
+ config: _ai_sdk_provider.JSONObject;
109
+ };
110
+ static [WORKFLOW_DESERIALIZE](options: {
111
+ modelId: string;
112
+ config: OpenAICompatibleChatConfig;
113
+ }): OpenAICompatibleChatLanguageModel;
105
114
  constructor(modelId: OpenAICompatibleChatModelId, config: OpenAICompatibleChatConfig);
106
115
  get provider(): string;
107
116
  private get providerOptionsName();
@@ -124,7 +133,7 @@ type OpenAICompatibleLanguageModelCompletionOptions = z.infer<typeof openaiCompa
124
133
  type OpenAICompatibleCompletionConfig = {
125
134
  provider: string;
126
135
  includeUsage?: boolean;
127
- headers: () => Record<string, string | undefined>;
136
+ headers?: () => Record<string, string | undefined>;
128
137
  url: (options: {
129
138
  modelId: string;
130
139
  path: string;
@@ -142,6 +151,14 @@ declare class OpenAICompatibleCompletionLanguageModel implements LanguageModelV4
142
151
  private readonly config;
143
152
  private readonly failedResponseHandler;
144
153
  private readonly chunkSchema;
154
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleCompletionLanguageModel): {
155
+ modelId: string;
156
+ config: _ai_sdk_provider.JSONObject;
157
+ };
158
+ static [WORKFLOW_DESERIALIZE](options: {
159
+ modelId: string;
160
+ config: OpenAICompatibleCompletionConfig;
161
+ }): OpenAICompatibleCompletionLanguageModel;
145
162
  constructor(modelId: OpenAICompatibleCompletionModelId, config: OpenAICompatibleCompletionConfig);
146
163
  get provider(): string;
147
164
  private get providerOptionsName();
@@ -172,7 +189,7 @@ type OpenAICompatibleEmbeddingConfig = {
172
189
  modelId: string;
173
190
  path: string;
174
191
  }) => string;
175
- headers: () => Record<string, string | undefined>;
192
+ headers?: () => Record<string, string | undefined>;
176
193
  fetch?: FetchFunction;
177
194
  errorStructure?: ProviderErrorStructure<any>;
178
195
  };
@@ -183,6 +200,14 @@ declare class OpenAICompatibleEmbeddingModel implements EmbeddingModelV4 {
183
200
  get provider(): string;
184
201
  get maxEmbeddingsPerCall(): number;
185
202
  get supportsParallelCalls(): boolean;
203
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleEmbeddingModel): {
204
+ modelId: string;
205
+ config: _ai_sdk_provider.JSONObject;
206
+ };
207
+ static [WORKFLOW_DESERIALIZE](options: {
208
+ modelId: string;
209
+ config: OpenAICompatibleEmbeddingConfig;
210
+ }): OpenAICompatibleEmbeddingModel;
186
211
  constructor(modelId: OpenAICompatibleEmbeddingModelId, config: OpenAICompatibleEmbeddingConfig);
187
212
  private get providerOptionsName();
188
213
  doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
@@ -192,7 +217,7 @@ type OpenAICompatibleImageModelId = string;
192
217
 
193
218
  type OpenAICompatibleImageModelConfig = {
194
219
  provider: string;
195
- headers: () => Record<string, string | undefined>;
220
+ headers?: () => Record<string, string | undefined>;
196
221
  url: (options: {
197
222
  modelId: string;
198
223
  path: string;
@@ -213,6 +238,14 @@ declare class OpenAICompatibleImageModel implements ImageModelV4 {
213
238
  * The provider options key used to extract provider-specific options.
214
239
  */
215
240
  private get providerOptionsKey();
241
+ static [WORKFLOW_SERIALIZE](model: OpenAICompatibleImageModel): {
242
+ modelId: string;
243
+ config: _ai_sdk_provider.JSONObject;
244
+ };
245
+ static [WORKFLOW_DESERIALIZE](options: {
246
+ modelId: string;
247
+ config: OpenAICompatibleImageModelConfig;
248
+ }): OpenAICompatibleImageModel;
216
249
  constructor(modelId: OpenAICompatibleImageModelId, config: OpenAICompatibleImageModelConfig);
217
250
  private getArgs;
218
251
  doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, files, mask, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;