@ai-sdk/openai-compatible 2.0.0-beta.10 → 2.0.0-beta.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
  # @ai-sdk/openai-compatible

+ ## 2.0.0-beta.12
+
+ ### Patch Changes
+
+ - Updated dependencies [7e32fea]
+   - @ai-sdk/provider-utils@4.0.0-beta.12
+
+ ## 2.0.0-beta.11
+
+ ### Patch Changes
+
+ - 95f65c2: chore: use import \* from zod/v4
+ - Updated dependencies [95f65c2]
+ - Updated dependencies [95f65c2]
+   - @ai-sdk/provider-utils@4.0.0-beta.11
+
  ## 2.0.0-beta.10

  ### Major Changes
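The beta.11 entry ("use import \* from zod/v4") refers to switching the package's source from a named import of `z` to a namespace import, which is what the regenerated `dist` files below reflect. A minimal sketch of the two styles (the module body here is illustrative, not the package's actual source):

```ts
// Before: named import of the `z` helper.
// import { z } from 'zod/v4';

// After: namespace import, matching the new declarations and emitted JS in this diff.
import * as z from 'zod/v4';

// Either style produces the same runtime schema; only the emitted
// CommonJS interop changes (see the __toESM helper added to dist/index.js).
const example = z.object({
  user: z.string().optional(),
});

export type Example = z.infer<typeof example>;
```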
package/dist/index.d.mts CHANGED
@@ -1,6 +1,7 @@
  import { SharedV3ProviderMetadata, LanguageModelV3, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z, ZodType } from 'zod/v4';
+ import * as z from 'zod/v4';
+ import { ZodType, z as z$1 } from 'zod/v4';

  type OpenAICompatibleChatModelId = string;
  declare const openaiCompatibleProviderOptions: z.ZodObject<{
@@ -9,15 +10,15 @@ declare const openaiCompatibleProviderOptions: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;

- declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
-     error: z.ZodObject<{
-         message: z.ZodString;
-         type: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-         param: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-         code: z.ZodOptional<z.ZodNullable<z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>>>;
-     }, z.core.$strip>;
- }, z.core.$strip>;
- type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>;
+ declare const openaiCompatibleErrorDataSchema: z$1.ZodObject<{
+     error: z$1.ZodObject<{
+         message: z$1.ZodString;
+         type: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodString>>;
+         param: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodAny>>;
+         code: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodUnion<readonly [z$1.ZodString, z$1.ZodNumber]>>>;
+     }, z$1.core.$strip>;
+ }, z$1.core.$strip>;
+ type OpenAICompatibleErrorData = z$1.infer<typeof openaiCompatibleErrorDataSchema>;
  type ProviderErrorStructure<T> = {
      errorSchema: ZodType<T>;
      errorToMessage: (error: T) => string;
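The declarations above also show part of the `ProviderErrorStructure<T>` shape: a `ZodType` error schema plus an `errorToMessage` mapper. A hedged sketch of building a compatible structure for a provider whose error body differs from the OpenAI default; the field names in the sample schema are illustrative and the inline type merely mirrors the fields visible in this diff:

```ts
import * as z from 'zod/v4';
import type { ZodType } from 'zod/v4';

// Illustrative error body for a hypothetical provider.
const myErrorSchema = z.object({
  error: z.object({
    message: z.string(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
});

type MyErrorData = z.infer<typeof myErrorSchema>;

// Mirrors the ProviderErrorStructure<T> fields shown above (errorSchema, errorToMessage).
const myErrorStructure: {
  errorSchema: ZodType<MyErrorData>;
  errorToMessage: (error: MyErrorData) => string;
} = {
  errorSchema: myErrorSchema,
  errorToMessage: error => error.error.message,
};
```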
package/dist/index.d.ts CHANGED
@@ -1,6 +1,7 @@
  import { SharedV3ProviderMetadata, LanguageModelV3, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z, ZodType } from 'zod/v4';
+ import * as z from 'zod/v4';
+ import { ZodType, z as z$1 } from 'zod/v4';

  type OpenAICompatibleChatModelId = string;
  declare const openaiCompatibleProviderOptions: z.ZodObject<{
@@ -9,15 +10,15 @@ declare const openaiCompatibleProviderOptions: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;

- declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
-     error: z.ZodObject<{
-         message: z.ZodString;
-         type: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-         param: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-         code: z.ZodOptional<z.ZodNullable<z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>>>;
-     }, z.core.$strip>;
- }, z.core.$strip>;
- type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>;
+ declare const openaiCompatibleErrorDataSchema: z$1.ZodObject<{
+     error: z$1.ZodObject<{
+         message: z$1.ZodString;
+         type: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodString>>;
+         param: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodAny>>;
+         code: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodUnion<readonly [z$1.ZodString, z$1.ZodNumber]>>>;
+     }, z$1.core.$strip>;
+ }, z$1.core.$strip>;
+ type OpenAICompatibleErrorData = z$1.infer<typeof openaiCompatibleErrorDataSchema>;
  type ProviderErrorStructure<T> = {
      errorSchema: ZodType<T>;
      errorToMessage: (error: T) => string;
package/dist/index.js CHANGED
@@ -1,7 +1,9 @@
  "use strict";
+ var __create = Object.create;
  var __defProp = Object.defineProperty;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
    for (var name in all)
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
    }
    return to;
  };
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+   // If the importer is in node compatibility mode or this is not an ESM
+   // file that has been converted to a CommonJS file using a Babel-
+   // compatible transform (i.e. "__esModule" has not been set), then set
+   // "default" to the CommonJS "module.exports" for node compatibility.
+   isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+   mod
+ ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

  // src/index.ts
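The added `__create`, `__getProtoOf`, and `__toESM` helpers are esbuild's standard CommonJS interop shims. They are needed because the source now uses `import * as z from 'zod/v4'`, so the CJS bundle must wrap the `require`d module in a namespace-like object (adding a `default` binding when the module is not flagged `__esModule`). A simplified, hedged model of the idea; `toEsmLike` is a hypothetical name, not the generated helper:

```ts
// A hedged, simplified model of what __toESM achieves: make a CommonJS
// exports object consumable like an ESM namespace, adding `default`
// when the module is not marked __esModule.
function toEsmLike<T extends object>(cjsExports: T): T & { default?: unknown } {
  const ns: T & { default?: unknown } = Object.create(Object.getPrototypeOf(cjsExports));
  Object.assign(ns, cjsExports);
  if (!("__esModule" in cjsExports)) {
    Object.defineProperty(ns, "default", { value: cjsExports, enumerable: true });
  }
  return ns;
}

// Usage sketch (assumes a CommonJS zod build at runtime):
// const z3 = toEsmLike(require("zod/v4"));
// z3.object({ name: z3.string() });
```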
@@ -32,7 +42,7 @@ module.exports = __toCommonJS(src_exports);
  // src/chat/openai-compatible-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_v43 = require("zod/v4");
+ var z3 = __toESM(require("zod/v4"));

  // src/chat/convert-to-openai-compatible-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
@@ -190,30 +200,30 @@ function mapOpenAICompatibleFinishReason(finishReason) {
  }

  // src/chat/openai-compatible-chat-options.ts
- var import_v4 = require("zod/v4");
- var openaiCompatibleProviderOptions = import_v4.z.object({
+ var z = __toESM(require("zod/v4"));
+ var openaiCompatibleProviderOptions = z.object({
    /**
     * A unique identifier representing your end-user, which can help the provider to
     * monitor and detect abuse.
     */
-   user: import_v4.z.string().optional(),
+   user: z.string().optional(),
    /**
     * Reasoning effort for reasoning models. Defaults to `medium`.
     */
-   reasoningEffort: import_v4.z.string().optional()
+   reasoningEffort: z.string().optional()
  });

  // src/openai-compatible-error.ts
- var import_v42 = require("zod/v4");
- var openaiCompatibleErrorDataSchema = import_v42.z.object({
-   error: import_v42.z.object({
-     message: import_v42.z.string(),
+ var import_v4 = require("zod/v4");
+ var openaiCompatibleErrorDataSchema = import_v4.z.object({
+   error: import_v4.z.object({
+     message: import_v4.z.string(),
      // The additional information below is handled loosely to support
      // OpenAI-compatible providers that have slightly different error
      // responses:
-     type: import_v42.z.string().nullish(),
-     param: import_v42.z.any().nullish(),
-     code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
+     type: import_v4.z.string().nullish(),
+     param: import_v4.z.any().nullish(),
+     code: import_v4.z.union([import_v4.z.string(), import_v4.z.number()]).nullish()
    })
  });
  var defaultOpenAICompatibleErrorStructure = {
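Note that the error module keeps the named `z` binding (`import_v4.z`) while the other modules switch to the namespace form. For illustration, a hedged, standalone sketch of the error shape this schema accepts; the schema below mirrors the fields shown above and the payload values are invented:

```ts
import * as z from 'zod/v4';

// Standalone copy of the fields of openaiCompatibleErrorDataSchema for
// illustration; not the package's export.
const errorDataSchema = z.object({
  error: z.object({
    message: z.string(),
    type: z.string().nullish(),
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
});

// Example payload in the loose OpenAI-compatible error format.
const parsed = errorDataSchema.parse({
  error: { message: "Rate limit exceeded", type: "rate_limit_error", code: 429 },
});

console.log(parsed.error.message); // "Rate limit exceeded"
```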
@@ -732,71 +742,71 @@ var OpenAICompatibleChatLanguageModel = class {
      };
    }
  };
- var openaiCompatibleTokenUsageSchema = import_v43.z.object({
-   prompt_tokens: import_v43.z.number().nullish(),
-   completion_tokens: import_v43.z.number().nullish(),
-   total_tokens: import_v43.z.number().nullish(),
-   prompt_tokens_details: import_v43.z.object({
-     cached_tokens: import_v43.z.number().nullish()
+ var openaiCompatibleTokenUsageSchema = z3.object({
+   prompt_tokens: z3.number().nullish(),
+   completion_tokens: z3.number().nullish(),
+   total_tokens: z3.number().nullish(),
+   prompt_tokens_details: z3.object({
+     cached_tokens: z3.number().nullish()
    }).nullish(),
-   completion_tokens_details: import_v43.z.object({
-     reasoning_tokens: import_v43.z.number().nullish(),
-     accepted_prediction_tokens: import_v43.z.number().nullish(),
-     rejected_prediction_tokens: import_v43.z.number().nullish()
+   completion_tokens_details: z3.object({
+     reasoning_tokens: z3.number().nullish(),
+     accepted_prediction_tokens: z3.number().nullish(),
+     rejected_prediction_tokens: z3.number().nullish()
    }).nullish()
  }).nullish();
- var OpenAICompatibleChatResponseSchema = import_v43.z.object({
-   id: import_v43.z.string().nullish(),
-   created: import_v43.z.number().nullish(),
-   model: import_v43.z.string().nullish(),
-   choices: import_v43.z.array(
-     import_v43.z.object({
-       message: import_v43.z.object({
-         role: import_v43.z.literal("assistant").nullish(),
-         content: import_v43.z.string().nullish(),
-         reasoning_content: import_v43.z.string().nullish(),
-         reasoning: import_v43.z.string().nullish(),
-         tool_calls: import_v43.z.array(
-           import_v43.z.object({
-             id: import_v43.z.string().nullish(),
-             function: import_v43.z.object({
-               name: import_v43.z.string(),
-               arguments: import_v43.z.string()
+ var OpenAICompatibleChatResponseSchema = z3.object({
+   id: z3.string().nullish(),
+   created: z3.number().nullish(),
+   model: z3.string().nullish(),
+   choices: z3.array(
+     z3.object({
+       message: z3.object({
+         role: z3.literal("assistant").nullish(),
+         content: z3.string().nullish(),
+         reasoning_content: z3.string().nullish(),
+         reasoning: z3.string().nullish(),
+         tool_calls: z3.array(
+           z3.object({
+             id: z3.string().nullish(),
+             function: z3.object({
+               name: z3.string(),
+               arguments: z3.string()
              })
            })
          ).nullish()
        }),
-       finish_reason: import_v43.z.string().nullish()
+       finish_reason: z3.string().nullish()
      })
    ),
    usage: openaiCompatibleTokenUsageSchema
  });
- var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union([
-   import_v43.z.object({
-     id: import_v43.z.string().nullish(),
-     created: import_v43.z.number().nullish(),
-     model: import_v43.z.string().nullish(),
-     choices: import_v43.z.array(
-       import_v43.z.object({
-         delta: import_v43.z.object({
-           role: import_v43.z.enum(["assistant"]).nullish(),
-           content: import_v43.z.string().nullish(),
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
+   z3.object({
+     id: z3.string().nullish(),
+     created: z3.number().nullish(),
+     model: z3.string().nullish(),
+     choices: z3.array(
+       z3.object({
+         delta: z3.object({
+           role: z3.enum(["assistant"]).nullish(),
+           content: z3.string().nullish(),
            // Most openai-compatible models set `reasoning_content`, but some
            // providers serving `gpt-oss` set `reasoning`. See #7866
-           reasoning_content: import_v43.z.string().nullish(),
-           reasoning: import_v43.z.string().nullish(),
-           tool_calls: import_v43.z.array(
-             import_v43.z.object({
-               index: import_v43.z.number(),
-               id: import_v43.z.string().nullish(),
-               function: import_v43.z.object({
-                 name: import_v43.z.string().nullish(),
-                 arguments: import_v43.z.string().nullish()
+           reasoning_content: z3.string().nullish(),
+           reasoning: z3.string().nullish(),
+           tool_calls: z3.array(
+             z3.object({
+               index: z3.number(),
+               id: z3.string().nullish(),
+               function: z3.object({
+                 name: z3.string().nullish(),
+                 arguments: z3.string().nullish()
                })
              })
            ).nullish()
          }).nullish(),
-         finish_reason: import_v43.z.string().nullish()
+         finish_reason: z3.string().nullish()
        })
      ),
      usage: openaiCompatibleTokenUsageSchema
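For reference, a response body that satisfies the chat response schema above might look like the following. This is a hedged, minimal example; the field values are invented, and most fields are nullish so a provider only needs to send what it supports:

```ts
// A minimal chat completion body in the shape OpenAICompatibleChatResponseSchema accepts.
const sampleChatResponse = {
  id: "chatcmpl-123",
  created: 1719000000,
  model: "my-model",
  choices: [
    {
      message: {
        role: "assistant",
        content: "Hello!",
        // Optional reasoning fields; some providers use `reasoning` instead
        // of `reasoning_content`.
        reasoning_content: null,
        tool_calls: null,
      },
      finish_reason: "stop",
    },
  ],
  usage: {
    prompt_tokens: 10,
    completion_tokens: 3,
    total_tokens: 13,
  },
};
```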
@@ -806,7 +816,7 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union(

  // src/completion/openai-compatible-completion-language-model.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_v45 = require("zod/v4");
+ var z5 = __toESM(require("zod/v4"));

  // src/completion/convert-to-openai-compatible-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -914,28 +924,28 @@ function mapOpenAICompatibleFinishReason2(finishReason) {
  }

  // src/completion/openai-compatible-completion-options.ts
- var import_v44 = require("zod/v4");
- var openaiCompatibleCompletionProviderOptions = import_v44.z.object({
+ var z4 = __toESM(require("zod/v4"));
+ var openaiCompatibleCompletionProviderOptions = z4.object({
    /**
     * Echo back the prompt in addition to the completion.
     */
-   echo: import_v44.z.boolean().optional(),
+   echo: z4.boolean().optional(),
    /**
     * Modify the likelihood of specified tokens appearing in the completion.
     *
     * Accepts a JSON object that maps tokens (specified by their token ID in
     * the GPT tokenizer) to an associated bias value from -100 to 100.
     */
-   logitBias: import_v44.z.record(import_v44.z.string(), import_v44.z.number()).optional(),
+   logitBias: z4.record(z4.string(), z4.number()).optional(),
    /**
     * The suffix that comes after a completion of inserted text.
     */
-   suffix: import_v44.z.string().optional(),
+   suffix: z4.string().optional(),
    /**
     * A unique identifier representing your end-user, which can help providers to
     * monitor and detect abuse.
     */
-   user: import_v44.z.string().optional()
+   user: z4.string().optional()
  });

  // src/completion/openai-compatible-completion-language-model.ts
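The completion provider options cover `echo`, `logitBias`, `suffix`, and `user`. A hedged example of an options object that validates against this schema; how it is passed to a call depends on the consuming AI SDK API and is not shown in this diff, and the token ID below is illustrative:

```ts
// An options object matching openaiCompatibleCompletionProviderOptions:
// logitBias maps GPT-tokenizer token IDs to bias values between -100 and 100.
const completionOptions = {
  echo: false,
  logitBias: {
    "50256": -100, // strongly discourage this (illustrative) token
  },
  suffix: "\n",
  user: "user-1234",
};
```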
@@ -1168,33 +1178,33 @@ var OpenAICompatibleCompletionLanguageModel = class {
      };
    }
  };
- var usageSchema = import_v45.z.object({
-   prompt_tokens: import_v45.z.number(),
-   completion_tokens: import_v45.z.number(),
-   total_tokens: import_v45.z.number()
+ var usageSchema = z5.object({
+   prompt_tokens: z5.number(),
+   completion_tokens: z5.number(),
+   total_tokens: z5.number()
  });
- var openaiCompatibleCompletionResponseSchema = import_v45.z.object({
-   id: import_v45.z.string().nullish(),
-   created: import_v45.z.number().nullish(),
-   model: import_v45.z.string().nullish(),
-   choices: import_v45.z.array(
-     import_v45.z.object({
-       text: import_v45.z.string(),
-       finish_reason: import_v45.z.string()
+ var openaiCompatibleCompletionResponseSchema = z5.object({
+   id: z5.string().nullish(),
+   created: z5.number().nullish(),
+   model: z5.string().nullish(),
+   choices: z5.array(
+     z5.object({
+       text: z5.string(),
+       finish_reason: z5.string()
      })
    ),
    usage: usageSchema.nullish()
  });
- var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_v45.z.union([
-   import_v45.z.object({
-     id: import_v45.z.string().nullish(),
-     created: import_v45.z.number().nullish(),
-     model: import_v45.z.string().nullish(),
-     choices: import_v45.z.array(
-       import_v45.z.object({
-         text: import_v45.z.string(),
-         finish_reason: import_v45.z.string().nullish(),
-         index: import_v45.z.number()
+ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
+   z5.object({
+     id: z5.string().nullish(),
+     created: z5.number().nullish(),
+     model: z5.string().nullish(),
+     choices: z5.array(
+       z5.object({
+         text: z5.string(),
+         finish_reason: z5.string().nullish(),
+         index: z5.number()
        })
      ),
      usage: usageSchema.nullish()
@@ -1205,21 +1215,21 @@ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_v45.z.
  // src/embedding/openai-compatible-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_v47 = require("zod/v4");
+ var z7 = __toESM(require("zod/v4"));

  // src/embedding/openai-compatible-embedding-options.ts
- var import_v46 = require("zod/v4");
- var openaiCompatibleEmbeddingProviderOptions = import_v46.z.object({
+ var z6 = __toESM(require("zod/v4"));
+ var openaiCompatibleEmbeddingProviderOptions = z6.object({
    /**
     * The number of dimensions the resulting output embeddings should have.
     * Only supported in text-embedding-3 and later models.
     */
-   dimensions: import_v46.z.number().optional(),
+   dimensions: z6.number().optional(),
    /**
     * A unique identifier representing your end-user, which can help providers to
     * monitor and detect abuse.
     */
-   user: import_v46.z.string().optional()
+   user: z6.string().optional()
  });

  // src/embedding/openai-compatible-embedding-model.ts
@@ -1304,15 +1314,15 @@ var OpenAICompatibleEmbeddingModel = class {
      };
    }
  };
- var openaiTextEmbeddingResponseSchema = import_v47.z.object({
-   data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
-   usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish(),
-   providerMetadata: import_v47.z.record(import_v47.z.string(), import_v47.z.record(import_v47.z.string(), import_v47.z.any())).optional()
+ var openaiTextEmbeddingResponseSchema = z7.object({
+   data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+   usage: z7.object({ prompt_tokens: z7.number() }).nullish(),
+   providerMetadata: z7.record(z7.string(), z7.record(z7.string(), z7.any())).optional()
  });

  // src/image/openai-compatible-image-model.ts
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_v48 = require("zod/v4");
+ var z8 = __toESM(require("zod/v4"));
  var OpenAICompatibleImageModel = class {
    constructor(modelId, config) {
      this.modelId = modelId;
@@ -1380,15 +1390,15 @@ var OpenAICompatibleImageModel = class {
      };
    }
  };
- var openaiCompatibleImageResponseSchema = import_v48.z.object({
-   data: import_v48.z.array(import_v48.z.object({ b64_json: import_v48.z.string() }))
+ var openaiCompatibleImageResponseSchema = z8.object({
+   data: z8.array(z8.object({ b64_json: z8.string() }))
  });

  // src/openai-compatible-provider.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");

  // src/version.ts
- var VERSION = true ? "2.0.0-beta.10" : "0.0.0-test";
+ var VERSION = true ? "2.0.0-beta.12" : "0.0.0-test";

  // src/openai-compatible-provider.ts
  function createOpenAICompatible(options) {
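For context, `createOpenAICompatible` (whose emitted `VERSION` constant is bumped above) is the package's entry point. A hedged usage sketch; option names and model accessors not shown in this diff (`baseURL`, `apiKey`, `chatModel`, `textEmbeddingModel`) are taken from the package's documented surface and should be verified against its README:

```ts
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';

// A provider pointed at any OpenAI-compatible endpoint. `name` and `baseURL`
// are the documented required options; `apiKey` is optional.
const provider = createOpenAICompatible({
  name: 'example-provider',
  baseURL: 'https://api.example.com/v1',
  apiKey: process.env.EXAMPLE_API_KEY,
});

// Model handles for the model classes touched in this diff.
const chatModel = provider.chatModel('example-chat-model');
const embeddingModel = provider.textEmbeddingModel('example-embedding-model');
```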