@ai-sdk/openai-compatible 1.0.19 → 1.0.20

This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
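The only code change in this release is mechanical: every named `import { z } from 'zod/v4'` in the build output becomes a namespace import (`import * as z from 'zod/v4'`), matching changeset `6f0644c` below. As the diffs show, the emitted schemas themselves are unchanged; only the import bindings and local identifiers differ. A minimal sketch of the two styles (not taken from the package source, and assuming a project that depends on zod >= 3.25.0, which provides the `zod/v4` subpath):

```ts
// 1.0.19 style: pull in the named `z` export and build schemas off it.
// import { z } from 'zod/v4';

// 1.0.20 style: import the module namespace itself; the same builders
// (`object`, `string`, `infer`, ...) are available directly on it.
import * as z from 'zod/v4';

const providerOptions = z.object({
  user: z.string().optional(),
  reasoningEffort: z.string().optional(),
});

type ProviderOptions = z.infer<typeof providerOptions>;
```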
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
  # @ai-sdk/openai-compatible

+ ## 1.0.20
+
+ ### Patch Changes
+
+ - 6f0644c: chore: use import \* from zod/v4
+ - Updated dependencies [6f0644c]
+ - Updated dependencies [6f0644c]
+ - @ai-sdk/provider-utils@3.0.11
+
  ## 1.0.19

  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -1,6 +1,7 @@
  import { SharedV2ProviderMetadata, LanguageModelV2, EmbeddingModelV2, ImageModelV2, ProviderV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z, ZodType } from 'zod/v4';
+ import * as z from 'zod/v4';
+ import { ZodType, z as z$1 } from 'zod/v4';

  type OpenAICompatibleChatModelId = string;
  declare const openaiCompatibleProviderOptions: z.ZodObject<{
@@ -9,15 +10,15 @@ declare const openaiCompatibleProviderOptions: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;

- declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
- error: z.ZodObject<{
- message: z.ZodString;
- type: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- param: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
- code: z.ZodOptional<z.ZodNullable<z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>>>;
- }, z.core.$strip>;
- }, z.core.$strip>;
- type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>;
+ declare const openaiCompatibleErrorDataSchema: z$1.ZodObject<{
+ error: z$1.ZodObject<{
+ message: z$1.ZodString;
+ type: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodString>>;
+ param: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodAny>>;
+ code: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodUnion<readonly [z$1.ZodString, z$1.ZodNumber]>>>;
+ }, z$1.core.$strip>;
+ }, z$1.core.$strip>;
+ type OpenAICompatibleErrorData = z$1.infer<typeof openaiCompatibleErrorDataSchema>;
  type ProviderErrorStructure<T> = {
  errorSchema: ZodType<T>;
  errorToMessage: (error: T) => string;
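In the declaration file above (and in `index.d.ts` below, which receives the identical change), the old single import becomes two: a namespace import `z`, used by the schemas whose source modules now do `import * as z`, and a renamed named import `z$1` for the error-schema module, which still imports zod's `z` export directly. The rename only avoids an identifier collision; both bindings expose the same zod types. A hypothetical fragment, just to illustrate that the two imports coexist:

```ts
import * as z from 'zod/v4';
import { ZodType, z as z$1 } from 'zod/v4';

// Both bindings reach the same type constructors, so the two declaration
// styles in index.d.mts are interchangeable from a consumer's point of view.
declare const fromNamespaceImport: z.ZodObject<{ user: z.ZodOptional<z.ZodString> }, z.core.$strip>;
declare const fromNamedExport: z$1.ZodObject<{ message: z$1.ZodString }, z$1.core.$strip>;
declare const asGenericSchema: ZodType<{ message: string }>;
```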
package/dist/index.d.ts CHANGED
@@ -1,6 +1,7 @@
  import { SharedV2ProviderMetadata, LanguageModelV2, EmbeddingModelV2, ImageModelV2, ProviderV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z, ZodType } from 'zod/v4';
+ import * as z from 'zod/v4';
+ import { ZodType, z as z$1 } from 'zod/v4';

  type OpenAICompatibleChatModelId = string;
  declare const openaiCompatibleProviderOptions: z.ZodObject<{
@@ -9,15 +10,15 @@ declare const openaiCompatibleProviderOptions: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;

- declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
- error: z.ZodObject<{
- message: z.ZodString;
- type: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- param: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
- code: z.ZodOptional<z.ZodNullable<z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>>>;
- }, z.core.$strip>;
- }, z.core.$strip>;
- type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>;
+ declare const openaiCompatibleErrorDataSchema: z$1.ZodObject<{
+ error: z$1.ZodObject<{
+ message: z$1.ZodString;
+ type: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodString>>;
+ param: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodAny>>;
+ code: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodUnion<readonly [z$1.ZodString, z$1.ZodNumber]>>>;
+ }, z$1.core.$strip>;
+ }, z$1.core.$strip>;
+ type OpenAICompatibleErrorData = z$1.infer<typeof openaiCompatibleErrorDataSchema>;
  type ProviderErrorStructure<T> = {
  errorSchema: ZodType<T>;
  errorToMessage: (error: T) => string;
package/dist/index.js CHANGED
@@ -1,7 +1,9 @@
  "use strict";
+ var __create = Object.create;
  var __defProp = Object.defineProperty;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
  for (var name in all)
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
  }
  return to;
  };
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+ // If the importer is in node compatibility mode or this is not an ESM
+ // file that has been converted to a CommonJS file using a Babel-
+ // compatible transform (i.e. "__esModule" has not been set), then set
+ // "default" to the CommonJS "module.exports" for node compatibility.
+ isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+ mod
+ ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

  // src/index.ts
@@ -32,7 +42,7 @@ module.exports = __toCommonJS(src_exports);
  // src/chat/openai-compatible-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_v43 = require("zod/v4");
+ var z3 = __toESM(require("zod/v4"));

  // src/chat/convert-to-openai-compatible-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
@@ -186,30 +196,30 @@ function mapOpenAICompatibleFinishReason(finishReason) {
  }

  // src/chat/openai-compatible-chat-options.ts
- var import_v4 = require("zod/v4");
- var openaiCompatibleProviderOptions = import_v4.z.object({
+ var z = __toESM(require("zod/v4"));
+ var openaiCompatibleProviderOptions = z.object({
  /**
  * A unique identifier representing your end-user, which can help the provider to
  * monitor and detect abuse.
  */
- user: import_v4.z.string().optional(),
+ user: z.string().optional(),
  /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
- reasoningEffort: import_v4.z.string().optional()
+ reasoningEffort: z.string().optional()
  });

  // src/openai-compatible-error.ts
- var import_v42 = require("zod/v4");
- var openaiCompatibleErrorDataSchema = import_v42.z.object({
- error: import_v42.z.object({
- message: import_v42.z.string(),
+ var import_v4 = require("zod/v4");
+ var openaiCompatibleErrorDataSchema = import_v4.z.object({
+ error: import_v4.z.object({
+ message: import_v4.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_v42.z.string().nullish(),
- param: import_v42.z.any().nullish(),
- code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
+ type: import_v4.z.string().nullish(),
+ param: import_v4.z.any().nullish(),
+ code: import_v4.z.union([import_v4.z.string(), import_v4.z.number()]).nullish()
  })
  });
  var defaultOpenAICompatibleErrorStructure = {
@@ -728,71 +738,71 @@ var OpenAICompatibleChatLanguageModel = class {
  };
  }
  };
- var openaiCompatibleTokenUsageSchema = import_v43.z.object({
- prompt_tokens: import_v43.z.number().nullish(),
- completion_tokens: import_v43.z.number().nullish(),
- total_tokens: import_v43.z.number().nullish(),
- prompt_tokens_details: import_v43.z.object({
- cached_tokens: import_v43.z.number().nullish()
+ var openaiCompatibleTokenUsageSchema = z3.object({
+ prompt_tokens: z3.number().nullish(),
+ completion_tokens: z3.number().nullish(),
+ total_tokens: z3.number().nullish(),
+ prompt_tokens_details: z3.object({
+ cached_tokens: z3.number().nullish()
  }).nullish(),
- completion_tokens_details: import_v43.z.object({
- reasoning_tokens: import_v43.z.number().nullish(),
- accepted_prediction_tokens: import_v43.z.number().nullish(),
- rejected_prediction_tokens: import_v43.z.number().nullish()
+ completion_tokens_details: z3.object({
+ reasoning_tokens: z3.number().nullish(),
+ accepted_prediction_tokens: z3.number().nullish(),
+ rejected_prediction_tokens: z3.number().nullish()
  }).nullish()
  }).nullish();
- var OpenAICompatibleChatResponseSchema = import_v43.z.object({
- id: import_v43.z.string().nullish(),
- created: import_v43.z.number().nullish(),
- model: import_v43.z.string().nullish(),
- choices: import_v43.z.array(
- import_v43.z.object({
- message: import_v43.z.object({
- role: import_v43.z.literal("assistant").nullish(),
- content: import_v43.z.string().nullish(),
- reasoning_content: import_v43.z.string().nullish(),
- reasoning: import_v43.z.string().nullish(),
- tool_calls: import_v43.z.array(
- import_v43.z.object({
- id: import_v43.z.string().nullish(),
- function: import_v43.z.object({
- name: import_v43.z.string(),
- arguments: import_v43.z.string()
+ var OpenAICompatibleChatResponseSchema = z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ message: z3.object({
+ role: z3.literal("assistant").nullish(),
+ content: z3.string().nullish(),
+ reasoning_content: z3.string().nullish(),
+ reasoning: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ id: z3.string().nullish(),
+ function: z3.object({
+ name: z3.string(),
+ arguments: z3.string()
  })
  })
  ).nullish()
  }),
- finish_reason: import_v43.z.string().nullish()
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
  });
- var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union([
- import_v43.z.object({
- id: import_v43.z.string().nullish(),
- created: import_v43.z.number().nullish(),
- model: import_v43.z.string().nullish(),
- choices: import_v43.z.array(
- import_v43.z.object({
- delta: import_v43.z.object({
- role: import_v43.z.enum(["assistant"]).nullish(),
- content: import_v43.z.string().nullish(),
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
+ z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ delta: z3.object({
+ role: z3.enum(["assistant"]).nullish(),
+ content: z3.string().nullish(),
  // Most openai-compatible models set `reasoning_content`, but some
  // providers serving `gpt-oss` set `reasoning`. See #7866
- reasoning_content: import_v43.z.string().nullish(),
- reasoning: import_v43.z.string().nullish(),
- tool_calls: import_v43.z.array(
- import_v43.z.object({
- index: import_v43.z.number(),
- id: import_v43.z.string().nullish(),
- function: import_v43.z.object({
- name: import_v43.z.string().nullish(),
- arguments: import_v43.z.string().nullish()
+ reasoning_content: z3.string().nullish(),
+ reasoning: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ index: z3.number(),
+ id: z3.string().nullish(),
+ function: z3.object({
+ name: z3.string().nullish(),
+ arguments: z3.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- finish_reason: import_v43.z.string().nullish()
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
@@ -802,7 +812,7 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union(

  // src/completion/openai-compatible-completion-language-model.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_v45 = require("zod/v4");
+ var z5 = __toESM(require("zod/v4"));

  // src/completion/convert-to-openai-compatible-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -910,28 +920,28 @@ function mapOpenAICompatibleFinishReason2(finishReason) {
  }

  // src/completion/openai-compatible-completion-options.ts
- var import_v44 = require("zod/v4");
- var openaiCompatibleCompletionProviderOptions = import_v44.z.object({
+ var z4 = __toESM(require("zod/v4"));
+ var openaiCompatibleCompletionProviderOptions = z4.object({
  /**
  * Echo back the prompt in addition to the completion.
  */
- echo: import_v44.z.boolean().optional(),
+ echo: z4.boolean().optional(),
  /**
  * Modify the likelihood of specified tokens appearing in the completion.
  *
  * Accepts a JSON object that maps tokens (specified by their token ID in
  * the GPT tokenizer) to an associated bias value from -100 to 100.
  */
- logitBias: import_v44.z.record(import_v44.z.string(), import_v44.z.number()).optional(),
+ logitBias: z4.record(z4.string(), z4.number()).optional(),
  /**
  * The suffix that comes after a completion of inserted text.
  */
- suffix: import_v44.z.string().optional(),
+ suffix: z4.string().optional(),
  /**
  * A unique identifier representing your end-user, which can help providers to
  * monitor and detect abuse.
  */
- user: import_v44.z.string().optional()
+ user: z4.string().optional()
  });

  // src/completion/openai-compatible-completion-language-model.ts
@@ -1164,33 +1174,33 @@ var OpenAICompatibleCompletionLanguageModel = class {
  };
  }
  };
- var usageSchema = import_v45.z.object({
- prompt_tokens: import_v45.z.number(),
- completion_tokens: import_v45.z.number(),
- total_tokens: import_v45.z.number()
+ var usageSchema = z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number(),
+ total_tokens: z5.number()
  });
- var openaiCompatibleCompletionResponseSchema = import_v45.z.object({
- id: import_v45.z.string().nullish(),
- created: import_v45.z.number().nullish(),
- model: import_v45.z.string().nullish(),
- choices: import_v45.z.array(
- import_v45.z.object({
- text: import_v45.z.string(),
- finish_reason: import_v45.z.string()
+ var openaiCompatibleCompletionResponseSchema = z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string()
  })
  ),
  usage: usageSchema.nullish()
  });
- var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_v45.z.union([
- import_v45.z.object({
- id: import_v45.z.string().nullish(),
- created: import_v45.z.number().nullish(),
- model: import_v45.z.string().nullish(),
- choices: import_v45.z.array(
- import_v45.z.object({
- text: import_v45.z.string(),
- finish_reason: import_v45.z.string().nullish(),
- index: import_v45.z.number()
+ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
+ z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string().nullish(),
+ index: z5.number()
  })
  ),
  usage: usageSchema.nullish()
@@ -1201,21 +1211,21 @@ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_v45.z.
  // src/embedding/openai-compatible-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_v47 = require("zod/v4");
+ var z7 = __toESM(require("zod/v4"));

  // src/embedding/openai-compatible-embedding-options.ts
- var import_v46 = require("zod/v4");
- var openaiCompatibleEmbeddingProviderOptions = import_v46.z.object({
+ var z6 = __toESM(require("zod/v4"));
+ var openaiCompatibleEmbeddingProviderOptions = z6.object({
  /**
  * The number of dimensions the resulting output embeddings should have.
  * Only supported in text-embedding-3 and later models.
  */
- dimensions: import_v46.z.number().optional(),
+ dimensions: z6.number().optional(),
  /**
  * A unique identifier representing your end-user, which can help providers to
  * monitor and detect abuse.
  */
- user: import_v46.z.string().optional()
+ user: z6.string().optional()
  });

  // src/embedding/openai-compatible-embedding-model.ts
@@ -1300,15 +1310,15 @@ var OpenAICompatibleEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_v47.z.object({
- data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
- usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish(),
- providerMetadata: import_v47.z.record(import_v47.z.string(), import_v47.z.record(import_v47.z.string(), import_v47.z.any())).optional()
+ var openaiTextEmbeddingResponseSchema = z7.object({
+ data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+ usage: z7.object({ prompt_tokens: z7.number() }).nullish(),
+ providerMetadata: z7.record(z7.string(), z7.record(z7.string(), z7.any())).optional()
  });

  // src/image/openai-compatible-image-model.ts
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_v48 = require("zod/v4");
+ var z8 = __toESM(require("zod/v4"));
  var OpenAICompatibleImageModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
@@ -1376,15 +1386,15 @@ var OpenAICompatibleImageModel = class {
  };
  }
  };
- var openaiCompatibleImageResponseSchema = import_v48.z.object({
- data: import_v48.z.array(import_v48.z.object({ b64_json: import_v48.z.string() }))
+ var openaiCompatibleImageResponseSchema = z8.object({
+ data: z8.array(z8.object({ b64_json: z8.string() }))
  });

  // src/openai-compatible-provider.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");

  // src/version.ts
- var VERSION = true ? "1.0.19" : "0.0.0-test";
+ var VERSION = true ? "1.0.20" : "0.0.0-test";

  // src/openai-compatible-provider.ts
  function createOpenAICompatible(options) {
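In the CommonJS bundle, the namespace imports are realized through esbuild's `__toESM` helper, added in the first hunk of `dist/index.js`: `require("zod/v4")` is wrapped so the result behaves like an ES module namespace object, and call sites drop the extra `.z` hop (`import_v43.z.object(...)` becomes `z3.object(...)`). A simplified TypeScript rendering of that interop, only to show the shape of the object the `z3`/`z4`/... bindings hold; it is not the exact helper the bundle inlines:

```ts
// Simplified stand-in for esbuild's __toESM (see the real helper in the diff above).
// It copies the CommonJS exports onto a fresh object and, when the module is plain
// CommonJS (no __esModule flag), exposes module.exports as `default`.
function toESM<T extends object>(mod: T & { __esModule?: boolean }): T & { default?: T } {
  const ns: T & { default?: T } = Object.assign(
    Object.create(Object.getPrototypeOf(mod)),
    mod,
  );
  if (!mod.__esModule) {
    Object.defineProperty(ns, "default", { value: mod, enumerable: true });
  }
  return ns;
}

// With this wrapper, `const z3 = toESM(require("zod/v4"))` behaves like
// `import * as z3 from 'zod/v4'`, which is why the rewritten call sites read
// `z3.object(...)` instead of `import_v43.z.object(...)`.
```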