@ai-sdk/openai-compatible 1.0.20 → 1.0.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,12 @@
  # @ai-sdk/openai-compatible

+ ## 1.0.21
+
+ ### Patch Changes
+
+ - Updated dependencies [17f9872]
+   - @ai-sdk/provider-utils@3.0.12
+
  ## 1.0.20

  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -1,7 +1,6 @@
  import { SharedV2ProviderMetadata, LanguageModelV2, EmbeddingModelV2, ImageModelV2, ProviderV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import * as z from 'zod/v4';
- import { ZodType, z as z$1 } from 'zod/v4';
+ import { z, ZodType } from 'zod/v4';

  type OpenAICompatibleChatModelId = string;
  declare const openaiCompatibleProviderOptions: z.ZodObject<{
@@ -10,15 +9,15 @@ declare const openaiCompatibleProviderOptions: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;

- declare const openaiCompatibleErrorDataSchema: z$1.ZodObject<{
-     error: z$1.ZodObject<{
-         message: z$1.ZodString;
-         type: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodString>>;
-         param: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodAny>>;
-         code: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodUnion<readonly [z$1.ZodString, z$1.ZodNumber]>>>;
-     }, z$1.core.$strip>;
- }, z$1.core.$strip>;
- type OpenAICompatibleErrorData = z$1.infer<typeof openaiCompatibleErrorDataSchema>;
+ declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
+     error: z.ZodObject<{
+         message: z.ZodString;
+         type: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+         param: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+         code: z.ZodOptional<z.ZodNullable<z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>>>;
+     }, z.core.$strip>;
+ }, z.core.$strip>;
+ type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>;
  type ProviderErrorStructure<T> = {
      errorSchema: ZodType<T>;
      errorToMessage: (error: T) => string;
package/dist/index.d.ts CHANGED
@@ -1,7 +1,6 @@
  import { SharedV2ProviderMetadata, LanguageModelV2, EmbeddingModelV2, ImageModelV2, ProviderV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import * as z from 'zod/v4';
- import { ZodType, z as z$1 } from 'zod/v4';
+ import { z, ZodType } from 'zod/v4';

  type OpenAICompatibleChatModelId = string;
  declare const openaiCompatibleProviderOptions: z.ZodObject<{
@@ -10,15 +9,15 @@ declare const openaiCompatibleProviderOptions: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;

- declare const openaiCompatibleErrorDataSchema: z$1.ZodObject<{
-     error: z$1.ZodObject<{
-         message: z$1.ZodString;
-         type: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodString>>;
-         param: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodAny>>;
-         code: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodUnion<readonly [z$1.ZodString, z$1.ZodNumber]>>>;
-     }, z$1.core.$strip>;
- }, z$1.core.$strip>;
- type OpenAICompatibleErrorData = z$1.infer<typeof openaiCompatibleErrorDataSchema>;
+ declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
+     error: z.ZodObject<{
+         message: z.ZodString;
+         type: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+         param: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+         code: z.ZodOptional<z.ZodNullable<z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>>>;
+     }, z.core.$strip>;
+ }, z.core.$strip>;
+ type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>;
  type ProviderErrorStructure<T> = {
      errorSchema: ZodType<T>;
      errorToMessage: (error: T) => string;
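
The only substantive change in both declaration files is the zod import: 1.0.20 imported `zod/v4` twice (once as a namespace, once as a named import aliased to `z$1`), and 1.0.21 collapses this into a single named import so every schema type references `z.*`. A minimal sketch of the consolidated style, shown on a stand-alone schema with the same shape as `openaiCompatibleErrorDataSchema`; this snippet is illustrative and not part of the package:

```ts
import { z } from 'zod/v4';

// Same shape as the declared openaiCompatibleErrorDataSchema above; the extra
// fields stay loose (nullish) to tolerate provider-specific error formats.
const errorDataSchema = z.object({
  error: z.object({
    message: z.string(),
    type: z.string().nullish(),
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish(),
  }),
});

// Inference works the same way as the exported OpenAICompatibleErrorData type.
type ErrorData = z.infer<typeof errorDataSchema>;
```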
package/dist/index.js CHANGED
@@ -1,9 +1,7 @@
  "use strict";
- var __create = Object.create;
  var __defProp = Object.defineProperty;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
- var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
    for (var name in all)
@@ -17,14 +15,6 @@ var __copyProps = (to, from, except, desc) => {
    }
    return to;
  };
- var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
-   // If the importer is in node compatibility mode or this is not an ESM
-   // file that has been converted to a CommonJS file using a Babel-
-   // compatible transform (i.e. "__esModule" has not been set), then set
-   // "default" to the CommonJS "module.exports" for node compatibility.
-   isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
-   mod
- ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

  // src/index.ts
@@ -42,7 +32,7 @@ module.exports = __toCommonJS(src_exports);
  // src/chat/openai-compatible-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var z3 = __toESM(require("zod/v4"));
+ var import_v43 = require("zod/v4");

  // src/chat/convert-to-openai-compatible-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
@@ -196,30 +186,30 @@ function mapOpenAICompatibleFinishReason(finishReason) {
  }

  // src/chat/openai-compatible-chat-options.ts
- var z = __toESM(require("zod/v4"));
- var openaiCompatibleProviderOptions = z.object({
+ var import_v4 = require("zod/v4");
+ var openaiCompatibleProviderOptions = import_v4.z.object({
    /**
     * A unique identifier representing your end-user, which can help the provider to
     * monitor and detect abuse.
     */
-   user: z.string().optional(),
+   user: import_v4.z.string().optional(),
    /**
     * Reasoning effort for reasoning models. Defaults to `medium`.
     */
-   reasoningEffort: z.string().optional()
+   reasoningEffort: import_v4.z.string().optional()
  });

  // src/openai-compatible-error.ts
- var import_v4 = require("zod/v4");
- var openaiCompatibleErrorDataSchema = import_v4.z.object({
-   error: import_v4.z.object({
-     message: import_v4.z.string(),
+ var import_v42 = require("zod/v4");
+ var openaiCompatibleErrorDataSchema = import_v42.z.object({
+   error: import_v42.z.object({
+     message: import_v42.z.string(),
      // The additional information below is handled loosely to support
      // OpenAI-compatible providers that have slightly different error
      // responses:
-     type: import_v4.z.string().nullish(),
-     param: import_v4.z.any().nullish(),
-     code: import_v4.z.union([import_v4.z.string(), import_v4.z.number()]).nullish()
+     type: import_v42.z.string().nullish(),
+     param: import_v42.z.any().nullish(),
+     code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
    })
  });
  var defaultOpenAICompatibleErrorStructure = {
@@ -738,71 +728,71 @@ var OpenAICompatibleChatLanguageModel = class {
      };
    }
  };
- var openaiCompatibleTokenUsageSchema = z3.object({
-   prompt_tokens: z3.number().nullish(),
-   completion_tokens: z3.number().nullish(),
-   total_tokens: z3.number().nullish(),
-   prompt_tokens_details: z3.object({
-     cached_tokens: z3.number().nullish()
+ var openaiCompatibleTokenUsageSchema = import_v43.z.object({
+   prompt_tokens: import_v43.z.number().nullish(),
+   completion_tokens: import_v43.z.number().nullish(),
+   total_tokens: import_v43.z.number().nullish(),
+   prompt_tokens_details: import_v43.z.object({
+     cached_tokens: import_v43.z.number().nullish()
    }).nullish(),
-   completion_tokens_details: z3.object({
-     reasoning_tokens: z3.number().nullish(),
-     accepted_prediction_tokens: z3.number().nullish(),
-     rejected_prediction_tokens: z3.number().nullish()
+   completion_tokens_details: import_v43.z.object({
+     reasoning_tokens: import_v43.z.number().nullish(),
+     accepted_prediction_tokens: import_v43.z.number().nullish(),
+     rejected_prediction_tokens: import_v43.z.number().nullish()
    }).nullish()
  }).nullish();
- var OpenAICompatibleChatResponseSchema = z3.object({
-   id: z3.string().nullish(),
-   created: z3.number().nullish(),
-   model: z3.string().nullish(),
-   choices: z3.array(
-     z3.object({
-       message: z3.object({
-         role: z3.literal("assistant").nullish(),
-         content: z3.string().nullish(),
-         reasoning_content: z3.string().nullish(),
-         reasoning: z3.string().nullish(),
-         tool_calls: z3.array(
-           z3.object({
-             id: z3.string().nullish(),
-             function: z3.object({
-               name: z3.string(),
-               arguments: z3.string()
+ var OpenAICompatibleChatResponseSchema = import_v43.z.object({
+   id: import_v43.z.string().nullish(),
+   created: import_v43.z.number().nullish(),
+   model: import_v43.z.string().nullish(),
+   choices: import_v43.z.array(
+     import_v43.z.object({
+       message: import_v43.z.object({
+         role: import_v43.z.literal("assistant").nullish(),
+         content: import_v43.z.string().nullish(),
+         reasoning_content: import_v43.z.string().nullish(),
+         reasoning: import_v43.z.string().nullish(),
+         tool_calls: import_v43.z.array(
+           import_v43.z.object({
+             id: import_v43.z.string().nullish(),
+             function: import_v43.z.object({
+               name: import_v43.z.string(),
+               arguments: import_v43.z.string()
              })
            })
          ).nullish()
        }),
-       finish_reason: z3.string().nullish()
+       finish_reason: import_v43.z.string().nullish()
      })
    ),
    usage: openaiCompatibleTokenUsageSchema
  });
- var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
-   z3.object({
-     id: z3.string().nullish(),
-     created: z3.number().nullish(),
-     model: z3.string().nullish(),
-     choices: z3.array(
-       z3.object({
-         delta: z3.object({
-           role: z3.enum(["assistant"]).nullish(),
-           content: z3.string().nullish(),
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union([
+   import_v43.z.object({
+     id: import_v43.z.string().nullish(),
+     created: import_v43.z.number().nullish(),
+     model: import_v43.z.string().nullish(),
+     choices: import_v43.z.array(
+       import_v43.z.object({
+         delta: import_v43.z.object({
+           role: import_v43.z.enum(["assistant"]).nullish(),
+           content: import_v43.z.string().nullish(),
            // Most openai-compatible models set `reasoning_content`, but some
            // providers serving `gpt-oss` set `reasoning`. See #7866
-           reasoning_content: z3.string().nullish(),
-           reasoning: z3.string().nullish(),
-           tool_calls: z3.array(
-             z3.object({
-               index: z3.number(),
-               id: z3.string().nullish(),
-               function: z3.object({
-                 name: z3.string().nullish(),
-                 arguments: z3.string().nullish()
+           reasoning_content: import_v43.z.string().nullish(),
+           reasoning: import_v43.z.string().nullish(),
+           tool_calls: import_v43.z.array(
+             import_v43.z.object({
+               index: import_v43.z.number(),
+               id: import_v43.z.string().nullish(),
+               function: import_v43.z.object({
+                 name: import_v43.z.string().nullish(),
+                 arguments: import_v43.z.string().nullish()
                })
              })
            ).nullish()
          }).nullish(),
-         finish_reason: z3.string().nullish()
+         finish_reason: import_v43.z.string().nullish()
        })
      ),
      usage: openaiCompatibleTokenUsageSchema
@@ -812,7 +802,7 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([

  // src/completion/openai-compatible-completion-language-model.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var z5 = __toESM(require("zod/v4"));
+ var import_v45 = require("zod/v4");

  // src/completion/convert-to-openai-compatible-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -920,28 +910,28 @@ function mapOpenAICompatibleFinishReason2(finishReason) {
  }

  // src/completion/openai-compatible-completion-options.ts
- var z4 = __toESM(require("zod/v4"));
- var openaiCompatibleCompletionProviderOptions = z4.object({
+ var import_v44 = require("zod/v4");
+ var openaiCompatibleCompletionProviderOptions = import_v44.z.object({
    /**
     * Echo back the prompt in addition to the completion.
     */
-   echo: z4.boolean().optional(),
+   echo: import_v44.z.boolean().optional(),
    /**
     * Modify the likelihood of specified tokens appearing in the completion.
     *
     * Accepts a JSON object that maps tokens (specified by their token ID in
     * the GPT tokenizer) to an associated bias value from -100 to 100.
     */
-   logitBias: z4.record(z4.string(), z4.number()).optional(),
+   logitBias: import_v44.z.record(import_v44.z.string(), import_v44.z.number()).optional(),
    /**
     * The suffix that comes after a completion of inserted text.
     */
-   suffix: z4.string().optional(),
+   suffix: import_v44.z.string().optional(),
    /**
     * A unique identifier representing your end-user, which can help providers to
     * monitor and detect abuse.
     */
-   user: z4.string().optional()
+   user: import_v44.z.string().optional()
  });

  // src/completion/openai-compatible-completion-language-model.ts
@@ -1174,33 +1164,33 @@ var OpenAICompatibleCompletionLanguageModel = class {
      };
    }
  };
- var usageSchema = z5.object({
-   prompt_tokens: z5.number(),
-   completion_tokens: z5.number(),
-   total_tokens: z5.number()
+ var usageSchema = import_v45.z.object({
+   prompt_tokens: import_v45.z.number(),
+   completion_tokens: import_v45.z.number(),
+   total_tokens: import_v45.z.number()
  });
- var openaiCompatibleCompletionResponseSchema = z5.object({
-   id: z5.string().nullish(),
-   created: z5.number().nullish(),
-   model: z5.string().nullish(),
-   choices: z5.array(
-     z5.object({
-       text: z5.string(),
-       finish_reason: z5.string()
+ var openaiCompatibleCompletionResponseSchema = import_v45.z.object({
+   id: import_v45.z.string().nullish(),
+   created: import_v45.z.number().nullish(),
+   model: import_v45.z.string().nullish(),
+   choices: import_v45.z.array(
+     import_v45.z.object({
+       text: import_v45.z.string(),
+       finish_reason: import_v45.z.string()
      })
    ),
    usage: usageSchema.nullish()
  });
- var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
-   z5.object({
-     id: z5.string().nullish(),
-     created: z5.number().nullish(),
-     model: z5.string().nullish(),
-     choices: z5.array(
-       z5.object({
-         text: z5.string(),
-         finish_reason: z5.string().nullish(),
-         index: z5.number()
+ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_v45.z.union([
+   import_v45.z.object({
+     id: import_v45.z.string().nullish(),
+     created: import_v45.z.number().nullish(),
+     model: import_v45.z.string().nullish(),
+     choices: import_v45.z.array(
+       import_v45.z.object({
+         text: import_v45.z.string(),
+         finish_reason: import_v45.z.string().nullish(),
+         index: import_v45.z.number()
        })
      ),
      usage: usageSchema.nullish()
@@ -1211,21 +1201,21 @@ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
  // src/embedding/openai-compatible-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var z7 = __toESM(require("zod/v4"));
+ var import_v47 = require("zod/v4");

  // src/embedding/openai-compatible-embedding-options.ts
- var z6 = __toESM(require("zod/v4"));
- var openaiCompatibleEmbeddingProviderOptions = z6.object({
+ var import_v46 = require("zod/v4");
+ var openaiCompatibleEmbeddingProviderOptions = import_v46.z.object({
    /**
     * The number of dimensions the resulting output embeddings should have.
     * Only supported in text-embedding-3 and later models.
     */
-   dimensions: z6.number().optional(),
+   dimensions: import_v46.z.number().optional(),
    /**
     * A unique identifier representing your end-user, which can help providers to
     * monitor and detect abuse.
     */
-   user: z6.string().optional()
+   user: import_v46.z.string().optional()
  });

  // src/embedding/openai-compatible-embedding-model.ts
@@ -1310,15 +1300,15 @@ var OpenAICompatibleEmbeddingModel = class {
      };
    }
  };
- var openaiTextEmbeddingResponseSchema = z7.object({
-   data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
-   usage: z7.object({ prompt_tokens: z7.number() }).nullish(),
-   providerMetadata: z7.record(z7.string(), z7.record(z7.string(), z7.any())).optional()
+ var openaiTextEmbeddingResponseSchema = import_v47.z.object({
+   data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
+   usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish(),
+   providerMetadata: import_v47.z.record(import_v47.z.string(), import_v47.z.record(import_v47.z.string(), import_v47.z.any())).optional()
  });

  // src/image/openai-compatible-image-model.ts
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var z8 = __toESM(require("zod/v4"));
+ var import_v48 = require("zod/v4");
  var OpenAICompatibleImageModel = class {
    constructor(modelId, config) {
      this.modelId = modelId;
@@ -1386,15 +1376,15 @@ var OpenAICompatibleImageModel = class {
      };
    }
  };
- var openaiCompatibleImageResponseSchema = z8.object({
-   data: z8.array(z8.object({ b64_json: z8.string() }))
+ var openaiCompatibleImageResponseSchema = import_v48.z.object({
+   data: import_v48.z.array(import_v48.z.object({ b64_json: import_v48.z.string() }))
  });

  // src/openai-compatible-provider.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");

  // src/version.ts
- var VERSION = true ? "1.0.20" : "0.0.0-test";
+ var VERSION = true ? "1.0.21" : "0.0.0-test";

  // src/openai-compatible-provider.ts
  function createOpenAICompatible(options) {
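
Across dist/index.js, the same import change shows up in the compiled CommonJS: every `var zN = __toESM(require("zod/v4"))` namespace wrapper becomes a plain `var import_v4N = require("zod/v4")` with `.z` property access, which is why the `__create`, `__getProtoOf`, and `__toESM` helpers disappear from the bundle header. A small sketch of the pattern follows; the source-level imports in the comments are inferred from this compiled output rather than taken verbatim from the package source:

```ts
// Before (1.0.20): a namespace import forces the bundler to emit an interop wrapper.
//   source:   import * as z from 'zod/v4';
//   compiled: var z3 = __toESM(require("zod/v4"));
//             var schema = z3.object({ user: z3.string().optional() });
//
// After (1.0.21): a named import compiles to direct property access, no interop helper.
//   source:   import { z } from 'zod/v4';
//   compiled: var import_v4 = require("zod/v4");
//             var schema = import_v4.z.object({ user: import_v4.z.string().optional() });
import { z } from 'zod/v4';

// Same shape as openaiCompatibleProviderOptions in the bundle above (illustrative only).
export const providerOptionsSketch = z.object({
  user: z.string().optional(),
  reasoningEffort: z.string().optional(),
});
```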