@ai-sdk/openai-compatible 2.0.0-beta.13 → 2.0.0-beta.15

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
  # @ai-sdk/openai-compatible

+ ## 2.0.0-beta.15
+
+ ### Patch Changes
+
+ - Updated dependencies [f0b2157]
+ - @ai-sdk/provider-utils@4.0.0-beta.15
+
+ ## 2.0.0-beta.14
+
+ ### Patch Changes
+
+ - Updated dependencies [3b1d015]
+ - @ai-sdk/provider-utils@4.0.0-beta.14
+
  ## 2.0.0-beta.13

  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -1,7 +1,6 @@
  import { SharedV3ProviderMetadata, LanguageModelV3, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import * as z from 'zod/v4';
- import { ZodType, z as z$1 } from 'zod/v4';
+ import { z, ZodType } from 'zod/v4';

  type OpenAICompatibleChatModelId = string;
  declare const openaiCompatibleProviderOptions: z.ZodObject<{
@@ -10,15 +9,15 @@ declare const openaiCompatibleProviderOptions: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;

- declare const openaiCompatibleErrorDataSchema: z$1.ZodObject<{
- error: z$1.ZodObject<{
- message: z$1.ZodString;
- type: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodString>>;
- param: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodAny>>;
- code: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodUnion<readonly [z$1.ZodString, z$1.ZodNumber]>>>;
- }, z$1.core.$strip>;
- }, z$1.core.$strip>;
- type OpenAICompatibleErrorData = z$1.infer<typeof openaiCompatibleErrorDataSchema>;
+ declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
+ error: z.ZodObject<{
+ message: z.ZodString;
+ type: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ param: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+ code: z.ZodOptional<z.ZodNullable<z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>>>;
+ }, z.core.$strip>;
+ }, z.core.$strip>;
+ type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>;
  type ProviderErrorStructure<T> = {
  errorSchema: ZodType<T>;
  errorToMessage: (error: T) => string;
package/dist/index.d.ts CHANGED
@@ -1,7 +1,6 @@
  import { SharedV3ProviderMetadata, LanguageModelV3, EmbeddingModelV3, ImageModelV3, ProviderV3 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import * as z from 'zod/v4';
- import { ZodType, z as z$1 } from 'zod/v4';
+ import { z, ZodType } from 'zod/v4';

  type OpenAICompatibleChatModelId = string;
  declare const openaiCompatibleProviderOptions: z.ZodObject<{
@@ -10,15 +9,15 @@ declare const openaiCompatibleProviderOptions: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;

- declare const openaiCompatibleErrorDataSchema: z$1.ZodObject<{
- error: z$1.ZodObject<{
- message: z$1.ZodString;
- type: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodString>>;
- param: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodAny>>;
- code: z$1.ZodOptional<z$1.ZodNullable<z$1.ZodUnion<readonly [z$1.ZodString, z$1.ZodNumber]>>>;
- }, z$1.core.$strip>;
- }, z$1.core.$strip>;
- type OpenAICompatibleErrorData = z$1.infer<typeof openaiCompatibleErrorDataSchema>;
+ declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
+ error: z.ZodObject<{
+ message: z.ZodString;
+ type: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ param: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+ code: z.ZodOptional<z.ZodNullable<z.ZodUnion<readonly [z.ZodString, z.ZodNumber]>>>;
+ }, z.core.$strip>;
+ }, z.core.$strip>;
+ type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>;
  type ProviderErrorStructure<T> = {
  errorSchema: ZodType<T>;
  errorToMessage: (error: T) => string;
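Both declaration files change in the same way: the namespace import and the aliased named import of zod/v4 collapse into a single named import, so the declared error schema now references `z` directly instead of the generated `z$1` alias. A condensed TypeScript sketch of the resulting declaration shape (illustrative only; apart from the identifiers shown in the hunks above, nothing here is taken from the package):

```ts
// Before: two imports of the same module forced the second binding to be
// aliased as `z$1` in the emitted declarations.
//   import * as z from 'zod/v4';
//   import { ZodType, z as z$1 } from 'zod/v4';

// After: one named import covers both the schema builder and ZodType, so
// every declared schema type is expressed in terms of the same `z`.
import { z, ZodType } from 'zod/v4';

// Condensed form of the declared error schema (full shape is in the hunks above).
declare const openaiCompatibleErrorDataSchema: z.ZodObject<{
  error: z.ZodObject<{
    message: z.ZodString;
  }, z.core.$strip>;
}, z.core.$strip>;

type OpenAICompatibleErrorData = z.infer<typeof openaiCompatibleErrorDataSchema>;

// ZodType is still what providers supply for their own error schemas.
type ProviderErrorStructure<T> = {
  errorSchema: ZodType<T>;
  errorToMessage: (error: T) => string;
};
```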
package/dist/index.js CHANGED
@@ -1,9 +1,7 @@
  "use strict";
- var __create = Object.create;
  var __defProp = Object.defineProperty;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
- var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
  for (var name in all)
@@ -17,14 +15,6 @@ var __copyProps = (to, from, except, desc) => {
  }
  return to;
  };
- var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
- // If the importer is in node compatibility mode or this is not an ESM
- // file that has been converted to a CommonJS file using a Babel-
- // compatible transform (i.e. "__esModule" has not been set), then set
- // "default" to the CommonJS "module.exports" for node compatibility.
- isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
- mod
- ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

  // src/index.ts
@@ -42,7 +32,7 @@ module.exports = __toCommonJS(src_exports);
  // src/chat/openai-compatible-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var z3 = __toESM(require("zod/v4"));
+ var import_v43 = require("zod/v4");

  // src/chat/convert-to-openai-compatible-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
@@ -200,30 +190,30 @@ function mapOpenAICompatibleFinishReason(finishReason) {
  }

  // src/chat/openai-compatible-chat-options.ts
- var z = __toESM(require("zod/v4"));
- var openaiCompatibleProviderOptions = z.object({
+ var import_v4 = require("zod/v4");
+ var openaiCompatibleProviderOptions = import_v4.z.object({
  /**
  * A unique identifier representing your end-user, which can help the provider to
  * monitor and detect abuse.
  */
- user: z.string().optional(),
+ user: import_v4.z.string().optional(),
  /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
- reasoningEffort: z.string().optional()
+ reasoningEffort: import_v4.z.string().optional()
  });

  // src/openai-compatible-error.ts
- var import_v4 = require("zod/v4");
- var openaiCompatibleErrorDataSchema = import_v4.z.object({
- error: import_v4.z.object({
- message: import_v4.z.string(),
+ var import_v42 = require("zod/v4");
+ var openaiCompatibleErrorDataSchema = import_v42.z.object({
+ error: import_v42.z.object({
+ message: import_v42.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_v4.z.string().nullish(),
- param: import_v4.z.any().nullish(),
- code: import_v4.z.union([import_v4.z.string(), import_v4.z.number()]).nullish()
+ type: import_v42.z.string().nullish(),
+ param: import_v42.z.any().nullish(),
+ code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
  })
  });
  var defaultOpenAICompatibleErrorStructure = {
@@ -742,71 +732,71 @@ var OpenAICompatibleChatLanguageModel = class {
  };
  }
  };
- var openaiCompatibleTokenUsageSchema = z3.object({
- prompt_tokens: z3.number().nullish(),
- completion_tokens: z3.number().nullish(),
- total_tokens: z3.number().nullish(),
- prompt_tokens_details: z3.object({
- cached_tokens: z3.number().nullish()
+ var openaiCompatibleTokenUsageSchema = import_v43.z.object({
+ prompt_tokens: import_v43.z.number().nullish(),
+ completion_tokens: import_v43.z.number().nullish(),
+ total_tokens: import_v43.z.number().nullish(),
+ prompt_tokens_details: import_v43.z.object({
+ cached_tokens: import_v43.z.number().nullish()
  }).nullish(),
- completion_tokens_details: z3.object({
- reasoning_tokens: z3.number().nullish(),
- accepted_prediction_tokens: z3.number().nullish(),
- rejected_prediction_tokens: z3.number().nullish()
+ completion_tokens_details: import_v43.z.object({
+ reasoning_tokens: import_v43.z.number().nullish(),
+ accepted_prediction_tokens: import_v43.z.number().nullish(),
+ rejected_prediction_tokens: import_v43.z.number().nullish()
  }).nullish()
  }).nullish();
- var OpenAICompatibleChatResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- message: z3.object({
- role: z3.literal("assistant").nullish(),
- content: z3.string().nullish(),
- reasoning_content: z3.string().nullish(),
- reasoning: z3.string().nullish(),
- tool_calls: z3.array(
- z3.object({
- id: z3.string().nullish(),
- function: z3.object({
- name: z3.string(),
- arguments: z3.string()
+ var OpenAICompatibleChatResponseSchema = import_v43.z.object({
+ id: import_v43.z.string().nullish(),
+ created: import_v43.z.number().nullish(),
+ model: import_v43.z.string().nullish(),
+ choices: import_v43.z.array(
+ import_v43.z.object({
+ message: import_v43.z.object({
+ role: import_v43.z.literal("assistant").nullish(),
+ content: import_v43.z.string().nullish(),
+ reasoning_content: import_v43.z.string().nullish(),
+ reasoning: import_v43.z.string().nullish(),
+ tool_calls: import_v43.z.array(
+ import_v43.z.object({
+ id: import_v43.z.string().nullish(),
+ function: import_v43.z.object({
+ name: import_v43.z.string(),
+ arguments: import_v43.z.string()
  })
  })
  ).nullish()
  }),
- finish_reason: z3.string().nullish()
+ finish_reason: import_v43.z.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
  });
- var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- delta: z3.object({
- role: z3.enum(["assistant"]).nullish(),
- content: z3.string().nullish(),
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union([
+ import_v43.z.object({
+ id: import_v43.z.string().nullish(),
+ created: import_v43.z.number().nullish(),
+ model: import_v43.z.string().nullish(),
+ choices: import_v43.z.array(
+ import_v43.z.object({
+ delta: import_v43.z.object({
+ role: import_v43.z.enum(["assistant"]).nullish(),
+ content: import_v43.z.string().nullish(),
  // Most openai-compatible models set `reasoning_content`, but some
  // providers serving `gpt-oss` set `reasoning`. See #7866
- reasoning_content: z3.string().nullish(),
- reasoning: z3.string().nullish(),
- tool_calls: z3.array(
- z3.object({
- index: z3.number(),
- id: z3.string().nullish(),
- function: z3.object({
- name: z3.string().nullish(),
- arguments: z3.string().nullish()
+ reasoning_content: import_v43.z.string().nullish(),
+ reasoning: import_v43.z.string().nullish(),
+ tool_calls: import_v43.z.array(
+ import_v43.z.object({
+ index: import_v43.z.number(),
+ id: import_v43.z.string().nullish(),
+ function: import_v43.z.object({
+ name: import_v43.z.string().nullish(),
+ arguments: import_v43.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- finish_reason: z3.string().nullish()
+ finish_reason: import_v43.z.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
@@ -816,7 +806,7 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([

  // src/completion/openai-compatible-completion-language-model.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var z5 = __toESM(require("zod/v4"));
+ var import_v45 = require("zod/v4");

  // src/completion/convert-to-openai-compatible-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -924,28 +914,28 @@ function mapOpenAICompatibleFinishReason2(finishReason) {
  }

  // src/completion/openai-compatible-completion-options.ts
- var z4 = __toESM(require("zod/v4"));
- var openaiCompatibleCompletionProviderOptions = z4.object({
+ var import_v44 = require("zod/v4");
+ var openaiCompatibleCompletionProviderOptions = import_v44.z.object({
  /**
  * Echo back the prompt in addition to the completion.
  */
- echo: z4.boolean().optional(),
+ echo: import_v44.z.boolean().optional(),
  /**
  * Modify the likelihood of specified tokens appearing in the completion.
  *
  * Accepts a JSON object that maps tokens (specified by their token ID in
  * the GPT tokenizer) to an associated bias value from -100 to 100.
  */
- logitBias: z4.record(z4.string(), z4.number()).optional(),
+ logitBias: import_v44.z.record(import_v44.z.string(), import_v44.z.number()).optional(),
  /**
  * The suffix that comes after a completion of inserted text.
  */
- suffix: z4.string().optional(),
+ suffix: import_v44.z.string().optional(),
  /**
  * A unique identifier representing your end-user, which can help providers to
  * monitor and detect abuse.
  */
- user: z4.string().optional()
+ user: import_v44.z.string().optional()
  });

  // src/completion/openai-compatible-completion-language-model.ts
@@ -1178,33 +1168,33 @@ var OpenAICompatibleCompletionLanguageModel = class {
  };
  }
  };
- var usageSchema = z5.object({
- prompt_tokens: z5.number(),
- completion_tokens: z5.number(),
- total_tokens: z5.number()
+ var usageSchema = import_v45.z.object({
+ prompt_tokens: import_v45.z.number(),
+ completion_tokens: import_v45.z.number(),
+ total_tokens: import_v45.z.number()
  });
- var openaiCompatibleCompletionResponseSchema = z5.object({
- id: z5.string().nullish(),
- created: z5.number().nullish(),
- model: z5.string().nullish(),
- choices: z5.array(
- z5.object({
- text: z5.string(),
- finish_reason: z5.string()
+ var openaiCompatibleCompletionResponseSchema = import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ text: import_v45.z.string(),
+ finish_reason: import_v45.z.string()
  })
  ),
  usage: usageSchema.nullish()
  });
- var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
- z5.object({
- id: z5.string().nullish(),
- created: z5.number().nullish(),
- model: z5.string().nullish(),
- choices: z5.array(
- z5.object({
- text: z5.string(),
- finish_reason: z5.string().nullish(),
- index: z5.number()
+ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => import_v45.z.union([
+ import_v45.z.object({
+ id: import_v45.z.string().nullish(),
+ created: import_v45.z.number().nullish(),
+ model: import_v45.z.string().nullish(),
+ choices: import_v45.z.array(
+ import_v45.z.object({
+ text: import_v45.z.string(),
+ finish_reason: import_v45.z.string().nullish(),
+ index: import_v45.z.number()
  })
  ),
  usage: usageSchema.nullish()
@@ -1215,21 +1205,21 @@ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
  // src/embedding/openai-compatible-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var z7 = __toESM(require("zod/v4"));
+ var import_v47 = require("zod/v4");

  // src/embedding/openai-compatible-embedding-options.ts
- var z6 = __toESM(require("zod/v4"));
- var openaiCompatibleEmbeddingProviderOptions = z6.object({
+ var import_v46 = require("zod/v4");
+ var openaiCompatibleEmbeddingProviderOptions = import_v46.z.object({
  /**
  * The number of dimensions the resulting output embeddings should have.
  * Only supported in text-embedding-3 and later models.
  */
- dimensions: z6.number().optional(),
+ dimensions: import_v46.z.number().optional(),
  /**
  * A unique identifier representing your end-user, which can help providers to
  * monitor and detect abuse.
  */
- user: z6.string().optional()
+ user: import_v46.z.string().optional()
  });

  // src/embedding/openai-compatible-embedding-model.ts
@@ -1314,15 +1304,15 @@ var OpenAICompatibleEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z7.object({
- data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
- usage: z7.object({ prompt_tokens: z7.number() }).nullish(),
- providerMetadata: z7.record(z7.string(), z7.record(z7.string(), z7.any())).optional()
+ var openaiTextEmbeddingResponseSchema = import_v47.z.object({
+ data: import_v47.z.array(import_v47.z.object({ embedding: import_v47.z.array(import_v47.z.number()) })),
+ usage: import_v47.z.object({ prompt_tokens: import_v47.z.number() }).nullish(),
+ providerMetadata: import_v47.z.record(import_v47.z.string(), import_v47.z.record(import_v47.z.string(), import_v47.z.any())).optional()
  });

  // src/image/openai-compatible-image-model.ts
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var z8 = __toESM(require("zod/v4"));
+ var import_v48 = require("zod/v4");
  var OpenAICompatibleImageModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
@@ -1390,15 +1380,15 @@ var OpenAICompatibleImageModel = class {
  };
  }
  };
- var openaiCompatibleImageResponseSchema = z8.object({
- data: z8.array(z8.object({ b64_json: z8.string() }))
+ var openaiCompatibleImageResponseSchema = import_v48.z.object({
+ data: import_v48.z.array(import_v48.z.object({ b64_json: import_v48.z.string() }))
  });

  // src/openai-compatible-provider.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");

  // src/version.ts
- var VERSION = true ? "2.0.0-beta.13" : "0.0.0-test";
+ var VERSION = true ? "2.0.0-beta.15" : "0.0.0-test";

  // src/openai-compatible-provider.ts
  function createOpenAICompatible(options) {
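At runtime the compiled bundle behaves the same: every `z3`/`z4`/… schema becomes the equivalent `import_v4N.z` schema, and the only structural difference is that the `__toESM` interop helper (and with it `__create` and `__getProtoOf`) is no longer emitted. A small sketch of why the CommonJS output changes shape, assuming the source files moved from a namespace import to a named import of zod/v4 (the sources themselves are not part of this diff, so this is an inference, not the package's code):

```ts
// Namespace-import style: an esbuild-compiled CJS bundle wraps require()
// in an interop helper (__toESM) so the binding behaves like an ES module
// namespace object.
import * as zodNamespace from 'zod/v4';

// Named-import style: compiles to roughly `var import_v4 = require("zod/v4")`
// followed by `import_v4.z...` property access, with no helper needed.
import { z } from 'zod/v4';

// Both styles expose the same builders, so the resulting schemas are identical.
const viaNamespace = zodNamespace.z.object({ message: zodNamespace.z.string() });
const viaNamed = z.object({ message: z.string() });

console.log(viaNamespace.parse({ message: 'ok' }));
console.log(viaNamed.parse({ message: 'ok' }));
```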