@ai-sdk/openai-compatible 1.0.0-canary.5 → 1.0.0-canary.7

This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -9,9 +9,10 @@ import {
  createJsonResponseHandler,
  generateId,
  isParsableJson,
+ parseProviderOptions,
  postJsonToApi
  } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
+ import { z as z3 } from "zod";

  // src/convert-to-openai-compatible-chat-messages.ts
  import {
@@ -152,17 +153,27 @@ function mapOpenAICompatibleFinishReason(finishReason) {
  }
  }

- // src/openai-compatible-error.ts
+ // src/openai-compatible-chat-options.ts
  import { z } from "zod";
- var openaiCompatibleErrorDataSchema = z.object({
- error: z.object({
- message: z.string(),
+ var openaiCompatibleProviderOptions = z.object({
+ /**
+ * A unique identifier representing your end-user, which can help the provider to
+ * monitor and detect abuse.
+ */
+ user: z.string().optional()
+ });
+
+ // src/openai-compatible-error.ts
+ import { z as z2 } from "zod";
+ var openaiCompatibleErrorDataSchema = z2.object({
+ error: z2.object({
+ message: z2.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: z.string().nullish(),
- param: z.any().nullish(),
- code: z.union([z.string(), z.number()]).nullish()
+ type: z2.string().nullish(),
+ param: z2.any().nullish(),
+ code: z2.union([z2.string(), z2.number()]).nullish()
  })
  });
  var defaultOpenAICompatibleErrorStructure = {
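
Editor's note: the user setting moves from the chat model constructor into a per-call provider options schema. A minimal sketch of supplying it under the new scheme, assuming an AI SDK v5 canary generateText that forwards providerOptions (the provider name "example", base URL, and model id are placeholders):

    import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
    import { generateText } from "ai";

    const provider = createOpenAICompatible({
      name: "example",
      baseURL: "https://api.example.com/v1",
    });

    const { text } = await generateText({
      model: provider.chatModel("example-chat-model"),
      prompt: "Hello!",
      // validated against openaiCompatibleProviderOptions, sent as `user`
      providerOptions: { "openai-compatible": { user: "user-123" } },
    });
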
@@ -228,11 +239,10 @@ function prepareTools({
  // src/openai-compatible-chat-language-model.ts
  var OpenAICompatibleChatLanguageModel = class {
  // type inferred via constructor
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
  var _a, _b;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  const errorStructure = (_a = config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure;
  this.chunkSchema = createOpenAICompatibleChatChunkSchema(
@@ -252,7 +262,7 @@ var OpenAICompatibleChatLanguageModel = class {
  }
  getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -265,8 +275,20 @@ var OpenAICompatibleChatLanguageModel = class {
  toolChoice,
  tools
  }) {
- var _a;
+ var _a, _b, _c;
  const warnings = [];
+ const compatibleOptions = Object.assign(
+ (_a = parseProviderOptions({
+ provider: "openai-compatible",
+ providerOptions,
+ schema: openaiCompatibleProviderOptions
+ })) != null ? _a : {},
+ (_b = parseProviderOptions({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompatibleProviderOptions
+ })) != null ? _b : {}
+ );
  if (topK != null) {
  warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
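
Editor's note: note the Object.assign order above. Options are parsed under both the generic "openai-compatible" key and the provider-specific key (providerOptionsName, derived from the name passed to createOpenAICompatible), and the provider-specific object is applied last, so it wins on conflict. A hypothetical call showing that precedence, reusing the provider from the earlier sketch:

    const { text } = await generateText({
      model: provider.chatModel("example-chat-model"),
      prompt: "Hi",
      providerOptions: {
        "openai-compatible": { user: "generic-id" },
        // "example" is the provider name above; this key overrides the generic one
        example: { user: "example-id" },
      },
    });
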
@@ -290,9 +312,9 @@ var OpenAICompatibleChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- user: this.settings.user,
+ user: compatibleOptions.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -301,7 +323,7 @@ var OpenAICompatibleChatLanguageModel = class {
  type: "json_schema",
  json_schema: {
  schema: responseFormat.schema,
- name: (_a = responseFormat.name) != null ? _a : "response",
+ name: (_c = responseFormat.name) != null ? _c : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
@@ -318,7 +340,7 @@ var OpenAICompatibleChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const { args, warnings } = this.getArgs({ ...options });
  const body = JSON.stringify(args);
  const {
@@ -362,11 +384,18 @@ var OpenAICompatibleChatLanguageModel = class {
  providerMetadata[this.providerOptionsName].cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: (_e = choice.message.content) != null ? _e : void 0,
- reasoning: (_f = choice.message.reasoning_content) != null ? _f : void 0,
- toolCalls: (_g = choice.message.tool_calls) == null ? void 0 : _g.map((toolCall) => {
+ text: choice.message.content != null ? { type: "text", text: choice.message.content } : void 0,
+ reasoning: choice.message.reasoning_content ? [
+ {
+ type: "reasoning",
+ reasoningType: "text",
+ text: choice.message.reasoning_content
+ }
+ ] : void 0,
+ toolCalls: (_e = choice.message.tool_calls) == null ? void 0 : _e.map((toolCall) => {
  var _a2;
  return {
+ type: "tool-call",
  toolCallType: "function",
  toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
  toolName: toolCall.function.name,
@@ -375,8 +404,8 @@ var OpenAICompatibleChatLanguageModel = class {
  }),
  finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.prompt_tokens) != null ? _i : NaN,
- completionTokens: (_k = (_j = responseBody.usage) == null ? void 0 : _j.completion_tokens) != null ? _k : NaN
+ inputTokens: (_g = (_f = responseBody.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
  },
  providerMetadata,
  request: { body },
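
Editor's note: usage fields are renamed from promptTokens/completionTokens to inputTokens/outputTokens, and missing values now surface as undefined rather than NaN. A sketch of consuming them defensively under that assumption:

    import { generateText } from "ai";

    // provider: see the createOpenAICompatible sketch above
    const { usage } = await generateText({
      model: provider.chatModel("example-chat-model"),
      prompt: "Hi",
    });

    // inputTokens/outputTokens are undefined when the upstream response
    // omits its usage block, so guard before doing arithmetic:
    const totalTokens = (usage.inputTokens ?? 0) + (usage.outputTokens ?? 0);
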
@@ -390,59 +419,6 @@ var OpenAICompatibleChatLanguageModel = class {
  }
  async doStream(options) {
  var _a;
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.reasoning) {
- if (Array.isArray(result.reasoning)) {
- for (const part of result.reasoning) {
- if (part.type === "text") {
- controller.enqueue({
- type: "reasoning",
- textDelta: part.text
- });
- }
- }
- } else {
- controller.enqueue({
- type: "reasoning",
- textDelta: result.reasoning
- });
- }
- }
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- request: result.request,
- response: result.response,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs({ ...options });
  const body = JSON.stringify({ ...args, stream: true });
  const metadataExtractor = (_a = this.config.metadataExtractor) == null ? void 0 : _a.createStreamExtractor();
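
Editor's note: the simulateStreaming setting is removed outright rather than migrated to provider options. If equivalent behavior is still needed, a sketch using simulateStreamingMiddleware from the ai package (assuming the canary release in use exports it):

    import { simulateStreamingMiddleware, wrapLanguageModel } from "ai";

    // runs a non-streaming doGenerate and replays the result as a stream
    const streamingModel = wrapLanguageModel({
      model: provider.chatModel("example-chat-model"),
      middleware: simulateStreamingMiddleware(),
    });
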
@@ -463,7 +439,6 @@ var OpenAICompatibleChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
  let usage = {
@@ -540,13 +515,14 @@ var OpenAICompatibleChatLanguageModel = class {
  if (delta.reasoning_content != null) {
  controller.enqueue({
  type: "reasoning",
- textDelta: delta.reasoning_content
+ reasoningType: "text",
+ text: delta.reasoning_content
  });
  }
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
  if (delta.tool_calls != null) {
@@ -653,8 +629,8 @@ var OpenAICompatibleChatLanguageModel = class {
  type: "finish",
  finishReason,
  usage: {
- promptTokens: (_a2 = usage.promptTokens) != null ? _a2 : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
+ inputTokens: (_a2 = usage.promptTokens) != null ? _a2 : void 0,
+ outputTokens: (_b = usage.completionTokens) != null ? _b : void 0
  },
  providerMetadata
  });
@@ -667,68 +643,68 @@ var OpenAICompatibleChatLanguageModel = class {
  };
  }
  };
- var openaiCompatibleTokenUsageSchema = z2.object({
- prompt_tokens: z2.number().nullish(),
- completion_tokens: z2.number().nullish(),
- prompt_tokens_details: z2.object({
- cached_tokens: z2.number().nullish()
+ var openaiCompatibleTokenUsageSchema = z3.object({
+ prompt_tokens: z3.number().nullish(),
+ completion_tokens: z3.number().nullish(),
+ prompt_tokens_details: z3.object({
+ cached_tokens: z3.number().nullish()
  }).nullish(),
- completion_tokens_details: z2.object({
- reasoning_tokens: z2.number().nullish(),
- accepted_prediction_tokens: z2.number().nullish(),
- rejected_prediction_tokens: z2.number().nullish()
+ completion_tokens_details: z3.object({
+ reasoning_tokens: z3.number().nullish(),
+ accepted_prediction_tokens: z3.number().nullish(),
+ rejected_prediction_tokens: z3.number().nullish()
  }).nullish()
  }).nullish();
- var OpenAICompatibleChatResponseSchema = z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- message: z2.object({
- role: z2.literal("assistant").nullish(),
- content: z2.string().nullish(),
- reasoning_content: z2.string().nullish(),
- tool_calls: z2.array(
- z2.object({
- id: z2.string().nullish(),
- type: z2.literal("function"),
- function: z2.object({
- name: z2.string(),
- arguments: z2.string()
+ var OpenAICompatibleChatResponseSchema = z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ message: z3.object({
+ role: z3.literal("assistant").nullish(),
+ content: z3.string().nullish(),
+ reasoning_content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ id: z3.string().nullish(),
+ type: z3.literal("function"),
+ function: z3.object({
+ name: z3.string(),
+ arguments: z3.string()
  })
  })
  ).nullish()
  }),
- finish_reason: z2.string().nullish()
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
  });
- var createOpenAICompatibleChatChunkSchema = (errorSchema) => z2.union([
- z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- delta: z2.object({
- role: z2.enum(["assistant"]).nullish(),
- content: z2.string().nullish(),
- reasoning_content: z2.string().nullish(),
- tool_calls: z2.array(
- z2.object({
- index: z2.number(),
- id: z2.string().nullish(),
- type: z2.literal("function").optional(),
- function: z2.object({
- name: z2.string().nullish(),
- arguments: z2.string().nullish()
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
+ z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ delta: z3.object({
+ role: z3.enum(["assistant"]).nullish(),
+ content: z3.string().nullish(),
+ reasoning_content: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ index: z3.number(),
+ id: z3.string().nullish(),
+ type: z3.literal("function").optional(),
+ function: z3.object({
+ name: z3.string().nullish(),
+ arguments: z3.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- finish_reason: z2.string().nullish()
+ finish_reason: z3.string().nullish()
  })
  ),
  usage: openaiCompatibleTokenUsageSchema
@@ -742,9 +718,10 @@ import {
  createEventSourceResponseHandler as createEventSourceResponseHandler2,
  createJsonErrorResponseHandler as createJsonErrorResponseHandler2,
  createJsonResponseHandler as createJsonResponseHandler2,
+ parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z5 } from "zod";

  // src/convert-to-openai-compatible-completion-prompt.ts
  import {
@@ -828,15 +805,39 @@ ${user}:`]
  };
  }

+ // src/openai-compatible-completion-options.ts
+ import { z as z4 } from "zod";
+ var openaiCompatibleCompletionProviderOptions = z4.object({
+ /**
+ * Echo back the prompt in addition to the completion.
+ */
+ echo: z4.boolean().optional(),
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: z4.record(z4.number(), z4.number()).optional(),
+ /**
+ * The suffix that comes after a completion of inserted text.
+ */
+ suffix: z4.string().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help providers to
+ * monitor and detect abuse.
+ */
+ user: z4.string().optional()
+ });
+
  // src/openai-compatible-completion-language-model.ts
  var OpenAICompatibleCompletionLanguageModel = class {
  // type inferred via constructor
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
  this.defaultObjectGenerationMode = void 0;
  var _a;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  const errorStructure = (_a = config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure;
  this.chunkSchema = createOpenAICompatibleCompletionChunkSchema(
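
Editor's note: the completion model's echo, logitBias, suffix, and user settings make the same move from constructor settings to per-call provider options. Unlike the chat model, they are parsed only under the provider-specific key. A hypothetical call reusing the provider from the earlier sketch (the token id 50256 is purely illustrative):

    const { text } = await generateText({
      model: provider.completionModel("example-completion-model"),
      prompt: "Once upon a time",
      providerOptions: {
        example: {
          echo: true,
          logitBias: { 50256: -100 }, // suppress a specific token id
          suffix: ".",
          user: "user-123",
        },
      },
    });
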
@@ -853,7 +854,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
  getArgs({
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -866,7 +867,13 @@ var OpenAICompatibleCompletionLanguageModel = class {
  tools,
  toolChoice
  }) {
+ var _a;
  const warnings = [];
+ const completionOptions = (_a = parseProviderOptions2({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompatibleCompletionProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
@@ -890,12 +897,12 @@ var OpenAICompatibleCompletionLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- suffix: this.settings.suffix,
- user: this.settings.user,
+ echo: completionOptions.echo,
+ logit_bias: completionOptions.logitBias,
+ suffix: completionOptions.suffix,
+ user: completionOptions.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -933,10 +940,10 @@ var OpenAICompatibleCompletionLanguageModel = class {
  });
  const choice = response.choices[0];
  return {
- text: choice.text,
+ text: { type: "text", text: choice.text },
  usage: {
- promptTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : NaN,
- completionTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : NaN
+ inputTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : void 0,
+ outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0
  },
  finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
  request: { body: args },
@@ -969,15 +976,16 @@ var OpenAICompatibleCompletionLanguageModel = class {
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
  new TransformStream({
  transform(chunk, controller) {
+ var _a, _b;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -997,10 +1005,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1010,8 +1016,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
  }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
  },
@@ -1030,36 +1036,36 @@ var OpenAICompatibleCompletionLanguageModel = class {
  };
  }
  };
- var openaiCompatibleCompletionResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string()
+ var openaiCompatibleCompletionResponseSchema = z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number()
  }).nullish()
  });
- var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- text: z3.string(),
- finish_reason: z3.string().nullish(),
- index: z3.number()
+ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
+ z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string().nullish(),
+ index: z5.number()
  })
  ),
- usage: z3.object({
- prompt_tokens: z3.number(),
- completion_tokens: z3.number()
+ usage: z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number()
  }).nullish()
  }),
  errorSchema
@@ -1073,14 +1079,31 @@ import {
  combineHeaders as combineHeaders3,
  createJsonErrorResponseHandler as createJsonErrorResponseHandler3,
  createJsonResponseHandler as createJsonResponseHandler3,
+ parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z7 } from "zod";
+
+ // src/openai-compatible-embedding-options.ts
+ import { z as z6 } from "zod";
+ var openaiCompatibleEmbeddingProviderOptions = z6.object({
+ /**
+ * The number of dimensions the resulting output embeddings should have.
+ * Only supported in text-embedding-3 and later models.
+ */
+ dimensions: z6.number().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help providers to
+ * monitor and detect abuse.
+ */
+ user: z6.string().optional()
+ });
+
+ // src/openai-compatible-embedding-model.ts
  var OpenAICompatibleEmbeddingModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
@@ -1094,12 +1117,28 @@ var OpenAICompatibleEmbeddingModel = class {
  var _a;
  return (_a = this.config.supportsParallelCalls) != null ? _a : true;
  }
+ get providerOptionsName() {
+ return this.config.provider.split(".")[0].trim();
+ }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
- var _a;
+ var _a, _b, _c;
+ const compatibleOptions = Object.assign(
+ (_a = parseProviderOptions3({
+ provider: "openai-compatible",
+ providerOptions,
+ schema: openaiCompatibleEmbeddingProviderOptions
+ })) != null ? _a : {},
+ (_b = parseProviderOptions3({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompatibleEmbeddingProviderOptions
+ })) != null ? _b : {}
+ );
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new TooManyEmbeddingValuesForCallError({
  provider: this.provider,
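
Editor's note: the embedding settings (dimensions, user) follow the same pattern, and doEmbed now receives providerOptions directly. A sketch using the v5 embed helper, assuming it forwards providerOptions and that the provider exposes textEmbeddingModel (both consistent with this diff but not shown in it):

    import { embed } from "ai";

    // provider: see the createOpenAICompatible sketch above
    const { embedding } = await embed({
      model: provider.textEmbeddingModel("example-embedding-model"),
      value: "sunny day at the beach",
      providerOptions: { "openai-compatible": { dimensions: 512 } },
    });
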
@@ -1108,7 +1147,11 @@ var OpenAICompatibleEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await postJsonToApi3({
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await postJsonToApi3({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
@@ -1118,11 +1161,11 @@ var OpenAICompatibleEmbeddingModel = class {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: compatibleOptions.dimensions,
+ user: compatibleOptions.user
  },
  failedResponseHandler: createJsonErrorResponseHandler3(
- (_a = this.config.errorStructure) != null ? _a : defaultOpenAICompatibleErrorStructure
+ (_c = this.config.errorStructure) != null ? _c : defaultOpenAICompatibleErrorStructure
  ),
  successfulResponseHandler: createJsonResponseHandler3(
  openaiTextEmbeddingResponseSchema
@@ -1133,13 +1176,13 @@ var OpenAICompatibleEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z4.object({
- data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
- usage: z4.object({ prompt_tokens: z4.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z7.object({
+ data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+ usage: z7.object({ prompt_tokens: z7.number() }).nullish()
  });

  // src/openai-compatible-image-model.ts
@@ -1149,7 +1192,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z8 } from "zod";
  var OpenAICompatibleImageModel = class {
  constructor(modelId, settings, config) {
  this.modelId = modelId;
@@ -1222,8 +1265,8 @@ var OpenAICompatibleImageModel = class {
  };
  }
  };
- var openaiCompatibleImageResponseSchema = z5.object({
- data: z5.array(z5.object({ b64_json: z5.string() }))
+ var openaiCompatibleImageResponseSchema = z8.object({
+ data: z8.array(z8.object({ b64_json: z8.string() }))
  });

  // src/openai-compatible-provider.ts
@@ -1247,27 +1290,24 @@ function createOpenAICompatible(options) {
  headers: getHeaders,
  fetch: options.fetch
  });
- const createLanguageModel = (modelId, settings = {}) => createChatModel(modelId, settings);
- const createChatModel = (modelId, settings = {}) => new OpenAICompatibleChatLanguageModel(modelId, settings, {
+ const createLanguageModel = (modelId) => createChatModel(modelId);
+ const createChatModel = (modelId) => new OpenAICompatibleChatLanguageModel(modelId, {
  ...getCommonModelConfig("chat"),
  defaultObjectGenerationMode: "tool"
  });
- const createCompletionModel = (modelId, settings = {}) => new OpenAICompatibleCompletionLanguageModel(
+ const createCompletionModel = (modelId) => new OpenAICompatibleCompletionLanguageModel(
  modelId,
- settings,
  getCommonModelConfig("completion")
  );
- const createEmbeddingModel = (modelId, settings = {}) => new OpenAICompatibleEmbeddingModel(
- modelId,
- settings,
- getCommonModelConfig("embedding")
- );
+ const createEmbeddingModel = (modelId) => new OpenAICompatibleEmbeddingModel(modelId, {
+ ...getCommonModelConfig("embedding")
+ });
  const createImageModel = (modelId, settings = {}) => new OpenAICompatibleImageModel(
  modelId,
  settings,
  getCommonModelConfig("image")
  );
- const provider = (modelId, settings) => createLanguageModel(modelId, settings);
+ const provider = (modelId) => createLanguageModel(modelId);
  provider.languageModel = createLanguageModel;
  provider.chatModel = createChatModel;
  provider.completionModel = createCompletionModel;
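
Editor's note: net effect on the factory: chat, completion, and embedding model constructors no longer accept a settings object (only the image model still does). A before/after sketch of the call sites, with placeholder model ids:

    // 1.0.0-canary.5: settings object as the second argument
    const oldModel = provider.chatModel("example-chat-model", { user: "user-123" });

    // 1.0.0-canary.7: model id only; per-request values move to providerOptions
    const newModel = provider.chatModel("example-chat-model");
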