modelfusion 0.136.0 → 0.137.0

This diff reflects the changes between publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
package/index.cjs CHANGED
@@ -42,6 +42,7 @@ __export(src_exports, {
  AzureOpenAIApiConfiguration: () => AzureOpenAIApiConfiguration,
  BaseUrlApiConfiguration: () => BaseUrlApiConfiguration,
  BaseUrlApiConfigurationWithDefaults: () => BaseUrlApiConfigurationWithDefaults,
+ CHAT_MODEL_CONTEXT_WINDOW_SIZES: () => CHAT_MODEL_CONTEXT_WINDOW_SIZES,
  COHERE_TEXT_EMBEDDING_MODELS: () => COHERE_TEXT_EMBEDDING_MODELS,
  COHERE_TEXT_GENERATION_MODELS: () => COHERE_TEXT_GENERATION_MODELS,
  ChatMLPrompt: () => ChatMLPromptTemplate_exports,
@@ -81,13 +82,9 @@ __export(src_exports, {
  NeuralChatPrompt: () => NeuralChatPromptTemplate_exports,
  NoSuchToolDefinitionError: () => NoSuchToolDefinitionError,
  OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT: () => OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT,
- OPENAI_CHAT_MODELS: () => OPENAI_CHAT_MODELS,
  OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT: () => OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT,
- OPENAI_IMAGE_MODELS: () => OPENAI_IMAGE_MODELS,
- OPENAI_SPEECH_MODELS: () => OPENAI_SPEECH_MODELS,
  OPENAI_TEXT_EMBEDDING_MODELS: () => OPENAI_TEXT_EMBEDDING_MODELS,
  OPENAI_TEXT_GENERATION_MODELS: () => OPENAI_TEXT_GENERATION_MODELS,
- OPENAI_TRANSCRIPTION_MODELS: () => OPENAI_TRANSCRIPTION_MODELS,
  ObjectFromTextGenerationModel: () => ObjectFromTextGenerationModel,
  ObjectFromTextStreamingModel: () => ObjectFromTextStreamingModel,
  ObjectGeneratorTool: () => ObjectGeneratorTool,
@@ -147,12 +144,6 @@ __export(src_exports, {
  ZodSchema: () => ZodSchema,
  api: () => ApiFacade_exports,
  automatic1111: () => Automatic1111Facade_exports,
- calculateOpenAIChatCostInMillicents: () => calculateOpenAIChatCostInMillicents,
- calculateOpenAICompletionCostInMillicents: () => calculateOpenAICompletionCostInMillicents,
- calculateOpenAIEmbeddingCostInMillicents: () => calculateOpenAIEmbeddingCostInMillicents,
- calculateOpenAIImageGenerationCostInMillicents: () => calculateOpenAIImageGenerationCostInMillicents,
- calculateOpenAISpeechCostInMillicents: () => calculateOpenAISpeechCostInMillicents,
- calculateOpenAITranscriptionCostInMillicents: () => calculateOpenAITranscriptionCostInMillicents,
  classify: () => classify,
  cohere: () => CohereFacade_exports,
  convertDataContentToBase64String: () => convertDataContentToBase64String,
@@ -183,9 +174,6 @@ __export(src_exports, {
  getOpenAICompletionModelInformation: () => getOpenAICompletionModelInformation,
  getRun: () => getRun,
  huggingface: () => HuggingFaceFacade_exports,
- isOpenAIChatModel: () => isOpenAIChatModel,
- isOpenAICompletionModel: () => isOpenAICompletionModel,
- isOpenAIEmbeddingModel: () => isOpenAIEmbeddingModel,
  isPromptFunction: () => isPromptFunction,
  jsonObjectPrompt: () => jsonObjectPrompt,
  jsonToolCallPrompt: () => jsonToolCallPrompt,
@@ -197,6 +185,7 @@ __export(src_exports, {
  mistral: () => MistralFacade_exports,
  modelfusion: () => ModelFusionConfiguration_exports,
  ollama: () => OllamaFacade_exports,
+ openAITextEmbeddingResponseSchema: () => openAITextEmbeddingResponseSchema2,
  openai: () => OpenAIFacade_exports,
  openaicompatible: () => OpenAICompatibleFacade_exports,
  parseJSON: () => parseJSON,
@@ -288,6 +277,11 @@ var DefaultRun = class {
  this.functionEventSource.notify(event);
  }
  };
+ getSuccessfulModelCalls() {
+ return this.events.filter(
+ (event) => "model" in event && "result" in event && "status" in event.result && event.result.status === "success"
+ );
+ }
  };

  // src/core/ModelFusionConfiguration.ts
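Note: the hunk above adds a getSuccessfulModelCalls() helper to DefaultRun, which filters the run's recorded events down to model calls whose result.status is "success". A minimal TypeScript sketch of how a consumer might call it; the generateText/openai usage follows the modelfusion docs, and the exact option names (e.g. passing run alongside model and prompt) are an assumption, not part of this diff:

    import { DefaultRun, generateText, openai } from "modelfusion";

    const run = new DefaultRun();

    // events emitted by this call are recorded on the run
    // (assumes `run` is accepted as a function option here):
    await generateText({
      model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
      prompt: "Write a haiku about package diffs.",
      run,
    });

    // new in 0.137.0: only the model-call events whose result.status === "success"
    const successfulCalls = run.getSuccessfulModelCalls();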
@@ -8402,134 +8396,45 @@ async function countOpenAIChatPromptTokens({
  }

  // src/model-provider/openai/OpenAIChatModel.ts
- var OPENAI_CHAT_MODELS = {
- "gpt-4": {
- contextWindowSize: 8192,
- promptTokenCostInMillicents: 3,
- completionTokenCostInMillicents: 6
- },
- "gpt-4-0314": {
- contextWindowSize: 8192,
- promptTokenCostInMillicents: 3,
- completionTokenCostInMillicents: 6
- },
- "gpt-4-0613": {
- contextWindowSize: 8192,
- promptTokenCostInMillicents: 3,
- completionTokenCostInMillicents: 6,
- fineTunedPromptTokenCostInMillicents: null,
- fineTunedCompletionTokenCostInMillicents: null
- },
- "gpt-4-turbo-preview": {
- contextWindowSize: 128e3,
- promptTokenCostInMillicents: 1,
- completionTokenCostInMillicents: 3
- },
- "gpt-4-1106-preview": {
- contextWindowSize: 128e3,
- promptTokenCostInMillicents: 1,
- completionTokenCostInMillicents: 3
- },
- "gpt-4-0125-preview": {
- contextWindowSize: 128e3,
- promptTokenCostInMillicents: 1,
- completionTokenCostInMillicents: 3
- },
- "gpt-4-vision-preview": {
- contextWindowSize: 128e3,
- promptTokenCostInMillicents: 1,
- completionTokenCostInMillicents: 3
- },
- "gpt-4-32k": {
- contextWindowSize: 32768,
- promptTokenCostInMillicents: 6,
- completionTokenCostInMillicents: 12
- },
- "gpt-4-32k-0314": {
- contextWindowSize: 32768,
- promptTokenCostInMillicents: 6,
- completionTokenCostInMillicents: 12
- },
- "gpt-4-32k-0613": {
- contextWindowSize: 32768,
- promptTokenCostInMillicents: 6,
- completionTokenCostInMillicents: 12
- },
- "gpt-3.5-turbo": {
- contextWindowSize: 4096,
- promptTokenCostInMillicents: 0.15,
- completionTokenCostInMillicents: 0.2,
- fineTunedPromptTokenCostInMillicents: 0.3,
- fineTunedCompletionTokenCostInMillicents: 0.6
- },
- "gpt-3.5-turbo-0125": {
- contextWindowSize: 16385,
- promptTokenCostInMillicents: 0.05,
- completionTokenCostInMillicents: 0.15
- },
- "gpt-3.5-turbo-1106": {
- contextWindowSize: 16385,
- promptTokenCostInMillicents: 0.1,
- completionTokenCostInMillicents: 0.2
- },
- "gpt-3.5-turbo-0301": {
- contextWindowSize: 4096,
- promptTokenCostInMillicents: 0.15,
- completionTokenCostInMillicents: 0.2
- },
- "gpt-3.5-turbo-0613": {
- contextWindowSize: 4096,
- promptTokenCostInMillicents: 0.15,
- completionTokenCostInMillicents: 0.2,
- fineTunedPromptTokenCostInMillicents: 1.2,
- fineTunedCompletionTokenCostInMillicents: 1.6
- },
- "gpt-3.5-turbo-16k": {
- contextWindowSize: 16384,
- promptTokenCostInMillicents: 0.3,
- completionTokenCostInMillicents: 0.4
- },
- "gpt-3.5-turbo-16k-0613": {
- contextWindowSize: 16384,
- promptTokenCostInMillicents: 0.3,
- completionTokenCostInMillicents: 0.4
- }
+ var CHAT_MODEL_CONTEXT_WINDOW_SIZES = {
+ "gpt-4": 8192,
+ "gpt-4-0314": 8192,
+ "gpt-4-0613": 8192,
+ "gpt-4-turbo-preview": 128e3,
+ "gpt-4-1106-preview": 128e3,
+ "gpt-4-0125-preview": 128e3,
+ "gpt-4-vision-preview": 128e3,
+ "gpt-4-32k": 32768,
+ "gpt-4-32k-0314": 32768,
+ "gpt-4-32k-0613": 32768,
+ "gpt-3.5-turbo": 4096,
+ "gpt-3.5-turbo-0125": 16385,
+ "gpt-3.5-turbo-1106": 16385,
+ "gpt-3.5-turbo-0301": 4096,
+ "gpt-3.5-turbo-0613": 4096,
+ "gpt-3.5-turbo-16k": 16384,
+ "gpt-3.5-turbo-16k-0613": 16384
  };
  function getOpenAIChatModelInformation(model) {
- if (model in OPENAI_CHAT_MODELS) {
- const baseModelInformation = OPENAI_CHAT_MODELS[model];
+ if (model in CHAT_MODEL_CONTEXT_WINDOW_SIZES) {
+ const contextWindowSize = CHAT_MODEL_CONTEXT_WINDOW_SIZES[model];
  return {
  baseModel: model,
  isFineTuned: false,
- contextWindowSize: baseModelInformation.contextWindowSize,
- promptTokenCostInMillicents: baseModelInformation.promptTokenCostInMillicents,
- completionTokenCostInMillicents: baseModelInformation.completionTokenCostInMillicents
+ contextWindowSize
  };
  }
  const [_, baseModel, ___, ____, _____] = model.split(":");
  if (["gpt-3.5-turbo", "gpt-3.5-turbo-0613", "gpt-4-0613"].includes(baseModel)) {
- const baseModelInformation = OPENAI_CHAT_MODELS[baseModel];
+ const contextWindowSize = CHAT_MODEL_CONTEXT_WINDOW_SIZES[baseModel];
  return {
  baseModel,
  isFineTuned: true,
- contextWindowSize: baseModelInformation.contextWindowSize,
- promptTokenCostInMillicents: baseModelInformation.fineTunedPromptTokenCostInMillicents,
- completionTokenCostInMillicents: baseModelInformation.fineTunedCompletionTokenCostInMillicents
+ contextWindowSize
  };
  }
  throw new Error(`Unknown OpenAI chat base model ${baseModel}.`);
  }
- var isOpenAIChatModel = (model) => model in OPENAI_CHAT_MODELS || model.startsWith("ft:gpt-3.5-turbo-0613:") || model.startsWith("ft:gpt-3.5-turbo:");
- var calculateOpenAIChatCostInMillicents = ({
- model,
- response
- }) => {
- const { promptTokenCostInMillicents, completionTokenCostInMillicents } = getOpenAIChatModelInformation(model);
- if (promptTokenCostInMillicents == null || completionTokenCostInMillicents == null) {
- return null;
- }
- return response.usage.prompt_tokens * promptTokenCostInMillicents + response.usage.completion_tokens * completionTokenCostInMillicents;
- };
  var OpenAIChatModel = class _OpenAIChatModel extends AbstractOpenAIChatModel {
  constructor(settings) {
  super(settings);
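Note: the hunk above replaces the OPENAI_CHAT_MODELS table (context window plus per-token cost metadata) with the flat CHAT_MODEL_CONTEXT_WINDOW_SIZES map, and getOpenAIChatModelInformation now returns only baseModel, isFineTuned, and contextWindowSize; the cost helpers (isOpenAIChatModel, calculateOpenAIChatCostInMillicents) are gone. A short TypeScript sketch of a lookup against the new export, assuming it is imported from the package root as the export hunks indicate:

    import { CHAT_MODEL_CONTEXT_WINDOW_SIZES } from "modelfusion";

    // direct lookup on the new map: 8192 tokens for "gpt-4"
    const gpt4ContextWindow = CHAT_MODEL_CONTEXT_WINDOW_SIZES["gpt-4"];

    // fine-tuned ids such as "ft:gpt-3.5-turbo:acme::abc123" (hypothetical id)
    // are still resolved by getOpenAIChatModelInformation to their base model's
    // context window, but no cost fields are returned anymore.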
@@ -8627,22 +8532,12 @@ var OpenAIChatModel = class _OpenAIChatModel extends AbstractOpenAIChatModel {
  // src/model-provider/openai/OpenAICompletionModel.ts
  var OPENAI_TEXT_GENERATION_MODELS = {
  "gpt-3.5-turbo-instruct": {
- contextWindowSize: 4097,
- promptTokenCostInMillicents: 0.15,
- completionTokenCostInMillicents: 0.2
+ contextWindowSize: 4097
  }
  };
  function getOpenAICompletionModelInformation(model) {
  return OPENAI_TEXT_GENERATION_MODELS[model];
  }
- var isOpenAICompletionModel = (model) => model in OPENAI_TEXT_GENERATION_MODELS;
- var calculateOpenAICompletionCostInMillicents = ({
- model,
- response
- }) => {
- const modelInformation = getOpenAICompletionModelInformation(model);
- return response.usage.prompt_tokens * modelInformation.promptTokenCostInMillicents + response.usage.completion_tokens * modelInformation.completionTokenCostInMillicents;
- };
  var OpenAICompletionModel = class _OpenAICompletionModel extends AbstractOpenAICompletionModel {
  constructor(settings) {
  super(settings);
@@ -8727,60 +8622,6 @@ __export(OpenAIFacade_exports, {

  // src/model-provider/openai/OpenAIImageGenerationModel.ts
  var import_zod27 = require("zod");
- var OPENAI_IMAGE_MODELS = {
- "dall-e-2": {
- getCost(settings) {
- switch (settings.size ?? "1024x1024") {
- case "1024x1024":
- return 2e3;
- case "512x512":
- return 1800;
- case "256x256":
- return 1600;
- default:
- return null;
- }
- }
- },
- "dall-e-3": {
- getCost(settings) {
- switch (settings.quality ?? "standard") {
- case "standard": {
- switch (settings.size ?? "1024x1024") {
- case "1024x1024":
- return 4e3;
- case "1024x1792":
- case "1792x1024":
- return 8e3;
- default:
- return null;
- }
- }
- case "hd": {
- switch (settings.size ?? "1024x1024") {
- case "1024x1024":
- return 8e3;
- case "1024x1792":
- case "1792x1024":
- return 12e3;
- default:
- return null;
- }
- }
- }
- }
- }
- };
- var calculateOpenAIImageGenerationCostInMillicents = ({
- model,
- settings
- }) => {
- const cost = OPENAI_IMAGE_MODELS[model]?.getCost(settings);
- if (cost == null) {
- return null;
- }
- return (settings.numberOfGenerations ?? 1) * cost;
- };
  var OpenAIImageGenerationModel = class _OpenAIImageGenerationModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
@@ -8884,25 +8725,6 @@ var OpenAIImageGenerationResponseFormat = {
  };

  // src/model-provider/openai/OpenAISpeechModel.ts
- var OPENAI_SPEECH_MODELS = {
- "tts-1": {
- costInMillicentsPerCharacter: 1.5
- // = 1500 / 1000,
- },
- "tts-1-hd": {
- costInMillicentsPerCharacter: 3
- // = 3000 / 1000
- }
- };
- var calculateOpenAISpeechCostInMillicents = ({
- model,
- input
- }) => {
- if (!OPENAI_SPEECH_MODELS[model]) {
- return null;
- }
- return input.length * OPENAI_SPEECH_MODELS[model].costInMillicentsPerCharacter;
- };
  var OpenAISpeechModel = class _OpenAISpeechModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
@@ -8961,34 +8783,37 @@ var OpenAISpeechModel = class _OpenAISpeechModel extends AbstractModel {
  };

  // src/model-provider/openai/OpenAITextEmbeddingModel.ts
+ var import_zod28 = __toESM(require("zod"), 1);
  var OPENAI_TEXT_EMBEDDING_MODELS = {
  "text-embedding-3-small": {
  contextWindowSize: 8192,
- dimensions: 1536,
- tokenCostInMillicents: 2e-3
+ dimensions: 1536
  },
  "text-embedding-3-large": {
  contextWindowSize: 8192,
- dimensions: 3072,
- tokenCostInMillicents: 0.013
+ dimensions: 3072
  },
  "text-embedding-ada-002": {
  contextWindowSize: 8192,
- dimensions: 1536,
- tokenCostInMillicents: 0.01
- }
- };
- var isOpenAIEmbeddingModel = (model) => model in OPENAI_TEXT_EMBEDDING_MODELS;
- var calculateOpenAIEmbeddingCostInMillicents = ({
- model,
- responses
- }) => {
- let amountInMilliseconds = 0;
- for (const response of responses) {
- amountInMilliseconds += response.usage.total_tokens * OPENAI_TEXT_EMBEDDING_MODELS[model].tokenCostInMillicents;
+ dimensions: 1536
  }
- return amountInMilliseconds;
  };
+ var openAITextEmbeddingResponseSchema2 = import_zod28.default.object({
+ object: import_zod28.default.literal("list"),
+ data: import_zod28.default.array(
+ import_zod28.default.object({
+ object: import_zod28.default.literal("embedding"),
+ embedding: import_zod28.default.array(import_zod28.default.number()),
+ index: import_zod28.default.number()
+ })
+ ),
+ model: import_zod28.default.string(),
+ usage: import_zod28.default.object({
+ prompt_tokens: import_zod28.default.number(),
+ total_tokens: import_zod28.default.number()
+ }).optional()
+ // for openai-compatible models
+ });
  var OpenAITextEmbeddingModel = class _OpenAITextEmbeddingModel extends AbstractOpenAITextEmbeddingModel {
  constructor(settings) {
  super(settings);
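Note: the hunk above drops tokenCostInMillicents from the embedding model table and adds a zod response schema (exported from the package root as openAITextEmbeddingResponseSchema, per the export hunks) in which usage is optional, so OpenAI-compatible providers that omit it still validate. A small TypeScript sketch of validating a raw embeddings response against it; the response literal below is invented for illustration:

    import { openAITextEmbeddingResponseSchema } from "modelfusion";

    // hypothetical raw body from an OpenAI-compatible /embeddings endpoint
    const parsed = openAITextEmbeddingResponseSchema.parse({
      object: "list",
      data: [{ object: "embedding", embedding: [0.1, 0.2, 0.3], index: 0 }],
      model: "text-embedding-3-small",
      // `usage` may be omitted by OpenAI-compatible servers; the schema allows that
    });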
@@ -9017,7 +8842,7 @@ var OpenAITextEmbeddingModel = class _OpenAITextEmbeddingModel extends AbstractO
  };

  // src/model-provider/openai/OpenAITranscriptionModel.ts
- var import_zod28 = require("zod");
+ var import_zod29 = require("zod");

  // src/util/audio/getAudioFileExtension.ts
  function getAudioFileExtension(mimeType) {
@@ -9047,22 +8872,6 @@ function getAudioFileExtension(mimeType) {
  }

  // src/model-provider/openai/OpenAITranscriptionModel.ts
- var OPENAI_TRANSCRIPTION_MODELS = {
- "whisper-1": {
- costInMillicentsPerSecond: 10
- // = 600 / 60,
- }
- };
- var calculateOpenAITranscriptionCostInMillicents = ({
- model,
- response
- }) => {
- if (model !== "whisper-1") {
- return null;
- }
- const durationInSeconds = response.duration;
- return Math.ceil(durationInSeconds) * OPENAI_TRANSCRIPTION_MODELS[model].costInMillicentsPerSecond;
- };
  var OpenAITranscriptionModel = class _OpenAITranscriptionModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
@@ -9148,29 +8957,29 @@ var OpenAITranscriptionModel = class _OpenAITranscriptionModel extends AbstractM
  );
  }
  };
- var openAITranscriptionJsonSchema = import_zod28.z.object({
- text: import_zod28.z.string()
+ var openAITranscriptionJsonSchema = import_zod29.z.object({
+ text: import_zod29.z.string()
  });
- var openAITranscriptionVerboseJsonSchema = import_zod28.z.object({
- task: import_zod28.z.literal("transcribe"),
- language: import_zod28.z.string(),
- duration: import_zod28.z.number(),
- segments: import_zod28.z.array(
- import_zod28.z.object({
- id: import_zod28.z.number(),
- seek: import_zod28.z.number(),
- start: import_zod28.z.number(),
- end: import_zod28.z.number(),
- text: import_zod28.z.string(),
- tokens: import_zod28.z.array(import_zod28.z.number()),
- temperature: import_zod28.z.number(),
- avg_logprob: import_zod28.z.number(),
- compression_ratio: import_zod28.z.number(),
- no_speech_prob: import_zod28.z.number(),
- transient: import_zod28.z.boolean().optional()
+ var openAITranscriptionVerboseJsonSchema = import_zod29.z.object({
+ task: import_zod29.z.literal("transcribe"),
+ language: import_zod29.z.string(),
+ duration: import_zod29.z.number(),
+ segments: import_zod29.z.array(
+ import_zod29.z.object({
+ id: import_zod29.z.number(),
+ seek: import_zod29.z.number(),
+ start: import_zod29.z.number(),
+ end: import_zod29.z.number(),
+ text: import_zod29.z.string(),
+ tokens: import_zod29.z.array(import_zod29.z.number()),
+ temperature: import_zod29.z.number(),
+ avg_logprob: import_zod29.z.number(),
+ compression_ratio: import_zod29.z.number(),
+ no_speech_prob: import_zod29.z.number(),
+ transient: import_zod29.z.boolean().optional()
  })
  ),
- text: import_zod28.z.string()
+ text: import_zod29.z.string()
  });
  var OpenAITranscriptionResponseFormat = {
  json: {
@@ -9510,9 +9319,9 @@ var StabilityApiConfiguration = class extends BaseUrlApiConfigurationWithDefault
  };

  // src/model-provider/stability/StabilityError.ts
- var import_zod29 = require("zod");
- var stabilityErrorDataSchema = import_zod29.z.object({
- message: import_zod29.z.string()
+ var import_zod30 = require("zod");
+ var stabilityErrorDataSchema = import_zod30.z.object({
+ message: import_zod30.z.string()
  });
  var failedStabilityCallResponseHandler = createJsonErrorResponseHandler({
  errorSchema: zodSchema(stabilityErrorDataSchema),
@@ -9527,7 +9336,7 @@ __export(StabilityFacade_exports, {
  });

  // src/model-provider/stability/StabilityImageGenerationModel.ts
- var import_zod30 = require("zod");
+ var import_zod31 = require("zod");

  // src/model-provider/stability/StabilityImageGenerationPrompt.ts
  function mapBasicPromptToStabilityFormat() {
@@ -9616,12 +9425,12 @@ var StabilityImageGenerationModel = class _StabilityImageGenerationModel extends
  );
  }
  };
- var stabilityImageGenerationResponseSchema = import_zod30.z.object({
- artifacts: import_zod30.z.array(
- import_zod30.z.object({
- base64: import_zod30.z.string(),
- seed: import_zod30.z.number(),
- finishReason: import_zod30.z.enum(["SUCCESS", "ERROR", "CONTENT_FILTERED"])
+ var stabilityImageGenerationResponseSchema = import_zod31.z.object({
+ artifacts: import_zod31.z.array(
+ import_zod31.z.object({
+ base64: import_zod31.z.string(),
+ seed: import_zod31.z.number(),
+ finishReason: import_zod31.z.enum(["SUCCESS", "ERROR", "CONTENT_FILTERED"])
  })
  )
  });
@@ -9657,7 +9466,7 @@ __export(WhisperCppFacade_exports, {
  });

  // src/model-provider/whispercpp/WhisperCppTranscriptionModel.ts
- var import_zod31 = require("zod");
+ var import_zod32 = require("zod");
  var WhisperCppTranscriptionModel = class _WhisperCppTranscriptionModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
@@ -9728,9 +9537,9 @@ var WhisperCppTranscriptionModel = class _WhisperCppTranscriptionModel extends A
  );
  }
  };
- var whisperCppTranscriptionJsonSchema = import_zod31.z.union([
- import_zod31.z.object({ text: import_zod31.z.string() }),
- import_zod31.z.object({ error: import_zod31.z.string() })
+ var whisperCppTranscriptionJsonSchema = import_zod32.z.union([
+ import_zod32.z.object({ text: import_zod32.z.string() }),
+ import_zod32.z.object({ error: import_zod32.z.string() })
  ]);
  var successfulResponseHandler = async ({ response, url, requestBodyValues }) => {
  const responseBody = await response.text();
@@ -10089,14 +9898,14 @@ var ToolExecutionError = class extends Error {
  };

  // src/tool/WebSearchTool.ts
- var import_zod32 = require("zod");
+ var import_zod33 = require("zod");
  var RETURN_TYPE_SCHEMA = zodSchema(
- import_zod32.z.object({
- results: import_zod32.z.array(
- import_zod32.z.object({
- title: import_zod32.z.string(),
- link: import_zod32.z.string().url(),
- snippet: import_zod32.z.string()
+ import_zod33.z.object({
+ results: import_zod33.z.array(
+ import_zod33.z.object({
+ title: import_zod33.z.string(),
+ link: import_zod33.z.string().url(),
+ snippet: import_zod33.z.string()
  })
  )
  })
@@ -10104,8 +9913,8 @@ var RETURN_TYPE_SCHEMA = zodSchema(
  var createParameters = (description) => (
  // same schema, but with description:
  zodSchema(
- import_zod32.z.object({
- query: import_zod32.z.string().describe(description)
+ import_zod33.z.object({
+ query: import_zod33.z.string().describe(description)
  })
  )
  );
@@ -10579,13 +10388,13 @@ var VectorIndexRetriever = class _VectorIndexRetriever {
  };

  // src/vector-index/memory/MemoryVectorIndex.ts
- var import_zod33 = require("zod");
+ var import_zod34 = require("zod");
  var jsonDataSchema = zodSchema(
- import_zod33.z.array(
- import_zod33.z.object({
- id: import_zod33.z.string(),
- vector: import_zod33.z.array(import_zod33.z.number()),
- data: import_zod33.z.unknown()
+ import_zod34.z.array(
+ import_zod34.z.object({
+ id: import_zod34.z.string(),
+ vector: import_zod34.z.array(import_zod34.z.number()),
+ data: import_zod34.z.unknown()
  })
  )
  );
@@ -10684,6 +10493,7 @@ async function upsertIntoVectorIndex({
  AzureOpenAIApiConfiguration,
  BaseUrlApiConfiguration,
  BaseUrlApiConfigurationWithDefaults,
+ CHAT_MODEL_CONTEXT_WINDOW_SIZES,
  COHERE_TEXT_EMBEDDING_MODELS,
  COHERE_TEXT_GENERATION_MODELS,
  ChatMLPrompt,
@@ -10723,13 +10533,9 @@ async function upsertIntoVectorIndex({
  NeuralChatPrompt,
  NoSuchToolDefinitionError,
  OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT,
- OPENAI_CHAT_MODELS,
  OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT,
- OPENAI_IMAGE_MODELS,
- OPENAI_SPEECH_MODELS,
  OPENAI_TEXT_EMBEDDING_MODELS,
  OPENAI_TEXT_GENERATION_MODELS,
- OPENAI_TRANSCRIPTION_MODELS,
  ObjectFromTextGenerationModel,
  ObjectFromTextStreamingModel,
  ObjectGeneratorTool,
@@ -10789,12 +10595,6 @@ async function upsertIntoVectorIndex({
  ZodSchema,
  api,
  automatic1111,
- calculateOpenAIChatCostInMillicents,
- calculateOpenAICompletionCostInMillicents,
- calculateOpenAIEmbeddingCostInMillicents,
- calculateOpenAIImageGenerationCostInMillicents,
- calculateOpenAISpeechCostInMillicents,
- calculateOpenAITranscriptionCostInMillicents,
  classify,
  cohere,
  convertDataContentToBase64String,
@@ -10825,9 +10625,6 @@ async function upsertIntoVectorIndex({
  getOpenAICompletionModelInformation,
  getRun,
  huggingface,
- isOpenAIChatModel,
- isOpenAICompletionModel,
- isOpenAIEmbeddingModel,
  isPromptFunction,
  jsonObjectPrompt,
  jsonToolCallPrompt,
@@ -10839,6 +10636,7 @@ async function upsertIntoVectorIndex({
  mistral,
  modelfusion,
  ollama,
+ openAITextEmbeddingResponseSchema,
  openai,
  openaicompatible,
  parseJSON,