modelfusion 0.134.0 → 0.135.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
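Most of this diff is mechanical: the compiled output no longer defines the `__defNormalProp`/`__publicField` helpers, and instance properties are instead declared as native class fields, which suggests the build target was raised to a runtime that supports them. A minimal sketch of the transform, using the `FunctionEventSource` shape from the hunks below (the helper body is inlined and simplified for self-containment):

// 0.134.0: the bundler emitted a helper and created each field inside the constructor.
var __publicField = (obj, key, value) =>
  Object.defineProperty(obj, key, {
    enumerable: true,
    configurable: true,
    writable: true,
    value,
  });

var FunctionEventSource_v0_134 = class {
  constructor({ observers, errorHandler }) {
    __publicField(this, "observers");
    __publicField(this, "errorHandler");
    this.observers = observers;
    this.errorHandler = errorHandler ?? ((error) => console.error(error));
  }
};

// 0.135.1: native class fields, no helper needed. Fields initialize in
// declaration order before the constructor body runs, which is why the diff
// below carries over comments about property order.
var FunctionEventSource_v0_135 = class {
  observers;
  errorHandler;
  constructor({ observers, errorHandler }) {
    this.observers = observers;
    this.errorHandler = errorHandler ?? ((error) => console.error(error));
  }
};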
package/index.cjs CHANGED
@@ -5,7 +5,6 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
  var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
- var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
  var __export = (target, all) => {
  for (var name in all)
  __defProp(target, name, { get: all[name], enumerable: true });
@@ -27,10 +26,6 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
  mod
  ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
- var __publicField = (obj, key, value) => {
- __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
- return value;
- };

  // src/index.ts
  var src_exports = {};
@@ -95,6 +90,7 @@ __export(src_exports, {
  OPENAI_TRANSCRIPTION_MODELS: () => OPENAI_TRANSCRIPTION_MODELS,
  ObjectFromTextGenerationModel: () => ObjectFromTextGenerationModel,
  ObjectFromTextStreamingModel: () => ObjectFromTextStreamingModel,
+ ObjectGeneratorTool: () => ObjectGeneratorTool,
  ObjectParseError: () => ObjectParseError,
  ObjectStreamFromResponse: () => ObjectStreamFromResponse,
  ObjectStreamResponse: () => ObjectStreamResponse,
@@ -239,12 +235,12 @@ var import_nanoid = require("nanoid");

  // src/core/FunctionEventSource.ts
  var FunctionEventSource = class {
+ observers;
+ errorHandler;
  constructor({
  observers,
  errorHandler
  }) {
- __publicField(this, "observers");
- __publicField(this, "errorHandler");
  this.observers = observers;
  this.errorHandler = errorHandler ?? ((error) => console.error(error));
  }
@@ -261,6 +257,13 @@ var FunctionEventSource = class {

  // src/core/DefaultRun.ts
  var DefaultRun = class {
+ runId;
+ sessionId;
+ userId;
+ abortSignal;
+ errorHandler;
+ events = [];
+ functionEventSource;
  constructor({
  runId = `run-${(0, import_nanoid.nanoid)()}`,
  sessionId,
@@ -269,19 +272,6 @@ var DefaultRun = class {
  observers,
  errorHandler
  } = {}) {
- __publicField(this, "runId");
- __publicField(this, "sessionId");
- __publicField(this, "userId");
- __publicField(this, "abortSignal");
- __publicField(this, "errorHandler");
- __publicField(this, "events", []);
- __publicField(this, "functionEventSource");
- __publicField(this, "functionObserver", {
- onFunctionEvent: (event) => {
- this.events.push(event);
- this.functionEventSource.notify(event);
- }
- });
  this.runId = runId;
  this.sessionId = sessionId;
  this.userId = userId;
@@ -292,6 +282,12 @@ var DefaultRun = class {
  errorHandler: this.errorHandler.bind(this)
  });
  }
+ functionObserver = {
+ onFunctionEvent: (event) => {
+ this.events.push(event);
+ this.functionEventSource.notify(event);
+ }
+ };
  };

  // src/core/ModelFusionConfiguration.ts
@@ -341,6 +337,13 @@ var AbortError = class extends Error {

  // src/core/api/ApiCallError.ts
  var ApiCallError = class extends Error {
+ url;
+ requestBodyValues;
+ statusCode;
+ responseBody;
+ cause;
+ isRetryable;
+ data;
  constructor({
  message,
  url,
@@ -352,13 +355,6 @@ var ApiCallError = class extends Error {
  data
  }) {
  super(message);
- __publicField(this, "url");
- __publicField(this, "requestBodyValues");
- __publicField(this, "statusCode");
- __publicField(this, "responseBody");
- __publicField(this, "cause");
- __publicField(this, "isRetryable");
- __publicField(this, "data");
  this.name = "ApiCallError";
  this.url = url;
  this.requestBodyValues = requestBodyValues;
@@ -416,16 +412,16 @@ function getErrorMessage(error) {

  // src/core/api/RetryError.ts
  var RetryError = class extends Error {
+ // note: property order determines debugging output
+ reason;
+ lastError;
+ errors;
  constructor({
  message,
  reason,
  errors
  }) {
  super(message);
- // note: property order determines debugging output
- __publicField(this, "reason");
- __publicField(this, "lastError");
- __publicField(this, "errors");
  this.name = "RetryError";
  this.reason = reason;
  this.errors = errors;
@@ -493,10 +489,10 @@ async function _retryWithExponentialBackoff(f, {

  // src/core/api/throttleMaxConcurrency.ts
  var MaxConcurrencyThrottler = class {
+ maxConcurrentCalls;
+ activeCallCount;
+ callQueue;
  constructor({ maxConcurrentCalls }) {
- __publicField(this, "maxConcurrentCalls");
- __publicField(this, "activeCallCount");
- __publicField(this, "callQueue");
  this.maxConcurrentCalls = maxConcurrentCalls;
  this.activeCallCount = 0;
  this.callQueue = [];
@@ -540,14 +536,14 @@ var throttleOff = () => (fn) => fn();

  // src/core/api/AbstractApiConfiguration.ts
  var AbstractApiConfiguration = class {
+ retry;
+ throttle;
+ customCallHeaders;
  constructor({
  retry,
  throttle,
  customCallHeaders = () => ({})
  }) {
- __publicField(this, "retry");
- __publicField(this, "throttle");
- __publicField(this, "customCallHeaders");
  this.retry = retry;
  this.throttle = throttle;
  this.customCallHeaders = customCallHeaders;
@@ -567,6 +563,8 @@ var AbstractApiConfiguration = class {

  // src/core/api/BaseUrlApiConfiguration.ts
  var BaseUrlApiConfiguration = class extends AbstractApiConfiguration {
+ baseUrl;
+ fixedHeadersValue;
  constructor({
  baseUrl,
  headers,
@@ -575,8 +573,6 @@ var BaseUrlApiConfiguration = class extends AbstractApiConfiguration {
  customCallHeaders
  }) {
  super({ retry, throttle, customCallHeaders });
- __publicField(this, "baseUrl");
- __publicField(this, "fixedHeadersValue");
  this.baseUrl = typeof baseUrl == "string" ? parseBaseUrl(baseUrl) : baseUrl;
  this.fixedHeadersValue = headers ?? {};
  }
@@ -637,9 +633,7 @@ function resolveBaseUrl(baseUrl = {}, baseUrlDefaults) {

  // src/core/cache/MemoryCache.ts
  var MemoryCache = class {
- constructor() {
- __publicField(this, "cache", /* @__PURE__ */ new Map());
- }
+ cache = /* @__PURE__ */ new Map();
  hashKey(key) {
  return JSON.stringify(key);
  }
@@ -763,7 +757,16 @@ function detectRuntime() {
  var runStorage;
  async function ensureLoaded() {
  if (detectRuntime() === "node" && !runStorage) {
- const { AsyncLocalStorage } = await import("async_hooks");
+ let AsyncLocalStorage;
+ try {
+ AsyncLocalStorage = (await import("async_hooks")).AsyncLocalStorage;
+ } catch (error) {
+ try {
+ AsyncLocalStorage = require("async_hooks").AsyncLocalStorage;
+ } catch (error2) {
+ throw new Error(`Failed to load 'async_hooks' module dynamically.`);
+ }
+ }
  runStorage = new AsyncLocalStorage();
  }
  return Promise.resolve();
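The hunk above replaces the bare `await import("async_hooks")` with a try/catch chain that falls back to `require`, so the run storage still loads when a bundler rewrites or rejects dynamic `import()` in CommonJS output; the same pattern is applied to the `ws` module further down. A sketch of the pattern as a standalone helper (the helper name is illustrative, not part of the package):

// Prefer dynamic import (works in ESM and modern bundlers), fall back to
// require (works in bundled CommonJS), and fail with a clear error otherwise.
async function loadNodeModule(name) {
  try {
    return await import(name);
  } catch {
    try {
      return require(name);
    } catch {
      throw new Error(`Failed to load '${name}' module dynamically.`);
    }
  }
}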
@@ -786,9 +789,7 @@ function startDurationMeasurement() {
  return globalThis.performance != null ? new PerformanceNowDurationMeasurement() : new DateDurationMeasurement();
  }
  var PerformanceNowDurationMeasurement = class {
- constructor() {
- __publicField(this, "startTime", globalThis.performance.now());
- }
+ startTime = globalThis.performance.now();
  get startEpochSeconds() {
  return Math.floor(
  (globalThis.performance.timeOrigin + this.startTime) / 1e3
@@ -802,9 +803,7 @@ var PerformanceNowDurationMeasurement = class {
  }
  };
  var DateDurationMeasurement = class {
- constructor() {
- __publicField(this, "startTime", Date.now());
- }
+ startTime = Date.now();
  get startEpochSeconds() {
  return Math.floor(this.startTime / 1e3);
  }
@@ -924,14 +923,14 @@ async function executeFunction(fn, input, options) {

  // src/core/schema/JSONParseError.ts
  var JSONParseError = class extends Error {
+ // note: property order determines debugging output
+ text;
+ cause;
  constructor({ text: text13, cause }) {
  super(
  `JSON parsing failed: Text: ${text13}.
  Error message: ${getErrorMessage(cause)}`
  );
- // note: property order determines debugging output
- __publicField(this, "text");
- __publicField(this, "cause");
  this.name = "JSONParseError";
  this.cause = cause;
  this.text = text13;
@@ -949,13 +948,13 @@ Error message: ${getErrorMessage(cause)}`

  // src/core/schema/TypeValidationError.ts
  var TypeValidationError = class extends Error {
+ value;
+ cause;
  constructor({ value, cause }) {
  super(
  `Type validation failed: Value: ${JSON.stringify(value)}.
  Error message: ${getErrorMessage(cause)}`
  );
- __publicField(this, "value");
- __publicField(this, "cause");
  this.name = "TypeValidationError";
  this.cause = cause;
  this.value = value;
@@ -978,7 +977,6 @@ function uncheckedSchema(jsonSchema) {
  var UncheckedSchema = class {
  constructor(jsonSchema) {
  this.jsonSchema = jsonSchema;
- __publicField(this, "_type");
  }
  validate(value) {
  return { success: true, value };
@@ -986,6 +984,7 @@ var UncheckedSchema = class {
  getJsonSchema() {
  return this.jsonSchema;
  }
+ _type;
  };

  // src/core/schema/ZodSchema.ts
@@ -994,16 +993,8 @@ function zodSchema(zodSchema2) {
  return new ZodSchema(zodSchema2);
  }
  var ZodSchema = class {
+ zodSchema;
  constructor(zodSchema2) {
- __publicField(this, "zodSchema");
- /**
- * Use only for typing purposes. The value is always `undefined`.
- */
- __publicField(this, "_type");
- /**
- * Use only for typing purposes. The value is always `undefined`.
- */
- __publicField(this, "_partialType");
  this.zodSchema = zodSchema2;
  }
  validate(value) {
@@ -1013,6 +1004,14 @@ var ZodSchema = class {
  getJsonSchema() {
  return (0, import_zod_to_json_schema.zodToJsonSchema)(this.zodSchema);
  }
+ /**
+ * Use only for typing purposes. The value is always `undefined`.
+ */
+ _type;
+ /**
+ * Use only for typing purposes. The value is always `undefined`.
+ */
+ _partialType;
  };

  // src/core/schema/parseJSON.ts
@@ -1311,13 +1310,13 @@ async function embed({

  // src/model-function/classify/EmbeddingSimilarityClassifier.ts
  var EmbeddingSimilarityClassifier = class _EmbeddingSimilarityClassifier {
+ settings;
+ modelInformation = {
+ provider: "modelfusion",
+ modelName: "EmbeddingSimilarityClassifier"
+ };
+ embeddings;
  constructor(settings) {
- __publicField(this, "settings");
- __publicField(this, "modelInformation", {
- provider: "modelfusion",
- modelName: "EmbeddingSimilarityClassifier"
- });
- __publicField(this, "embeddings");
  this.settings = settings;
  }
  async getEmbeddings(options) {
@@ -1414,12 +1413,12 @@ async function classify({

  // src/model-function/generate-image/PromptTemplateImageGenerationModel.ts
  var PromptTemplateImageGenerationModel = class _PromptTemplateImageGenerationModel {
+ model;
+ promptTemplate;
  constructor({
  model,
  promptTemplate
  }) {
- __publicField(this, "model");
- __publicField(this, "promptTemplate");
  this.model = model;
  this.promptTemplate = promptTemplate;
  }
@@ -1533,11 +1532,9 @@ async function generateSpeech({

  // src/util/AsyncQueue.ts
  var AsyncQueue = class {
- constructor() {
- __publicField(this, "values", Array());
- __publicField(this, "pendingResolvers", []);
- __publicField(this, "closed", false);
- }
+ values = Array();
+ pendingResolvers = [];
+ closed = false;
  processPendingResolvers() {
  while (this.pendingResolvers.length > 0) {
  this.pendingResolvers.shift()?.();
@@ -1903,13 +1900,13 @@ async function generateText({

  // src/model-function/generate-object/ObjectParseError.ts
  var ObjectParseError = class extends Error {
+ cause;
+ valueText;
  constructor({ valueText, cause }) {
  super(
  `Object parsing failed. Value: ${valueText}.
  Error message: ${getErrorMessage(cause)}`
  );
- __publicField(this, "cause");
- __publicField(this, "valueText");
  this.name = "ObjectParseError";
  this.cause = cause;
  this.valueText = valueText;
@@ -1927,12 +1924,12 @@ Error message: ${getErrorMessage(cause)}`

  // src/model-function/generate-object/ObjectFromTextGenerationModel.ts
  var ObjectFromTextGenerationModel = class _ObjectFromTextGenerationModel {
+ model;
+ template;
  constructor({
  model,
  template
  }) {
- __publicField(this, "model");
- __publicField(this, "template");
  this.model = model;
  this.template = template;
  }
@@ -2453,6 +2450,9 @@ function ObjectStreamToTextStream(stream) {

  // src/model-function/generate-object/ObjectValidationError.ts
  var ObjectValidationError = class extends Error {
+ cause;
+ valueText;
+ value;
  constructor({
  value,
  valueText,
@@ -2462,9 +2462,6 @@ var ObjectValidationError = class extends Error {
  `Object validation failed. Value: ${valueText}.
  Error message: ${getErrorMessage(cause)}`
  );
- __publicField(this, "cause");
- __publicField(this, "valueText");
- __publicField(this, "value");
  this.name = "ObjectValidationError";
  this.cause = cause;
  this.value = value;
@@ -2531,7 +2528,7 @@ async function generateObject({

  // src/model-function/generate-object/jsonObjectPrompt.ts
  var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
- var DEFAULT_SCHEMA_SUFFIX = "\nYou MUST answer with a JSON object matches the above schema.";
+ var DEFAULT_SCHEMA_SUFFIX = "\nYou MUST answer with a JSON object that matches the JSON schema above.";
  var jsonObjectPrompt = {
  custom(createPrompt) {
  return { createPrompt, extractObject };
@@ -2578,6 +2575,7 @@ function createSystemPrompt({
  }) {
  return [
  originalSystemPrompt,
+ originalSystemPrompt != null ? "" : null,
  schemaPrefix,
  JSON.stringify(schema.getJsonSchema()),
  schemaSuffix
@@ -2697,6 +2695,9 @@ async function streamObject({

  // src/tool/generate-tool-call/ToolCallParseError.ts
  var ToolCallParseError = class extends Error {
+ toolName;
+ valueText;
+ cause;
  constructor({
  toolName,
  valueText,
@@ -2706,9 +2707,6 @@ var ToolCallParseError = class extends Error {
  `Tool call parsing failed for '${toolName}'. Value: ${valueText}.
  Error message: ${getErrorMessage(cause)}`
  );
- __publicField(this, "toolName");
- __publicField(this, "valueText");
- __publicField(this, "cause");
  this.name = "ToolCallParseError";
  this.toolName = toolName;
  this.cause = cause;
@@ -2728,14 +2726,14 @@ Error message: ${getErrorMessage(cause)}`

  // src/tool/generate-tool-call/TextGenerationToolCallModel.ts
  var TextGenerationToolCallModel = class _TextGenerationToolCallModel {
+ model;
+ template;
  constructor({
  model,
- format
+ template
  }) {
- __publicField(this, "model");
- __publicField(this, "format");
  this.model = model;
- this.format = format;
+ this.template = template;
  }
  get modelInformation() {
  return this.model.modelInformation;
@@ -2746,17 +2744,26 @@ var TextGenerationToolCallModel = class _TextGenerationToolCallModel {
  get settingsForEvent() {
  return this.model.settingsForEvent;
  }
+ getModelWithJsonOutput(schema) {
+ if (this.template.withJsonOutput != null) {
+ return this.template.withJsonOutput({
+ model: this.model,
+ schema
+ });
+ }
+ return this.model;
+ }
  async doGenerateToolCall(tool, prompt, options) {
  const { rawResponse, text: text13, metadata } = await generateText({
- model: this.model,
- prompt: this.format.createPrompt(prompt, tool),
+ model: this.getModelWithJsonOutput(tool.parameters),
+ prompt: this.template.createPrompt(prompt, tool),
  fullResponse: true,
  ...options
  });
  try {
  return {
  rawResponse,
- toolCall: this.format.extractToolCall(text13, tool),
+ toolCall: this.template.extractToolCall(text13, tool),
  usage: metadata?.usage
  };
  } catch (error) {
@@ -2770,20 +2777,20 @@ var TextGenerationToolCallModel = class _TextGenerationToolCallModel {
  withSettings(additionalSettings) {
  return new _TextGenerationToolCallModel({
  model: this.model.withSettings(additionalSettings),
- format: this.format
+ template: this.template
  });
  }
  };

  // src/tool/generate-tool-calls/ToolCallsParseError.ts
  var ToolCallsParseError = class extends Error {
+ valueText;
+ cause;
  constructor({ valueText, cause }) {
  super(
  `Tool calls parsing failed. Value: ${valueText}.
  Error message: ${getErrorMessage(cause)}`
  );
- __publicField(this, "valueText");
- __publicField(this, "cause");
  this.name = "ToolCallsParseError";
  this.cause = cause;
  this.valueText = valueText;
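The `TextGenerationToolCallModel` hunks above make two related changes: the constructor option `format` is renamed to `template`, and `doGenerateToolCall` now asks the template for a JSON-constrained model via the optional `withJsonOutput` hook before generating. Callers that construct the wrapper directly only need the one-word rename; a sketch (the ollama model wiring is an assumption, not part of this diff):

import { TextGenerationToolCallModel, jsonToolCallPrompt, ollama } from "modelfusion";

// 0.134.0 spelled the option `format`:
//   new TextGenerationToolCallModel({ model, format: jsonToolCallPrompt.text() })
// 0.135.1 spells it `template`:
const toolCallModel = new TextGenerationToolCallModel({
  model: ollama
    .ChatTextGenerator({ model: "mistral", temperature: 0 })
    .withInstructionPrompt(), // the template produces instruction-style prompts
  template: jsonToolCallPrompt.text(),
});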
@@ -2801,12 +2808,12 @@ Error message: ${getErrorMessage(cause)}`

  // src/tool/generate-tool-calls/TextGenerationToolCallsModel.ts
  var TextGenerationToolCallsModel = class _TextGenerationToolCallsModel {
+ model;
+ template;
  constructor({
  model,
  template
  }) {
- __publicField(this, "model");
- __publicField(this, "template");
  this.model = model;
  this.template = template;
  }
@@ -2855,12 +2862,12 @@ var TextGenerationToolCallsModel = class _TextGenerationToolCallsModel {

  // src/model-function/generate-text/PromptTemplateTextGenerationModel.ts
  var PromptTemplateTextGenerationModel = class _PromptTemplateTextGenerationModel {
+ model;
+ promptTemplate;
  constructor({
  model,
  promptTemplate
  }) {
- __publicField(this, "model");
- __publicField(this, "promptTemplate");
  this.model = model;
  this.promptTemplate = promptTemplate;
  }
@@ -2900,7 +2907,7 @@ var PromptTemplateTextGenerationModel = class _PromptTemplateTextGenerationModel
  asToolCallGenerationModel(promptTemplate) {
  return new TextGenerationToolCallModel({
  model: this,
- format: promptTemplate
+ template: promptTemplate
  });
  }
  asToolCallsOrTextGenerationModel(promptTemplate) {
@@ -3000,9 +3007,9 @@ __export(AlpacaPromptTemplate_exports, {

  // src/model-function/generate-text/prompt-template/InvalidPromptError.ts
  var InvalidPromptError = class extends Error {
+ prompt;
  constructor(message, prompt) {
  super(message);
- __publicField(this, "prompt");
  this.name = "InvalidPromptError";
  this.prompt = prompt;
  }
@@ -4017,8 +4024,8 @@ var callWithRetryAndThrottle = async ({

  // src/model-function/AbstractModel.ts
  var AbstractModel = class {
+ settings;
  constructor({ settings }) {
- __publicField(this, "settings");
  this.settings = settings;
  }
  // implemented as a separate accessor to remove all other properties from the model
@@ -4041,8 +4048,8 @@ function mapBasicPromptToAutomatic1111Format() {
  var Automatic1111ImageGenerationModel = class _Automatic1111ImageGenerationModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "Automatic1111");
  }
+ provider = "Automatic1111";
  get modelName() {
  return this.settings.model;
  }
@@ -4218,8 +4225,8 @@ var import_zod5 = require("zod");
  // src/model-provider/cohere/CohereTokenizer.ts
  var import_zod4 = require("zod");
  var CohereTokenizer = class {
+ settings;
  constructor(settings) {
- __publicField(this, "settings");
  this.settings = settings;
  }
  async callTokenizeAPI(text13, callOptions) {
@@ -4341,12 +4348,6 @@ var COHERE_TEXT_EMBEDDING_MODELS = {
  var CohereTextEmbeddingModel = class _CohereTextEmbeddingModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "cohere");
- __publicField(this, "maxValuesPerCall", 96);
- __publicField(this, "isParallelizable", true);
- __publicField(this, "dimensions");
- __publicField(this, "contextWindowSize");
- __publicField(this, "tokenizer");
  this.contextWindowSize = COHERE_TEXT_EMBEDDING_MODELS[this.modelName].contextWindowSize;
  this.tokenizer = new CohereTokenizer({
  api: this.settings.api,
@@ -4354,9 +4355,15 @@ var CohereTextEmbeddingModel = class _CohereTextEmbeddingModel extends AbstractM
  });
  this.dimensions = COHERE_TEXT_EMBEDDING_MODELS[this.modelName].dimensions;
  }
+ provider = "cohere";
  get modelName() {
  return this.settings.model;
  }
+ maxValuesPerCall = 96;
+ isParallelizable = true;
+ dimensions;
+ contextWindowSize;
+ tokenizer;
  async tokenize(text13) {
  return this.tokenizer.tokenize(text13);
  }
@@ -4502,18 +4509,18 @@ var COHERE_TEXT_GENERATION_MODELS = {
  var CohereTextGenerationModel = class _CohereTextGenerationModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "cohere");
- __publicField(this, "contextWindowSize");
- __publicField(this, "tokenizer");
  this.contextWindowSize = COHERE_TEXT_GENERATION_MODELS[this.settings.model].contextWindowSize;
  this.tokenizer = new CohereTokenizer({
  api: this.settings.api,
  model: this.settings.model
  });
  }
+ provider = "cohere";
  get modelName() {
  return this.settings.model;
  }
+ contextWindowSize;
+ tokenizer;
  async countPromptTokens(input) {
  return countTokens(this.tokenizer, input);
  }
@@ -4757,7 +4764,16 @@ async function createSimpleWebSocket(url) {
  return new WebSocket(url);
  }
  case "node": {
- const { default: WebSocket2 } = await import("ws");
+ let WebSocket2;
+ try {
+ WebSocket2 = (await import("ws")).default;
+ } catch (error) {
+ try {
+ WebSocket2 = require("ws");
+ } catch (error2) {
+ throw new Error(`Failed to load 'ws' module dynamically.`);
+ }
+ }
  return new WebSocket2(url);
  }
  default: {
@@ -4771,8 +4787,8 @@ var defaultModel = "eleven_monolingual_v1";
  var ElevenLabsSpeechModel = class _ElevenLabsSpeechModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "elevenlabs");
  }
+ provider = "elevenlabs";
  get modelName() {
  return this.settings.voice;
  }
@@ -5003,19 +5019,18 @@ var import_zod9 = require("zod");
  var HuggingFaceTextEmbeddingModel = class _HuggingFaceTextEmbeddingModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "huggingface");
- __publicField(this, "maxValuesPerCall");
- __publicField(this, "isParallelizable", true);
- __publicField(this, "contextWindowSize");
- __publicField(this, "dimensions");
- __publicField(this, "tokenizer");
- __publicField(this, "countPromptTokens");
  this.maxValuesPerCall = settings.maxValuesPerCall ?? 1024;
  this.dimensions = settings.dimensions;
  }
+ provider = "huggingface";
  get modelName() {
  return this.settings.model;
  }
+ maxValuesPerCall;
+ isParallelizable = true;
+ contextWindowSize = void 0;
+ dimensions;
+ tokenizer = void 0;
  async callAPI(texts, callOptions) {
  if (texts.length > this.maxValuesPerCall) {
  throw new Error(
@@ -5056,6 +5071,7 @@ var HuggingFaceTextEmbeddingModel = class _HuggingFaceTextEmbeddingModel extends
  options: this.settings.options
  };
  }
+ countPromptTokens = void 0;
  async doEmbedValues(texts, options) {
  const rawResponse = await this.callAPI(texts, options);
  return {
@@ -5076,14 +5092,14 @@ var import_zod10 = require("zod");
  var HuggingFaceTextGenerationModel = class _HuggingFaceTextGenerationModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "huggingface");
- __publicField(this, "contextWindowSize");
- __publicField(this, "tokenizer");
- __publicField(this, "countPromptTokens");
  }
+ provider = "huggingface";
  get modelName() {
  return this.settings.model;
  }
+ contextWindowSize = void 0;
+ tokenizer = void 0;
+ countPromptTokens = void 0;
  async callAPI(prompt, callOptions) {
  const api = this.settings.api ?? new HuggingFaceApiConfiguration();
  const abortSignal = callOptions?.run?.abortSignal;
@@ -5427,8 +5443,8 @@ var BakLLaVA1 = LlamaCppBakLLaVA1PromptTemplate_exports;
  // src/model-provider/llamacpp/LlamaCppTokenizer.ts
  var import_zod12 = require("zod");
  var LlamaCppTokenizer = class {
+ api;
  constructor(api = new LlamaCppApiConfiguration()) {
- __publicField(this, "api");
  this.api = api;
  }
  async callTokenizeAPI(text13, callOptions) {
@@ -5481,9 +5497,7 @@ var PRIMITIVE_RULES = {
  null: '"null" space'
  };
  var RuleMap = class {
- constructor() {
- __publicField(this, "rules", /* @__PURE__ */ new Map());
- }
+ rules = /* @__PURE__ */ new Map();
  add(name, rule) {
  const escapedName = this.escapeRuleName(name, rule);
  this.rules.set(escapedName, rule);
@@ -5575,16 +5589,16 @@ function visit(schema, name, rules) {
  var LlamaCppCompletionModel = class _LlamaCppCompletionModel extends AbstractModel {
  constructor(settings = {}) {
  super({ settings });
- __publicField(this, "provider", "llamacpp");
- __publicField(this, "tokenizer");
  this.tokenizer = new LlamaCppTokenizer(this.settings.api);
  }
+ provider = "llamacpp";
  get modelName() {
  return null;
  }
  get contextWindowSize() {
  return this.settings.contextWindowSize;
  }
+ tokenizer;
  async callAPI(prompt, callOptions, options) {
  const api = this.settings.api ?? new LlamaCppApiConfiguration();
  const responseFormat = options.responseFormat;
@@ -5884,21 +5898,21 @@ var import_zod14 = require("zod");
  var LlamaCppTextEmbeddingModel = class _LlamaCppTextEmbeddingModel extends AbstractModel {
  constructor(settings = {}) {
  super({ settings });
- __publicField(this, "provider", "llamacpp");
- __publicField(this, "maxValuesPerCall", 1);
- __publicField(this, "contextWindowSize");
- __publicField(this, "tokenizer");
  this.tokenizer = new LlamaCppTokenizer(this.settings.api);
  }
+ provider = "llamacpp";
  get modelName() {
  return null;
  }
+ maxValuesPerCall = 1;
  get isParallelizable() {
  return this.settings.isParallelizable ?? false;
  }
+ contextWindowSize = void 0;
  get dimensions() {
  return this.settings.dimensions;
  }
+ tokenizer;
  async tokenize(text13) {
  return this.tokenizer.tokenize(text13);
  }
@@ -6081,8 +6095,8 @@ var import_zod15 = require("zod");
  var LmntSpeechModel = class _LmntSpeechModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "lmnt");
  }
+ provider = "lmnt";
  get modelName() {
  return this.settings.voice;
  }
@@ -6326,14 +6340,14 @@ var failedMistralCallResponseHandler = createJsonErrorResponseHandler({
  var MistralChatModel = class _MistralChatModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "mistral");
- __publicField(this, "contextWindowSize");
- __publicField(this, "tokenizer");
- __publicField(this, "countPromptTokens");
  }
+ provider = "mistral";
  get modelName() {
  return this.settings.model;
  }
+ contextWindowSize = void 0;
+ tokenizer = void 0;
+ countPromptTokens = void 0;
  async callAPI(prompt, callOptions, options) {
  const api = this.settings.api ?? new MistralApiConfiguration();
  const abortSignal = callOptions.run?.abortSignal;
@@ -6518,18 +6532,18 @@ var import_zod18 = require("zod");
  var MistralTextEmbeddingModel = class _MistralTextEmbeddingModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "mistral");
- __publicField(this, "maxValuesPerCall", 32);
- /**
- * Parallel calls are technically possible, but I have been hitting rate limits and disabled
- * them for now.
- */
- __publicField(this, "isParallelizable", false);
- __publicField(this, "dimensions", 1024);
  }
+ provider = "mistral";
  get modelName() {
  return this.settings.model;
  }
+ maxValuesPerCall = 32;
+ /**
+ * Parallel calls are technically possible, but I have been hitting rate limits and disabled
+ * them for now.
+ */
+ isParallelizable = false;
+ dimensions = 1024;
  async callAPI(texts, callOptions) {
  if (texts.length > this.maxValuesPerCall) {
  throw new Error(
@@ -6724,14 +6738,14 @@ var failedOllamaCallResponseHandler = createJsonErrorResponseHandler({
  var OllamaChatModel = class _OllamaChatModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "ollama");
- __publicField(this, "tokenizer");
- __publicField(this, "countPromptTokens");
- __publicField(this, "contextWindowSize");
  }
+ provider = "ollama";
  get modelName() {
  return this.settings.model;
  }
+ tokenizer = void 0;
+ countPromptTokens = void 0;
+ contextWindowSize = void 0;
  async callAPI(prompt, callOptions, options) {
  const { responseFormat } = options;
  const api = this.settings.api ?? new OllamaApiConfiguration();
@@ -6840,7 +6854,7 @@ var OllamaChatModel = class _OllamaChatModel extends AbstractModel {
  asToolCallGenerationModel(promptTemplate) {
  return new TextGenerationToolCallModel({
  model: this,
- format: promptTemplate
+ template: promptTemplate
  });
  }
  asToolCallsOrTextGenerationModel(promptTemplate) {
@@ -7025,13 +7039,13 @@ var Vicuna2 = asOllamaCompletionTextPromptTemplateProvider(VicunaPromptTemplate_
  var OllamaCompletionModel = class _OllamaCompletionModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "ollama");
- __publicField(this, "tokenizer");
- __publicField(this, "countPromptTokens");
  }
+ provider = "ollama";
  get modelName() {
  return this.settings.model;
  }
+ tokenizer = void 0;
+ countPromptTokens = void 0;
  get contextWindowSize() {
  return this.settings.contextWindowSize;
  }
@@ -7162,7 +7176,7 @@ var OllamaCompletionModel = class _OllamaCompletionModel extends AbstractModel {
  asToolCallGenerationModel(promptTemplate) {
  return new TextGenerationToolCallModel({
  model: this,
- format: promptTemplate
+ template: promptTemplate
  });
  }
  asToolCallsOrTextGenerationModel(promptTemplate) {
@@ -7310,12 +7324,12 @@ var import_zod22 = require("zod");
  var OllamaTextEmbeddingModel = class _OllamaTextEmbeddingModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "ollama");
- __publicField(this, "maxValuesPerCall", 1);
  }
+ provider = "ollama";
  get modelName() {
  return null;
  }
+ maxValuesPerCall = 1;
  get isParallelizable() {
  return this.settings.isParallelizable ?? false;
  }
@@ -7876,11 +7890,11 @@ var import_zod26 = require("zod");
  var AbstractOpenAITextEmbeddingModel = class extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "isParallelizable", true);
  }
  get maxValuesPerCall() {
  return this.settings.maxValuesPerCall ?? 2048;
  }
+ isParallelizable = true;
  async callAPI(texts, callOptions) {
  const api = this.settings.api ?? new OpenAIApiConfiguration();
  const abortSignal = callOptions.run?.abortSignal;
@@ -7941,6 +7955,10 @@ var openAITextEmbeddingResponseSchema = import_zod26.z.object({

  // src/model-provider/openai/AzureOpenAIApiConfiguration.ts
  var AzureOpenAIApiConfiguration = class extends AbstractApiConfiguration {
+ resourceName;
+ deploymentId;
+ apiVersion;
+ fixedHeaderValue;
  constructor({
  resourceName,
  deploymentId,
@@ -7950,10 +7968,6 @@ var AzureOpenAIApiConfiguration = class extends AbstractApiConfiguration {
  throttle
  }) {
  super({ retry, throttle });
- __publicField(this, "resourceName");
- __publicField(this, "deploymentId");
- __publicField(this, "apiVersion");
- __publicField(this, "fixedHeaderValue");
  this.resourceName = resourceName;
  this.deploymentId = deploymentId;
  this.apiVersion = apiVersion;
@@ -8152,16 +8166,16 @@ function chat12() {

  // src/model-provider/openai/OpenAIChatFunctionCallObjectGenerationModel.ts
  var OpenAIChatFunctionCallObjectGenerationModel = class _OpenAIChatFunctionCallObjectGenerationModel {
+ model;
+ fnName;
+ fnDescription;
+ promptTemplate;
  constructor({
  model,
  fnName,
  fnDescription,
  promptTemplate
  }) {
- __publicField(this, "model");
- __publicField(this, "fnName");
- __publicField(this, "fnDescription");
- __publicField(this, "promptTemplate");
  this.model = model;
  this.fnName = fnName;
  this.fnDescription = fnDescription;
@@ -8303,9 +8317,9 @@ var TikTokenTokenizer = class {
  * Get a TikToken tokenizer for a specific model or encoding.
  */
  constructor(settings) {
- __publicField(this, "tiktoken");
  this.tiktoken = new import_lite.Tiktoken(getTiktokenBPE(settings.model));
  }
+ tiktoken;
  async tokenize(text13) {
  return this.tiktoken.encode(text13);
  }
@@ -8519,18 +8533,18 @@ var calculateOpenAIChatCostInMillicents = ({
  var OpenAIChatModel = class _OpenAIChatModel extends AbstractOpenAIChatModel {
  constructor(settings) {
  super(settings);
- __publicField(this, "provider", "openai");
- __publicField(this, "contextWindowSize");
- __publicField(this, "tokenizer");
  const modelInformation = getOpenAIChatModelInformation(this.settings.model);
  this.tokenizer = new TikTokenTokenizer({
  model: modelInformation.baseModel
  });
  this.contextWindowSize = modelInformation.contextWindowSize;
  }
+ provider = "openai";
  get modelName() {
  return this.settings.model;
  }
+ contextWindowSize;
+ tokenizer;
  /**
  * Counts the prompt tokens required for the messages. This includes the message base tokens
  * and the prompt base tokens.
@@ -8632,9 +8646,6 @@ var calculateOpenAICompletionCostInMillicents = ({
  var OpenAICompletionModel = class _OpenAICompletionModel extends AbstractOpenAICompletionModel {
  constructor(settings) {
  super(settings);
- __publicField(this, "provider", "openai");
- __publicField(this, "contextWindowSize");
- __publicField(this, "tokenizer");
  const modelInformation = getOpenAICompletionModelInformation(
  this.settings.model
  );
@@ -8643,9 +8654,12 @@ var OpenAICompletionModel = class _OpenAICompletionModel extends AbstractOpenAIC
  });
  this.contextWindowSize = modelInformation.contextWindowSize;
  }
+ provider = "openai";
  get modelName() {
  return this.settings.model;
  }
+ contextWindowSize;
+ tokenizer;
  async countPromptTokens(input) {
  return countTokens(this.tokenizer, input);
  }
@@ -8770,8 +8784,8 @@ var calculateOpenAIImageGenerationCostInMillicents = ({
  var OpenAIImageGenerationModel = class _OpenAIImageGenerationModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "openai");
  }
+ provider = "openai";
  get modelName() {
  return this.settings.model;
  }
@@ -8892,8 +8906,8 @@ var calculateOpenAISpeechCostInMillicents = ({
  var OpenAISpeechModel = class _OpenAISpeechModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "openai");
  }
+ provider = "openai";
  get voice() {
  return this.settings.voice;
  }
@@ -8978,17 +8992,17 @@ var calculateOpenAIEmbeddingCostInMillicents = ({
  var OpenAITextEmbeddingModel = class _OpenAITextEmbeddingModel extends AbstractOpenAITextEmbeddingModel {
  constructor(settings) {
  super(settings);
- __publicField(this, "provider", "openai");
- __publicField(this, "dimensions");
- __publicField(this, "tokenizer");
- __publicField(this, "contextWindowSize");
  this.tokenizer = new TikTokenTokenizer({ model: this.modelName });
  this.contextWindowSize = OPENAI_TEXT_EMBEDDING_MODELS[this.modelName].contextWindowSize;
  this.dimensions = this.settings.dimensions ?? OPENAI_TEXT_EMBEDDING_MODELS[this.modelName].dimensions;
  }
+ provider = "openai";
  get modelName() {
  return this.settings.model;
  }
+ dimensions;
+ tokenizer;
+ contextWindowSize;
  async countTokens(input) {
  return countTokens(this.tokenizer, input);
  }
@@ -9052,8 +9066,8 @@ var calculateOpenAITranscriptionCostInMillicents = ({
  var OpenAITranscriptionModel = class _OpenAITranscriptionModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "openai");
  }
+ provider = "openai";
  get modelName() {
  return this.settings.model;
  }
@@ -9233,17 +9247,14 @@ var FireworksAIApiConfiguration = class extends BaseUrlApiConfigurationWithDefau
  path: "/inference/v1"
  }
  });
- __publicField(this, "provider", "openaicompatible-fireworksai");
  }
+ provider = "openaicompatible-fireworksai";
  };

  // src/model-provider/openai-compatible/OpenAICompatibleChatModel.ts
  var OpenAICompatibleChatModel = class _OpenAICompatibleChatModel extends AbstractOpenAIChatModel {
  constructor(settings) {
  super(settings);
- __publicField(this, "contextWindowSize");
- __publicField(this, "tokenizer");
- __publicField(this, "countPromptTokens");
  }
  get provider() {
  return this.settings.provider ?? this.settings.api.provider ?? "openaicompatible";
@@ -9251,6 +9262,9 @@ var OpenAICompatibleChatModel = class _OpenAICompatibleChatModel extends Abstrac
  get modelName() {
  return this.settings.model;
  }
+ contextWindowSize = void 0;
+ tokenizer = void 0;
+ countPromptTokens = void 0;
  get settingsForEvent() {
  const eventSettingProperties = [
  ...textGenerationModelProperties,
@@ -9313,9 +9327,6 @@ var OpenAICompatibleChatModel = class _OpenAICompatibleChatModel extends Abstrac
  var OpenAICompatibleCompletionModel = class _OpenAICompatibleCompletionModel extends AbstractOpenAICompletionModel {
  constructor(settings) {
  super(settings);
- __publicField(this, "contextWindowSize");
- __publicField(this, "tokenizer");
- __publicField(this, "countPromptTokens");
  }
  get provider() {
  return this.settings.provider ?? this.settings.api.provider ?? "openaicompatible";
@@ -9323,6 +9334,9 @@ var OpenAICompatibleCompletionModel = class _OpenAICompatibleCompletionModel ext
  get modelName() {
  return this.settings.model;
  }
+ contextWindowSize = void 0;
+ tokenizer = void 0;
+ countPromptTokens = void 0;
  get settingsForEvent() {
  const eventSettingProperties = [
  ...textGenerationModelProperties,
@@ -9426,8 +9440,8 @@ var PerplexityApiConfiguration = class extends BaseUrlApiConfigurationWithDefaul
  path: ""
  }
  });
- __publicField(this, "provider", "openaicompatible-perplexity");
  }
+ provider = "openaicompatible-perplexity";
  };

  // src/model-provider/openai-compatible/TogetherAIApiConfiguration.ts
@@ -9449,8 +9463,8 @@ var TogetherAIApiConfiguration = class extends BaseUrlApiConfigurationWithDefaul
  path: "/v1"
  }
  });
- __publicField(this, "provider", "openaicompatible-togetherai");
  }
+ provider = "openaicompatible-togetherai";
  };

  // src/model-provider/openai-compatible/OpenAICompatibleFacade.ts
@@ -9526,8 +9540,8 @@ function mapBasicPromptToStabilityFormat() {
  var StabilityImageGenerationModel = class _StabilityImageGenerationModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "stability");
  }
+ provider = "stability";
  get modelName() {
  return this.settings.model;
  }
@@ -9647,9 +9661,9 @@ var import_zod31 = require("zod");
  var WhisperCppTranscriptionModel = class _WhisperCppTranscriptionModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
- __publicField(this, "provider", "whispercpp");
- __publicField(this, "modelName", null);
  }
+ provider = "whispercpp";
+ modelName = null;
  async doTranscribe({
  audioData,
  mimeType
@@ -9875,6 +9889,9 @@ async function splitTextChunk(splitFunction, input) {

  // src/tool/NoSuchToolDefinitionError.ts
  var NoSuchToolDefinitionError = class extends Error {
+ toolName;
+ cause;
+ parameters;
  constructor({
  toolName,
  parameters
@@ -9882,9 +9899,6 @@ var NoSuchToolDefinitionError = class extends Error {
  super(
  `Tool definition '${toolName}' not found. Parameters: ${JSON.stringify(parameters)}.`
  );
- __publicField(this, "toolName");
- __publicField(this, "cause");
- __publicField(this, "parameters");
  this.name = "NoSuchToolDefinitionError";
  this.toolName = toolName;
  this.parameters = parameters;
@@ -9903,6 +9917,28 @@ var NoSuchToolDefinitionError = class extends Error {

  // src/tool/Tool.ts
  var Tool = class {
+ /**
+ * The name of the tool.
+ * Should be understandable for language models and unique among the tools that they know.
+ */
+ name;
+ /**
+ * A optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
+ */
+ description;
+ /**
+ * The schema of the input that the tool expects. The language model will use this to generate the input.
+ * Use descriptions to make the input understandable for the language model.
+ */
+ parameters;
+ /**
+ * An optional schema of the output that the tool produces. This will be used to validate the output.
+ */
+ returnType;
+ /**
+ * The actual execution function of the tool.
+ */
+ execute;
  constructor({
  name,
  description,
@@ -9910,28 +9946,6 @@ var Tool = class {
  returnType,
  execute
  }) {
- /**
- * The name of the tool.
- * Should be understandable for language models and unique among the tools that they know.
- */
- __publicField(this, "name");
- /**
- * A description of what the tool does. Will be used by the language model to decide whether to use the tool.
- */
- __publicField(this, "description");
- /**
- * The schema of the input that the tool expects. The language model will use this to generate the input.
- * Use descriptions to make the input understandable for the language model.
- */
- __publicField(this, "parameters");
- /**
- * An optional schema of the output that the tool produces. This will be used to validate the output.
- */
- __publicField(this, "returnType");
- /**
- * The actual execution function of the tool.
- */
- __publicField(this, "execute");
  this.name = name;
  this.description = description;
  this.parameters = parameters;
@@ -9940,8 +9954,36 @@ var Tool = class {
  }
  };

+ // src/tool/ObjectGeneratorTool.ts
+ var ObjectGeneratorTool = class extends Tool {
+ constructor({
+ name = "object-generator",
+ // eslint-disable-line @typescript-eslint/no-explicit-any
+ description,
+ model,
+ parameters,
+ objectSchema,
+ prompt
+ }) {
+ super({
+ name,
+ description,
+ parameters,
+ execute: async (input, options) => generateObject({
+ model,
+ schema: objectSchema,
+ prompt: prompt(input),
+ ...options
+ })
+ });
+ }
+ };
+
  // src/tool/ToolCallArgumentsValidationError.ts
  var ToolCallArgumentsValidationError = class extends Error {
+ toolName;
+ cause;
+ args;
  constructor({
  toolName,
  args,
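`ObjectGeneratorTool`, added above and exported at the top of the diff, wraps `generateObject` so that structured object generation can itself be offered to a language model as a tool. A usage sketch grounded in the constructor signature above; the schemas, names, and ollama model wiring are illustrative assumptions:

import { z } from "zod";
import { ObjectGeneratorTool, jsonObjectPrompt, ollama, zodSchema } from "modelfusion";

const characterGenerator = new ObjectGeneratorTool({
  name: "character-generator", // defaults to "object-generator" when omitted
  description: "Generate fantasy RPG characters.",
  // input schema that the calling language model fills in:
  parameters: zodSchema(z.object({ count: z.number().int().min(1).max(5) })),
  // schema of the object produced by the wrapped model:
  objectSchema: zodSchema(
    z.object({
      characters: z.array(z.object({ name: z.string(), class: z.string() })),
    })
  ),
  model: ollama
    .ChatTextGenerator({ model: "openhermes2.5-mistral", temperature: 0 })
    .asObjectGenerationModel(jsonObjectPrompt.instruction()),
  prompt: (input) => ({ instruction: `Generate ${input.count} characters.` }),
});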
@@ -9952,9 +9994,6 @@ var ToolCallArgumentsValidationError = class extends Error {
  Arguments: ${JSON.stringify(args)}.
  Error message: ${getErrorMessage(cause)}`
  );
- __publicField(this, "toolName");
- __publicField(this, "cause");
- __publicField(this, "args");
  this.name = "ToolCallArgumentsValidationError";
  this.toolName = toolName;
  this.cause = cause;
@@ -9974,14 +10013,14 @@ Error message: ${getErrorMessage(cause)}`

  // src/tool/ToolCallError.ts
  var ToolCallError = class extends Error {
+ toolCall;
+ cause;
  constructor({
  cause,
  toolCall,
  message = getErrorMessage(cause)
  }) {
  super(`Tool call for tool '${toolCall.name}' failed: ${message}`);
- __publicField(this, "toolCall");
- __publicField(this, "cause");
  this.name = "ToolCallError";
  this.toolCall = toolCall;
  this.cause = cause;
@@ -9999,12 +10038,12 @@ var ToolCallError = class extends Error {

  // src/tool/ToolCallGenerationError.ts
  var ToolCallGenerationError = class extends Error {
+ toolName;
+ cause;
  constructor({ toolName, cause }) {
  super(
  `Tool call generation failed for tool '${toolName}'. Error message: ${getErrorMessage(cause)}`
  );
- __publicField(this, "toolName");
- __publicField(this, "cause");
  this.name = "ToolCallsGenerationError";
  this.toolName = toolName;
  this.cause = cause;
@@ -10022,6 +10061,9 @@ var ToolCallGenerationError = class extends Error {

  // src/tool/ToolExecutionError.ts
  var ToolExecutionError = class extends Error {
+ toolName;
+ input;
+ cause;
  constructor({
  toolName,
  input,
@@ -10029,9 +10071,6 @@ var ToolExecutionError = class extends Error {
  message = getErrorMessage(cause)
  }) {
  super(`Error executing tool '${toolName}': ${message}`);
- __publicField(this, "toolName");
- __publicField(this, "input");
- __publicField(this, "cause");
  this.name = "ToolExecutionError";
  this.toolName = toolName;
  this.input = input;
@@ -10253,29 +10292,63 @@ async function generateToolCall({

  // src/tool/generate-tool-call/jsonToolCallPrompt.ts
  var import_nanoid6 = require("nanoid");
+ var DEFAULT_TOOL_PROMPT = (tool) => [
+ `You are calling the function "${tool.name}".`,
+ tool.description != null ? `Function description: ${tool.description}` : null,
+ `Function parameters JSON schema: ${JSON.stringify(
+ tool.parameters.getJsonSchema()
+ )}`,
+ ``,
+ `You MUST answer with a JSON object that matches the JSON schema above.`
+ ].filter(Boolean).join("\n");
  var jsonToolCallPrompt = {
- text() {
+ text({
+ toolPrompt
+ } = {}) {
  return {
- createPrompt(instruction13, tool) {
+ createPrompt(prompt, tool) {
  return {
- system: [
- `You are calling a function "${tool.name}".`,
- tool.description != null ? ` Function description: ${tool.description}` : null,
- ` Function parameters JSON schema: ${JSON.stringify(
- tool.parameters.getJsonSchema()
- )}`,
- ``,
- `You MUST answer with a JSON object matches the above schema for the arguments.`
- ].filter(Boolean).join("\n"),
- instruction: instruction13
+ system: createSystemPrompt2({ tool, toolPrompt }),
+ instruction: prompt
  };
  },
- extractToolCall(response) {
- return { id: (0, import_nanoid6.nanoid)(), args: parseJSON({ text: response }) };
- }
+ extractToolCall,
+ withJsonOutput: ({ model, schema }) => model.withJsonOutput(schema)
+ };
+ },
+ instruction({
+ toolPrompt
+ } = {}) {
+ return {
+ createPrompt(prompt, tool) {
+ return {
+ system: createSystemPrompt2({
+ originalSystemPrompt: prompt.system,
+ tool,
+ toolPrompt
+ }),
+ instruction: prompt.instruction
+ };
+ },
+ extractToolCall,
+ withJsonOutput: ({ model, schema }) => model.withJsonOutput(schema)
  };
  }
  };
+ function createSystemPrompt2({
+ originalSystemPrompt,
+ toolPrompt = DEFAULT_TOOL_PROMPT,
+ tool
+ }) {
+ return [
+ originalSystemPrompt,
+ originalSystemPrompt != null ? "" : null,
+ toolPrompt(tool)
+ ].filter(Boolean).join("\n");
+ }
+ function extractToolCall(response) {
+ return { id: (0, import_nanoid6.nanoid)(), args: parseJSON({ text: response }) };
+ }

  // src/tool/generate-tool-calls/generateToolCalls.ts
  async function generateToolCalls({
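`jsonToolCallPrompt` above gains a configurable `toolPrompt`, a new `instruction()` variant that keeps the caller's own system message (prepended by `createSystemPrompt2`), and a `withJsonOutput` hook that puts the underlying model into schema-constrained JSON mode. A usage sketch following the library's tool-calling examples; the `runTool` wiring and model choice are assumptions:

import { z } from "zod";
import { Tool, jsonToolCallPrompt, ollama, runTool, zodSchema } from "modelfusion";

const calculator = new Tool({
  name: "calculator",
  description: "Execute a calculation.",
  parameters: zodSchema(
    z.object({
      a: z.number(),
      b: z.number(),
      operator: z.enum(["+", "-", "*", "/"]),
    })
  ),
  execute: async ({ a, b, operator }) =>
    ({ "+": a + b, "-": a - b, "*": a * b, "/": a / b })[operator],
});

const { result } = await runTool({
  model: ollama
    .ChatTextGenerator({ model: "openhermes2.5-mistral", temperature: 0 })
    .withInstructionPrompt()
    .asToolCallGenerationModel(jsonToolCallPrompt.instruction()),
  tool: calculator,
  prompt: {
    system: "You are a helpful math assistant.", // preserved by instruction()
    instruction: "What is 14 * 12?",
  },
});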
@@ -10463,6 +10536,9 @@ function createEventSourceStream(events) {

  // src/vector-index/VectorIndexRetriever.ts
  var VectorIndexRetriever = class _VectorIndexRetriever {
+ vectorIndex;
+ embeddingModel;
+ settings;
  constructor({
  vectorIndex,
  embeddingModel,
@@ -10470,9 +10546,6 @@ var VectorIndexRetriever = class _VectorIndexRetriever {
  similarityThreshold,
  filter
  }) {
- __publicField(this, "vectorIndex");
- __publicField(this, "embeddingModel");
- __publicField(this, "settings");
  this.vectorIndex = vectorIndex;
  this.embeddingModel = embeddingModel;
  this.settings = {
@@ -10517,9 +10590,6 @@ var jsonDataSchema = zodSchema(
  )
  );
  var MemoryVectorIndex = class _MemoryVectorIndex {
- constructor() {
- __publicField(this, "entries", /* @__PURE__ */ new Map());
- }
  static async deserialize({
  serializedData,
  schema
@@ -10539,6 +10609,7 @@ var MemoryVectorIndex = class _MemoryVectorIndex {
  );
  return vectorIndex;
  }
+ entries = /* @__PURE__ */ new Map();
  async upsertMany(data) {
  for (const entry of data) {
  this.entries.set(entry.id, entry);
@@ -10661,6 +10732,7 @@ async function upsertIntoVectorIndex({
  OPENAI_TRANSCRIPTION_MODELS,
  ObjectFromTextGenerationModel,
  ObjectFromTextStreamingModel,
+ ObjectGeneratorTool,
  ObjectParseError,
  ObjectStreamFromResponse,
  ObjectStreamResponse,