modelfusion 0.134.0 → 0.135.0

This diff compares the publicly available contents of the two package versions as published to their public registry. It is provided for informational purposes only.
package/index.js CHANGED
@@ -1,25 +1,20 @@
  var __defProp = Object.defineProperty;
- var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
  var __export = (target, all) => {
    for (var name in all)
      __defProp(target, name, { get: all[name], enumerable: true });
  };
- var __publicField = (obj, key, value) => {
-   __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
-   return value;
- };

  // src/core/DefaultRun.ts
  import { nanoid as createId } from "nanoid";

  // src/core/FunctionEventSource.ts
  var FunctionEventSource = class {
+   observers;
+   errorHandler;
    constructor({
      observers,
      errorHandler
    }) {
-     __publicField(this, "observers");
-     __publicField(this, "errorHandler");
      this.observers = observers;
      this.errorHandler = errorHandler ?? ((error) => console.error(error));
    }
@@ -36,6 +31,13 @@ var FunctionEventSource = class {

  // src/core/DefaultRun.ts
  var DefaultRun = class {
+   runId;
+   sessionId;
+   userId;
+   abortSignal;
+   errorHandler;
+   events = [];
+   functionEventSource;
    constructor({
      runId = `run-${createId()}`,
      sessionId,
@@ -44,19 +46,6 @@ var DefaultRun = class {
      observers,
      errorHandler
    } = {}) {
-     __publicField(this, "runId");
-     __publicField(this, "sessionId");
-     __publicField(this, "userId");
-     __publicField(this, "abortSignal");
-     __publicField(this, "errorHandler");
-     __publicField(this, "events", []);
-     __publicField(this, "functionEventSource");
-     __publicField(this, "functionObserver", {
-       onFunctionEvent: (event) => {
-         this.events.push(event);
-         this.functionEventSource.notify(event);
-       }
-     });
      this.runId = runId;
      this.sessionId = sessionId;
      this.userId = userId;
@@ -67,6 +56,12 @@ var DefaultRun = class {
        errorHandler: this.errorHandler.bind(this)
      });
    }
+   functionObserver = {
+     onFunctionEvent: (event) => {
+       this.events.push(event);
+       this.functionEventSource.notify(event);
+     }
+   };
  };

  // src/core/ModelFusionConfiguration.ts
@@ -116,6 +111,13 @@ var AbortError = class extends Error {

  // src/core/api/ApiCallError.ts
  var ApiCallError = class extends Error {
+   url;
+   requestBodyValues;
+   statusCode;
+   responseBody;
+   cause;
+   isRetryable;
+   data;
    constructor({
      message,
      url,
@@ -127,13 +129,6 @@ var ApiCallError = class extends Error {
      data
    }) {
      super(message);
-     __publicField(this, "url");
-     __publicField(this, "requestBodyValues");
-     __publicField(this, "statusCode");
-     __publicField(this, "responseBody");
-     __publicField(this, "cause");
-     __publicField(this, "isRetryable");
-     __publicField(this, "data");
      this.name = "ApiCallError";
      this.url = url;
      this.requestBodyValues = requestBodyValues;
@@ -191,16 +186,16 @@ function getErrorMessage(error) {

  // src/core/api/RetryError.ts
  var RetryError = class extends Error {
+   // note: property order determines debugging output
+   reason;
+   lastError;
+   errors;
    constructor({
      message,
      reason,
      errors
    }) {
      super(message);
-     // note: property order determines debugging output
-     __publicField(this, "reason");
-     __publicField(this, "lastError");
-     __publicField(this, "errors");
      this.name = "RetryError";
      this.reason = reason;
      this.errors = errors;
@@ -268,10 +263,10 @@ async function _retryWithExponentialBackoff(f, {

  // src/core/api/throttleMaxConcurrency.ts
  var MaxConcurrencyThrottler = class {
+   maxConcurrentCalls;
+   activeCallCount;
+   callQueue;
    constructor({ maxConcurrentCalls }) {
-     __publicField(this, "maxConcurrentCalls");
-     __publicField(this, "activeCallCount");
-     __publicField(this, "callQueue");
      this.maxConcurrentCalls = maxConcurrentCalls;
      this.activeCallCount = 0;
      this.callQueue = [];
@@ -315,14 +310,14 @@ var throttleOff = () => (fn) => fn();

  // src/core/api/AbstractApiConfiguration.ts
  var AbstractApiConfiguration = class {
+   retry;
+   throttle;
+   customCallHeaders;
    constructor({
      retry,
      throttle,
      customCallHeaders = () => ({})
    }) {
-     __publicField(this, "retry");
-     __publicField(this, "throttle");
-     __publicField(this, "customCallHeaders");
      this.retry = retry;
      this.throttle = throttle;
      this.customCallHeaders = customCallHeaders;
@@ -342,6 +337,8 @@ var AbstractApiConfiguration = class {

  // src/core/api/BaseUrlApiConfiguration.ts
  var BaseUrlApiConfiguration = class extends AbstractApiConfiguration {
+   baseUrl;
+   fixedHeadersValue;
    constructor({
      baseUrl,
      headers,
@@ -350,8 +347,6 @@ var BaseUrlApiConfiguration = class extends AbstractApiConfiguration {
      customCallHeaders
    }) {
      super({ retry, throttle, customCallHeaders });
-     __publicField(this, "baseUrl");
-     __publicField(this, "fixedHeadersValue");
      this.baseUrl = typeof baseUrl == "string" ? parseBaseUrl(baseUrl) : baseUrl;
      this.fixedHeadersValue = headers ?? {};
    }
@@ -412,9 +407,7 @@ function resolveBaseUrl(baseUrl = {}, baseUrlDefaults) {

  // src/core/cache/MemoryCache.ts
  var MemoryCache = class {
-   constructor() {
-     __publicField(this, "cache", /* @__PURE__ */ new Map());
-   }
+   cache = /* @__PURE__ */ new Map();
    hashKey(key) {
      return JSON.stringify(key);
    }
@@ -561,9 +554,7 @@ function startDurationMeasurement() {
    return globalThis.performance != null ? new PerformanceNowDurationMeasurement() : new DateDurationMeasurement();
  }
  var PerformanceNowDurationMeasurement = class {
-   constructor() {
-     __publicField(this, "startTime", globalThis.performance.now());
-   }
+   startTime = globalThis.performance.now();
    get startEpochSeconds() {
      return Math.floor(
        (globalThis.performance.timeOrigin + this.startTime) / 1e3
@@ -577,9 +568,7 @@ var PerformanceNowDurationMeasurement = class {
    }
  };
  var DateDurationMeasurement = class {
-   constructor() {
-     __publicField(this, "startTime", Date.now());
-   }
+   startTime = Date.now();
    get startEpochSeconds() {
      return Math.floor(this.startTime / 1e3);
    }
@@ -699,14 +688,14 @@ async function executeFunction(fn, input, options) {

  // src/core/schema/JSONParseError.ts
  var JSONParseError = class extends Error {
+   // note: property order determines debugging output
+   text;
+   cause;
    constructor({ text: text13, cause }) {
      super(
        `JSON parsing failed: Text: ${text13}.
  Error message: ${getErrorMessage(cause)}`
      );
-     // note: property order determines debugging output
-     __publicField(this, "text");
-     __publicField(this, "cause");
      this.name = "JSONParseError";
      this.cause = cause;
      this.text = text13;
@@ -724,13 +713,13 @@ Error message: ${getErrorMessage(cause)}`

  // src/core/schema/TypeValidationError.ts
  var TypeValidationError = class extends Error {
+   value;
+   cause;
    constructor({ value, cause }) {
      super(
        `Type validation failed: Value: ${JSON.stringify(value)}.
  Error message: ${getErrorMessage(cause)}`
      );
-     __publicField(this, "value");
-     __publicField(this, "cause");
      this.name = "TypeValidationError";
      this.cause = cause;
      this.value = value;
@@ -753,7 +742,6 @@ function uncheckedSchema(jsonSchema) {
  var UncheckedSchema = class {
    constructor(jsonSchema) {
      this.jsonSchema = jsonSchema;
-     __publicField(this, "_type");
    }
    validate(value) {
      return { success: true, value };
@@ -761,6 +749,7 @@ var UncheckedSchema = class {
    getJsonSchema() {
      return this.jsonSchema;
    }
+   _type;
  };

  // src/core/schema/ZodSchema.ts
@@ -769,16 +758,8 @@ function zodSchema(zodSchema2) {
    return new ZodSchema(zodSchema2);
  }
  var ZodSchema = class {
+   zodSchema;
    constructor(zodSchema2) {
-     __publicField(this, "zodSchema");
-     /**
-      * Use only for typing purposes. The value is always `undefined`.
-      */
-     __publicField(this, "_type");
-     /**
-      * Use only for typing purposes. The value is always `undefined`.
-      */
-     __publicField(this, "_partialType");
      this.zodSchema = zodSchema2;
    }
    validate(value) {
@@ -788,6 +769,14 @@ var ZodSchema = class {
    getJsonSchema() {
      return zodToJsonSchema(this.zodSchema);
    }
+   /**
+    * Use only for typing purposes. The value is always `undefined`.
+    */
+   _type;
+   /**
+    * Use only for typing purposes. The value is always `undefined`.
+    */
+   _partialType;
  };

  // src/core/schema/parseJSON.ts
@@ -1086,13 +1075,13 @@ async function embed({

  // src/model-function/classify/EmbeddingSimilarityClassifier.ts
  var EmbeddingSimilarityClassifier = class _EmbeddingSimilarityClassifier {
+   settings;
+   modelInformation = {
+     provider: "modelfusion",
+     modelName: "EmbeddingSimilarityClassifier"
+   };
+   embeddings;
    constructor(settings) {
-     __publicField(this, "settings");
-     __publicField(this, "modelInformation", {
-       provider: "modelfusion",
-       modelName: "EmbeddingSimilarityClassifier"
-     });
-     __publicField(this, "embeddings");
      this.settings = settings;
    }
    async getEmbeddings(options) {
@@ -1189,12 +1178,12 @@ async function classify({

  // src/model-function/generate-image/PromptTemplateImageGenerationModel.ts
  var PromptTemplateImageGenerationModel = class _PromptTemplateImageGenerationModel {
+   model;
+   promptTemplate;
    constructor({
      model,
      promptTemplate
    }) {
-     __publicField(this, "model");
-     __publicField(this, "promptTemplate");
      this.model = model;
      this.promptTemplate = promptTemplate;
    }
@@ -1308,11 +1297,9 @@ async function generateSpeech({

  // src/util/AsyncQueue.ts
  var AsyncQueue = class {
-   constructor() {
-     __publicField(this, "values", Array());
-     __publicField(this, "pendingResolvers", []);
-     __publicField(this, "closed", false);
-   }
+   values = Array();
+   pendingResolvers = [];
+   closed = false;
    processPendingResolvers() {
      while (this.pendingResolvers.length > 0) {
        this.pendingResolvers.shift()?.();
@@ -1678,13 +1665,13 @@ async function generateText({

  // src/model-function/generate-object/ObjectParseError.ts
  var ObjectParseError = class extends Error {
+   cause;
+   valueText;
    constructor({ valueText, cause }) {
      super(
        `Object parsing failed. Value: ${valueText}.
  Error message: ${getErrorMessage(cause)}`
      );
-     __publicField(this, "cause");
-     __publicField(this, "valueText");
      this.name = "ObjectParseError";
      this.cause = cause;
      this.valueText = valueText;
@@ -1702,12 +1689,12 @@ Error message: ${getErrorMessage(cause)}`

  // src/model-function/generate-object/ObjectFromTextGenerationModel.ts
  var ObjectFromTextGenerationModel = class _ObjectFromTextGenerationModel {
+   model;
+   template;
    constructor({
      model,
      template
    }) {
-     __publicField(this, "model");
-     __publicField(this, "template");
      this.model = model;
      this.template = template;
    }
@@ -2228,6 +2215,9 @@ function ObjectStreamToTextStream(stream) {

  // src/model-function/generate-object/ObjectValidationError.ts
  var ObjectValidationError = class extends Error {
+   cause;
+   valueText;
+   value;
    constructor({
      value,
      valueText,
@@ -2237,9 +2227,6 @@ var ObjectValidationError = class extends Error {
        `Object validation failed. Value: ${valueText}.
  Error message: ${getErrorMessage(cause)}`
      );
-     __publicField(this, "cause");
-     __publicField(this, "valueText");
-     __publicField(this, "value");
      this.name = "ObjectValidationError";
      this.cause = cause;
      this.value = value;
@@ -2306,7 +2293,7 @@ async function generateObject({

  // src/model-function/generate-object/jsonObjectPrompt.ts
  var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
- var DEFAULT_SCHEMA_SUFFIX = "\nYou MUST answer with a JSON object matches the above schema.";
+ var DEFAULT_SCHEMA_SUFFIX = "\nYou MUST answer with a JSON object that matches the JSON schema above.";
  var jsonObjectPrompt = {
    custom(createPrompt) {
      return { createPrompt, extractObject };
@@ -2353,6 +2340,7 @@ function createSystemPrompt({
  }) {
    return [
      originalSystemPrompt,
+     originalSystemPrompt != null ? "" : null,
      schemaPrefix,
      JSON.stringify(schema.getJsonSchema()),
      schemaSuffix
@@ -2472,6 +2460,9 @@ async function streamObject({

  // src/tool/generate-tool-call/ToolCallParseError.ts
  var ToolCallParseError = class extends Error {
+   toolName;
+   valueText;
+   cause;
    constructor({
      toolName,
      valueText,
@@ -2481,9 +2472,6 @@ var ToolCallParseError = class extends Error {
        `Tool call parsing failed for '${toolName}'. Value: ${valueText}.
  Error message: ${getErrorMessage(cause)}`
      );
-     __publicField(this, "toolName");
-     __publicField(this, "valueText");
-     __publicField(this, "cause");
      this.name = "ToolCallParseError";
      this.toolName = toolName;
      this.cause = cause;
@@ -2503,14 +2491,14 @@ Error message: ${getErrorMessage(cause)}`

  // src/tool/generate-tool-call/TextGenerationToolCallModel.ts
  var TextGenerationToolCallModel = class _TextGenerationToolCallModel {
+   model;
+   template;
    constructor({
      model,
-     format
+     template
    }) {
-     __publicField(this, "model");
-     __publicField(this, "format");
      this.model = model;
-     this.format = format;
+     this.template = template;
    }
    get modelInformation() {
      return this.model.modelInformation;
@@ -2521,17 +2509,26 @@ var TextGenerationToolCallModel = class _TextGenerationToolCallModel {
    get settingsForEvent() {
      return this.model.settingsForEvent;
    }
+   getModelWithJsonOutput(schema) {
+     if (this.template.withJsonOutput != null) {
+       return this.template.withJsonOutput({
+         model: this.model,
+         schema
+       });
+     }
+     return this.model;
+   }
    async doGenerateToolCall(tool, prompt, options) {
      const { rawResponse, text: text13, metadata } = await generateText({
-       model: this.model,
-       prompt: this.format.createPrompt(prompt, tool),
+       model: this.getModelWithJsonOutput(tool.parameters),
+       prompt: this.template.createPrompt(prompt, tool),
        fullResponse: true,
        ...options
      });
      try {
        return {
          rawResponse,
-         toolCall: this.format.extractToolCall(text13, tool),
+         toolCall: this.template.extractToolCall(text13, tool),
          usage: metadata?.usage
        };
      } catch (error) {
@@ -2545,20 +2542,20 @@ var TextGenerationToolCallModel = class _TextGenerationToolCallModel {
    withSettings(additionalSettings) {
      return new _TextGenerationToolCallModel({
        model: this.model.withSettings(additionalSettings),
-       format: this.format
+       template: this.template
      });
    }
  };

  // src/tool/generate-tool-calls/ToolCallsParseError.ts
  var ToolCallsParseError = class extends Error {
+   valueText;
+   cause;
    constructor({ valueText, cause }) {
      super(
        `Tool calls parsing failed. Value: ${valueText}.
  Error message: ${getErrorMessage(cause)}`
      );
-     __publicField(this, "valueText");
-     __publicField(this, "cause");
      this.name = "ToolCallsParseError";
      this.cause = cause;
      this.valueText = valueText;
@@ -2576,12 +2573,12 @@ Error message: ${getErrorMessage(cause)}`

  // src/tool/generate-tool-calls/TextGenerationToolCallsModel.ts
  var TextGenerationToolCallsModel = class _TextGenerationToolCallsModel {
+   model;
+   template;
    constructor({
      model,
      template
    }) {
-     __publicField(this, "model");
-     __publicField(this, "template");
      this.model = model;
      this.template = template;
    }
@@ -2630,12 +2627,12 @@ var TextGenerationToolCallsModel = class _TextGenerationToolCallsModel {

  // src/model-function/generate-text/PromptTemplateTextGenerationModel.ts
  var PromptTemplateTextGenerationModel = class _PromptTemplateTextGenerationModel {
+   model;
+   promptTemplate;
    constructor({
      model,
      promptTemplate
    }) {
-     __publicField(this, "model");
-     __publicField(this, "promptTemplate");
      this.model = model;
      this.promptTemplate = promptTemplate;
    }
@@ -2675,7 +2672,7 @@ var PromptTemplateTextGenerationModel = class _PromptTemplateTextGenerationModel
    asToolCallGenerationModel(promptTemplate) {
      return new TextGenerationToolCallModel({
        model: this,
-       format: promptTemplate
+       template: promptTemplate
      });
    }
    asToolCallsOrTextGenerationModel(promptTemplate) {
@@ -2775,9 +2772,9 @@ __export(AlpacaPromptTemplate_exports, {

  // src/model-function/generate-text/prompt-template/InvalidPromptError.ts
  var InvalidPromptError = class extends Error {
+   prompt;
    constructor(message, prompt) {
      super(message);
-     __publicField(this, "prompt");
      this.name = "InvalidPromptError";
      this.prompt = prompt;
    }
@@ -3792,8 +3789,8 @@ var callWithRetryAndThrottle = async ({

  // src/model-function/AbstractModel.ts
  var AbstractModel = class {
+   settings;
    constructor({ settings }) {
-     __publicField(this, "settings");
      this.settings = settings;
    }
    // implemented as a separate accessor to remove all other properties from the model
@@ -3816,8 +3813,8 @@ function mapBasicPromptToAutomatic1111Format() {
  var Automatic1111ImageGenerationModel = class _Automatic1111ImageGenerationModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "Automatic1111");
    }
+   provider = "Automatic1111";
    get modelName() {
      return this.settings.model;
    }
@@ -3993,8 +3990,8 @@ import { z as z5 } from "zod";
  // src/model-provider/cohere/CohereTokenizer.ts
  import { z as z4 } from "zod";
  var CohereTokenizer = class {
+   settings;
    constructor(settings) {
-     __publicField(this, "settings");
      this.settings = settings;
    }
    async callTokenizeAPI(text13, callOptions) {
@@ -4116,12 +4113,6 @@ var COHERE_TEXT_EMBEDDING_MODELS = {
  var CohereTextEmbeddingModel = class _CohereTextEmbeddingModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "cohere");
-     __publicField(this, "maxValuesPerCall", 96);
-     __publicField(this, "isParallelizable", true);
-     __publicField(this, "dimensions");
-     __publicField(this, "contextWindowSize");
-     __publicField(this, "tokenizer");
      this.contextWindowSize = COHERE_TEXT_EMBEDDING_MODELS[this.modelName].contextWindowSize;
      this.tokenizer = new CohereTokenizer({
        api: this.settings.api,
@@ -4129,9 +4120,15 @@ var CohereTextEmbeddingModel = class _CohereTextEmbeddingModel extends AbstractM
      });
      this.dimensions = COHERE_TEXT_EMBEDDING_MODELS[this.modelName].dimensions;
    }
+   provider = "cohere";
    get modelName() {
      return this.settings.model;
    }
+   maxValuesPerCall = 96;
+   isParallelizable = true;
+   dimensions;
+   contextWindowSize;
+   tokenizer;
    async tokenize(text13) {
      return this.tokenizer.tokenize(text13);
    }
@@ -4277,18 +4274,18 @@ var COHERE_TEXT_GENERATION_MODELS = {
  var CohereTextGenerationModel = class _CohereTextGenerationModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "cohere");
-     __publicField(this, "contextWindowSize");
-     __publicField(this, "tokenizer");
      this.contextWindowSize = COHERE_TEXT_GENERATION_MODELS[this.settings.model].contextWindowSize;
      this.tokenizer = new CohereTokenizer({
        api: this.settings.api,
        model: this.settings.model
      });
    }
+   provider = "cohere";
    get modelName() {
      return this.settings.model;
    }
+   contextWindowSize;
+   tokenizer;
    async countPromptTokens(input) {
      return countTokens(this.tokenizer, input);
    }
@@ -4546,8 +4543,8 @@ var defaultModel = "eleven_monolingual_v1";
  var ElevenLabsSpeechModel = class _ElevenLabsSpeechModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "elevenlabs");
    }
+   provider = "elevenlabs";
    get modelName() {
      return this.settings.voice;
    }
@@ -4778,19 +4775,18 @@ import { z as z9 } from "zod";
  var HuggingFaceTextEmbeddingModel = class _HuggingFaceTextEmbeddingModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "huggingface");
-     __publicField(this, "maxValuesPerCall");
-     __publicField(this, "isParallelizable", true);
-     __publicField(this, "contextWindowSize");
-     __publicField(this, "dimensions");
-     __publicField(this, "tokenizer");
-     __publicField(this, "countPromptTokens");
      this.maxValuesPerCall = settings.maxValuesPerCall ?? 1024;
      this.dimensions = settings.dimensions;
    }
+   provider = "huggingface";
    get modelName() {
      return this.settings.model;
    }
+   maxValuesPerCall;
+   isParallelizable = true;
+   contextWindowSize = void 0;
+   dimensions;
+   tokenizer = void 0;
    async callAPI(texts, callOptions) {
      if (texts.length > this.maxValuesPerCall) {
        throw new Error(
@@ -4831,6 +4827,7 @@ var HuggingFaceTextEmbeddingModel = class _HuggingFaceTextEmbeddingModel extends
        options: this.settings.options
      };
    }
+   countPromptTokens = void 0;
    async doEmbedValues(texts, options) {
      const rawResponse = await this.callAPI(texts, options);
      return {
@@ -4851,14 +4848,14 @@ import { z as z10 } from "zod";
  var HuggingFaceTextGenerationModel = class _HuggingFaceTextGenerationModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "huggingface");
-     __publicField(this, "contextWindowSize");
-     __publicField(this, "tokenizer");
-     __publicField(this, "countPromptTokens");
    }
+   provider = "huggingface";
    get modelName() {
      return this.settings.model;
    }
+   contextWindowSize = void 0;
+   tokenizer = void 0;
+   countPromptTokens = void 0;
    async callAPI(prompt, callOptions) {
      const api = this.settings.api ?? new HuggingFaceApiConfiguration();
      const abortSignal = callOptions?.run?.abortSignal;
@@ -5204,8 +5201,8 @@ var BakLLaVA1 = LlamaCppBakLLaVA1PromptTemplate_exports;
  // src/model-provider/llamacpp/LlamaCppTokenizer.ts
  import { z as z12 } from "zod";
  var LlamaCppTokenizer = class {
+   api;
    constructor(api = new LlamaCppApiConfiguration()) {
-     __publicField(this, "api");
      this.api = api;
    }
    async callTokenizeAPI(text13, callOptions) {
@@ -5258,9 +5255,7 @@ var PRIMITIVE_RULES = {
    null: '"null" space'
  };
  var RuleMap = class {
-   constructor() {
-     __publicField(this, "rules", /* @__PURE__ */ new Map());
-   }
+   rules = /* @__PURE__ */ new Map();
    add(name, rule) {
      const escapedName = this.escapeRuleName(name, rule);
      this.rules.set(escapedName, rule);
@@ -5352,16 +5347,16 @@ function visit(schema, name, rules) {
  var LlamaCppCompletionModel = class _LlamaCppCompletionModel extends AbstractModel {
    constructor(settings = {}) {
      super({ settings });
-     __publicField(this, "provider", "llamacpp");
-     __publicField(this, "tokenizer");
      this.tokenizer = new LlamaCppTokenizer(this.settings.api);
    }
+   provider = "llamacpp";
    get modelName() {
      return null;
    }
    get contextWindowSize() {
      return this.settings.contextWindowSize;
    }
+   tokenizer;
    async callAPI(prompt, callOptions, options) {
      const api = this.settings.api ?? new LlamaCppApiConfiguration();
      const responseFormat = options.responseFormat;
@@ -5661,21 +5656,21 @@ import { z as z14 } from "zod";
  var LlamaCppTextEmbeddingModel = class _LlamaCppTextEmbeddingModel extends AbstractModel {
    constructor(settings = {}) {
      super({ settings });
-     __publicField(this, "provider", "llamacpp");
-     __publicField(this, "maxValuesPerCall", 1);
-     __publicField(this, "contextWindowSize");
-     __publicField(this, "tokenizer");
      this.tokenizer = new LlamaCppTokenizer(this.settings.api);
    }
+   provider = "llamacpp";
    get modelName() {
      return null;
    }
+   maxValuesPerCall = 1;
    get isParallelizable() {
      return this.settings.isParallelizable ?? false;
    }
+   contextWindowSize = void 0;
    get dimensions() {
      return this.settings.dimensions;
    }
+   tokenizer;
    async tokenize(text13) {
      return this.tokenizer.tokenize(text13);
    }
@@ -5858,8 +5853,8 @@ import { z as z15 } from "zod";
  var LmntSpeechModel = class _LmntSpeechModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "lmnt");
    }
+   provider = "lmnt";
    get modelName() {
      return this.settings.voice;
    }
@@ -6103,14 +6098,14 @@ var failedMistralCallResponseHandler = createJsonErrorResponseHandler({
  var MistralChatModel = class _MistralChatModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "mistral");
-     __publicField(this, "contextWindowSize");
-     __publicField(this, "tokenizer");
-     __publicField(this, "countPromptTokens");
    }
+   provider = "mistral";
    get modelName() {
      return this.settings.model;
    }
+   contextWindowSize = void 0;
+   tokenizer = void 0;
+   countPromptTokens = void 0;
    async callAPI(prompt, callOptions, options) {
      const api = this.settings.api ?? new MistralApiConfiguration();
      const abortSignal = callOptions.run?.abortSignal;
@@ -6295,18 +6290,18 @@ import { z as z18 } from "zod";
  var MistralTextEmbeddingModel = class _MistralTextEmbeddingModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "mistral");
-     __publicField(this, "maxValuesPerCall", 32);
-     /**
-      * Parallel calls are technically possible, but I have been hitting rate limits and disabled
-      * them for now.
-      */
-     __publicField(this, "isParallelizable", false);
-     __publicField(this, "dimensions", 1024);
    }
+   provider = "mistral";
    get modelName() {
      return this.settings.model;
    }
+   maxValuesPerCall = 32;
+   /**
+    * Parallel calls are technically possible, but I have been hitting rate limits and disabled
+    * them for now.
+    */
+   isParallelizable = false;
+   dimensions = 1024;
    async callAPI(texts, callOptions) {
      if (texts.length > this.maxValuesPerCall) {
        throw new Error(
@@ -6501,14 +6496,14 @@ var failedOllamaCallResponseHandler = createJsonErrorResponseHandler({
  var OllamaChatModel = class _OllamaChatModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "ollama");
-     __publicField(this, "tokenizer");
-     __publicField(this, "countPromptTokens");
-     __publicField(this, "contextWindowSize");
    }
+   provider = "ollama";
    get modelName() {
      return this.settings.model;
    }
+   tokenizer = void 0;
+   countPromptTokens = void 0;
+   contextWindowSize = void 0;
    async callAPI(prompt, callOptions, options) {
      const { responseFormat } = options;
      const api = this.settings.api ?? new OllamaApiConfiguration();
@@ -6617,7 +6612,7 @@ var OllamaChatModel = class _OllamaChatModel extends AbstractModel {
    asToolCallGenerationModel(promptTemplate) {
      return new TextGenerationToolCallModel({
        model: this,
-       format: promptTemplate
+       template: promptTemplate
      });
    }
    asToolCallsOrTextGenerationModel(promptTemplate) {
@@ -6802,13 +6797,13 @@ var Vicuna2 = asOllamaCompletionTextPromptTemplateProvider(VicunaPromptTemplate_
  var OllamaCompletionModel = class _OllamaCompletionModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "ollama");
-     __publicField(this, "tokenizer");
-     __publicField(this, "countPromptTokens");
    }
+   provider = "ollama";
    get modelName() {
      return this.settings.model;
    }
+   tokenizer = void 0;
+   countPromptTokens = void 0;
    get contextWindowSize() {
      return this.settings.contextWindowSize;
    }
@@ -6939,7 +6934,7 @@ var OllamaCompletionModel = class _OllamaCompletionModel extends AbstractModel {
    asToolCallGenerationModel(promptTemplate) {
      return new TextGenerationToolCallModel({
        model: this,
-       format: promptTemplate
+       template: promptTemplate
      });
    }
    asToolCallsOrTextGenerationModel(promptTemplate) {
@@ -7087,12 +7082,12 @@ import { z as z22 } from "zod";
  var OllamaTextEmbeddingModel = class _OllamaTextEmbeddingModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "ollama");
-     __publicField(this, "maxValuesPerCall", 1);
    }
+   provider = "ollama";
    get modelName() {
      return null;
    }
+   maxValuesPerCall = 1;
    get isParallelizable() {
      return this.settings.isParallelizable ?? false;
    }
@@ -7653,11 +7648,11 @@ import { z as z26 } from "zod";
  var AbstractOpenAITextEmbeddingModel = class extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "isParallelizable", true);
    }
    get maxValuesPerCall() {
      return this.settings.maxValuesPerCall ?? 2048;
    }
+   isParallelizable = true;
    async callAPI(texts, callOptions) {
      const api = this.settings.api ?? new OpenAIApiConfiguration();
      const abortSignal = callOptions.run?.abortSignal;
@@ -7718,6 +7713,10 @@ var openAITextEmbeddingResponseSchema = z26.object({

  // src/model-provider/openai/AzureOpenAIApiConfiguration.ts
  var AzureOpenAIApiConfiguration = class extends AbstractApiConfiguration {
+   resourceName;
+   deploymentId;
+   apiVersion;
+   fixedHeaderValue;
    constructor({
      resourceName,
      deploymentId,
@@ -7727,10 +7726,6 @@ var AzureOpenAIApiConfiguration = class extends AbstractApiConfiguration {
      throttle
    }) {
      super({ retry, throttle });
-     __publicField(this, "resourceName");
-     __publicField(this, "deploymentId");
-     __publicField(this, "apiVersion");
-     __publicField(this, "fixedHeaderValue");
      this.resourceName = resourceName;
      this.deploymentId = deploymentId;
      this.apiVersion = apiVersion;
@@ -7929,16 +7924,16 @@ function chat12() {

  // src/model-provider/openai/OpenAIChatFunctionCallObjectGenerationModel.ts
  var OpenAIChatFunctionCallObjectGenerationModel = class _OpenAIChatFunctionCallObjectGenerationModel {
+   model;
+   fnName;
+   fnDescription;
+   promptTemplate;
    constructor({
      model,
      fnName,
      fnDescription,
      promptTemplate
    }) {
-     __publicField(this, "model");
-     __publicField(this, "fnName");
-     __publicField(this, "fnDescription");
-     __publicField(this, "promptTemplate");
      this.model = model;
      this.fnName = fnName;
      this.fnDescription = fnDescription;
@@ -8080,9 +8075,9 @@ var TikTokenTokenizer = class {
     * Get a TikToken tokenizer for a specific model or encoding.
     */
    constructor(settings) {
-     __publicField(this, "tiktoken");
      this.tiktoken = new Tiktoken(getTiktokenBPE(settings.model));
    }
+   tiktoken;
    async tokenize(text13) {
      return this.tiktoken.encode(text13);
    }
@@ -8296,18 +8291,18 @@ var calculateOpenAIChatCostInMillicents = ({
  var OpenAIChatModel = class _OpenAIChatModel extends AbstractOpenAIChatModel {
    constructor(settings) {
      super(settings);
-     __publicField(this, "provider", "openai");
-     __publicField(this, "contextWindowSize");
-     __publicField(this, "tokenizer");
      const modelInformation = getOpenAIChatModelInformation(this.settings.model);
      this.tokenizer = new TikTokenTokenizer({
        model: modelInformation.baseModel
      });
      this.contextWindowSize = modelInformation.contextWindowSize;
    }
+   provider = "openai";
    get modelName() {
      return this.settings.model;
    }
+   contextWindowSize;
+   tokenizer;
    /**
     * Counts the prompt tokens required for the messages. This includes the message base tokens
     * and the prompt base tokens.
@@ -8409,9 +8404,6 @@ var calculateOpenAICompletionCostInMillicents = ({
  var OpenAICompletionModel = class _OpenAICompletionModel extends AbstractOpenAICompletionModel {
    constructor(settings) {
      super(settings);
-     __publicField(this, "provider", "openai");
-     __publicField(this, "contextWindowSize");
-     __publicField(this, "tokenizer");
      const modelInformation = getOpenAICompletionModelInformation(
        this.settings.model
      );
@@ -8420,9 +8412,12 @@ var OpenAICompletionModel = class _OpenAICompletionModel extends AbstractOpenAIC
      });
      this.contextWindowSize = modelInformation.contextWindowSize;
    }
+   provider = "openai";
    get modelName() {
      return this.settings.model;
    }
+   contextWindowSize;
+   tokenizer;
    async countPromptTokens(input) {
      return countTokens(this.tokenizer, input);
    }
@@ -8547,8 +8542,8 @@ var calculateOpenAIImageGenerationCostInMillicents = ({
  var OpenAIImageGenerationModel = class _OpenAIImageGenerationModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "openai");
    }
+   provider = "openai";
    get modelName() {
      return this.settings.model;
    }
@@ -8669,8 +8664,8 @@ var calculateOpenAISpeechCostInMillicents = ({
  var OpenAISpeechModel = class _OpenAISpeechModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "openai");
    }
+   provider = "openai";
    get voice() {
      return this.settings.voice;
    }
@@ -8755,17 +8750,17 @@ var calculateOpenAIEmbeddingCostInMillicents = ({
  var OpenAITextEmbeddingModel = class _OpenAITextEmbeddingModel extends AbstractOpenAITextEmbeddingModel {
    constructor(settings) {
      super(settings);
-     __publicField(this, "provider", "openai");
-     __publicField(this, "dimensions");
-     __publicField(this, "tokenizer");
-     __publicField(this, "contextWindowSize");
      this.tokenizer = new TikTokenTokenizer({ model: this.modelName });
      this.contextWindowSize = OPENAI_TEXT_EMBEDDING_MODELS[this.modelName].contextWindowSize;
      this.dimensions = this.settings.dimensions ?? OPENAI_TEXT_EMBEDDING_MODELS[this.modelName].dimensions;
    }
+   provider = "openai";
    get modelName() {
      return this.settings.model;
    }
+   dimensions;
+   tokenizer;
+   contextWindowSize;
    async countTokens(input) {
      return countTokens(this.tokenizer, input);
    }
@@ -8829,8 +8824,8 @@ var calculateOpenAITranscriptionCostInMillicents = ({
  var OpenAITranscriptionModel = class _OpenAITranscriptionModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "openai");
    }
+   provider = "openai";
    get modelName() {
      return this.settings.model;
    }
@@ -9010,17 +9005,14 @@ var FireworksAIApiConfiguration = class extends BaseUrlApiConfigurationWithDefau
        path: "/inference/v1"
      }
    });
-     __publicField(this, "provider", "openaicompatible-fireworksai");
    }
+   provider = "openaicompatible-fireworksai";
  };

  // src/model-provider/openai-compatible/OpenAICompatibleChatModel.ts
  var OpenAICompatibleChatModel = class _OpenAICompatibleChatModel extends AbstractOpenAIChatModel {
    constructor(settings) {
      super(settings);
-     __publicField(this, "contextWindowSize");
-     __publicField(this, "tokenizer");
-     __publicField(this, "countPromptTokens");
    }
    get provider() {
      return this.settings.provider ?? this.settings.api.provider ?? "openaicompatible";
@@ -9028,6 +9020,9 @@ var OpenAICompatibleChatModel = class _OpenAICompatibleChatModel extends Abstrac
    get modelName() {
      return this.settings.model;
    }
+   contextWindowSize = void 0;
+   tokenizer = void 0;
+   countPromptTokens = void 0;
    get settingsForEvent() {
      const eventSettingProperties = [
        ...textGenerationModelProperties,
@@ -9090,9 +9085,6 @@ var OpenAICompatibleChatModel = class _OpenAICompatibleChatModel extends Abstrac
  var OpenAICompatibleCompletionModel = class _OpenAICompatibleCompletionModel extends AbstractOpenAICompletionModel {
    constructor(settings) {
      super(settings);
-     __publicField(this, "contextWindowSize");
-     __publicField(this, "tokenizer");
-     __publicField(this, "countPromptTokens");
    }
    get provider() {
      return this.settings.provider ?? this.settings.api.provider ?? "openaicompatible";
@@ -9100,6 +9092,9 @@ var OpenAICompatibleCompletionModel = class _OpenAICompatibleCompletionModel ext
    get modelName() {
      return this.settings.model;
    }
+   contextWindowSize = void 0;
+   tokenizer = void 0;
+   countPromptTokens = void 0;
    get settingsForEvent() {
      const eventSettingProperties = [
        ...textGenerationModelProperties,
@@ -9203,8 +9198,8 @@ var PerplexityApiConfiguration = class extends BaseUrlApiConfigurationWithDefaul
        path: ""
      }
    });
-     __publicField(this, "provider", "openaicompatible-perplexity");
    }
+   provider = "openaicompatible-perplexity";
  };

  // src/model-provider/openai-compatible/TogetherAIApiConfiguration.ts
@@ -9226,8 +9221,8 @@ var TogetherAIApiConfiguration = class extends BaseUrlApiConfigurationWithDefaul
        path: "/v1"
      }
    });
-     __publicField(this, "provider", "openaicompatible-togetherai");
    }
+   provider = "openaicompatible-togetherai";
  };

  // src/model-provider/openai-compatible/OpenAICompatibleFacade.ts
@@ -9303,8 +9298,8 @@ function mapBasicPromptToStabilityFormat() {
  var StabilityImageGenerationModel = class _StabilityImageGenerationModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "stability");
    }
+   provider = "stability";
    get modelName() {
      return this.settings.model;
    }
@@ -9424,9 +9419,9 @@ import { z as z31 } from "zod";
  var WhisperCppTranscriptionModel = class _WhisperCppTranscriptionModel extends AbstractModel {
    constructor(settings) {
      super({ settings });
-     __publicField(this, "provider", "whispercpp");
-     __publicField(this, "modelName", null);
    }
+   provider = "whispercpp";
+   modelName = null;
    async doTranscribe({
      audioData,
      mimeType
@@ -9652,6 +9647,9 @@ async function splitTextChunk(splitFunction, input) {

  // src/tool/NoSuchToolDefinitionError.ts
  var NoSuchToolDefinitionError = class extends Error {
+   toolName;
+   cause;
+   parameters;
    constructor({
      toolName,
      parameters
@@ -9659,9 +9657,6 @@ var NoSuchToolDefinitionError = class extends Error {
      super(
        `Tool definition '${toolName}' not found. Parameters: ${JSON.stringify(parameters)}.`
      );
-     __publicField(this, "toolName");
-     __publicField(this, "cause");
-     __publicField(this, "parameters");
      this.name = "NoSuchToolDefinitionError";
      this.toolName = toolName;
      this.parameters = parameters;
@@ -9680,6 +9675,28 @@ var NoSuchToolDefinitionError = class extends Error {

  // src/tool/Tool.ts
  var Tool = class {
+   /**
+    * The name of the tool.
+    * Should be understandable for language models and unique among the tools that they know.
+    */
+   name;
+   /**
+    * A optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
+    */
+   description;
+   /**
+    * The schema of the input that the tool expects. The language model will use this to generate the input.
+    * Use descriptions to make the input understandable for the language model.
+    */
+   parameters;
+   /**
+    * An optional schema of the output that the tool produces. This will be used to validate the output.
+    */
+   returnType;
+   /**
+    * The actual execution function of the tool.
+    */
+   execute;
    constructor({
      name,
      description,
@@ -9687,28 +9704,6 @@ var Tool = class {
      returnType,
      execute
    }) {
-     /**
-      * The name of the tool.
-      * Should be understandable for language models and unique among the tools that they know.
-      */
-     __publicField(this, "name");
-     /**
-      * A description of what the tool does. Will be used by the language model to decide whether to use the tool.
-      */
-     __publicField(this, "description");
-     /**
-      * The schema of the input that the tool expects. The language model will use this to generate the input.
-      * Use descriptions to make the input understandable for the language model.
-      */
-     __publicField(this, "parameters");
-     /**
-      * An optional schema of the output that the tool produces. This will be used to validate the output.
-      */
-     __publicField(this, "returnType");
-     /**
-      * The actual execution function of the tool.
-      */
-     __publicField(this, "execute");
      this.name = name;
      this.description = description;
      this.parameters = parameters;
@@ -9717,8 +9712,36 @@ var Tool = class {
    }
  };

+ // src/tool/ObjectGeneratorTool.ts
+ var ObjectGeneratorTool = class extends Tool {
+   constructor({
+     name = "object-generator",
+     // eslint-disable-line @typescript-eslint/no-explicit-any
+     description,
+     model,
+     parameters,
+     objectSchema,
+     prompt
+   }) {
+     super({
+       name,
+       description,
+       parameters,
+       execute: async (input, options) => generateObject({
+         model,
+         schema: objectSchema,
+         prompt: prompt(input),
+         ...options
+       })
+     });
+   }
+ };
+
  // src/tool/ToolCallArgumentsValidationError.ts
  var ToolCallArgumentsValidationError = class extends Error {
+   toolName;
+   cause;
+   args;
    constructor({
      toolName,
      args,
@@ -9729,9 +9752,6 @@ var ToolCallArgumentsValidationError = class extends Error {
  Arguments: ${JSON.stringify(args)}.
  Error message: ${getErrorMessage(cause)}`
      );
-     __publicField(this, "toolName");
-     __publicField(this, "cause");
-     __publicField(this, "args");
      this.name = "ToolCallArgumentsValidationError";
      this.toolName = toolName;
      this.cause = cause;
@@ -9751,14 +9771,14 @@ Error message: ${getErrorMessage(cause)}`

  // src/tool/ToolCallError.ts
  var ToolCallError = class extends Error {
+   toolCall;
+   cause;
    constructor({
      cause,
      toolCall,
      message = getErrorMessage(cause)
    }) {
      super(`Tool call for tool '${toolCall.name}' failed: ${message}`);
-     __publicField(this, "toolCall");
-     __publicField(this, "cause");
      this.name = "ToolCallError";
      this.toolCall = toolCall;
      this.cause = cause;
@@ -9776,12 +9796,12 @@ var ToolCallError = class extends Error {

  // src/tool/ToolCallGenerationError.ts
  var ToolCallGenerationError = class extends Error {
+   toolName;
+   cause;
    constructor({ toolName, cause }) {
      super(
        `Tool call generation failed for tool '${toolName}'. Error message: ${getErrorMessage(cause)}`
      );
-     __publicField(this, "toolName");
-     __publicField(this, "cause");
      this.name = "ToolCallsGenerationError";
      this.toolName = toolName;
      this.cause = cause;
@@ -9799,6 +9819,9 @@ var ToolCallGenerationError = class extends Error {

  // src/tool/ToolExecutionError.ts
  var ToolExecutionError = class extends Error {
+   toolName;
+   input;
+   cause;
    constructor({
      toolName,
      input,
@@ -9806,9 +9829,6 @@ var ToolExecutionError = class extends Error {
      message = getErrorMessage(cause)
    }) {
      super(`Error executing tool '${toolName}': ${message}`);
-     __publicField(this, "toolName");
-     __publicField(this, "input");
-     __publicField(this, "cause");
      this.name = "ToolExecutionError";
      this.toolName = toolName;
      this.input = input;
@@ -10030,29 +10050,63 @@ async function generateToolCall({

  // src/tool/generate-tool-call/jsonToolCallPrompt.ts
  import { nanoid } from "nanoid";
+ var DEFAULT_TOOL_PROMPT = (tool) => [
+   `You are calling the function "${tool.name}".`,
+   tool.description != null ? `Function description: ${tool.description}` : null,
+   `Function parameters JSON schema: ${JSON.stringify(
+     tool.parameters.getJsonSchema()
+   )}`,
+   ``,
+   `You MUST answer with a JSON object that matches the JSON schema above.`
+ ].filter(Boolean).join("\n");
  var jsonToolCallPrompt = {
-   text() {
+   text({
+     toolPrompt
+   } = {}) {
      return {
-       createPrompt(instruction13, tool) {
+       createPrompt(prompt, tool) {
          return {
-           system: [
-             `You are calling a function "${tool.name}".`,
-             tool.description != null ? ` Function description: ${tool.description}` : null,
-             ` Function parameters JSON schema: ${JSON.stringify(
-               tool.parameters.getJsonSchema()
-             )}`,
-             ``,
-             `You MUST answer with a JSON object matches the above schema for the arguments.`
-           ].filter(Boolean).join("\n"),
-           instruction: instruction13
+           system: createSystemPrompt2({ tool, toolPrompt }),
+           instruction: prompt
          };
        },
-       extractToolCall(response) {
-         return { id: nanoid(), args: parseJSON({ text: response }) };
-       }
+       extractToolCall,
+       withJsonOutput: ({ model, schema }) => model.withJsonOutput(schema)
+     };
+   },
+   instruction({
+     toolPrompt
+   } = {}) {
+     return {
+       createPrompt(prompt, tool) {
+         return {
+           system: createSystemPrompt2({
+             originalSystemPrompt: prompt.system,
+             tool,
+             toolPrompt
+           }),
+           instruction: prompt.instruction
+         };
+       },
+       extractToolCall,
+       withJsonOutput: ({ model, schema }) => model.withJsonOutput(schema)
      };
    }
  };
+ function createSystemPrompt2({
+   originalSystemPrompt,
+   toolPrompt = DEFAULT_TOOL_PROMPT,
+   tool
+ }) {
+   return [
+     originalSystemPrompt,
+     originalSystemPrompt != null ? "" : null,
+     toolPrompt(tool)
+   ].filter(Boolean).join("\n");
+ }
+ function extractToolCall(response) {
+   return { id: nanoid(), args: parseJSON({ text: response }) };
+ }

  // src/tool/generate-tool-calls/generateToolCalls.ts
  async function generateToolCalls({
@@ -10240,6 +10294,9 @@ function createEventSourceStream(events) {

  // src/vector-index/VectorIndexRetriever.ts
  var VectorIndexRetriever = class _VectorIndexRetriever {
+   vectorIndex;
+   embeddingModel;
+   settings;
    constructor({
      vectorIndex,
      embeddingModel,
@@ -10247,9 +10304,6 @@ var VectorIndexRetriever = class _VectorIndexRetriever {
      similarityThreshold,
      filter
    }) {
-     __publicField(this, "vectorIndex");
-     __publicField(this, "embeddingModel");
-     __publicField(this, "settings");
      this.vectorIndex = vectorIndex;
      this.embeddingModel = embeddingModel;
      this.settings = {
@@ -10294,9 +10348,6 @@ var jsonDataSchema = zodSchema(
    )
  );
  var MemoryVectorIndex = class _MemoryVectorIndex {
-   constructor() {
-     __publicField(this, "entries", /* @__PURE__ */ new Map());
-   }
    static async deserialize({
      serializedData,
      schema
@@ -10316,6 +10367,7 @@ var MemoryVectorIndex = class _MemoryVectorIndex {
      );
      return vectorIndex;
    }
+   entries = /* @__PURE__ */ new Map();
    async upsertMany(data) {
      for (const entry of data) {
        this.entries.set(entry.id, entry);
@@ -10437,6 +10489,7 @@ export {
    OPENAI_TRANSCRIPTION_MODELS,
    ObjectFromTextGenerationModel,
    ObjectFromTextStreamingModel,
+   ObjectGeneratorTool,
    ObjectParseError,
    ObjectStreamFromResponse,
    ObjectStreamResponse,