@ax-llm/ax 11.0.26 → 11.0.28

package/index.js CHANGED
@@ -37,6 +37,9 @@ var axSpanAttributes = {
  DB_QUERY_RESULT_VECTOR: "db.query.result.vector",
  DB_QUERY_RESULT_DOCUMENT: "db.query.result.document"
  };
+ var axSpanEvents = {
+ LLM_PROMPT: "gen_ai.prompt"
+ };
  var AxLLMRequestTypeValues = /* @__PURE__ */ ((AxLLMRequestTypeValues2) => {
  AxLLMRequestTypeValues2["COMPLETION"] = "completion";
  AxLLMRequestTypeValues2["CHAT"] = "chat";
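The new axSpanEvents constant adds a "gen_ai.prompt" span-event name next to the existing gen_ai span attributes. A minimal sketch of how such an event could be recorded with @opentelemetry/api (which the bundle already imports); the helper name and attribute payload are illustrative assumptions, not code from this package:

import { trace } from "@opentelemetry/api";

// Hypothetical helper: attach the prompt to the active span under the new event name.
const LLM_PROMPT_EVENT = "gen_ai.prompt"; // mirrors axSpanEvents.LLM_PROMPT

function recordPromptEvent(prompt: string): void {
  const span = trace.getActiveSpan();
  span?.addEvent(LLM_PROMPT_EVENT, { "gen_ai.prompt": prompt });
}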
@@ -55,7 +58,7 @@ var AxSpanKindValues = /* @__PURE__ */ ((AxSpanKindValues2) => {

  // util/apicall.ts
  import {
- ReadableStream,
+ ReadableStream as ReadableStream2,
  TextDecoderStream as TextDecoderStreamNative,
  TransformStream as TransformStream3
  } from "stream/web";
@@ -424,7 +427,7 @@ var apiCall = async (api, json) => {
  }
  });
  let closed = false;
- return new ReadableStream({
+ return new ReadableStream2({
  start(controller2) {
  const reader = res.body.pipeThrough(new textDecoderStream()).pipeThrough(new SSEParser()).pipeThrough(trackingStream).getReader();
  async function read() {
@@ -700,6 +703,12 @@ var axBaseAIDefaultConfig = () => structuredClone({
  topK: 40,
  topP: 0.9
  });
+ var axBaseAIDefaultCreativeConfig = () => structuredClone({
+ maxTokens: 2e3,
+ temperature: 0.4,
+ topP: 0.7,
+ frequencyPenalty: 0.2
+ });
  var AxBaseAI = class {
  constructor(aiImpl, {
  name,
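axBaseAIDefaultCreativeConfig is a second shared preset: where the default config uses topK: 40 and topP: 0.9, the creative preset sets maxTokens: 2000 (2e3), temperature: 0.4, topP: 0.7 and frequencyPenalty: 0.2. Because it goes through structuredClone, every call returns an independent object, so a caller can tweak its copy without touching the shared defaults. A minimal sketch, assuming the factory is in scope:

// Each call returns a fresh copy of the preset.
const cfg = axBaseAIDefaultCreativeConfig();
cfg.temperature = 0.6; // adjust just this copy

const untouched = axBaseAIDefaultCreativeConfig();
console.log(untouched.temperature); // still 0.4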
@@ -1741,6 +1750,19 @@ var axAIOpenAIDefaultConfig = () => structuredClone({
  embedModel: "text-embedding-3-small" /* TextEmbedding3Small */,
  ...axBaseAIDefaultConfig()
  });
+ var axAIOpenAIBestConfig = () => structuredClone({
+ ...axAIOpenAIDefaultConfig(),
+ model: "gpt-4o" /* GPT4O */
+ });
+ var axAIOpenAICreativeConfig = () => structuredClone({
+ model: "gpt-4o" /* GPT4O */,
+ embedModel: "text-embedding-3-small" /* TextEmbedding3Small */,
+ ...axBaseAIDefaultCreativeConfig()
+ });
+ var axAIOpenAIFastConfig = () => ({
+ ...axAIOpenAIDefaultConfig(),
+ model: "gpt-4o-mini" /* GPT4OMini */
+ });
  var AxAIOpenAIImpl = class {
  constructor(config, streamingUsage, dimensions) {
  this.config = config;
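The OpenAI provider gains three ready-made presets: axAIOpenAIBestConfig and axAIOpenAICreativeConfig pin the model to gpt-4o, while axAIOpenAIFastConfig pins gpt-4o-mini on top of the default config. A hedged usage sketch; the { name, apiKey, config } constructor shape follows the package's existing AxAI API, but treat the exact option names and the env var as assumptions:

import { AxAI, axAIOpenAIFastConfig } from "@ax-llm/ax";

// Pick the preset that matches the latency/quality trade-off you want.
const llm = new AxAI({
  name: "openai",
  apiKey: process.env.OPENAI_APIKEY ?? "",
  config: axAIOpenAIFastConfig(), // gpt-4o-mini over the default settings
});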
@@ -2046,6 +2068,9 @@ var AxAIOpenAI = class extends AxAIOpenAIBase {

  // ai/azure-openai/api.ts
  var axAIAzureOpenAIDefaultConfig = axAIOpenAIDefaultConfig;
+ var axAIAzureOpenAICreativeConfig = axAIOpenAICreativeConfig;
+ var axAIAzureOpenAIFastConfig = axAIOpenAIFastConfig;
+ var axAIAzureOpenAIBestConfig = axAIOpenAIBestConfig;
  var AxAIAzureOpenAI = class extends AxAIOpenAIBase {
  constructor({
  apiKey,
@@ -2162,6 +2187,11 @@ var axAICohereDefaultConfig = () => structuredClone({
  embedModel: "embed-english-v3.0" /* EmbedEnglishV30 */,
  ...axBaseAIDefaultConfig()
  });
+ var axAICohereCreativeConfig = () => structuredClone({
+ model: "command-r" /* CommandR */,
+ embedModel: "embed-english-v3.0" /* EmbedEnglishV30 */,
+ ...axBaseAIDefaultCreativeConfig()
+ });
  var AxAICohereImpl = class {
  constructor(config) {
  this.config = config;
@@ -2438,6 +2468,10 @@ var axAIDeepSeekDefaultConfig = () => structuredClone({
  model: "deepseek-chat" /* DeepSeekChat */,
  ...axBaseAIDefaultConfig()
  });
+ var axAIDeepSeekCodeConfig = () => structuredClone({
+ model: "deepseek-coder" /* DeepSeekCoder */,
+ ...axBaseAIDefaultCreativeConfig()
+ });
  var AxAIDeepSeek = class extends AxAIOpenAIBase {
  constructor({
  apiKey,
@@ -2585,6 +2619,12 @@ var axAIGoogleGeminiDefaultConfig = () => structuredClone({
  safetySettings,
  ...axBaseAIDefaultConfig()
  });
+ var axAIGoogleGeminiDefaultCreativeConfig = () => structuredClone({
+ model: "gemini-2.0-flash" /* Gemini20Flash */,
+ embedModel: "text-embedding-004" /* TextEmbedding004 */,
+ safetySettings,
+ ...axBaseAIDefaultCreativeConfig()
+ });
  var AxAIGoogleGeminiImpl = class {
  constructor(config, isVertex, endpointId, apiKey, options) {
  this.config = config;
@@ -3096,6 +3136,10 @@ var axAIHuggingFaceDefaultConfig = () => structuredClone({
  model: "meta-llama/Llama-2-70b-chat-hf" /* MetaLlama270BChatHF */,
  ...axBaseAIDefaultConfig()
  });
+ var axAIHuggingFaceCreativeConfig = () => structuredClone({
+ model: "meta-llama/Llama-2-70b-chat-hf" /* MetaLlama270BChatHF */,
+ ...axBaseAIDefaultCreativeConfig()
+ });
  var AxAIHuggingFaceImpl = class {
  constructor(config) {
  this.config = config;
@@ -3277,6 +3321,10 @@ var axAIMistralDefaultConfig = () => structuredClone({
  model: "mistral-small-latest" /* MistralSmall */,
  ...axBaseAIDefaultConfig()
  });
+ var axAIMistralBestConfig = () => structuredClone({
+ ...axAIMistralDefaultConfig(),
+ model: "mistral-large-latest" /* MistralLarge */
+ });
  var AxAIMistral = class extends AxAIOpenAIBase {
  constructor({
  apiKey,
@@ -3309,6 +3357,11 @@ var axAIOllamaDefaultConfig = () => structuredClone({
  model: "nous-hermes2",
  embedModel: "all-minilm"
  });
+ var axAIOllamaDefaultCreativeConfig = () => structuredClone({
+ ...axBaseAIDefaultCreativeConfig(),
+ model: "nous-hermes2",
+ embedModel: "all-minilm"
+ });
  var AxAIOllama = class extends AxAIOpenAIBase {
  constructor({
  apiKey = "not-set",
@@ -3368,6 +3421,18 @@ var axAIRekaDefaultConfig = () => structuredClone({
  model: "reka-core" /* RekaCore */,
  ...axBaseAIDefaultConfig()
  });
+ var axAIRekaBestConfig = () => structuredClone({
+ ...axAIRekaDefaultConfig(),
+ model: "reka-core" /* RekaCore */
+ });
+ var axAIRekaCreativeConfig = () => structuredClone({
+ model: "reka-core" /* RekaCore */,
+ ...axBaseAIDefaultCreativeConfig()
+ });
+ var axAIRekaFastConfig = () => ({
+ ...axAIRekaDefaultConfig(),
+ model: "reka-flash" /* RekaFlash */
+ });
  var AxAIRekaImpl = class {
  constructor(config) {
  this.config = config;
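Reka completes the pattern: most providers now ship Best/Creative/Fast (or Code) factories built from their default config plus axBaseAIDefaultCreativeConfig. One detail worth noting: the Fast variants (axAIOpenAIFastConfig, axAIRekaFastConfig) skip the outer structuredClone and just spread the default factory's result; since that result is itself a fresh clone, each call still yields an independent object. Illustrative sketch, assuming the factories are in scope:

const fast = axAIRekaFastConfig();
fast.maxTokens = 500; // changes this copy only; later calls are unaffected

const creative = axAIRekaCreativeConfig(); // deep copy via structuredClone
console.log(creative.temperature); // 0.4, from axBaseAIDefaultCreativeConfig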
@@ -3668,7 +3733,7 @@ var AxAI = class {
  };

  // dsp/generate.ts
- import { ReadableStream as ReadableStream2 } from "node:stream/web";
+ import { ReadableStream as ReadableStream3 } from "node:stream/web";
  import { SpanKind as SpanKind2 } from "@opentelemetry/api";

  // ai/util.ts
@@ -4572,16 +4637,19 @@ var validateValue = (field, value) => {
  };
  function mergeProgramUsage(usages) {
  const usageMap = {};
- usages.forEach((usage) => {
+ for (const usage of usages) {
  const key = `${usage.ai}:${usage.model}`;
  if (!usageMap[key]) {
  usageMap[key] = { ...usage };
- return;
+ continue;
  }
- usageMap[key].promptTokens += usage.promptTokens;
- usageMap[key].completionTokens += usage.completionTokens;
- usageMap[key].totalTokens += usage.totalTokens;
- });
+ const currentUsage = usageMap[key];
+ if (currentUsage) {
+ currentUsage.promptTokens += usage.promptTokens;
+ currentUsage.completionTokens += usage.completionTokens;
+ currentUsage.totalTokens += usage.totalTokens;
+ }
+ }
  return Object.values(usageMap);
  }
  var parseMarkdownList = (input) => {
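mergeProgramUsage keeps its behavior but swaps forEach for a for...of loop and guards the map lookup: usage entries are keyed by `${ai}:${model}` and their token counts are summed. A small illustrative call (the field values are made up):

const merged = mergeProgramUsage([
  { ai: "openai", model: "gpt-4o-mini", promptTokens: 10, completionTokens: 5, totalTokens: 15 },
  { ai: "openai", model: "gpt-4o-mini", promptTokens: 20, completionTokens: 5, totalTokens: 25 },
  { ai: "openai", model: "gpt-4o", promptTokens: 7, completionTokens: 3, totalTokens: 10 },
]);
// => two entries: the gpt-4o-mini entry totals 30/10/40, the gpt-4o entry stays 7/3/10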
@@ -4602,7 +4670,6 @@ var parseMarkdownList = (input) => {
  } else if (numberedListRegex.test(trimmedLine)) {
  list.push(trimmedLine.replace(numberedListRegex, "").trim());
  } else if (list.length === 0) {
- continue;
  } else {
  throw new Error("Could not parse markdown list: mixed content detected");
  }
@@ -4675,12 +4742,72 @@ function matchesContent(content, prefix, startIndex = 0, prefixCache = globalPre
  );
  for (let i = 0; i < prefixes.length - 1; i++) {
  const partialPrefix = prefixes[i];
- if (contentEnd.endsWith(partialPrefix)) {
+ if (partialPrefix && contentEnd.endsWith(partialPrefix)) {
  return -2;
  }
  }
  return -1;
  }
+ var formatTime = (ms) => {
+ const seconds = Math.floor(ms / 1e3);
+ if (seconds < 60) return `${seconds}s`;
+ const minutes = Math.floor(seconds / 60);
+ const remainingSeconds = seconds % 60;
+ if (minutes < 60) return `${minutes}m ${remainingSeconds}s`;
+ const hours = Math.floor(minutes / 60);
+ const remainingMinutes = minutes % 60;
+ return `${hours}h ${remainingMinutes}m ${remainingSeconds}s`;
+ };
+ var calculateETA = (current, total, elapsedMs) => {
+ if (current === 0) return "calculating...";
+ const msPerItem = elapsedMs / current;
+ const remainingItems = total - current;
+ const etaMs = msPerItem * remainingItems;
+ return formatTime(etaMs);
+ };
+ var updateDetailedProgress = (roundIndex, current, total, elapsedTime, example, stats, configInfo, result, error) => {
+ process.stdout.write("\r\x1B[K");
+ const percentage = (current / total * 100).toFixed(1);
+ const formattedTime = formatTime(elapsedTime);
+ const itemsPerSecond = elapsedTime > 0 ? (current / elapsedTime * 1e3).toFixed(2) : "0.00";
+ const eta = calculateETA(current, total, elapsedTime);
+ let output = `Round ${roundIndex + 1}/${configInfo.maxRounds}: ${current}/${total} (${percentage}%) [${formattedTime}, ${itemsPerSecond} it/s, ETA: ${eta}]`;
+ const successRate = stats.totalCalls > 0 ? stats.successfulDemos / stats.totalCalls * 100 : 0;
+ output += ` | Success: ${stats.successfulDemos}/${stats.totalCalls} (${successRate.toFixed(1)}%)`;
+ if (configInfo.verboseMode || configInfo.debugMode) {
+ if (configInfo.costMonitoring) {
+ output += `
+ Tokens: ~${stats.estimatedTokenUsage.toLocaleString()} total`;
+ }
+ output += `
+ Batch: ${Math.floor(current / configInfo.batchSize) + 1}/${Math.ceil(total / configInfo.batchSize)}`;
+ if (configInfo.earlyStoppingPatience > 0 && stats.earlyStopping) {
+ output += `
+ Best round: ${stats.earlyStopping.bestScoreRound + 1}, Patience: ${configInfo.earlyStoppingPatience}`;
+ }
+ }
+ if (configInfo.debugMode) {
+ const exampleKeys = Object.keys(example).map((k) => {
+ const valueStr = JSON.stringify(example[k]);
+ const truncated = valueStr.length > 30 ? `${valueStr.substring(0, 30)}...` : valueStr;
+ return `${k}: ${truncated}`;
+ }).join(", ");
+ output += `
+ Example: {${exampleKeys}}`;
+ if (error) {
+ output += `
+ ERROR: ${error.message}`;
+ } else if (result) {
+ const resultStr = JSON.stringify(result);
+ const truncatedResult = resultStr.length > 50 ? `${resultStr.substring(0, 50)}...` : resultStr;
+ output += `
+ Result: ${truncatedResult}`;
+ }
+ output += `
+ Temperature: ${(0.7 + 1e-3 * current).toFixed(3)}`;
+ }
+ console.log(output);
+ };

  // dsp/program.ts
  var AxProgramWithSignature = class {
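The new formatTime, calculateETA and updateDetailedProgress helpers drive the richer optimizer progress output introduced further down. Their arithmetic is straightforward; a few example values, derived directly from the code above:

formatTime(42_000);            // "42s"
formatTime(125_000);           // "2m 5s"
formatTime(3_725_000);         // "1h 2m 5s"

// 40 of 100 items done in 80s => 2s per item, 60 items left
calculateETA(40, 100, 80_000); // "2m 0s"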
@@ -4922,7 +5049,7 @@ ${outputFields}`);
  demos
  }) => {
  const renderedExamples = examples ? [
- { type: "text", text: "## Examples:\n" },
+ { type: "text", text: "\n\n## Examples\n" },
  ...this.renderExamples(examples)
  ] : [];
  const renderedDemos = demos ? this.renderDemos(demos) : [];
@@ -5005,6 +5132,9 @@ ${outputFields}`);
  );
  }
  const renderedItem = [...renderedInputItem, ...renderedOutputItem];
+ if (index > 0 && renderedItem.length > 0 && renderedItem[0]?.type === "text") {
+ list.push({ type: "text", text: "---\n\n" });
+ }
  renderedItem.forEach((v) => {
  if ("text" in v) {
  v.text = v.text + "\n";
@@ -5188,10 +5318,7 @@ var processValue = (field, value) => {
  if (typeof value === "string") {
  return value;
  }
- if (Array.isArray(value)) {
- return value;
- }
- return JSON.stringify(value);
+ return JSON.stringify(value, null, 2);
  };
  var toFieldType = (type) => {
  const baseType = (() => {
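processValue previously returned arrays untouched and compacted everything else with JSON.stringify(value); now any non-string value is rendered as indented JSON. Illustrative calls (field stands for whichever signature field is being rendered):

processValue(field, "plain text"); // unchanged: returned as-is
processValue(field, [1, 2, 3]);    // now "[\n  1,\n  2,\n  3\n]" instead of the raw array
processValue(field, { a: 1 });     // now "{\n  \"a\": 1\n}" instead of '{"a":1}'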
@@ -5997,7 +6124,7 @@ var AxGen = class extends AxProgramWithSignature {
  mem,
  options
  });
- if (res instanceof ReadableStream2) {
+ if (res instanceof ReadableStream3) {
  yield* this.processStreamingResponse({
  ai,
  model,
@@ -6932,60 +7059,147 @@ function validateModels2(services) {
6932
7059
  // dsp/optimize.ts
6933
7060
  var AxBootstrapFewShot = class {
6934
7061
  ai;
7062
+ teacherAI;
6935
7063
  program;
6936
7064
  examples;
6937
7065
  maxRounds;
6938
7066
  maxDemos;
6939
7067
  maxExamples;
7068
+ batchSize;
7069
+ earlyStoppingPatience;
7070
+ costMonitoring;
7071
+ maxTokensPerGeneration;
7072
+ verboseMode;
7073
+ debugMode;
6940
7074
  traces = [];
7075
+ stats = {
7076
+ totalCalls: 0,
7077
+ successfulDemos: 0,
7078
+ estimatedTokenUsage: 0,
7079
+ earlyStopped: false
7080
+ };
6941
7081
  constructor({
6942
7082
  ai,
6943
7083
  program,
6944
7084
  examples = [],
6945
7085
  options
6946
7086
  }) {
6947
- if (examples.length == 0) {
7087
+ if (examples.length === 0) {
6948
7088
  throw new Error("No examples found");
6949
7089
  }
6950
7090
  this.maxRounds = options?.maxRounds ?? 3;
6951
7091
  this.maxDemos = options?.maxDemos ?? 4;
6952
7092
  this.maxExamples = options?.maxExamples ?? 16;
7093
+ this.batchSize = options?.batchSize ?? 1;
7094
+ this.earlyStoppingPatience = options?.earlyStoppingPatience ?? 0;
7095
+ this.costMonitoring = options?.costMonitoring ?? false;
7096
+ this.maxTokensPerGeneration = options?.maxTokensPerGeneration ?? 0;
7097
+ this.verboseMode = options?.verboseMode ?? true;
7098
+ this.debugMode = options?.debugMode ?? false;
6953
7099
  this.ai = ai;
7100
+ this.teacherAI = options?.teacherAI;
6954
7101
  this.program = program;
6955
7102
  this.examples = examples;
6956
7103
  }
6957
7104
  async compileRound(roundIndex, metricFn, options) {
6958
7105
  const st = (/* @__PURE__ */ new Date()).getTime();
6959
7106
  const maxDemos = options?.maxDemos ?? this.maxDemos;
6960
- const aiOpt = { modelConfig: { temperature: 0.7 } };
7107
+ const aiOpt = {
7108
+ modelConfig: {
7109
+ temperature: 0.7
7110
+ }
7111
+ };
7112
+ if (this.maxTokensPerGeneration > 0) {
7113
+ aiOpt.modelConfig.max_tokens = this.maxTokensPerGeneration;
7114
+ }
6961
7115
  const examples = randomSample(this.examples, this.maxExamples);
6962
- for (let i = 0; i < examples.length; i++) {
7116
+ const previousSuccessCount = this.traces.length;
7117
+ for (let i = 0; i < examples.length; i += this.batchSize) {
6963
7118
  if (i > 0) {
6964
7119
  aiOpt.modelConfig.temperature = 0.7 + 1e-3 * i;
6965
7120
  }
6966
- const ex = examples[i];
6967
- if (!ex) {
6968
- throw new Error("Invalid example");
6969
- }
6970
- const exList = [...examples.slice(0, i), ...examples.slice(i + 1)];
6971
- this.program.setExamples(exList);
6972
- const res = await this.program.forward(this.ai, ex, aiOpt);
6973
- const success = metricFn({ prediction: res, example: ex });
6974
- if (success) {
6975
- this.traces = [...this.traces, ...this.program.getTraces()];
7121
+ const batch = examples.slice(i, i + this.batchSize);
7122
+ for (const ex of batch) {
7123
+ if (!ex) {
7124
+ continue;
7125
+ }
7126
+ const exList = examples.filter((e) => e !== ex);
7127
+ this.program.setExamples(exList);
7128
+ const aiService = this.teacherAI || this.ai;
7129
+ this.stats.totalCalls++;
7130
+ let res;
7131
+ let error;
7132
+ try {
7133
+ res = await this.program.forward(aiService, ex, aiOpt);
7134
+ if (this.costMonitoring) {
7135
+ this.stats.estimatedTokenUsage += JSON.stringify(ex).length / 4 + JSON.stringify(res).length / 4;
7136
+ }
7137
+ const success = metricFn({ prediction: res, example: ex });
7138
+ if (success) {
7139
+ this.traces = [...this.traces, ...this.program.getTraces()];
7140
+ this.stats.successfulDemos++;
7141
+ }
7142
+ } catch (err) {
7143
+ error = err;
7144
+ res = {};
7145
+ }
7146
+ const current = i + examples.length * roundIndex + (batch.indexOf(ex) + 1);
7147
+ const total = examples.length * this.maxRounds;
7148
+ const et = (/* @__PURE__ */ new Date()).getTime() - st;
7149
+ if (this.verboseMode || this.debugMode) {
7150
+ const configInfo = {
7151
+ maxRounds: this.maxRounds,
7152
+ batchSize: this.batchSize,
7153
+ earlyStoppingPatience: this.earlyStoppingPatience,
7154
+ costMonitoring: this.costMonitoring,
7155
+ verboseMode: this.verboseMode,
7156
+ debugMode: this.debugMode
7157
+ };
7158
+ updateDetailedProgress(
7159
+ roundIndex,
7160
+ current,
7161
+ total,
7162
+ et,
7163
+ ex,
7164
+ this.stats,
7165
+ configInfo,
7166
+ res,
7167
+ error
7168
+ );
7169
+ } else {
7170
+ updateProgressBar(
7171
+ current,
7172
+ total,
7173
+ this.traces.length,
7174
+ et,
7175
+ "Tuning Prompt",
7176
+ 30
7177
+ );
7178
+ }
7179
+ if (this.traces.length >= maxDemos) {
7180
+ return;
7181
+ }
6976
7182
  }
6977
- const current = i + examples.length * roundIndex;
6978
- const total = examples.length * this.maxRounds;
6979
- const et = (/* @__PURE__ */ new Date()).getTime() - st;
6980
- updateProgressBar(
6981
- current,
6982
- total,
6983
- this.traces.length,
6984
- et,
6985
- "Tuning Prompt",
6986
- 30
6987
- );
6988
- if (this.traces.length > maxDemos) {
7183
+ }
7184
+ if (this.earlyStoppingPatience > 0) {
7185
+ const newSuccessCount = this.traces.length;
7186
+ const improvement = newSuccessCount - previousSuccessCount;
7187
+ if (!this.stats.earlyStopping) {
7188
+ this.stats.earlyStopping = {
7189
+ bestScoreRound: improvement > 0 ? roundIndex : 0,
7190
+ patienceExhausted: false
7191
+ };
7192
+ } else if (improvement > 0) {
7193
+ this.stats.earlyStopping.bestScoreRound = roundIndex;
7194
+ } else if (roundIndex - this.stats.earlyStopping.bestScoreRound >= this.earlyStoppingPatience) {
7195
+ this.stats.earlyStopping.patienceExhausted = true;
7196
+ this.stats.earlyStopped = true;
7197
+ if (this.verboseMode || this.debugMode) {
7198
+ console.log(
7199
+ `
7200
+ Early stopping triggered after ${roundIndex + 1} rounds. No improvement for ${this.earlyStoppingPatience} rounds.`
7201
+ );
7202
+ }
6989
7203
  return;
6990
7204
  }
6991
7205
  }
@@ -6993,8 +7207,17 @@ var AxBootstrapFewShot = class {
6993
7207
  async compile(metricFn, options) {
6994
7208
  const maxRounds = options?.maxRounds ?? this.maxRounds;
6995
7209
  this.traces = [];
7210
+ this.stats = {
7211
+ totalCalls: 0,
7212
+ successfulDemos: 0,
7213
+ estimatedTokenUsage: 0,
7214
+ earlyStopped: false
7215
+ };
6996
7216
  for (let i = 0; i < maxRounds; i++) {
6997
7217
  await this.compileRound(i, metricFn, options);
7218
+ if (this.stats.earlyStopped) {
7219
+ break;
7220
+ }
6998
7221
  }
6999
7222
  if (this.traces.length === 0) {
7000
7223
  throw new Error(
@@ -7002,22 +7225,32 @@ var AxBootstrapFewShot = class {
7002
7225
  );
7003
7226
  }
7004
7227
  const demos = groupTracesByKeys(this.traces);
7005
- return demos;
7228
+ return {
7229
+ demos,
7230
+ stats: this.stats
7231
+ };
7232
+ }
7233
+ // Get optimization statistics
7234
+ getStats() {
7235
+ return this.stats;
7006
7236
  }
7007
7237
  };
7008
7238
  function groupTracesByKeys(programTraces) {
7009
7239
  const groupedTraces = /* @__PURE__ */ new Map();
7010
7240
  for (const programTrace of programTraces) {
7011
7241
  if (groupedTraces.has(programTrace.programId)) {
7012
- groupedTraces.get(programTrace.programId).push(programTrace.trace);
7242
+ const traces = groupedTraces.get(programTrace.programId);
7243
+ if (traces) {
7244
+ traces.push(programTrace.trace);
7245
+ }
7013
7246
  } else {
7014
7247
  groupedTraces.set(programTrace.programId, [programTrace.trace]);
7015
7248
  }
7016
7249
  }
7017
7250
  const programDemosArray = [];
7018
- groupedTraces.forEach((traces, programId) => {
7251
+ for (const [programId, traces] of groupedTraces.entries()) {
7019
7252
  programDemosArray.push({ traces, programId });
7020
- });
7253
+ }
7021
7254
  return programDemosArray;
7022
7255
  }
7023
7256
  var randomSample = (array, n) => {
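AxBootstrapFewShot gains batching, early stopping, cost monitoring, a per-generation token cap, verbose/debug reporting and an optional teacherAI used for generation, and compile() now resolves to { demos, stats } rather than the demos array alone. A hedged usage sketch; ai, teacherAI, program, examples and metricFn are placeholders for your own instances:

const optimizer = new AxBootstrapFewShot({
  ai,
  program,   // a program exposing forward(), setExamples(), getTraces()
  examples,  // training examples
  options: {
    maxRounds: 3,
    maxDemos: 4,
    batchSize: 2,             // new: process examples in batches
    earlyStoppingPatience: 1, // new: stop after rounds with no new successful traces
    costMonitoring: true,     // new: rough token estimate in stats.estimatedTokenUsage
    teacherAI,                // new: optional stronger model for generation
  },
});

const { demos, stats } = await optimizer.compile(metricFn);
console.log(stats.successfulDemos, stats.totalCalls, stats.earlyStopped);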
@@ -8137,6 +8370,639 @@ var AxJSInterpreter = class {
8137
8370
  }
8138
8371
  };
8139
8372
 
8373
+ // dsp/mipro.ts
8374
+ var AxMiPRO = class {
8375
+ ai;
8376
+ program;
8377
+ examples;
8378
+ maxBootstrappedDemos;
8379
+ maxLabeledDemos;
8380
+ numCandidates;
8381
+ initTemperature;
8382
+ numTrials;
8383
+ minibatch;
8384
+ minibatchSize;
8385
+ minibatchFullEvalSteps;
8386
+ programAwareProposer;
8387
+ dataAwareProposer;
8388
+ viewDataBatchSize;
8389
+ tipAwareProposer;
8390
+ fewshotAwareProposer;
8391
+ seed;
8392
+ verbose;
8393
+ bootstrapper;
8394
+ earlyStoppingTrials;
8395
+ minImprovementThreshold;
8396
+ constructor({
8397
+ ai,
8398
+ program,
8399
+ examples = [],
8400
+ options
8401
+ }) {
8402
+ if (examples.length === 0) {
8403
+ throw new Error("No examples found");
8404
+ }
8405
+ const miproOptions = options || {};
8406
+ this.numCandidates = miproOptions.numCandidates ?? 5;
8407
+ this.initTemperature = miproOptions.initTemperature ?? 0.7;
8408
+ this.maxBootstrappedDemos = miproOptions.maxBootstrappedDemos ?? 3;
8409
+ this.maxLabeledDemos = miproOptions.maxLabeledDemos ?? 4;
8410
+ this.numTrials = miproOptions.numTrials ?? 30;
8411
+ this.minibatch = miproOptions.minibatch ?? true;
8412
+ this.minibatchSize = miproOptions.minibatchSize ?? 25;
8413
+ this.minibatchFullEvalSteps = miproOptions.minibatchFullEvalSteps ?? 10;
8414
+ this.programAwareProposer = miproOptions.programAwareProposer ?? true;
8415
+ this.dataAwareProposer = miproOptions.dataAwareProposer ?? true;
8416
+ this.viewDataBatchSize = miproOptions.viewDataBatchSize ?? 10;
8417
+ this.tipAwareProposer = miproOptions.tipAwareProposer ?? true;
8418
+ this.fewshotAwareProposer = miproOptions.fewshotAwareProposer ?? true;
8419
+ this.seed = miproOptions.seed;
8420
+ this.verbose = miproOptions.verbose ?? false;
8421
+ this.earlyStoppingTrials = miproOptions.earlyStoppingTrials ?? 5;
8422
+ this.minImprovementThreshold = miproOptions.minImprovementThreshold ?? 0.01;
8423
+ this.ai = ai;
8424
+ this.program = program;
8425
+ this.examples = examples;
8426
+ this.bootstrapper = new AxBootstrapFewShot({
8427
+ ai,
8428
+ program,
8429
+ examples,
8430
+ options: {
8431
+ maxDemos: this.maxBootstrappedDemos,
8432
+ maxRounds: 3,
8433
+ // Default, or adjust based on your needs
8434
+ verboseMode: this.verbose
8435
+ }
8436
+ });
8437
+ }
8438
+ /**
8439
+ * Configures the optimizer for light, medium, or heavy optimization
8440
+ * @param level The optimization level: "light", "medium", or "heavy"
8441
+ */
8442
+ configureAuto(level) {
8443
+ switch (level) {
8444
+ case "light":
8445
+ this.numCandidates = 3;
8446
+ this.numTrials = 10;
8447
+ this.minibatch = true;
8448
+ this.minibatchSize = 20;
8449
+ break;
8450
+ case "medium":
8451
+ this.numCandidates = 5;
8452
+ this.numTrials = 20;
8453
+ this.minibatch = true;
8454
+ this.minibatchSize = 25;
8455
+ break;
8456
+ case "heavy":
8457
+ this.numCandidates = 7;
8458
+ this.numTrials = 30;
8459
+ this.minibatch = true;
8460
+ this.minibatchSize = 30;
8461
+ break;
8462
+ }
8463
+ }
8464
+ /**
8465
+ * Generates creative tips for instruction generation
8466
+ */
8467
+ generateTips() {
8468
+ return [
8469
+ "Be very specific and detailed in your instructions.",
8470
+ "Focus on step-by-step reasoning in your instructions.",
8471
+ "Provide clear constraints and guidelines in your instructions.",
8472
+ "Keep your instructions concise and to the point.",
8473
+ "Emphasize accuracy and precision in your instructions.",
8474
+ "Include examples of good outputs in your instructions.",
8475
+ "Focus on handling edge cases in your instructions.",
8476
+ "Explicitly outline the reasoning process in your instructions."
8477
+ ];
8478
+ }
8479
+ /**
8480
+ * Generates instruction candidates for each predictor in the program
8481
+ * @returns Array of generated instruction candidates
8482
+ */
8483
+ async proposeInstructionCandidates() {
8484
+ const instructions = [];
8485
+ let programContext = "";
8486
+ if (this.programAwareProposer) {
8487
+ programContext = await this.generateProgramSummary();
8488
+ }
8489
+ let dataContext = "";
8490
+ if (this.dataAwareProposer) {
8491
+ dataContext = await this.generateDataSummary();
8492
+ }
8493
+ const tips = this.tipAwareProposer ? this.generateTips() : [];
8494
+ for (let i = 0; i < this.numCandidates; i++) {
8495
+ const tipIndex = tips.length > 0 ? i % tips.length : -1;
8496
+ const tipToUse = tipIndex >= 0 ? tips[tipIndex] : "";
8497
+ const instruction = await this.generateInstruction({
8498
+ programContext,
8499
+ dataContext,
8500
+ tip: tipToUse,
8501
+ candidateIndex: i
8502
+ });
8503
+ instructions.push(instruction);
8504
+ }
8505
+ return instructions;
8506
+ }
8507
+ /**
8508
+ * Generates a summary of the program structure for instruction proposal
8509
+ */
8510
+ async generateProgramSummary() {
8511
+ const prompt = `Summarize the following program structure. Focus on the signatures,
8512
+ input/output fields, and the purpose of each component. Identify key components
8513
+ that might benefit from better instructions.`;
8514
+ const programStr = JSON.stringify(this.program);
8515
+ const response = await this.ai.chat({
8516
+ chatPrompt: [
8517
+ { role: "system", content: prompt },
8518
+ { role: "user", content: programStr }
8519
+ ],
8520
+ modelConfig: { temperature: 0.2 }
8521
+ });
8522
+ if (response instanceof ReadableStream) {
8523
+ return "";
8524
+ }
8525
+ return response.results[0]?.content || "";
8526
+ }
8527
+ /**
8528
+ * Generates a summary of the dataset for instruction proposal
8529
+ */
8530
+ async generateDataSummary() {
8531
+ const sampleSize = Math.min(this.viewDataBatchSize, this.examples.length);
8532
+ const sample = this.examples.slice(0, sampleSize);
8533
+ const prompt = `Analyze the following dataset examples and provide a summary
8534
+ of key patterns, input-output relationships, and any specific challenges
8535
+ the data presents. Focus on what makes a good answer and what patterns should
8536
+ be followed.`;
8537
+ const dataStr = JSON.stringify(sample);
8538
+ const response = await this.ai.chat({
8539
+ chatPrompt: [
8540
+ { role: "system", content: prompt },
8541
+ { role: "user", content: dataStr }
8542
+ ],
8543
+ modelConfig: { temperature: 0.2 }
8544
+ });
8545
+ if (response instanceof ReadableStream) {
8546
+ return "";
8547
+ }
8548
+ return response.results[0]?.content || "";
8549
+ }
8550
+ /**
8551
+ * Generates a specific instruction candidate
8552
+ */
8553
+ async generateInstruction({
8554
+ programContext,
8555
+ dataContext,
8556
+ tip,
8557
+ candidateIndex
8558
+ }) {
8559
+ const prompt = `Create a high-quality instruction for an AI model performing the task described below.
8560
+
8561
+ ${programContext ? `PROGRAM CONTEXT:
8562
+ ${programContext}
8563
+
8564
+ ` : ""}
8565
+ ${dataContext ? `DATA CONTEXT:
8566
+ ${dataContext}
8567
+
8568
+ ` : ""}
8569
+ ${tip ? `STYLE TIP: ${tip}
8570
+
8571
+ ` : ""}
8572
+
8573
+ Your task is to craft a clear, effective instruction that will help the AI model generate
8574
+ accurate outputs for this task. Instruction #${candidateIndex + 1}/${this.numCandidates}.
8575
+
8576
+ The instruction should be detailed enough to guide the model but not overly prescriptive
8577
+ or restrictive. Focus on what makes a good response rather than listing exact steps.
8578
+
8579
+ INSTRUCTION:`;
8580
+ const response = await this.ai.chat({
8581
+ chatPrompt: [{ role: "user", content: prompt }],
8582
+ modelConfig: { temperature: 0.7 + 0.1 * candidateIndex }
8583
+ });
8584
+ if (response instanceof ReadableStream) {
8585
+ return "";
8586
+ }
8587
+ return response.results[0]?.content || "";
8588
+ }
8589
+ /**
8590
+ * Bootstraps few-shot examples for the program
8591
+ */
8592
+ async bootstrapFewShotExamples(metricFn) {
8593
+ if (this.verbose) {
8594
+ console.log("Bootstrapping few-shot examples...");
8595
+ }
8596
+ const result = await this.bootstrapper.compile(metricFn, {
8597
+ maxDemos: this.maxBootstrappedDemos
8598
+ });
8599
+ return result.demos;
8600
+ }
8601
+ /**
8602
+ * Selects labeled examples directly from the training set
8603
+ */
8604
+ selectLabeledExamples() {
8605
+ const selectedExamples = [];
8606
+ const indices = /* @__PURE__ */ new Set();
8607
+ while (indices.size < this.maxLabeledDemos && indices.size < this.examples.length) {
8608
+ const idx = Math.floor(Math.random() * this.examples.length);
8609
+ if (!indices.has(idx)) {
8610
+ indices.add(idx);
8611
+ const example = this.examples[idx];
8612
+ if (example) {
8613
+ selectedExamples.push(example);
8614
+ }
8615
+ }
8616
+ }
8617
+ return selectedExamples;
8618
+ }
8619
+ /**
8620
+ * Runs Bayesian optimization to find the best combination of few-shot examples and instructions
8621
+ */
8622
+ async runBayesianOptimization(bootstrappedDemos, labeledExamples, instructions, valset, metricFn) {
8623
+ let bestConfig = null;
8624
+ let bestScore = Number.NEGATIVE_INFINITY;
8625
+ const evaluatedConfigs = [];
8626
+ const defaultConfig = {
8627
+ instruction: instructions[0] || "",
8628
+ bootstrappedDemos: Math.min(1, bootstrappedDemos.length),
8629
+ labeledExamples: Math.min(1, labeledExamples.length)
8630
+ };
8631
+ let trialsWithoutImprovement = 0;
8632
+ let lastBestScore = Number.NEGATIVE_INFINITY;
8633
+ const initialExplorationTrials = Math.min(
8634
+ 10,
8635
+ Math.floor(this.numTrials / 3)
8636
+ );
8637
+ const configs = [];
8638
+ for (let i = 0; i < initialExplorationTrials; i++) {
8639
+ const instructionIndex = Math.floor(Math.random() * instructions.length);
8640
+ const instructionValue = instructions[instructionIndex] || "";
8641
+ const config = {
8642
+ instruction: instructionValue,
8643
+ bootstrappedDemos: Math.floor(
8644
+ Math.random() * (bootstrappedDemos.length + 1)
8645
+ ),
8646
+ labeledExamples: Math.floor(
8647
+ Math.random() * (labeledExamples.length + 1)
8648
+ )
8649
+ };
8650
+ configs.push(config);
8651
+ }
8652
+ for (let i = 0; i < configs.length; i++) {
8653
+ const config = configs[i];
8654
+ if (!config) continue;
8655
+ const score = await this.evaluateConfig(
8656
+ config,
8657
+ bootstrappedDemos,
8658
+ labeledExamples,
8659
+ valset,
8660
+ metricFn,
8661
+ i
8662
+ );
8663
+ evaluatedConfigs.push({ config, score });
8664
+ if (score > bestScore) {
8665
+ bestScore = score;
8666
+ bestConfig = config;
8667
+ if (this.verbose) {
8668
+ console.log(
8669
+ `New best configuration found with score ${bestScore} (exploration phase)`
8670
+ );
8671
+ }
8672
+ }
8673
+ updateProgressBar(
8674
+ i + 1,
8675
+ this.numTrials,
8676
+ Math.round(bestScore * 100),
8677
+ 0,
8678
+ "Running MIPROv2 optimization",
8679
+ 30
8680
+ );
8681
+ }
8682
+ for (let i = configs.length; i < this.numTrials; i++) {
8683
+ const nextConfig = this.selectNextConfiguration(
8684
+ evaluatedConfigs,
8685
+ bootstrappedDemos.length,
8686
+ labeledExamples.length,
8687
+ instructions
8688
+ );
8689
+ const score = await this.evaluateConfig(
8690
+ nextConfig,
8691
+ bootstrappedDemos,
8692
+ labeledExamples,
8693
+ valset,
8694
+ metricFn,
8695
+ i
8696
+ );
8697
+ evaluatedConfigs.push({ config: nextConfig, score });
8698
+ if (score > bestScore) {
8699
+ bestScore = score;
8700
+ bestConfig = nextConfig;
8701
+ if (this.verbose) {
8702
+ console.log(
8703
+ `New best configuration found with score ${bestScore} (exploitation phase)`
8704
+ );
8705
+ }
8706
+ trialsWithoutImprovement = 0;
8707
+ lastBestScore = bestScore;
8708
+ } else {
8709
+ if (bestScore - lastBestScore < this.minImprovementThreshold) {
8710
+ trialsWithoutImprovement++;
8711
+ if (trialsWithoutImprovement >= this.earlyStoppingTrials) {
8712
+ if (this.verbose) {
8713
+ console.log(
8714
+ `Early stopping triggered after ${i + 1} trials. No improvement for ${trialsWithoutImprovement} trials.`
8715
+ );
8716
+ }
8717
+ break;
8718
+ }
8719
+ } else {
8720
+ lastBestScore = bestScore;
8721
+ trialsWithoutImprovement = 0;
8722
+ }
8723
+ }
8724
+ updateProgressBar(
8725
+ i + 1,
8726
+ this.numTrials,
8727
+ Math.round(bestScore * 100),
8728
+ 0,
8729
+ "Running MIPROv2 optimization",
8730
+ 30
8731
+ );
8732
+ if (this.minibatch && i > 0 && (i + 1) % this.minibatchFullEvalSteps === 0 && bestConfig) {
8733
+ if (this.verbose) {
8734
+ console.log(
8735
+ `Running full evaluation on best configuration at trial ${i + 1}`
8736
+ );
8737
+ }
8738
+ const fullScore = await this.fullEvaluation(
8739
+ bestConfig,
8740
+ bootstrappedDemos,
8741
+ labeledExamples,
8742
+ valset,
8743
+ metricFn
8744
+ );
8745
+ if (this.verbose) {
8746
+ console.log(`Full evaluation score: ${fullScore}`);
8747
+ }
8748
+ bestScore = fullScore;
8749
+ }
8750
+ }
8751
+ if (!bestConfig) {
8752
+ if (this.verbose) {
8753
+ console.warn(
8754
+ "Optimization failed to find any valid configurations, using default fallback configuration"
8755
+ );
8756
+ }
8757
+ bestConfig = defaultConfig;
8758
+ try {
8759
+ bestScore = await this.evaluateConfig(
8760
+ bestConfig,
8761
+ bootstrappedDemos,
8762
+ labeledExamples,
8763
+ valset,
8764
+ metricFn,
8765
+ this.numTrials - 1
8766
+ );
8767
+ } catch (err) {
8768
+ if (this.verbose) {
8769
+ console.error("Error evaluating default configuration:", err);
8770
+ }
8771
+ bestScore = 0;
8772
+ }
8773
+ }
8774
+ return { bestConfig, bestScore };
8775
+ }
8776
+ /**
8777
+ * Evaluates a configuration on the validation set
8778
+ */
8779
+ async evaluateConfig(config, bootstrappedDemos, labeledExamples, valset, metricFn, trialIndex) {
8780
+ this.applyConfigToProgram(
8781
+ this.program,
8782
+ config,
8783
+ bootstrappedDemos,
8784
+ labeledExamples
8785
+ );
8786
+ let evalSet = valset;
8787
+ if (this.minibatch) {
8788
+ const startIdx = trialIndex * this.minibatchSize % valset.length;
8789
+ const minibatchEvalSet = [];
8790
+ for (let j = 0; j < this.minibatchSize; j++) {
8791
+ const idx = (startIdx + j) % valset.length;
8792
+ const example = valset[idx];
8793
+ if (example) {
8794
+ minibatchEvalSet.push(example);
8795
+ }
8796
+ }
8797
+ evalSet = minibatchEvalSet;
8798
+ }
8799
+ let correctCount = 0;
8800
+ for (const example of evalSet) {
8801
+ try {
8802
+ const prediction = await this.program.forward(this.ai, example);
8803
+ const correct = metricFn({ prediction, example });
8804
+ if (correct) correctCount++;
8805
+ } catch (err) {
8806
+ if (this.verbose) {
8807
+ console.error("Error evaluating example:", err);
8808
+ }
8809
+ }
8810
+ }
8811
+ return correctCount / evalSet.length;
8812
+ }
8813
+ /**
8814
+ * Run full evaluation on the entire validation set
8815
+ */
8816
+ async fullEvaluation(config, bootstrappedDemos, labeledExamples, valset, metricFn) {
8817
+ this.applyConfigToProgram(
8818
+ this.program,
8819
+ config,
8820
+ bootstrappedDemos,
8821
+ labeledExamples
8822
+ );
8823
+ let fullCorrectCount = 0;
8824
+ for (const example of valset) {
8825
+ try {
8826
+ const prediction = await this.program.forward(this.ai, example);
8827
+ const correct = metricFn({ prediction, example });
8828
+ if (correct) fullCorrectCount++;
8829
+ } catch (err) {
8830
+ if (this.verbose) {
8831
+ console.error("Error evaluating example:", err);
8832
+ }
8833
+ }
8834
+ }
8835
+ return fullCorrectCount / valset.length;
8836
+ }
8837
+ /**
8838
+ * Implements a Bayesian-inspired selection of the next configuration to try
8839
+ * This is a simplified version using Upper Confidence Bound (UCB) strategy
8840
+ */
8841
+ selectNextConfiguration(evaluatedConfigs, maxBootstrappedDemos, maxLabeledExamples, instructions) {
8842
+ if (evaluatedConfigs.length < 5) {
8843
+ const instructionIndex = Math.floor(Math.random() * instructions.length);
8844
+ return {
8845
+ instruction: instructions[instructionIndex] || "",
8846
+ bootstrappedDemos: Math.floor(
8847
+ Math.random() * (maxBootstrappedDemos + 1)
8848
+ ),
8849
+ labeledExamples: Math.floor(Math.random() * (maxLabeledExamples + 1))
8850
+ };
8851
+ }
8852
+ const sortedConfigs = [...evaluatedConfigs].sort(
8853
+ (a, b) => b.score - a.score
8854
+ );
8855
+ const topConfigs = sortedConfigs.slice(0, Math.min(3, sortedConfigs.length));
8856
+ const meanBootstrappedDemos = topConfigs.reduce((sum, c) => sum + c.config.bootstrappedDemos, 0) / topConfigs.length;
8857
+ const meanLabeledExamples = topConfigs.reduce((sum, c) => sum + c.config.labeledExamples, 0) / topConfigs.length;
8858
+ const popularInstructions = topConfigs.map((c) => c.config.instruction);
8859
+ const explorationFactor = Math.max(
8860
+ 0.2,
8861
+ 1 - evaluatedConfigs.length / this.numTrials
8862
+ );
8863
+ let newBootstrappedDemos;
8864
+ let newLabeledExamples;
8865
+ let newInstruction;
8866
+ if (Math.random() < 0.7) {
8867
+ newBootstrappedDemos = Math.min(
8868
+ maxBootstrappedDemos,
8869
+ Math.max(
8870
+ 0,
8871
+ Math.round(
8872
+ meanBootstrappedDemos + (Math.random() * 2 - 1) * explorationFactor * 2
8873
+ )
8874
+ )
8875
+ );
8876
+ } else {
8877
+ newBootstrappedDemos = Math.floor(
8878
+ Math.random() * (maxBootstrappedDemos + 1)
8879
+ );
8880
+ }
8881
+ if (Math.random() < 0.7) {
8882
+ newLabeledExamples = Math.min(
8883
+ maxLabeledExamples,
8884
+ Math.max(
8885
+ 0,
8886
+ Math.round(
8887
+ meanLabeledExamples + (Math.random() * 2 - 1) * explorationFactor * 2
8888
+ )
8889
+ )
8890
+ );
8891
+ } else {
8892
+ newLabeledExamples = Math.floor(Math.random() * (maxLabeledExamples + 1));
8893
+ }
8894
+ if (Math.random() < 0.7 && popularInstructions.length > 0) {
8895
+ const idx = Math.floor(Math.random() * popularInstructions.length);
8896
+ newInstruction = popularInstructions[idx] || "";
8897
+ } else {
8898
+ const idx = Math.floor(Math.random() * instructions.length);
8899
+ newInstruction = instructions[idx] || "";
8900
+ }
8901
+ return {
8902
+ instruction: newInstruction,
8903
+ bootstrappedDemos: newBootstrappedDemos,
8904
+ labeledExamples: newLabeledExamples
8905
+ };
8906
+ }
8907
+ /**
8908
+ * Applies a configuration to a program instance
8909
+ */
8910
+ applyConfigToProgram(program, config, bootstrappedDemos, labeledExamples) {
8911
+ this.setInstructionToProgram(program, config.instruction);
8912
+ if (config.bootstrappedDemos > 0) {
8913
+ program.setDemos(bootstrappedDemos.slice(0, config.bootstrappedDemos));
8914
+ }
8915
+ if (config.labeledExamples > 0) {
8916
+ program.setExamples(labeledExamples.slice(0, config.labeledExamples));
8917
+ }
8918
+ }
8919
+ /**
8920
+ * Sets instruction to a program
8921
+ * Note: Workaround since setInstruction may not be available directly
8922
+ */
8923
+ setInstructionToProgram(program, instruction) {
8924
+ const programWithInstruction = program;
8925
+ programWithInstruction.setInstruction?.(instruction);
8926
+ }
8927
+ /**
8928
+ * The main compile method to run MIPROv2 optimization
8929
+ * @param metricFn Evaluation metric function
8930
+ * @param options Optional configuration options
8931
+ * @returns The optimized program
8932
+ */
8933
+ async compile(metricFn, options) {
8934
+ if (options?.auto) {
8935
+ this.configureAuto(options.auto);
8936
+ }
8937
+ const trainset = this.examples;
8938
+ const valset = options?.valset || this.examples.slice(0, Math.floor(this.examples.length * 0.8));
8939
+ if (this.verbose) {
8940
+ console.log(`Starting MIPROv2 optimization with ${this.numTrials} trials`);
8941
+ console.log(
8942
+ `Using ${trainset.length} examples for training and ${valset.length} for validation`
8943
+ );
8944
+ }
8945
+ if (options?.teacher) {
8946
+ if (this.verbose) {
8947
+ console.log("Using provided teacher to assist with bootstrapping");
8948
+ }
8949
+ const bootstrapperWithTeacher = new AxBootstrapFewShot({
8950
+ ai: this.ai,
8951
+ program: this.program,
8952
+ examples: this.examples,
8953
+ options: {
8954
+ maxDemos: this.maxBootstrappedDemos,
8955
+ maxRounds: 3,
8956
+ verboseMode: this.verbose,
8957
+ teacherAI: this.ai
8958
+ // Use the same AI but with the teacher program
8959
+ }
8960
+ });
8961
+ this.bootstrapper = bootstrapperWithTeacher;
8962
+ }
8963
+ let bootstrappedDemos = [];
8964
+ if (this.maxBootstrappedDemos > 0) {
8965
+ bootstrappedDemos = await this.bootstrapFewShotExamples(metricFn);
8966
+ if (this.verbose) {
8967
+ console.log(
8968
+ `Generated ${bootstrappedDemos.length} bootstrapped demonstrations`
8969
+ );
8970
+ }
8971
+ }
8972
+ let labeledExamples = [];
8973
+ if (this.maxLabeledDemos > 0) {
8974
+ labeledExamples = this.selectLabeledExamples();
8975
+ if (this.verbose) {
8976
+ console.log(
8977
+ `Selected ${labeledExamples.length} labeled examples from training set`
8978
+ );
8979
+ }
8980
+ }
8981
+ const instructions = await this.proposeInstructionCandidates();
8982
+ if (this.verbose) {
8983
+ console.log(`Generated ${instructions.length} instruction candidates`);
8984
+ }
8985
+ const { bestConfig, bestScore } = await this.runBayesianOptimization(
8986
+ bootstrappedDemos,
8987
+ labeledExamples,
8988
+ instructions,
8989
+ valset,
8990
+ metricFn
8991
+ );
8992
+ if (this.verbose) {
8993
+ console.log(`Optimization complete. Best score: ${bestScore}`);
8994
+ console.log(`Best configuration: ${JSON.stringify(bestConfig)}`);
8995
+ }
8996
+ this.applyConfigToProgram(
8997
+ this.program,
8998
+ bestConfig,
8999
+ bootstrappedDemos,
9000
+ labeledExamples
9001
+ );
9002
+ return this.program;
9003
+ }
9004
+ };
9005
+
8140
9006
  // ai/mock/api.ts
8141
9007
  var AxMockAIService = class {
8142
9008
  constructor(config = {}) {
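The new AxMiPRO class implements a MIPROv2-style optimizer: it bootstraps demos via AxBootstrapFewShot, proposes instruction candidates (optionally program-, data- and tip-aware), searches instruction/demo combinations with a UCB-flavoured strategy, and applies the best configuration to the program it returns. A hedged usage sketch; ai, program, examples, valset and metricFn are placeholders:

const mipro = new AxMiPRO({
  ai,
  program,
  examples, // used for bootstrapping, labeled demos and instruction proposal
  options: {
    numTrials: 20,
    maxBootstrappedDemos: 3,
    maxLabeledDemos: 4,
    verbose: true,
  },
});

// options.auto ("light" | "medium" | "heavy") applies the configureAuto presets instead
const optimized = await mipro.compile(metricFn, { auto: "light", valset });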
@@ -8445,7 +9311,7 @@ var batchArray = (arr, size) => {
  }
  return chunkedArr;
  };
- var axStringUtil = {
+ var AxStringUtil = {
  trimNonAlphaNum,
  splitIntoTwo,
  dedup,
@@ -8464,7 +9330,7 @@ var AxDefaultResultReranker = class extends AxGen {
  forward = async (ai, input, options) => {
  const { rankedItems } = await super.forward(ai, input, options);
  const sortedIndexes = rankedItems.map((item) => {
- const { id: index } = axStringUtil.extractIdAndText(item);
+ const { id: index } = AxStringUtil.extractIdAndText(item);
  return index;
  });
  const sortedItems = input.items.map((_, index) => {
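The string-utility object is renamed from axStringUtil to AxStringUtil, and the reranker above now references it under the new name; any code using the old identifier needs the same one-line rename. Illustrative, mirroring the reranker's own call (item is whatever ranked string it receives):

// 11.0.26: const { id } = axStringUtil.extractIdAndText(item);
// 11.0.28:
const { id } = AxStringUtil.extractIdAndText(item);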
@@ -8519,6 +9385,1230 @@ var AxEmbeddingAdapter = class {
8519
9385
  }
8520
9386
  };
8521
9387
 
9388
+ // dsp/stopwords.ts
9389
+ var stopwords = /* @__PURE__ */ new Set([
9390
+ "0o",
9391
+ "0s",
9392
+ "3a",
9393
+ "3b",
9394
+ "3d",
9395
+ "6b",
9396
+ "6o",
9397
+ "a",
9398
+ "a1",
9399
+ "a2",
9400
+ "a3",
9401
+ "a4",
9402
+ "ab",
9403
+ "able",
9404
+ "about",
9405
+ "above",
9406
+ "abst",
9407
+ "ac",
9408
+ "accordance",
9409
+ "according",
9410
+ "accordingly",
9411
+ "across",
9412
+ "act",
9413
+ "actually",
9414
+ "ad",
9415
+ "added",
9416
+ "adj",
9417
+ "ae",
9418
+ "af",
9419
+ "affected",
9420
+ "affecting",
9421
+ "affects",
9422
+ "after",
9423
+ "afterwards",
9424
+ "ag",
9425
+ "again",
9426
+ "against",
9427
+ "ah",
9428
+ "ain",
9429
+ "ain't",
9430
+ "aj",
9431
+ "al",
9432
+ "all",
9433
+ "allow",
9434
+ "allows",
9435
+ "almost",
9436
+ "alone",
9437
+ "along",
9438
+ "already",
9439
+ "also",
9440
+ "although",
9441
+ "always",
9442
+ "am",
9443
+ "among",
9444
+ "amongst",
9445
+ "amoungst",
9446
+ "amount",
9447
+ "an",
9448
+ "and",
9449
+ "announce",
9450
+ "another",
9451
+ "any",
9452
+ "anybody",
9453
+ "anyhow",
9454
+ "anymore",
9455
+ "anyone",
9456
+ "anything",
9457
+ "anyway",
9458
+ "anyways",
9459
+ "anywhere",
9460
+ "ao",
9461
+ "ap",
9462
+ "apart",
9463
+ "apparently",
9464
+ "appear",
9465
+ "appreciate",
9466
+ "appropriate",
9467
+ "approximately",
9468
+ "ar",
9469
+ "are",
9470
+ "aren",
9471
+ "arent",
9472
+ "aren't",
9473
+ "arise",
9474
+ "around",
9475
+ "as",
9476
+ "a's",
9477
+ "aside",
9478
+ "ask",
9479
+ "asking",
9480
+ "associated",
9481
+ "at",
9482
+ "au",
9483
+ "auth",
9484
+ "av",
9485
+ "available",
9486
+ "aw",
9487
+ "away",
9488
+ "awfully",
9489
+ "ax",
9490
+ "ay",
9491
+ "az",
9492
+ "b",
9493
+ "b1",
9494
+ "b2",
9495
+ "b3",
9496
+ "ba",
9497
+ "back",
9498
+ "bc",
9499
+ "bd",
9500
+ "be",
9501
+ "became",
9502
+ "because",
9503
+ "become",
9504
+ "becomes",
9505
+ "becoming",
9506
+ "been",
9507
+ "before",
9508
+ "beforehand",
9509
+ "begin",
9510
+ "beginning",
9511
+ "beginnings",
9512
+ "begins",
9513
+ "behind",
9514
+ "being",
9515
+ "believe",
9516
+ "below",
9517
+ "beside",
9518
+ "besides",
9519
+ "best",
9520
+ "better",
9521
+ "between",
9522
+ "beyond",
9523
+ "bi",
9524
+ "bill",
9525
+ "biol",
9526
+ "bj",
9527
+ "bk",
9528
+ "bl",
9529
+ "bn",
9530
+ "both",
9531
+ "bottom",
9532
+ "bp",
9533
+ "br",
9534
+ "brief",
9535
+ "briefly",
9536
+ "bs",
9537
+ "bt",
9538
+ "bu",
9539
+ "but",
9540
+ "bx",
9541
+ "by",
9542
+ "c",
9543
+ "c1",
9544
+ "c2",
9545
+ "c3",
9546
+ "ca",
9547
+ "call",
9548
+ "came",
9549
+ "can",
9550
+ "cannot",
9551
+ "cant",
9552
+ "can't",
9553
+ "cause",
9554
+ "causes",
9555
+ "cc",
9556
+ "cd",
9557
+ "ce",
9558
+ "certain",
9559
+ "certainly",
9560
+ "cf",
9561
+ "cg",
9562
+ "ch",
9563
+ "changes",
9564
+ "ci",
9565
+ "cit",
9566
+ "cj",
9567
+ "cl",
9568
+ "clearly",
9569
+ "cm",
9570
+ "c'mon",
9571
+ "cn",
9572
+ "co",
9573
+ "com",
9574
+ "come",
9575
+ "comes",
9576
+ "con",
9577
+ "concerning",
9578
+ "consequently",
9579
+ "consider",
9580
+ "considering",
9581
+ "contain",
9582
+ "containing",
9583
+ "contains",
9584
+ "corresponding",
9585
+ "could",
9586
+ "couldn",
9587
+ "couldnt",
9588
+ "couldn't",
9589
+ "course",
9590
+ "cp",
9591
+ "cq",
9592
+ "cr",
9593
+ "cry",
9594
+ "cs",
9595
+ "c's",
9596
+ "ct",
9597
+ "cu",
9598
+ "currently",
9599
+ "cv",
9600
+ "cx",
9601
+ "cy",
9602
+ "cz",
9603
+ "d",
9604
+ "d2",
9605
+ "da",
9606
+ "date",
9607
+ "dc",
9608
+ "dd",
9609
+ "de",
9610
+ "definitely",
9611
+ "describe",
9612
+ "described",
9613
+ "despite",
9614
+ "detail",
9615
+ "df",
9616
+ "di",
9617
+ "did",
9618
+ "didn",
9619
+ "didn't",
9620
+ "different",
9621
+ "dj",
9622
+ "dk",
9623
+ "dl",
9624
+ "do",
9625
+ "does",
9626
+ "doesn",
9627
+ "doesn't",
9628
+ "doing",
9629
+ "don",
9630
+ "done",
9631
+ "don't",
9632
+ "down",
9633
+ "downwards",
9634
+ "dp",
9635
+ "dr",
9636
+ "ds",
9637
+ "dt",
9638
+ "du",
9639
+ "due",
9640
+ "during",
9641
+ "dx",
9642
+ "dy",
9643
+ "e",
9644
+ "e2",
9645
+ "e3",
9646
+ "ea",
9647
+ "each",
9648
+ "ec",
9649
+ "ed",
9650
+ "edu",
9651
+ "ee",
9652
+ "ef",
9653
+ "effect",
9654
+ "eg",
9655
+ "ei",
9656
+ "eight",
9657
+ "eighty",
9658
+ "either",
9659
+ "ej",
9660
+ "el",
9661
+ "eleven",
9662
+ "else",
9663
+ "elsewhere",
9664
+ "em",
9665
+ "empty",
9666
+ "en",
9667
+ "end",
9668
+ "ending",
9669
+ "enough",
9670
+ "entirely",
9671
+ "eo",
9672
+ "ep",
9673
+ "eq",
9674
+ "er",
9675
+ "es",
9676
+ "especially",
9677
+ "est",
9678
+ "et",
9679
+ "et-al",
9680
+ "etc",
9681
+ "eu",
9682
+ "ev",
9683
+ "even",
9684
+ "ever",
9685
+ "every",
9686
+ "everybody",
9687
+ "everyone",
9688
+ "everything",
9689
+ "everywhere",
9690
+ "ex",
9691
+ "exactly",
9692
+ "example",
9693
+ "except",
9694
+ "ey",
9695
+ "f",
9696
+ "f2",
9697
+ "fa",
9698
+ "far",
9699
+ "fc",
9700
+ "few",
9701
+ "ff",
9702
+ "fi",
9703
+ "fifteen",
9704
+ "fifth",
9705
+ "fify",
9706
+ "fill",
9707
+ "find",
9708
+ "fire",
9709
+ "first",
9710
+ "five",
9711
+ "fix",
9712
+ "fj",
9713
+ "fl",
9714
+ "fn",
9715
+ "fo",
9716
+ "followed",
9717
+ "following",
9718
+ "follows",
9719
+ "for",
9720
+ "former",
9721
+ "formerly",
9722
+ "forth",
9723
+ "forty",
9724
+ "found",
9725
+ "four",
9726
+ "fr",
9727
+ "from",
9728
+ "front",
9729
+ "node:fs",
9730
+ "ft",
9731
+ "fu",
9732
+ "full",
9733
+ "further",
9734
+ "furthermore",
9735
+ "fy",
9736
+ "g",
9737
+ "ga",
9738
+ "gave",
9739
+ "ge",
9740
+ "get",
9741
+ "gets",
9742
+ "getting",
9743
+ "gi",
9744
+ "give",
9745
+ "given",
9746
+ "gives",
9747
+ "giving",
9748
+ "gj",
9749
+ "gl",
9750
+ "go",
9751
+ "goes",
9752
+ "going",
9753
+ "gone",
9754
+ "got",
9755
+ "gotten",
9756
+ "gr",
9757
+ "greetings",
9758
+ "gs",
9759
+ "gy",
9760
+ "h",
9761
+ "h2",
9762
+ "h3",
9763
+ "had",
9764
+ "hadn",
9765
+ "hadn't",
9766
+ "happens",
9767
+ "hardly",
9768
+ "has",
9769
+ "hasn",
9770
+ "hasnt",
9771
+ "hasn't",
9772
+ "have",
9773
+ "haven",
9774
+ "haven't",
9775
+ "having",
9776
+ "he",
9777
+ "hed",
9778
+ "he'd",
9779
+ "he'll",
9780
+ "hello",
9781
+ "help",
9782
+ "hence",
9783
+ "her",
9784
+ "here",
9785
+ "hereafter",
9786
+ "hereby",
9787
+ "herein",
9788
+ "heres",
9789
+ "here's",
9790
+ "hereupon",
9791
+ "hers",
9792
+ "herself",
9793
+ "hes",
9794
+ "he's",
9795
+ "hh",
9796
+ "hi",
9797
+ "hid",
9798
+ "him",
9799
+ "himself",
9800
+ "his",
9801
+ "hither",
9802
+ "hj",
9803
+ "ho",
9804
+ "home",
9805
+ "hopefully",
9806
+ "how",
9807
+ "howbeit",
9808
+ "however",
9809
+ "how's",
9810
+ "hr",
9811
+ "hs",
9812
+ "http",
9813
+ "hu",
9814
+ "hundred",
9815
+ "hy",
9816
+ "i",
9817
+ "i2",
9818
+ "i3",
9819
+ "i4",
9820
+ "i6",
9821
+ "i7",
9822
+ "i8",
9823
+ "ia",
9824
+ "ib",
9825
+ "ibid",
9826
+ "ic",
9827
+ "id",
9828
+ "i'd",
9829
+ "ie",
9830
+ "if",
9831
+ "ig",
9832
+ "ignored",
9833
+ "ih",
9834
+ "ii",
9835
+ "ij",
9836
+ "il",
9837
+ "i'll",
9838
+ "im",
9839
+ "i'm",
9840
+ "immediate",
9841
+ "immediately",
9842
+ "importance",
9843
+ "important",
9844
+ "in",
9845
+ "inasmuch",
9846
+ "inc",
9847
+ "indeed",
9848
+ "index",
9849
+ "indicate",
9850
+ "indicated",
9851
+ "indicates",
9852
+ "information",
9853
+ "inner",
9854
+ "insofar",
9855
+ "instead",
9856
+ "interest",
9857
+ "into",
9858
+ "invention",
9859
+ "inward",
9860
+ "io",
9861
+ "ip",
9862
+ "iq",
9863
+ "ir",
9864
+ "is",
9865
+ "isn",
9866
+ "isn't",
9867
+ "it",
9868
+ "itd",
9869
+ "it'd",
9870
+ "it'll",
9871
+ "its",
9872
+ "it's",
9873
+ "itself",
9874
+ "iv",
9875
+ "i've",
9876
+ "ix",
9877
+ "iy",
9878
+ "iz",
9879
+ "j",
9880
+ "jj",
9881
+ "jr",
9882
+ "js",
9883
+ "jt",
9884
+ "ju",
9885
+ "just",
9886
+ "k",
9887
+ "ke",
9888
+ "keep",
9889
+ "keeps",
9890
+ "kept",
9891
+ "kg",
9892
+ "kj",
9893
+ "km",
9894
+ "know",
9895
+ "known",
9896
+ "knows",
9897
+ "ko",
9898
+ "l",
9899
+ "l2",
9900
+ "la",
9901
+ "largely",
9902
+ "last",
9903
+ "lately",
9904
+ "later",
9905
+ "latter",
9906
+ "latterly",
9907
+ "lb",
9908
+ "lc",
9909
+ "le",
9910
+ "least",
9911
+ "les",
9912
+ "less",
9913
+ "lest",
9914
+ "let",
9915
+ "lets",
9916
+ "let's",
9917
+ "lf",
9918
+ "like",
9919
+ "liked",
9920
+ "likely",
9921
+ "line",
9922
+ "little",
9923
+ "lj",
9924
+ "ll",
9925
+ "ll",
9926
+ "ln",
9927
+ "lo",
9928
+ "look",
9929
+ "looking",
9930
+ "looks",
9931
+ "los",
9932
+ "lr",
9933
+ "ls",
9934
+ "lt",
9935
+ "ltd",
9936
+ "m",
9937
+ "m2",
9938
+ "ma",
9939
+ "made",
9940
+ "mainly",
9941
+ "make",
9942
+ "makes",
9943
+ "many",
9944
+ "may",
9945
+ "maybe",
9946
+ "me",
9947
+ "mean",
9948
+ "means",
9949
+ "meantime",
9950
+ "meanwhile",
9951
+ "merely",
9952
+ "mg",
9953
+ "might",
9954
+ "mightn",
9955
+ "mightn't",
9956
+ "mill",
9957
+ "million",
9958
+ "mine",
9959
+ "miss",
9960
+ "ml",
9961
+ "mn",
9962
+ "mo",
9963
+ "more",
9964
+ "moreover",
9965
+ "most",
9966
+ "mostly",
9967
+ "move",
9968
+ "mr",
9969
+ "mrs",
9970
+ "ms",
9971
+ "mt",
9972
+ "mu",
9973
+ "much",
9974
+ "mug",
9975
+ "must",
9976
+ "mustn",
9977
+ "mustn't",
9978
+ "my",
9979
+ "myself",
9980
+ "model",
9981
+ "n",
9982
+ "n2",
9983
+ "na",
9984
+ "name",
9985
+ "namely",
9986
+ "nay",
9987
+ "nc",
9988
+ "nd",
9989
+ "ne",
9990
+ "near",
9991
+ "nearly",
9992
+ "necessarily",
9993
+ "necessary",
9994
+ "need",
9995
+ "needn",
9996
+ "needn't",
9997
+ "needs",
9998
+ "neither",
9999
+ "never",
10000
+ "nevertheless",
10001
+ "new",
10002
+ "next",
10003
+ "ng",
10004
+ "ni",
10005
+ "nine",
10006
+ "ninety",
10007
+ "nj",
10008
+ "nl",
10009
+ "nn",
10010
+ "no",
10011
+ "nobody",
10012
+ "non",
10013
+ "none",
10014
+ "nonetheless",
10015
+ "noone",
10016
+ "nor",
10017
+ "normally",
10018
+ "nos",
10019
+ "not",
10020
+ "noted",
10021
+ "nothing",
10022
+ "novel",
10023
+ "now",
10024
+ "nowhere",
10025
+ "nr",
10026
+ "ns",
10027
+ "nt",
10028
+ "ny",
10029
+ "o",
10030
+ "oa",
10031
+ "ob",
10032
+ "obtain",
10033
+ "obtained",
10034
+ "obviously",
10035
+ "oc",
10036
+ "od",
10037
+ "of",
10038
+ "off",
10039
+ "often",
10040
+ "og",
10041
+ "oh",
10042
+ "oi",
10043
+ "oj",
10044
+ "ok",
10045
+ "okay",
10046
+ "ol",
10047
+ "old",
10048
+ "om",
10049
+ "omitted",
10050
+ "on",
10051
+ "once",
10052
+ "one",
10053
+ "ones",
10054
+ "only",
10055
+ "onto",
10056
+ "oo",
10057
+ "op",
10058
+ "oq",
10059
+ "or",
10060
+ "ord",
10061
+ "os",
10062
+ "ot",
10063
+ "other",
10064
+ "others",
10065
+ "otherwise",
10066
+ "ou",
10067
+ "ought",
10068
+ "our",
10069
+ "ours",
10070
+ "ourselves",
10071
+ "out",
10072
+ "outside",
10073
+ "over",
10074
+ "overall",
10075
+ "ow",
10076
+ "owing",
10077
+ "own",
10078
+ "ox",
10079
+ "oz",
10080
+ "p",
10081
+ "p1",
10082
+ "p2",
10083
+ "p3",
10084
+ "page",
10085
+ "pagecount",
10086
+ "pages",
10087
+ "par",
10088
+ "part",
10089
+ "particular",
10090
+ "particularly",
10091
+ "pas",
10092
+ "past",
10093
+ "pc",
10094
+ "pd",
10095
+ "pe",
10096
+ "per",
10097
+ "perhaps",
10098
+ "pf",
10099
+ "ph",
10100
+ "pi",
10101
+ "pj",
10102
+ "pk",
10103
+ "pl",
10104
+ "placed",
10105
+ "please",
10106
+ "plus",
10107
+ "pm",
10108
+ "pn",
10109
+ "po",
10110
+ "poorly",
10111
+ "possible",
10112
+ "possibly",
10113
+ "potentially",
10114
+ "pp",
10115
+ "pq",
10116
+ "pr",
10117
+ "predominantly",
10118
+ "present",
10119
+ "presumably",
10120
+ "previously",
10121
+ "primarily",
10122
+ "probably",
10123
+ "promptly",
10124
+ "proud",
10125
+ "provides",
10126
+ "ps",
10127
+ "pt",
10128
+ "pu",
10129
+ "put",
10130
+ "py",
10131
+ "q",
10132
+ "qj",
10133
+ "qu",
10134
+ "que",
10135
+ "quickly",
10136
+ "quite",
10137
+ "qv",
10138
+ "r",
10139
+ "r2",
10140
+ "ra",
10141
+ "ran",
10142
+ "rather",
10143
+ "rc",
10144
+ "rd",
10145
+ "re",
10146
+ "readily",
10147
+ "really",
10148
+ "reasonably",
10149
+ "recent",
10150
+ "recently",
10151
+ "ref",
10152
+ "refs",
10153
+ "regarding",
10154
+ "regardless",
10155
+ "regards",
10156
+ "related",
10157
+ "relatively",
10158
+ "research",
10159
+ "research-articl",
10160
+ "respectively",
10161
+ "resulted",
10162
+ "resulting",
10163
+ "results",
10164
+ "rf",
10165
+ "rh",
10166
+ "ri",
10167
+ "right",
10168
+ "rj",
10169
+ "rl",
10170
+ "rm",
10171
+ "rn",
10172
+ "ro",
10173
+ "rq",
10174
+ "rr",
10175
+ "rs",
10176
+ "rt",
10177
+ "ru",
10178
+ "run",
10179
+ "rv",
10180
+ "ry",
10181
+ "s",
10182
+ "s2",
10183
+ "sa",
10184
+ "said",
10185
+ "same",
10186
+ "saw",
10187
+ "say",
10188
+ "saying",
10189
+ "says",
10190
+ "sc",
10191
+ "sd",
10192
+ "se",
10193
+ "sec",
10194
+ "second",
10195
+ "secondly",
10196
+ "section",
10197
+ "see",
10198
+ "seeing",
10199
+ "seem",
10200
+ "seemed",
10201
+ "seeming",
10202
+ "seems",
10203
+ "seen",
10204
+ "self",
10205
+ "selves",
10206
+ "sensible",
10207
+ "sent",
10208
+ "serious",
10209
+ "seriously",
10210
+ "seven",
10211
+ "several",
10212
+ "sf",
10213
+ "shall",
10214
+ "shan",
10215
+ "shan't",
10216
+ "she",
10217
+ "shed",
10218
+ "she'd",
10219
+ "she'll",
10220
+ "shes",
10221
+ "she's",
10222
+ "should",
10223
+ "shouldn",
10224
+ "shouldn't",
10225
+ "should've",
10226
+ "show",
10227
+ "showed",
10228
+ "shown",
10229
+ "showns",
10230
+ "shows",
10231
+ "si",
10232
+ "side",
10233
+ "significant",
10234
+ "significantly",
10235
+ "similar",
10236
+ "similarly",
10237
+ "since",
10238
+ "sincere",
10239
+ "six",
10240
+ "sixty",
10241
+ "sj",
10242
+ "sl",
10243
+ "slightly",
10244
+ "sm",
10245
+ "sn",
10246
+ "so",
10247
+ "some",
10248
+ "somebody",
10249
+ "somehow",
10250
+ "someone",
10251
+ "somethan",
10252
+ "something",
10253
+ "sometime",
10254
+ "sometimes",
10255
+ "somewhat",
10256
+ "somewhere",
10257
+ "soon",
10258
+ "sorry",
10259
+ "sp",
10260
+ "specifically",
10261
+ "specified",
10262
+ "specify",
10263
+ "specifying",
10264
+ "sq",
10265
+ "sr",
10266
+ "ss",
10267
+ "st",
10268
+ "still",
10269
+ "stop",
10270
+ "strongly",
10271
+ "sub",
10272
+ "substantially",
10273
+ "successfully",
10274
+ "such",
10275
+ "sufficiently",
10276
+ "suggest",
10277
+ "sup",
10278
+ "sure",
10279
+ "sy",
10280
+ "system",
10281
+ "sz",
10282
+ "t",
10283
+ "t1",
10284
+ "t2",
10285
+ "t3",
10286
+ "take",
10287
+ "taken",
10288
+ "taking",
10289
+ "tb",
10290
+ "tc",
10291
+ "td",
10292
+ "te",
10293
+ "tell",
10294
+ "ten",
10295
+ "tends",
10296
+ "tf",
10297
+ "th",
10298
+ "than",
10299
+ "thank",
10300
+ "thanks",
10301
+ "thanx",
10302
+ "that",
10303
+ "that'll",
10304
+ "thats",
10305
+ "that's",
10306
+ "that've",
10307
+ "the",
10308
+ "their",
10309
+ "theirs",
10310
+ "them",
10311
+ "themselves",
10312
+ "then",
10313
+ "thence",
10314
+ "there",
10315
+ "thereafter",
10316
+ "thereby",
10317
+ "thered",
10318
+ "therefore",
10319
+ "therein",
10320
+ "there'll",
10321
+ "thereof",
10322
+ "therere",
10323
+ "theres",
10324
+ "there's",
10325
+ "thereto",
10326
+ "thereupon",
10327
+ "there've",
10328
+ "these",
10329
+ "they",
10330
+ "theyd",
10331
+ "they'd",
10332
+ "they'll",
10333
+ "theyre",
10334
+ "they're",
10335
+ "they've",
10336
+ "thickv",
10337
+ "thin",
10338
+ "think",
10339
+ "third",
10340
+ "this",
10341
+ "thorough",
10342
+ "thoroughly",
10343
+ "those",
10344
+ "thou",
10345
+ "though",
10346
+ "thoughh",
10347
+ "thousand",
10348
+ "three",
10349
+ "throug",
10350
+ "through",
10351
+ "throughout",
10352
+ "thru",
10353
+ "thus",
10354
+ "ti",
10355
+ "til",
10356
+ "tip",
10357
+ "tj",
10358
+ "tl",
10359
+ "tm",
10360
+ "tn",
10361
+ "to",
10362
+ "together",
10363
+ "too",
10364
+ "took",
10365
+ "top",
10366
+ "toward",
10367
+ "towards",
10368
+ "tp",
10369
+ "tq",
10370
+ "tr",
10371
+ "tried",
10372
+ "tries",
10373
+ "truly",
10374
+ "try",
10375
+ "trying",
10376
+ "ts",
10377
+ "t's",
10378
+ "tt",
10379
+ "tv",
10380
+ "twelve",
10381
+ "twenty",
10382
+ "twice",
10383
+ "two",
10384
+ "tx",
10385
+ "u",
10386
+ "u201d",
10387
+ "ue",
10388
+ "ui",
10389
+ "uj",
10390
+ "uk",
10391
+ "um",
10392
+ "un",
10393
+ "under",
10394
+ "unfortunately",
10395
+ "unless",
10396
+ "unlike",
10397
+ "unlikely",
10398
+ "until",
10399
+ "unto",
10400
+ "uo",
10401
+ "up",
10402
+ "upon",
10403
+ "ups",
10404
+ "ur",
10405
+ "us",
10406
+ "use",
10407
+ "used",
10408
+ "useful",
10409
+ "usefully",
10410
+ "usefulness",
10411
+ "uses",
10412
+ "using",
10413
+ "usually",
10414
+ "ut",
10415
+ "v",
10416
+ "va",
10417
+ "value",
10418
+ "various",
10419
+ "vd",
10420
+ "ve",
10421
+ "ve",
10422
+ "very",
10423
+ "via",
10424
+ "viz",
10425
+ "vj",
10426
+ "vo",
10427
+ "vol",
10428
+ "vols",
10429
+ "volumtype",
10430
+ "vq",
10431
+ "vs",
10432
+ "vt",
10433
+ "vu",
10434
+ "w",
10435
+ "wa",
10436
+ "want",
10437
+ "wants",
10438
+ "was",
10439
+ "wasn",
10440
+ "wasnt",
10441
+ "wasn't",
10442
+ "way",
10443
+ "we",
10444
+ "wed",
10445
+ "we'd",
10446
+ "welcome",
10447
+ "well",
10448
+ "we'll",
10449
+ "well-b",
10450
+ "went",
10451
+ "were",
10452
+ "we're",
10453
+ "weren",
10454
+ "werent",
10455
+ "weren't",
10456
+ "we've",
10457
+ "what",
10458
+ "whatever",
10459
+ "what'll",
10460
+ "whats",
10461
+ "what's",
10462
+ "when",
10463
+ "whence",
10464
+ "whenever",
10465
+ "when's",
10466
+ "where",
10467
+ "whereafter",
10468
+ "whereas",
10469
+ "whereby",
10470
+ "wherein",
10471
+ "wheres",
10472
+ "where's",
10473
+ "whereupon",
10474
+ "wherever",
10475
+ "whether",
10476
+ "which",
10477
+ "while",
10478
+ "whim",
10479
+ "whither",
10480
+ "who",
10481
+ "whod",
10482
+ "whoever",
10483
+ "whole",
10484
+ "who'll",
10485
+ "whom",
10486
+ "whomever",
10487
+ "whos",
10488
+ "who's",
10489
+ "whose",
10490
+ "why",
10491
+ "why's",
10492
+ "wi",
10493
+ "widely",
10494
+ "will",
10495
+ "willing",
10496
+ "wish",
10497
+ "with",
10498
+ "within",
10499
+ "without",
10500
+ "wo",
10501
+ "won",
10502
+ "wonder",
10503
+ "wont",
10504
+ "won't",
10505
+ "words",
10506
+ "world",
10507
+ "would",
10508
+ "wouldn",
10509
+ "wouldnt",
10510
+ "wouldn't",
10511
+ "www",
10512
+ "x",
10513
+ "x1",
10514
+ "x2",
10515
+ "x3",
10516
+ "xf",
10517
+ "xi",
10518
+ "xj",
10519
+ "xk",
10520
+ "xl",
10521
+ "xn",
10522
+ "xo",
10523
+ "xs",
10524
+ "xt",
10525
+ "xv",
10526
+ "xx",
10527
+ "y",
10528
+ "y2",
10529
+ "yes",
10530
+ "yet",
10531
+ "yj",
10532
+ "yl",
10533
+ "you",
10534
+ "youd",
10535
+ "you'd",
10536
+ "you'll",
10537
+ "your",
10538
+ "youre",
10539
+ "you're",
10540
+ "yours",
10541
+ "yourself",
10542
+ "yourselves",
10543
+ "you've",
10544
+ "yr",
10545
+ "ys",
10546
+ "yt",
10547
+ "z",
10548
+ "zero",
10549
+ "zi",
10550
+ "zz",
10551
+ "task"
10552
+ ]);
10553
+
10554
+ // dsp/eval.ts
10555
+ function filterTokens(tokens, exclusions) {
10556
+ return tokens.filter((token) => !exclusions.has(token));
10557
+ }
10558
+ function countTokens(tokens) {
10559
+ const counter = {};
10560
+ for (const token of tokens) {
10561
+ counter[token] = (counter[token] || 0) + 1;
10562
+ }
10563
+ return counter;
10564
+ }
10565
+ function normalizeText(s) {
10566
+ s = s.normalize("NFD");
10567
+ s = s.replace(/\b(a|an|the)\b/g, " ");
10568
+ s = s.split(/\s+/).join(" ");
10569
+ s = s.replace(/[!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~]/g, "");
10570
+ return s.toLowerCase();
10571
+ }
10572
+ function emScore(prediction, groundTruth) {
10573
+ return normalizeText(prediction) === normalizeText(groundTruth);
10574
+ }
10575
+ function f1Score(prediction, groundTruth) {
10576
+ const predictionTokens = normalizeText(prediction).split(" ");
10577
+ const groundTruthTokens = normalizeText(groundTruth).split(" ");
10578
+ const predictionCounts = countTokens(predictionTokens);
10579
+ const groundTruthCounts = countTokens(groundTruthTokens);
10580
+ let numSame = 0;
10581
+ for (const token in predictionCounts) {
10582
+ const v1 = predictionCounts[token] ?? 0;
10583
+ const v2 = groundTruthCounts[token] ?? 0;
10584
+ numSame += Math.min(v1, v2);
10585
+ }
10586
+ if (numSame === 0) {
10587
+ return 0;
10588
+ }
10589
+ const precision = numSame / predictionTokens.length;
10590
+ const recall = numSame / groundTruthTokens.length;
10591
+ return 2 * precision * recall / (precision + recall);
10592
+ }
10593
+ function novelF1ScoreOptimized(history, prediction, groundTruth, returnRecall = false) {
10594
+ const historyTokens = normalizeText(history).split(" ");
10595
+ let predictionTokens = normalizeText(prediction).split(" ");
10596
+ let groundTruthTokens = normalizeText(groundTruth).split(" ");
10597
+ const exclusions = /* @__PURE__ */ new Set([...stopwords, ...historyTokens]);
10598
+ predictionTokens = filterTokens(predictionTokens, exclusions);
10599
+ groundTruthTokens = filterTokens(groundTruthTokens, exclusions);
10600
+ const predictionCounts = countTokens(predictionTokens);
+ const groundTruthCounts = countTokens(groundTruthTokens);
+ let numSame = 0;
+ for (const token in predictionCounts) {
+ numSame += Math.min(predictionCounts[token] ?? 0, groundTruthCounts[token] ?? 0);
+ }
+ if (numSame === 0) {
+ return 0;
+ }
10601
+ const precision = numSame / predictionTokens.length;
10602
+ const recall = numSame / groundTruthTokens.length;
10603
+ const f1 = 2 * precision * recall / (precision + recall);
10604
+ return returnRecall ? recall : f1;
10605
+ }
10606
+ var AxEvalUtil = {
10607
+ emScore,
10608
+ f1Score,
10609
+ novelF1ScoreOptimized
10610
+ };
10611
+
8522
10612
  // ../../node_modules/uuid/dist/esm-node/rng.js
8523
10613
  import crypto2 from "crypto";
8524
10614
  var rnds8Pool = new Uint8Array(256);
@@ -9015,7 +11105,7 @@ var AxRAG = class extends AxChainOfThought {
9015
11105
  options
9016
11106
  );
9017
11107
  const val = await this.queryFn(query);
9018
- context = axStringUtil.dedup([...context, val]);
11108
+ context = AxStringUtil.dedup([...context, val]);
9019
11109
  }
9020
11110
  return super.forward(ai, { context, question }, options);
9021
11111
  }
@@ -9076,6 +11166,7 @@ export {
9076
11166
  AxDefaultResultReranker,
9077
11167
  AxDockerSession,
9078
11168
  AxEmbeddingAdapter,
11169
+ AxEvalUtil,
9079
11170
  AxFunctionError,
9080
11171
  AxFunctionProcessor,
9081
11172
  AxGen,
@@ -9088,6 +11179,7 @@ export {
9088
11179
  AxMCPHTTPTransport,
9089
11180
  AxMCPStdioTransport,
9090
11181
  AxMemory,
11182
+ AxMiPRO,
9091
11183
  AxMockAIService,
9092
11184
  AxMultiServiceRouter,
9093
11185
  AxProgram,
@@ -9099,6 +11191,47 @@ export {
9099
11191
  AxSimpleClassifier,
9100
11192
  AxSimpleClassifierClass,
9101
11193
  AxSpanKindValues,
9102
- AxTestPrompt
11194
+ AxStringUtil,
11195
+ AxTestPrompt,
11196
+ axAIAnthropicDefaultConfig,
11197
+ axAIAzureOpenAIBestConfig,
11198
+ axAIAzureOpenAICreativeConfig,
11199
+ axAIAzureOpenAIDefaultConfig,
11200
+ axAIAzureOpenAIFastConfig,
11201
+ axAICohereCreativeConfig,
11202
+ axAICohereDefaultConfig,
11203
+ axAIDeepSeekCodeConfig,
11204
+ axAIDeepSeekDefaultConfig,
11205
+ axAIGoogleGeminiDefaultConfig,
11206
+ axAIGoogleGeminiDefaultCreativeConfig,
11207
+ axAIHuggingFaceCreativeConfig,
11208
+ axAIHuggingFaceDefaultConfig,
11209
+ axAIMistralBestConfig,
11210
+ axAIMistralDefaultConfig,
11211
+ axAIOllamaDefaultConfig,
11212
+ axAIOllamaDefaultCreativeConfig,
11213
+ axAIOpenAIBestConfig,
11214
+ axAIOpenAICreativeConfig,
11215
+ axAIOpenAIDefaultConfig,
11216
+ axAIOpenAIFastConfig,
11217
+ axAIRekaBestConfig,
11218
+ axAIRekaCreativeConfig,
11219
+ axAIRekaDefaultConfig,
11220
+ axAIRekaFastConfig,
11221
+ axAITogetherDefaultConfig,
11222
+ axBaseAIDefaultConfig,
11223
+ axBaseAIDefaultCreativeConfig,
11224
+ axModelInfoAnthropic,
11225
+ axModelInfoCohere,
11226
+ axModelInfoDeepSeek,
11227
+ axModelInfoGoogleGemini,
11228
+ axModelInfoGroq,
11229
+ axModelInfoHuggingFace,
11230
+ axModelInfoMistral,
11231
+ axModelInfoOpenAI,
11232
+ axModelInfoReka,
11233
+ axModelInfoTogether,
11234
+ axSpanAttributes,
11235
+ axSpanEvents
9103
11236
  };
9104
11237
  //# sourceMappingURL=index.js.map
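Several of the newly exported names above are zero-argument config factories (the axAI*Config functions). A minimal sketch of calling one and overriding a field, assuming the package entry point as the import path; the overridden field name is an illustrative assumption, not something this diff asserts:

import { axAIOpenAIDefaultConfig, axAIOpenAIFastConfig } from "@ax-llm/ax";

// Each factory returns a fresh plain config object, so it can be spread and
// adjusted without affecting later calls.
const base = axAIOpenAIDefaultConfig();
const fast = axAIOpenAIFastConfig();

// Hypothetical override before handing the config to an AI service constructor.
const deterministic = { ...fast, temperature: 0 };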