@ax-llm/ax 12.0.17 → 12.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.cjs CHANGED
@@ -98,6 +98,7 @@ __export(index_exports, {
98
98
  AxEmbeddingAdapter: () => AxEmbeddingAdapter,
99
99
  AxEvalUtil: () => AxEvalUtil,
100
100
  AxFlow: () => AxFlow,
101
+ AxFlowTypedSubContextImpl: () => AxFlowTypedSubContextImpl,
101
102
  AxFunctionError: () => AxFunctionError,
102
103
  AxFunctionProcessor: () => AxFunctionProcessor,
103
104
  AxGen: () => AxGen,
@@ -175,6 +176,7 @@ __export(index_exports, {
175
176
  axModelInfoHuggingFace: () => axModelInfoHuggingFace,
176
177
  axModelInfoMistral: () => axModelInfoMistral,
177
178
  axModelInfoOpenAI: () => axModelInfoOpenAI,
179
+ axModelInfoOpenAIResponses: () => axModelInfoOpenAIResponses,
178
180
  axModelInfoReka: () => axModelInfoReka,
179
181
  axModelInfoTogether: () => axModelInfoTogether,
180
182
  axSpanAttributes: () => axSpanAttributes,
@@ -1434,6 +1436,14 @@ var AxBaseAI = class {
1434
1436
  if (options?.showThoughts && !this.getFeatures(model).hasShowThoughts) {
1435
1437
  throw new Error(`Model ${model} does not support showThoughts.`);
1436
1438
  }
1439
+ const modelInfo = this.modelInfo.find(
1440
+ (info) => info.name === model
1441
+ );
1442
+ if (modelInfo?.isExpensive && options?.useExpensiveModel !== "yes") {
1443
+ throw new Error(
1444
+ `Model ${model} is marked as expensive and requires explicit confirmation. Set useExpensiveModel: "yes" to proceed.`
1445
+ );
1446
+ }
1437
1447
  modelConfig.stream = (options?.stream !== void 0 ? options.stream : modelConfig.stream) ?? true;
1438
1448
  const canStream = this.getFeatures(model).streaming;
1439
1449
  if (!canStream) {
@@ -2568,8 +2578,6 @@ function mapFinishReason(stopReason) {
2568
2578
 
2569
2579
  // ai/openai/chat_types.ts
2570
2580
  var AxAIOpenAIModel = /* @__PURE__ */ ((AxAIOpenAIModel2) => {
2571
- AxAIOpenAIModel2["O1"] = "o1";
2572
- AxAIOpenAIModel2["O1Mini"] = "o1-mini";
2573
2581
  AxAIOpenAIModel2["GPT4"] = "gpt-4";
2574
2582
  AxAIOpenAIModel2["GPT41"] = "gpt-4.1";
2575
2583
  AxAIOpenAIModel2["GPT41Mini"] = "gpt-4.1-mini";
@@ -2582,6 +2590,11 @@ var AxAIOpenAIModel = /* @__PURE__ */ ((AxAIOpenAIModel2) => {
2582
2590
  AxAIOpenAIModel2["GPT35TextDavinci002"] = "text-davinci-002";
2583
2591
  AxAIOpenAIModel2["GPT3TextBabbage002"] = "text-babbage-002";
2584
2592
  AxAIOpenAIModel2["GPT3TextAda001"] = "text-ada-001";
2593
+ AxAIOpenAIModel2["O1"] = "o1";
2594
+ AxAIOpenAIModel2["O1Mini"] = "o1-mini";
2595
+ AxAIOpenAIModel2["O3"] = "o3";
2596
+ AxAIOpenAIModel2["O3Mini"] = "o3-mini";
2597
+ AxAIOpenAIModel2["O4Mini"] = "o4-mini";
2585
2598
  return AxAIOpenAIModel2;
2586
2599
  })(AxAIOpenAIModel || {});
2587
2600
  var AxAIOpenAIEmbedModel = /* @__PURE__ */ ((AxAIOpenAIEmbedModel2) => {
@@ -2593,8 +2606,6 @@ var AxAIOpenAIEmbedModel = /* @__PURE__ */ ((AxAIOpenAIEmbedModel2) => {
2593
2606
 
2594
2607
  // ai/openai/responses_types.ts
2595
2608
  var AxAIOpenAIResponsesModel = /* @__PURE__ */ ((AxAIOpenAIResponsesModel2) => {
2596
- AxAIOpenAIResponsesModel2["O1"] = "o1";
2597
- AxAIOpenAIResponsesModel2["O1Mini"] = "o1-mini";
2598
2609
  AxAIOpenAIResponsesModel2["GPT4"] = "gpt-4";
2599
2610
  AxAIOpenAIResponsesModel2["GPT41"] = "gpt-4.1";
2600
2611
  AxAIOpenAIResponsesModel2["GPT41Mini"] = "gpt-4.1-mini";
@@ -2607,6 +2618,10 @@ var AxAIOpenAIResponsesModel = /* @__PURE__ */ ((AxAIOpenAIResponsesModel2) => {
2607
2618
  AxAIOpenAIResponsesModel2["GPT35TextDavinci002"] = "text-davinci-002";
2608
2619
  AxAIOpenAIResponsesModel2["GPT3TextBabbage002"] = "text-babbage-002";
2609
2620
  AxAIOpenAIResponsesModel2["GPT3TextAda001"] = "text-ada-001";
2621
+ AxAIOpenAIResponsesModel2["O1Pro"] = "o1-pro";
2622
+ AxAIOpenAIResponsesModel2["O1"] = "o1";
2623
+ AxAIOpenAIResponsesModel2["O1Mini"] = "o1-mini";
2624
+ AxAIOpenAIResponsesModel2["O3Pro"] = "o3-pro";
2610
2625
  AxAIOpenAIResponsesModel2["O3"] = "o3";
2611
2626
  AxAIOpenAIResponsesModel2["O3Mini"] = "o3-mini";
2612
2627
  AxAIOpenAIResponsesModel2["O4Mini"] = "o4-mini";
@@ -2615,20 +2630,7 @@ var AxAIOpenAIResponsesModel = /* @__PURE__ */ ((AxAIOpenAIResponsesModel2) => {
2615
2630
 
2616
2631
  // ai/openai/info.ts
2617
2632
  var axModelInfoOpenAI = [
2618
- {
2619
- name: "o1" /* O1 */,
2620
- currency: "usd",
2621
- promptTokenCostPer1M: 15,
2622
- completionTokenCostPer1M: 60,
2623
- hasThinkingBudget: true
2624
- },
2625
- {
2626
- name: "o1-mini" /* O1Mini */,
2627
- currency: "usd",
2628
- promptTokenCostPer1M: 1.1,
2629
- completionTokenCostPer1M: 14.4,
2630
- hasThinkingBudget: true
2631
- },
2633
+ // Not Reasoning models
2632
2634
  {
2633
2635
  name: "gpt-4" /* GPT4 */,
2634
2636
  currency: "usd",
@@ -2677,30 +2679,36 @@ var axModelInfoOpenAI = [
2677
2679
  promptTokenCostPer1M: 0.5,
2678
2680
  completionTokenCostPer1M: 1.5
2679
2681
  },
2680
- // Responses API only models
2682
+ // Reasoning models
2683
+ {
2684
+ name: "o1" /* O1 */,
2685
+ currency: "usd",
2686
+ promptTokenCostPer1M: 15,
2687
+ completionTokenCostPer1M: 60
2688
+ },
2689
+ {
2690
+ name: "o1-mini" /* O1Mini */,
2691
+ currency: "usd",
2692
+ promptTokenCostPer1M: 1.1,
2693
+ completionTokenCostPer1M: 14.4
2694
+ },
2681
2695
  {
2682
2696
  name: "o3" /* O3 */,
2683
2697
  currency: "usd",
2684
2698
  promptTokenCostPer1M: 15,
2685
- completionTokenCostPer1M: 60,
2686
- hasThinkingBudget: true,
2687
- hasShowThoughts: true
2699
+ completionTokenCostPer1M: 60
2688
2700
  },
2689
2701
  {
2690
2702
  name: "o3-mini" /* O3Mini */,
2691
2703
  currency: "usd",
2692
2704
  promptTokenCostPer1M: 1.1,
2693
- completionTokenCostPer1M: 4.4,
2694
- hasThinkingBudget: true,
2695
- hasShowThoughts: true
2705
+ completionTokenCostPer1M: 4.4
2696
2706
  },
2697
2707
  {
2698
2708
  name: "o4-mini" /* O4Mini */,
2699
2709
  currency: "usd",
2700
2710
  promptTokenCostPer1M: 1.1,
2701
- completionTokenCostPer1M: 4.4,
2702
- hasThinkingBudget: true,
2703
- hasShowThoughts: true
2711
+ completionTokenCostPer1M: 4.4
2704
2712
  },
2705
2713
  // Embedding models
2706
2714
  {
@@ -2722,8 +2730,123 @@ var axModelInfoOpenAI = [
2722
2730
  completionTokenCostPer1M: 0.13
2723
2731
  }
2724
2732
  ];
2733
+ var axModelInfoOpenAIResponses = [
2734
+ // Not Reasoning models
2735
+ {
2736
+ name: "gpt-4" /* GPT4 */,
2737
+ currency: "usd",
2738
+ promptTokenCostPer1M: 30,
2739
+ completionTokenCostPer1M: 60
2740
+ },
2741
+ {
2742
+ name: "gpt-4.1" /* GPT41 */,
2743
+ currency: "usd",
2744
+ promptTokenCostPer1M: 2,
2745
+ completionTokenCostPer1M: 8
2746
+ },
2747
+ {
2748
+ name: "gpt-4.1-mini" /* GPT41Mini */,
2749
+ currency: "usd",
2750
+ promptTokenCostPer1M: 0.4,
2751
+ completionTokenCostPer1M: 1.6
2752
+ },
2753
+ {
2754
+ name: "gpt-4o" /* GPT4O */,
2755
+ currency: "usd",
2756
+ promptTokenCostPer1M: 5,
2757
+ completionTokenCostPer1M: 15
2758
+ },
2759
+ {
2760
+ name: "gpt-4o-mini" /* GPT4OMini */,
2761
+ currency: "usd",
2762
+ promptTokenCostPer1M: 0.15,
2763
+ completionTokenCostPer1M: 0.6
2764
+ },
2765
+ {
2766
+ name: "chatgpt-4o-latest" /* GPT4ChatGPT4O */,
2767
+ currency: "usd",
2768
+ promptTokenCostPer1M: 5,
2769
+ completionTokenCostPer1M: 15
2770
+ },
2771
+ {
2772
+ name: "gpt-4-turbo" /* GPT4Turbo */,
2773
+ currency: "usd",
2774
+ promptTokenCostPer1M: 10,
2775
+ completionTokenCostPer1M: 30
2776
+ },
2777
+ {
2778
+ name: "gpt-3.5-turbo" /* GPT35Turbo */,
2779
+ currency: "usd",
2780
+ promptTokenCostPer1M: 0.5,
2781
+ completionTokenCostPer1M: 1.5
2782
+ },
2783
+ // Reasoning models
2784
+ {
2785
+ name: "o1-pro" /* O1Pro */,
2786
+ currency: "usd",
2787
+ promptTokenCostPer1M: 150,
2788
+ completionTokenCostPer1M: 600,
2789
+ hasThinkingBudget: true,
2790
+ hasShowThoughts: true,
2791
+ isExpensive: true
2792
+ },
2793
+ {
2794
+ name: "o1" /* O1 */,
2795
+ currency: "usd",
2796
+ promptTokenCostPer1M: 15,
2797
+ completionTokenCostPer1M: 60,
2798
+ hasThinkingBudget: true,
2799
+ hasShowThoughts: true
2800
+ },
2801
+ {
2802
+ name: "o3-pro" /* O3Pro */,
2803
+ currency: "usd",
2804
+ promptTokenCostPer1M: 20,
2805
+ completionTokenCostPer1M: 80,
2806
+ hasThinkingBudget: true,
2807
+ hasShowThoughts: true,
2808
+ isExpensive: true
2809
+ },
2810
+ {
2811
+ name: "o3" /* O3 */,
2812
+ currency: "usd",
2813
+ promptTokenCostPer1M: 15,
2814
+ completionTokenCostPer1M: 60,
2815
+ hasThinkingBudget: true,
2816
+ hasShowThoughts: true
2817
+ },
2818
+ {
2819
+ name: "o3-mini" /* O3Mini */,
2820
+ currency: "usd",
2821
+ promptTokenCostPer1M: 1.1,
2822
+ completionTokenCostPer1M: 4.4,
2823
+ hasThinkingBudget: true,
2824
+ hasShowThoughts: true
2825
+ },
2826
+ {
2827
+ name: "o4-mini" /* O4Mini */,
2828
+ currency: "usd",
2829
+ promptTokenCostPer1M: 1.1,
2830
+ completionTokenCostPer1M: 4.4,
2831
+ hasThinkingBudget: true,
2832
+ hasShowThoughts: true
2833
+ }
2834
+ ];
2725
2835
 
2726
2836
  // ai/openai/api.ts
2837
+ var isOpenAIThinkingModel = (model) => {
2838
+ const thinkingModels = [
2839
+ "o1" /* O1 */,
2840
+ "o1-mini" /* O1Mini */,
2841
+ "o3" /* O3 */,
2842
+ "o3-mini" /* O3Mini */,
2843
+ "o4-mini" /* O4Mini */,
2844
+ // Pro models (string values since they're not in the regular chat enum)
2845
+ "o1-pro",
2846
+ "o3-pro"
2847
+ ];
2848
+ return thinkingModels.includes(model) || thinkingModels.includes(model);
2849
+ };
2727
2850
  var axAIOpenAIDefaultConfig = () => structuredClone({
2728
2851
  model: "gpt-4.1" /* GPT41 */,
2729
2852
  embedModel: "text-embedding-3-small" /* TextEmbedding3Small */,
@@ -2787,20 +2910,24 @@ var AxAIOpenAIImpl = class {
2787
2910
  const frequencyPenalty = req.modelConfig?.frequencyPenalty ?? this.config.frequencyPenalty;
2788
2911
  const stream = req.modelConfig?.stream ?? this.config.stream;
2789
2912
  const store = this.config.store;
2913
+ const isThinkingModel = isOpenAIThinkingModel(model);
2790
2914
  let reqValue = {
2791
2915
  model,
2792
2916
  messages,
2793
2917
  response_format: this.config?.responseFormat ? { type: this.config.responseFormat } : void 0,
2794
2918
  tools,
2795
2919
  tool_choice: toolsChoice,
2796
- max_completion_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,
2797
- temperature: req.modelConfig?.temperature ?? this.config.temperature,
2798
- top_p: req.modelConfig?.topP ?? this.config.topP ?? 1,
2799
- n: req.modelConfig?.n ?? this.config.n,
2920
+ // For thinking models, don't set these parameters as they're not supported
2921
+ ...isThinkingModel ? {} : {
2922
+ max_completion_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,
2923
+ temperature: req.modelConfig?.temperature ?? this.config.temperature,
2924
+ top_p: req.modelConfig?.topP ?? this.config.topP ?? 1,
2925
+ n: req.modelConfig?.n ?? this.config.n,
2926
+ presence_penalty: req.modelConfig?.presencePenalty ?? this.config.presencePenalty,
2927
+ ...frequencyPenalty ? { frequency_penalty: frequencyPenalty } : {}
2928
+ },
2800
2929
  stop: req.modelConfig?.stopSequences ?? this.config.stop,
2801
- presence_penalty: req.modelConfig?.presencePenalty ?? this.config.presencePenalty,
2802
2930
  logit_bias: this.config.logitBias,
2803
- ...frequencyPenalty ? { frequency_penalty: frequencyPenalty } : {},
2804
2931
  ...stream && this.streamingUsage ? { stream: true, stream_options: { include_usage: true } } : {},
2805
2932
  ...store ? { store } : {},
2806
2933
  ...this.config.serviceTier ? { service_tier: this.config.serviceTier } : {},
@@ -4692,6 +4819,18 @@ var AxAIOllama = class extends AxAIOpenAIBase {
4692
4819
  };
4693
4820
 
4694
4821
  // ai/openai/responses_api.ts
4822
+ var isOpenAIResponsesThinkingModel = (model) => {
4823
+ const thinkingModels = [
4824
+ "o1" /* O1 */,
4825
+ "o1-mini" /* O1Mini */,
4826
+ "o1-pro" /* O1Pro */,
4827
+ "o3" /* O3 */,
4828
+ "o3-mini" /* O3Mini */,
4829
+ "o3-pro" /* O3Pro */,
4830
+ "o4-mini" /* O4Mini */
4831
+ ];
4832
+ return thinkingModels.includes(model);
4833
+ };
4695
4834
  var AxAIOpenAIResponsesImpl = class {
4696
4835
  constructor(config, streamingUsage, responsesReqUpdater) {
4697
4836
  this.config = config;
@@ -4847,10 +4986,37 @@ var AxAIOpenAIResponsesImpl = class {
4847
4986
  parameters: v.parameters ?? {}
4848
4987
  })
4849
4988
  );
4850
- const includeFields = [];
4851
- const shouldShowThoughts = config?.thinkingTokenBudget === "none" ? false : config?.showThoughts;
4852
- if (shouldShowThoughts) {
4853
- includeFields.push("reasoning.encrypted_content");
4989
+ const includeFields = (
4990
+ // | 'computer_call_output.output.image_url'
4991
+ // | 'reasoning.encrypted_content'
4992
+ // | 'code_interpreter_call.outputs'
4993
+ []
4994
+ );
4995
+ const isThinkingModel = isOpenAIResponsesThinkingModel(model);
4996
+ let reasoningSummary = this.config.reasoningSummary;
4997
+ if (!config?.showThoughts) {
4998
+ reasoningSummary = void 0;
4999
+ } else if (!reasoningSummary) {
5000
+ reasoningSummary = "auto";
5001
+ }
5002
+ let reasoningEffort = this.config.reasoningEffort;
5003
+ if (config?.thinkingTokenBudget) {
5004
+ switch (config.thinkingTokenBudget) {
5005
+ case "none":
5006
+ reasoningEffort = void 0;
5007
+ break;
5008
+ case "minimal":
5009
+ reasoningEffort = "low";
5010
+ break;
5011
+ case "low":
5012
+ reasoningEffort = "medium";
5013
+ break;
5014
+ case "medium":
5015
+ case "high":
5016
+ case "highest":
5017
+ reasoningEffort = "high";
5018
+ break;
5019
+ }
4854
5020
  }
4855
5021
  let mutableReq = {
4856
5022
  model,
@@ -4859,9 +5025,15 @@ var AxAIOpenAIResponsesImpl = class {
4859
5025
  instructions: finalInstructions,
4860
5026
  tools: tools?.length ? tools : void 0,
4861
5027
  tool_choice: req.functionCall === "none" || req.functionCall === "auto" || req.functionCall === "required" ? req.functionCall : typeof req.functionCall === "object" && req.functionCall.function ? { type: "function", name: req.functionCall.function.name } : void 0,
4862
- max_output_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens ?? void 0,
4863
- temperature: req.modelConfig?.temperature ?? this.config.temperature ?? void 0,
4864
- top_p: req.modelConfig?.topP ?? this.config.topP ?? void 0,
5028
+ // For thinking models, don't set these parameters as they're not supported
5029
+ ...isThinkingModel ? {
5030
+ max_output_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens ?? void 0
5031
+ } : {
5032
+ temperature: req.modelConfig?.temperature ?? this.config.temperature ?? void 0,
5033
+ top_p: req.modelConfig?.topP ?? this.config.topP ?? void 0,
5034
+ presence_penalty: req.modelConfig?.presencePenalty ?? this.config.presencePenalty ?? void 0,
5035
+ frequency_penalty: req.modelConfig?.frequencyPenalty ?? this.config.frequencyPenalty ?? void 0
5036
+ },
4865
5037
  stream: req.modelConfig?.stream ?? this.config.stream ?? false,
4866
5038
  // Sourced from modelConfig or global config
4867
5039
  // Optional fields from AxAIOpenAIResponsesRequest that need to be in Mutable for initialization
@@ -4870,7 +5042,12 @@ var AxAIOpenAIResponsesImpl = class {
4870
5042
  metadata: void 0,
4871
5043
  parallel_tool_calls: this.config.parallelToolCalls,
4872
5044
  previous_response_id: void 0,
4873
- reasoning: void 0,
5045
+ ...reasoningEffort ? {
5046
+ reasoning: {
5047
+ effort: reasoningEffort,
5048
+ summary: reasoningSummary
5049
+ }
5050
+ } : {},
4874
5051
  service_tier: this.config.serviceTier,
4875
5052
  store: this.config.store,
4876
5053
  text: void 0,
@@ -5290,19 +5467,6 @@ var AxAIOpenAIResponsesImpl = class {
5290
5467
  ];
5291
5468
  }
5292
5469
  break;
5293
- case "reasoning":
5294
- {
5295
- const reasoningItem = event.item;
5296
- baseResult.id = event.item.id;
5297
- if (reasoningItem.encrypted_content) {
5298
- baseResult.thought = reasoningItem.encrypted_content;
5299
- } else if (reasoningItem.summary) {
5300
- baseResult.thought = reasoningItem.summary.map(
5301
- (s2) => typeof s2 === "object" ? JSON.stringify(s2) : s2
5302
- ).join("\n");
5303
- }
5304
- }
5305
- break;
5306
5470
  }
5307
5471
  break;
5308
5472
  case "response.content_part.added":
@@ -5328,16 +5492,21 @@ var AxAIOpenAIResponsesImpl = class {
5328
5492
  }
5329
5493
  ];
5330
5494
  break;
5331
- case "response.function_call_arguments.done":
5332
- break;
5495
+ // case 'response.function_call_arguments.done':
5496
+ // // Function call arguments done - don't return function calls here
5497
+ // // The mergeFunctionCalls will handle combining name and arguments
5498
+ // baseResult.id = event.item_id
5499
+ // baseResult.finishReason = 'function_call'
5500
+ // break
5333
5501
  case "response.reasoning_summary_text.delta":
5334
5502
  baseResult.id = event.item_id;
5335
5503
  baseResult.thought = event.delta;
5336
5504
  break;
5337
- case "response.reasoning_summary_text.done":
5338
- baseResult.id = event.item_id;
5339
- baseResult.thought = event.text;
5340
- break;
5505
+ // case 'response.reasoning_summary_text.done':
5506
+ // // Reasoning summary done
5507
+ // baseResult.id = event.item_id
5508
+ // baseResult.thought = event.text
5509
+ // break
5341
5510
  // File search tool events
5342
5511
  case "response.file_search_call.in_progress":
5343
5512
  case "response.file_search_call.searching":
@@ -5431,10 +5600,6 @@ var AxAIOpenAIResponsesImpl = class {
5431
5600
  baseResult.id = event.item.id;
5432
5601
  baseResult.finishReason = "function_call";
5433
5602
  break;
5434
- case "reasoning":
5435
- baseResult.id = event.item.id;
5436
- baseResult.finishReason = "stop";
5437
- break;
5438
5603
  }
5439
5604
  break;
5440
5605
  case "response.completed":
@@ -5562,7 +5727,7 @@ var AxAIOpenAIResponses = class extends AxAIOpenAIResponsesBase {
5562
5727
  if (!apiKey || apiKey === "") {
5563
5728
  throw new Error("OpenAI API key not set");
5564
5729
  }
5565
- modelInfo = [...axModelInfoOpenAI, ...modelInfo ?? []];
5730
+ modelInfo = [...axModelInfoOpenAIResponses, ...modelInfo ?? []];
5566
5731
  const supportFor = (model) => {
5567
5732
  const mi = getModelInfo({
5568
5733
  model,
@@ -9142,7 +9307,7 @@ async function* processStreamingResponse({
9142
9307
  usage.push(v.modelUsage);
9143
9308
  }
9144
9309
  for (const result of v.results) {
9145
- if (result.content === "" && (!result.functionCalls || result.functionCalls.length === 0)) {
9310
+ if (result.content === "" && (!result.thought || result.thought === "") && (!result.functionCalls || result.functionCalls.length === 0)) {
9146
9311
  continue;
9147
9312
  }
9148
9313
  const state = states.find((s2) => s2.index === result.index);
@@ -9854,7 +10019,7 @@ var toFieldType = (type) => {
9854
10019
  case "number":
9855
10020
  return "number";
9856
10021
  case "boolean":
9857
- return "boolean";
10022
+ return "boolean (true or false)";
9858
10023
  case "date":
9859
10024
  return 'date ("YYYY-MM-DD" format)';
9860
10025
  case "datetime":
@@ -13012,56 +13177,494 @@ var AxDockerSession = class {
13012
13177
  }
13013
13178
  };
13014
13179
 
13015
- // dsp/loader.ts
13016
- var AxHFDataLoader = class {
13017
- rows = [];
13018
- baseUrl;
13019
- dataset;
13020
- split;
13021
- config;
13022
- options;
13023
- constructor({
13024
- dataset,
13025
- split,
13026
- config,
13027
- options
13028
- }) {
13029
- this.baseUrl = "https://datasets-server.huggingface.co/rows";
13030
- this.dataset = dataset;
13031
- this.split = split;
13032
- this.config = config;
13033
- this.options = options;
13180
+ // flow/flow.ts
13181
+ var AxFlow = class extends AxProgramWithSignature {
13182
+ nodes = /* @__PURE__ */ new Map();
13183
+ flowDefinition = [];
13184
+ nodeGenerators = /* @__PURE__ */ new Map();
13185
+ loopStack = [];
13186
+ stepLabels = /* @__PURE__ */ new Map();
13187
+ branchContext = null;
13188
+ constructor(signature = "userInput:string -> flowOutput:string") {
13189
+ super(signature);
13034
13190
  }
13035
- async fetchDataFromAPI(url) {
13036
- try {
13037
- const response = await fetch(url);
13038
- if (!response.ok) {
13039
- throw new Error(`Error fetching data: ${response.statusText}`);
13040
- }
13041
- const data = await response.json();
13042
- if (!data?.rows) {
13043
- throw new Error("Invalid data format");
13044
- }
13045
- return data.rows;
13046
- } catch (error) {
13047
- console.error("Error fetching data from API:", error);
13048
- throw error;
13191
+ /**
13192
+ * Declares a reusable computational node and its input/output signature.
13193
+ * Returns a new AxFlow type that tracks this node in the TNodes registry.
13194
+ *
13195
+ * @param name - The name of the node
13196
+ * @param signature - Signature string in the same format as AxSignature
13197
+ * @param options - Optional program forward options (same as AxGen)
13198
+ * @returns New AxFlow instance with updated TNodes type
13199
+ *
13200
+ * @example
13201
+ * ```typescript
13202
+ * flow.node('summarizer', 'text:string -> summary:string')
13203
+ * flow.node('analyzer', 'text:string -> analysis:string, confidence:number', { debug: true })
13204
+ * ```
13205
+ */
13206
+ node(name, signature, options) {
13207
+ if (!signature) {
13208
+ throw new Error(
13209
+ `Invalid signature for node '${name}': signature cannot be empty`
13210
+ );
13049
13211
  }
13212
+ this.nodes.set(name, {
13213
+ inputs: {},
13214
+ outputs: {}
13215
+ });
13216
+ this.nodeGenerators.set(name, new AxGen(signature, options));
13217
+ return this;
13050
13218
  }
13051
- // https://datasets-server.huggingface.co/rows?dataset=hotpot_qa&config=distractor&split=train&offset=0&length=100
13052
- async loadData() {
13053
- const offset = this.options?.offset ?? 0;
13054
- const length = this.options?.length ?? 100;
13055
- const ds = encodeURIComponent(this.dataset);
13056
- const url = `${this.baseUrl}?dataset=${ds}&config=${this.config}&split=${this.split}&offset=${offset}&length=${length}`;
13057
- console.log("Downloading data from API.");
13058
- this.rows = await this.fetchDataFromAPI(url);
13059
- return this.rows;
13060
- }
13061
- setData(rows) {
13062
- this.rows = rows;
13219
+ /**
13220
+ * Applies a synchronous transformation to the state object.
13221
+ * Returns a new AxFlow type with the evolved state.
13222
+ *
13223
+ * @param transform - Function that takes the current state and returns a new state
13224
+ * @returns New AxFlow instance with updated TState type
13225
+ *
13226
+ * @example
13227
+ * ```typescript
13228
+ * flow.map(state => ({ ...state, processedText: state.text.toLowerCase() }))
13229
+ * ```
13230
+ */
13231
+ map(transform) {
13232
+ const step = (state) => {
13233
+ return transform(state);
13234
+ };
13235
+ if (this.branchContext?.currentBranchValue !== void 0) {
13236
+ const currentBranch = this.branchContext.branches.get(
13237
+ this.branchContext.currentBranchValue
13238
+ ) || [];
13239
+ currentBranch.push(step);
13240
+ this.branchContext.branches.set(
13241
+ this.branchContext.currentBranchValue,
13242
+ currentBranch
13243
+ );
13244
+ } else {
13245
+ this.flowDefinition.push(step);
13246
+ }
13247
+ return this;
13063
13248
  }
13064
- getData() {
13249
+ /**
13250
+ * Labels a step for later reference (useful for feedback loops).
13251
+ *
13252
+ * @param label - The label to assign to the current step position
13253
+ * @returns this (for chaining, no type change)
13254
+ *
13255
+ * @example
13256
+ * ```typescript
13257
+ * flow.label('retry-point')
13258
+ * .execute('queryGen', ...)
13259
+ * ```
13260
+ */
13261
+ label(label) {
13262
+ if (this.branchContext?.currentBranchValue !== void 0) {
13263
+ throw new Error("Cannot create labels inside branch blocks");
13264
+ }
13265
+ this.stepLabels.set(label, this.flowDefinition.length);
13266
+ return this;
13267
+ }
13268
+ /**
13269
+ * Executes a previously defined node with full type safety.
13270
+ * The node name must exist in TNodes, and the mapping function is typed based on the node's signature.
13271
+ *
13272
+ * @param nodeName - The name of the node to execute (must exist in TNodes)
13273
+ * @param mapping - Typed function that takes the current state and returns the input for the node
13274
+ * @param dynamicContext - Optional object to override the AI service or options for this specific step
13275
+ * @returns New AxFlow instance with TState augmented with the node's result
13276
+ *
13277
+ * @example
13278
+ * ```typescript
13279
+ * flow.execute('summarizer', state => ({ text: state.originalText }), { ai: cheapAI })
13280
+ * ```
13281
+ */
13282
+ execute(nodeName, mapping, dynamicContext) {
13283
+ if (!this.nodes.has(nodeName)) {
13284
+ throw new Error(
13285
+ `Node '${nodeName}' not found. Make sure to define it with .node() first.`
13286
+ );
13287
+ }
13288
+ const nodeGenerator = this.nodeGenerators.get(nodeName);
13289
+ if (!nodeGenerator) {
13290
+ throw new Error(`Node generator for '${nodeName}' not found.`);
13291
+ }
13292
+ const step = async (state, context3) => {
13293
+ const ai = dynamicContext?.ai ?? context3.mainAi;
13294
+ const options = dynamicContext?.options ?? context3.mainOptions;
13295
+ const nodeInputs = mapping(state);
13296
+ const result = await nodeGenerator.forward(ai, nodeInputs, options);
13297
+ return {
13298
+ ...state,
13299
+ [`${nodeName}Result`]: result
13300
+ };
13301
+ };
13302
+ if (this.branchContext?.currentBranchValue !== void 0) {
13303
+ const currentBranch = this.branchContext.branches.get(
13304
+ this.branchContext.currentBranchValue
13305
+ ) || [];
13306
+ currentBranch.push(step);
13307
+ this.branchContext.branches.set(
13308
+ this.branchContext.currentBranchValue,
13309
+ currentBranch
13310
+ );
13311
+ } else {
13312
+ this.flowDefinition.push(step);
13313
+ }
13314
+ return this;
13315
+ }
13316
+ /**
13317
+ * Starts a conditional branch based on a predicate function.
13318
+ *
13319
+ * @param predicate - Function that takes state and returns a value to branch on
13320
+ * @returns this (for chaining)
13321
+ *
13322
+ * @example
13323
+ * ```typescript
13324
+ * flow.branch(state => state.qualityResult.needsMoreInfo)
13325
+ * .when(true)
13326
+ * .execute('queryGen', ...)
13327
+ * .when(false)
13328
+ * .execute('answer', ...)
13329
+ * .merge()
13330
+ * ```
13331
+ */
13332
+ branch(predicate) {
13333
+ if (this.branchContext) {
13334
+ throw new Error("Nested branches are not supported");
13335
+ }
13336
+ this.branchContext = {
13337
+ predicate: (state) => predicate(state),
13338
+ branches: /* @__PURE__ */ new Map(),
13339
+ currentBranchValue: void 0
13340
+ };
13341
+ return this;
13342
+ }
13343
+ /**
13344
+ * Defines a branch case for the current branch context.
13345
+ *
13346
+ * @param value - The value to match against the branch predicate result
13347
+ * @returns this (for chaining)
13348
+ */
13349
+ when(value) {
13350
+ if (!this.branchContext) {
13351
+ throw new Error("when() called without matching branch()");
13352
+ }
13353
+ this.branchContext.currentBranchValue = value;
13354
+ this.branchContext.branches.set(value, []);
13355
+ return this;
13356
+ }
13357
+ /**
13358
+ * Ends the current branch and merges all branch paths back into the main flow.
13359
+ *
13360
+ * @returns this (for chaining)
13361
+ */
13362
+ merge() {
13363
+ if (!this.branchContext) {
13364
+ throw new Error("merge() called without matching branch()");
13365
+ }
13366
+ const branchContext = this.branchContext;
13367
+ this.branchContext = null;
13368
+ this.flowDefinition.push(async (state, context3) => {
13369
+ const branchValue = branchContext.predicate(state);
13370
+ const branchSteps = branchContext.branches.get(branchValue);
13371
+ if (!branchSteps) {
13372
+ return state;
13373
+ }
13374
+ let currentState = state;
13375
+ for (const step of branchSteps) {
13376
+ currentState = await step(currentState, context3);
13377
+ }
13378
+ return currentState;
13379
+ });
13380
+ return this;
13381
+ }
13382
+ /**
13383
+ * Executes multiple operations in parallel and merges their results.
13384
+ * Both typed and legacy untyped branches are supported.
13385
+ *
13386
+ * @param branches - Array of functions that define parallel operations
13387
+ * @returns Object with merge method for combining results
13388
+ *
13389
+ * @example
13390
+ * ```typescript
13391
+ * flow.parallel([
13392
+ * subFlow => subFlow.execute('retrieve1', state => ({ query: state.query1 })),
13393
+ * subFlow => subFlow.execute('retrieve2', state => ({ query: state.query2 })),
13394
+ * subFlow => subFlow.execute('retrieve3', state => ({ query: state.query3 }))
13395
+ * ]).merge('documents', (docs1, docs2, docs3) => [...docs1, ...docs2, ...docs3])
13396
+ * ```
13397
+ */
13398
+ parallel(branches) {
13399
+ const parallelStep = async (state, context3) => {
13400
+ const promises = branches.map(async (branchFn) => {
13401
+ const subContext = new AxFlowSubContextImpl(this.nodeGenerators);
13402
+ const populatedSubContext = branchFn(
13403
+ subContext
13404
+ );
13405
+ return await populatedSubContext.executeSteps(state, context3);
13406
+ });
13407
+ const results = await Promise.all(promises);
13408
+ return {
13409
+ ...state,
13410
+ _parallelResults: results
13411
+ };
13412
+ };
13413
+ this.flowDefinition.push(parallelStep);
13414
+ return {
13415
+ merge: (resultKey, mergeFunction) => {
13416
+ this.flowDefinition.push((state) => {
13417
+ const results = state._parallelResults;
13418
+ if (!Array.isArray(results)) {
13419
+ throw new Error("No parallel results found for merge");
13420
+ }
13421
+ const mergedValue = mergeFunction(...results);
13422
+ const newState = { ...state };
13423
+ delete newState._parallelResults;
13424
+ newState[resultKey] = mergedValue;
13425
+ return newState;
13426
+ });
13427
+ return this;
13428
+ }
13429
+ };
13430
+ }
13431
+ /**
13432
+ * Creates a feedback loop that jumps back to a labeled step if a condition is met.
13433
+ *
13434
+ * @param condition - Function that returns true to trigger the feedback loop
13435
+ * @param targetLabel - The label to jump back to
13436
+ * @param maxIterations - Maximum number of iterations to prevent infinite loops (default: 10)
13437
+ * @returns this (for chaining)
13438
+ *
13439
+ * @example
13440
+ * ```typescript
13441
+ * flow.label('retry-point')
13442
+ * .execute('answer', ...)
13443
+ * .execute('qualityCheck', ...)
13444
+ * .feedback(state => state.qualityCheckResult.confidence < 0.7, 'retry-point')
13445
+ * ```
13446
+ */
13447
+ feedback(condition, targetLabel, maxIterations = 10) {
13448
+ if (!this.stepLabels.has(targetLabel)) {
13449
+ throw new Error(
13450
+ `Label '${targetLabel}' not found. Make sure to define it with .label() before the feedback point.`
13451
+ );
13452
+ }
13453
+ const targetIndex = this.stepLabels.get(targetLabel);
13454
+ const feedbackStepIndex = this.flowDefinition.length;
13455
+ this.flowDefinition.push(async (state, context3) => {
13456
+ let currentState = state;
13457
+ let iterations = 1;
13458
+ const iterationKey = `_feedback_${targetLabel}_iterations`;
13459
+ if (typeof currentState[iterationKey] !== "number") {
13460
+ currentState = { ...currentState, [iterationKey]: 1 };
13461
+ }
13462
+ while (condition(currentState) && iterations < maxIterations) {
13463
+ iterations++;
13464
+ currentState = { ...currentState, [iterationKey]: iterations };
13465
+ for (let i = targetIndex; i < feedbackStepIndex; i++) {
13466
+ const step = this.flowDefinition[i];
13467
+ if (step) {
13468
+ currentState = await step(currentState, context3);
13469
+ }
13470
+ }
13471
+ }
13472
+ return currentState;
13473
+ });
13474
+ return this;
13475
+ }
13476
+ /**
13477
+ * Marks the beginning of a loop block.
13478
+ *
13479
+ * @param condition - Function that takes the current state and returns a boolean
13480
+ * @returns this (for chaining)
13481
+ *
13482
+ * @example
13483
+ * ```typescript
13484
+ * flow.while(state => state.iterations < 3)
13485
+ * .map(state => ({ ...state, iterations: (state.iterations || 0) + 1 }))
13486
+ * .endWhile()
13487
+ * ```
13488
+ */
13489
+ while(condition) {
13490
+ const loopStartIndex = this.flowDefinition.length;
13491
+ this.loopStack.push(loopStartIndex);
13492
+ const placeholderStep = Object.assign(
13493
+ (state) => state,
13494
+ {
13495
+ _condition: condition,
13496
+ _isLoopStart: true
13497
+ }
13498
+ );
13499
+ this.flowDefinition.push(placeholderStep);
13500
+ return this;
13501
+ }
13502
/**
 * Closes the most recently opened while() block: removes the loop-body steps
 * from the flow and replaces the placeholder with a single step that runs the
 * body repeatedly while the recorded condition holds.
 *
 * Note: there is no iteration cap here — a condition that never becomes
 * false loops forever.
 *
 * @returns this (for chaining)
 */
endWhile() {
  if (this.loopStack.length === 0) {
    throw new Error("endWhile() called without matching while()");
  }
  const loopStartIndex = this.loopStack.pop();
  const placeholderStep = this.flowDefinition[loopStartIndex];
  // The placeholder pushed by while() is tagged with _isLoopStart.
  if (!placeholderStep || !("_isLoopStart" in placeholderStep)) {
    throw new Error("Loop start step not found or invalid");
  }
  const condition = placeholderStep._condition;
  // Detach the body steps (everything after the placeholder) from the flow;
  // they now run only inside the composite loop step below.
  const loopBodySteps = this.flowDefinition.splice(loopStartIndex + 1);
  this.flowDefinition[loopStartIndex] = async (state, context3) => {
    let currentState = state;
    while (condition(currentState)) {
      for (const step of loopBodySteps) {
        currentState = await step(currentState, context3);
      }
    }
    return currentState;
  };
  return this;
}
13529
+ /**
13530
+ * Executes the flow with the given AI service and input values.
13531
+ *
13532
+ * @param ai - The AI service to use as the default for all steps
13533
+ * @param values - The input values for the flow
13534
+ * @param options - Optional forward options to use as defaults
13535
+ * @returns Promise that resolves to the final output
13536
+ */
13537
+ async forward(ai, values, options) {
13538
+ let state = { ...values };
13539
+ const context3 = {
13540
+ mainAi: ai,
13541
+ mainOptions: options
13542
+ };
13543
+ for (const step of this.flowDefinition) {
13544
+ state = await step(state, context3);
13545
+ }
13546
+ return state;
13547
+ }
13548
+ };
13549
var AxFlowSubContextImpl = class {
  // Queued step functions, executed in order by executeSteps().
  steps = [];
  /**
   * @param nodeGenerators - Map of node name -> generator consulted by execute()
   */
  constructor(nodeGenerators) {
    this.nodeGenerators = nodeGenerators;
  }
  /**
   * Queues a node execution. The node's forward() result is merged into the
   * state under the key `<nodeName>Result`.
   */
  execute(nodeName, mapping, dynamicContext) {
    const generator = this.nodeGenerators.get(nodeName);
    if (!generator) {
      throw new Error(`Node generator for '${nodeName}' not found.`);
    }
    const resultKey = `${nodeName}Result`;
    this.steps.push(async (state, context3) => {
      // Per-step overrides win; otherwise fall back to the flow's main context.
      const ai = dynamicContext?.ai ?? context3.mainAi;
      const options = dynamicContext?.options ?? context3.mainOptions;
      const output = await generator.forward(ai, mapping(state), options);
      return { ...state, [resultKey]: output };
    });
    return this;
  }
  /** Queues a synchronous state transformation. */
  map(transform) {
    this.steps.push((state) => transform(state));
    return this;
  }
  /** Runs all queued steps in order, threading state through each. */
  async executeSteps(initialState, context3) {
    let acc = initialState;
    for (const step of this.steps) {
      acc = await step(acc, context3);
    }
    return acc;
  }
};
13583
var AxFlowTypedSubContextImpl = class {
  // Ordered queue of step functions built up by execute()/map().
  steps = [];
  /**
   * @param nodeGenerators - Map of node name -> generator looked up by execute()
   */
  constructor(nodeGenerators) {
    this.nodeGenerators = nodeGenerators;
  }
  /**
   * Queues execution of the named node; its forward() result is written back
   * onto the state as `<nodeName>Result`.
   */
  execute(nodeName, mapping, dynamicContext) {
    const gen = this.nodeGenerators.get(nodeName);
    if (!gen) {
      throw new Error(`Node generator for '${nodeName}' not found.`);
    }
    this.steps.push(async (state, context3) => {
      // Prefer the step-local AI/options override, else use the main context.
      const ai = dynamicContext?.ai ?? context3.mainAi;
      const options = dynamicContext?.options ?? context3.mainOptions;
      const inputs = mapping(state);
      const result = await gen.forward(ai, inputs, options);
      const next = { ...state };
      next[`${nodeName}Result`] = result;
      return next;
    });
    return this;
  }
  /** Queues a synchronous transform of the state. */
  map(transform) {
    this.steps.push((state) => transform(state));
    return this;
  }
  /** Executes every queued step in sequence and returns the final state. */
  async executeSteps(initialState, context3) {
    let state = initialState;
    for (const step of this.steps) {
      state = await step(state, context3);
    }
    return state;
  }
};
13617
+
13618
+ // dsp/loader.ts
13619
+ var AxHFDataLoader = class {
13620
// Cached dataset rows, populated by loadData() or setData().
rows = [];
// Base endpoint of the Hugging Face datasets-server "rows" API.
baseUrl;
// Dataset identifier, e.g. "hotpot_qa".
dataset;
// Dataset split, e.g. "train".
split;
// Dataset config/subset name, e.g. "distractor".
config;
// Optional paging options ({ offset, length } — consumed by loadData()).
options;
constructor({
  dataset,
  split,
  config,
  options
}) {
  this.baseUrl = "https://datasets-server.huggingface.co/rows";
  this.dataset = dataset;
  this.split = split;
  this.config = config;
  this.options = options;
}
13638
+ async fetchDataFromAPI(url) {
13639
+ try {
13640
+ const response = await fetch(url);
13641
+ if (!response.ok) {
13642
+ throw new Error(`Error fetching data: ${response.statusText}`);
13643
+ }
13644
+ const data = await response.json();
13645
+ if (!data?.rows) {
13646
+ throw new Error("Invalid data format");
13647
+ }
13648
+ return data.rows;
13649
+ } catch (error) {
13650
+ console.error("Error fetching data from API:", error);
13651
+ throw error;
13652
+ }
13653
+ }
13654
+ // https://datasets-server.huggingface.co/rows?dataset=hotpot_qa&config=distractor&split=train&offset=0&length=100
13655
+ async loadData() {
13656
+ const offset = this.options?.offset ?? 0;
13657
+ const length = this.options?.length ?? 100;
13658
+ const ds = encodeURIComponent(this.dataset);
13659
+ const url = `${this.baseUrl}?dataset=${ds}&config=${this.config}&split=${this.split}&offset=${offset}&length=${length}`;
13660
+ console.log("Downloading data from API.");
13661
+ this.rows = await this.fetchDataFromAPI(url);
13662
+ return this.rows;
13663
+ }
13664
/** Replaces the cached rows (e.g. with locally prepared data). */
setData(rows) {
  this.rows = rows;
}
/** Returns the currently cached rows. */
getData() {
  return this.rows;
}
13067
13670
  async getRows({
@@ -16427,198 +17030,6 @@ var AxEvalUtil = {
16427
17030
  novelF1ScoreOptimized
16428
17031
  };
16429
17032
 
16430
- // flow/flow.ts
16431
- var AxFlow = class extends AxProgramWithSignature {
16432
- nodes = /* @__PURE__ */ new Map();
16433
- flowDefinition = [];
16434
- nodeGenerators = /* @__PURE__ */ new Map();
16435
- loopStack = [];
16436
- constructor(signature = "userInput:string -> flowOutput:string") {
16437
- super(signature);
16438
- }
16439
- /**
16440
- * Declares a reusable computational node and its input/output signature.
16441
- *
16442
- * @param name - The name of the node
16443
- * @param signature - An object where the key is a string representation of inputs
16444
- * and the value is an object representing outputs
16445
- * @returns this (for chaining)
16446
- *
16447
- * @example
16448
- * ```typescript
16449
- * flow.node('summarizer', { 'text:string': { summary: f.string() } })
16450
- * ```
16451
- */
16452
- node(name, signature) {
16453
- const [inputSignature, outputSignature] = Object.entries(signature)[0] ?? [
16454
- "",
16455
- {}
16456
- ];
16457
- if (!inputSignature || !outputSignature) {
16458
- throw new Error(
16459
- `Invalid signature for node '${name}': signature must have at least one input->output mapping`
16460
- );
16461
- }
16462
- const outputFields = Object.entries(outputSignature).map(([key, value]) => {
16463
- if (typeof value === "object" && value !== null && "type" in value) {
16464
- const fieldType = value;
16465
- let fieldString = `${key}:`;
16466
- if (fieldType.isOptional) {
16467
- const colonIndex = fieldString.lastIndexOf(":");
16468
- fieldString = fieldString.slice(0, colonIndex) + "?" + fieldString.slice(colonIndex);
16469
- }
16470
- if (fieldType.isInternal) {
16471
- const colonIndex = fieldString.lastIndexOf(":");
16472
- fieldString = fieldString.slice(0, colonIndex) + "!" + fieldString.slice(colonIndex);
16473
- }
16474
- fieldString += fieldType.type;
16475
- if (fieldType.isArray) {
16476
- fieldString += "[]";
16477
- }
16478
- if (fieldType.type === "class" && fieldType.options) {
16479
- fieldString += ` "${fieldType.options.join(", ")}"`;
16480
- }
16481
- if (fieldType.description && (fieldType.type !== "class" || !fieldType.options)) {
16482
- fieldString += ` "${fieldType.description}"`;
16483
- }
16484
- return fieldString;
16485
- }
16486
- return `${key}:string`;
16487
- }).join(", ");
16488
- const signatureString = `${inputSignature} -> ${outputFields}`;
16489
- this.nodes.set(name, {
16490
- inputs: { [inputSignature]: true },
16491
- outputs: outputSignature
16492
- });
16493
- this.nodeGenerators.set(name, new AxGen(signatureString));
16494
- return this;
16495
- }
16496
- /**
16497
- * Applies a synchronous transformation to the state object.
16498
- *
16499
- * @param transform - Function that takes the current state and returns a new state
16500
- * @returns this (for chaining)
16501
- *
16502
- * @example
16503
- * ```typescript
16504
- * flow.map(state => ({ ...state, processedText: state.text.toLowerCase() }))
16505
- * ```
16506
- */
16507
- map(transform) {
16508
- this.flowDefinition.push((state) => {
16509
- return transform(state);
16510
- });
16511
- return this;
16512
- }
16513
- /**
16514
- * Executes a previously defined node.
16515
- *
16516
- * @param nodeName - The name of the node to execute (must exist in the nodes map)
16517
- * @param mapping - Function that takes the current state and returns the input object required by the node
16518
- * @param dynamicContext - Optional object to override the AI service or options for this specific step
16519
- * @returns this (for chaining)
16520
- *
16521
- * @example
16522
- * ```typescript
16523
- * flow.execute('summarizer', state => ({ text: state.originalText }), { ai: cheapAI })
16524
- * ```
16525
- */
16526
- execute(nodeName, mapping, dynamicContext) {
16527
- if (!this.nodes.has(nodeName)) {
16528
- throw new Error(
16529
- `Node '${nodeName}' not found. Make sure to define it with .node() first.`
16530
- );
16531
- }
16532
- const nodeGenerator = this.nodeGenerators.get(nodeName);
16533
- if (!nodeGenerator) {
16534
- throw new Error(`Node generator for '${nodeName}' not found.`);
16535
- }
16536
- this.flowDefinition.push(async (state, context3) => {
16537
- const ai = dynamicContext?.ai ?? context3.mainAi;
16538
- const options = dynamicContext?.options ?? context3.mainOptions;
16539
- const nodeInputs = mapping(state);
16540
- const result = await nodeGenerator.forward(ai, nodeInputs, options);
16541
- return {
16542
- ...state,
16543
- [`${nodeName}Result`]: result
16544
- };
16545
- });
16546
- return this;
16547
- }
16548
- /**
16549
- * Marks the beginning of a loop block.
16550
- *
16551
- * @param condition - Function that takes the current state and returns a boolean
16552
- * @returns this (for chaining)
16553
- *
16554
- * @example
16555
- * ```typescript
16556
- * flow.while(state => state.iterations < 3)
16557
- * .map(state => ({ ...state, iterations: (state.iterations || 0) + 1 }))
16558
- * .endWhile()
16559
- * ```
16560
- */
16561
- while(condition) {
16562
- const loopStartIndex = this.flowDefinition.length;
16563
- this.loopStack.push(loopStartIndex);
16564
- const placeholderStep = Object.assign(
16565
- (state) => state,
16566
- {
16567
- _condition: condition,
16568
- _isLoopStart: true
16569
- }
16570
- );
16571
- this.flowDefinition.push(placeholderStep);
16572
- return this;
16573
- }
16574
- /**
16575
- * Marks the end of a loop block.
16576
- *
16577
- * @returns this (for chaining)
16578
- */
16579
- endWhile() {
16580
- if (this.loopStack.length === 0) {
16581
- throw new Error("endWhile() called without matching while()");
16582
- }
16583
- const loopStartIndex = this.loopStack.pop();
16584
- const placeholderStep = this.flowDefinition[loopStartIndex];
16585
- if (!placeholderStep || !("_isLoopStart" in placeholderStep)) {
16586
- throw new Error("Loop start step not found or invalid");
16587
- }
16588
- const condition = placeholderStep._condition;
16589
- const loopBodySteps = this.flowDefinition.splice(loopStartIndex + 1);
16590
- this.flowDefinition[loopStartIndex] = async (state, context3) => {
16591
- let currentState = state;
16592
- while (condition(currentState)) {
16593
- for (const step of loopBodySteps) {
16594
- currentState = await step(currentState, context3);
16595
- }
16596
- }
16597
- return currentState;
16598
- };
16599
- return this;
16600
- }
16601
- /**
16602
- * Executes the flow with the given AI service and input values.
16603
- *
16604
- * @param ai - The AI service to use as the default for all steps
16605
- * @param values - The input values for the flow
16606
- * @param options - Optional forward options to use as defaults
16607
- * @returns Promise that resolves to the final output
16608
- */
16609
- async forward(ai, values, options) {
16610
- let state = { ...values };
16611
- const context3 = {
16612
- mainAi: ai,
16613
- mainOptions: options
16614
- };
16615
- for (const step of this.flowDefinition) {
16616
- state = await step(state, context3);
16617
- }
16618
- return state;
16619
- }
16620
- };
16621
-
16622
17033
  // ../../node_modules/uuid/dist/esm-node/rng.js
16623
17034
  var import_crypto5 = __toESM(require("crypto"));
16624
17035
  var rnds8Pool = new Uint8Array(256);
@@ -17189,6 +17600,7 @@ var AxRAG = class extends AxChainOfThought {
17189
17600
  AxEmbeddingAdapter,
17190
17601
  AxEvalUtil,
17191
17602
  AxFlow,
17603
+ AxFlowTypedSubContextImpl,
17192
17604
  AxFunctionError,
17193
17605
  AxFunctionProcessor,
17194
17606
  AxGen,
@@ -17266,6 +17678,7 @@ var AxRAG = class extends AxChainOfThought {
17266
17678
  axModelInfoHuggingFace,
17267
17679
  axModelInfoMistral,
17268
17680
  axModelInfoOpenAI,
17681
+ axModelInfoOpenAIResponses,
17269
17682
  axModelInfoReka,
17270
17683
  axModelInfoTogether,
17271
17684
  axSpanAttributes,