@ax-llm/ax 12.0.17 → 12.0.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.cjs +719 -306
- package/index.cjs.map +1 -1
- package/index.d.cts +257 -116
- package/index.d.ts +257 -116
- package/index.js +717 -306
- package/index.js.map +1 -1
- package/package.json +1 -1
package/index.js
CHANGED
@@ -1254,6 +1254,14 @@ var AxBaseAI = class {
     if (options?.showThoughts && !this.getFeatures(model).hasShowThoughts) {
       throw new Error(`Model ${model} does not support showThoughts.`);
     }
+    const modelInfo = this.modelInfo.find(
+      (info) => info.name === model
+    );
+    if (modelInfo?.isExpensive && options?.useExpensiveModel !== "yes") {
+      throw new Error(
+        `Model ${model} is marked as expensive and requires explicit confirmation. Set useExpensiveModel: "yes" to proceed.`
+      );
+    }
     modelConfig.stream = (options?.stream !== void 0 ? options.stream : modelConfig.stream) ?? true;
     const canStream = this.getFeatures(model).streaming;
     if (!canStream) {
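The guard added above only passes when the caller sets `useExpensiveModel: "yes"` in the per-request options, so models flagged as expensive now require an explicit opt-in. A minimal sketch of that opt-in from the calling side; the `AxAI` setup and the `chat()` request shape are assumptions for illustration, and only the option name and value come from this hunk:

```typescript
import { AxAI } from "@ax-llm/ax";

const ai = new AxAI({ name: "openai", apiKey: process.env.OPENAI_APIKEY! });

// For a model whose modelInfo entry sets isExpensive: true, the new check in
// AxBaseAI throws unless the request explicitly confirms the cost.
await ai.chat(
  { chatPrompt: [{ role: "user", content: "Summarize the attached report." }] },
  { useExpensiveModel: "yes" } // anything other than "yes" rejects the call
);
```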
@@ -2388,8 +2396,6 @@ function mapFinishReason(stopReason) {
 
 // ai/openai/chat_types.ts
 var AxAIOpenAIModel = /* @__PURE__ */ ((AxAIOpenAIModel2) => {
-  AxAIOpenAIModel2["O1"] = "o1";
-  AxAIOpenAIModel2["O1Mini"] = "o1-mini";
   AxAIOpenAIModel2["GPT4"] = "gpt-4";
   AxAIOpenAIModel2["GPT41"] = "gpt-4.1";
   AxAIOpenAIModel2["GPT41Mini"] = "gpt-4.1-mini";
@@ -2402,6 +2408,11 @@ var AxAIOpenAIModel = /* @__PURE__ */ ((AxAIOpenAIModel2) => {
   AxAIOpenAIModel2["GPT35TextDavinci002"] = "text-davinci-002";
   AxAIOpenAIModel2["GPT3TextBabbage002"] = "text-babbage-002";
   AxAIOpenAIModel2["GPT3TextAda001"] = "text-ada-001";
+  AxAIOpenAIModel2["O1"] = "o1";
+  AxAIOpenAIModel2["O1Mini"] = "o1-mini";
+  AxAIOpenAIModel2["O3"] = "o3";
+  AxAIOpenAIModel2["O3Mini"] = "o3-mini";
+  AxAIOpenAIModel2["O4Mini"] = "o4-mini";
   return AxAIOpenAIModel2;
 })(AxAIOpenAIModel || {});
 var AxAIOpenAIEmbedModel = /* @__PURE__ */ ((AxAIOpenAIEmbedModel2) => {
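The o-series values move to the end of the chat enum and gain o3, o3-mini, and o4-mini. Once registered they can be referenced by name when configuring the chat provider; the config shape below is an assumption based on the package's usual constructor options, and only the enum members come from this hunk:

```typescript
import { AxAI, AxAIOpenAIModel } from "@ax-llm/ax";

// Hypothetical provider setup pinned to one of the newly added enum values.
const ai = new AxAI({
  name: "openai",
  apiKey: process.env.OPENAI_APIKEY!,
  config: { model: AxAIOpenAIModel.O3Mini }, // "o3-mini"
});
```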
@@ -2413,8 +2424,6 @@ var AxAIOpenAIEmbedModel = /* @__PURE__ */ ((AxAIOpenAIEmbedModel2) => {
 
 // ai/openai/responses_types.ts
 var AxAIOpenAIResponsesModel = /* @__PURE__ */ ((AxAIOpenAIResponsesModel2) => {
-  AxAIOpenAIResponsesModel2["O1"] = "o1";
-  AxAIOpenAIResponsesModel2["O1Mini"] = "o1-mini";
   AxAIOpenAIResponsesModel2["GPT4"] = "gpt-4";
   AxAIOpenAIResponsesModel2["GPT41"] = "gpt-4.1";
   AxAIOpenAIResponsesModel2["GPT41Mini"] = "gpt-4.1-mini";
@@ -2427,6 +2436,10 @@ var AxAIOpenAIResponsesModel = /* @__PURE__ */ ((AxAIOpenAIResponsesModel2) => {
   AxAIOpenAIResponsesModel2["GPT35TextDavinci002"] = "text-davinci-002";
   AxAIOpenAIResponsesModel2["GPT3TextBabbage002"] = "text-babbage-002";
   AxAIOpenAIResponsesModel2["GPT3TextAda001"] = "text-ada-001";
+  AxAIOpenAIResponsesModel2["O1Pro"] = "o1-pro";
+  AxAIOpenAIResponsesModel2["O1"] = "o1";
+  AxAIOpenAIResponsesModel2["O1Mini"] = "o1-mini";
+  AxAIOpenAIResponsesModel2["O3Pro"] = "o3-pro";
   AxAIOpenAIResponsesModel2["O3"] = "o3";
   AxAIOpenAIResponsesModel2["O3Mini"] = "o3-mini";
   AxAIOpenAIResponsesModel2["O4Mini"] = "o4-mini";
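The pro-tier names exist only on the Responses enum, so code targeting o1-pro or o3-pro goes through AxAIOpenAIResponsesModel rather than the chat enum above. For example, assuming the enum is exported as in previous releases:

```typescript
import { AxAIOpenAIResponsesModel } from "@ax-llm/ax";

// Added in this hunk; both pro models are flagged isExpensive in the
// axModelInfoOpenAIResponses table introduced further down in this diff.
const model = AxAIOpenAIResponsesModel.O3Pro; // "o3-pro"
```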
@@ -2435,20 +2448,7 @@ var AxAIOpenAIResponsesModel = /* @__PURE__ */ ((AxAIOpenAIResponsesModel2) => {
 
 // ai/openai/info.ts
 var axModelInfoOpenAI = [
-  {
-    name: "o1" /* O1 */,
-    currency: "usd",
-    promptTokenCostPer1M: 15,
-    completionTokenCostPer1M: 60,
-    hasThinkingBudget: true
-  },
-  {
-    name: "o1-mini" /* O1Mini */,
-    currency: "usd",
-    promptTokenCostPer1M: 1.1,
-    completionTokenCostPer1M: 14.4,
-    hasThinkingBudget: true
-  },
+  // Not Reasoning models
   {
     name: "gpt-4" /* GPT4 */,
     currency: "usd",
@@ -2497,30 +2497,36 @@ var axModelInfoOpenAI = [
     promptTokenCostPer1M: 0.5,
     completionTokenCostPer1M: 1.5
   },
-  //
+  // Reasoning models
+  {
+    name: "o1" /* O1 */,
+    currency: "usd",
+    promptTokenCostPer1M: 15,
+    completionTokenCostPer1M: 60
+  },
+  {
+    name: "o1-mini" /* O1Mini */,
+    currency: "usd",
+    promptTokenCostPer1M: 1.1,
+    completionTokenCostPer1M: 14.4
+  },
   {
     name: "o3" /* O3 */,
     currency: "usd",
     promptTokenCostPer1M: 15,
-    completionTokenCostPer1M: 60
-    hasThinkingBudget: true,
-    hasShowThoughts: true
+    completionTokenCostPer1M: 60
   },
   {
     name: "o3-mini" /* O3Mini */,
     currency: "usd",
     promptTokenCostPer1M: 1.1,
-    completionTokenCostPer1M: 4.4
-    hasThinkingBudget: true,
-    hasShowThoughts: true
+    completionTokenCostPer1M: 4.4
   },
   {
     name: "o4-mini" /* O4Mini */,
     currency: "usd",
     promptTokenCostPer1M: 1.1,
-    completionTokenCostPer1M: 4.4
-    hasThinkingBudget: true,
-    hasShowThoughts: true
+    completionTokenCostPer1M: 4.4
   },
   // Embedding models
   {
@@ -2542,8 +2548,123 @@ var axModelInfoOpenAI = [
     completionTokenCostPer1M: 0.13
   }
 ];
+var axModelInfoOpenAIResponses = [
+  // Not Reasoning models
+  {
+    name: "gpt-4" /* GPT4 */,
+    currency: "usd",
+    promptTokenCostPer1M: 30,
+    completionTokenCostPer1M: 60
+  },
+  {
+    name: "gpt-4.1" /* GPT41 */,
+    currency: "usd",
+    promptTokenCostPer1M: 2,
+    completionTokenCostPer1M: 8
+  },
+  {
+    name: "gpt-4.1-mini" /* GPT41Mini */,
+    currency: "usd",
+    promptTokenCostPer1M: 0.4,
+    completionTokenCostPer1M: 1.6
+  },
+  {
+    name: "gpt-4o" /* GPT4O */,
+    currency: "usd",
+    promptTokenCostPer1M: 5,
+    completionTokenCostPer1M: 15
+  },
+  {
+    name: "gpt-4o-mini" /* GPT4OMini */,
+    currency: "usd",
+    promptTokenCostPer1M: 0.15,
+    completionTokenCostPer1M: 0.6
+  },
+  {
+    name: "chatgpt-4o-latest" /* GPT4ChatGPT4O */,
+    currency: "usd",
+    promptTokenCostPer1M: 5,
+    completionTokenCostPer1M: 15
+  },
+  {
+    name: "gpt-4-turbo" /* GPT4Turbo */,
+    currency: "usd",
+    promptTokenCostPer1M: 10,
+    completionTokenCostPer1M: 30
+  },
+  {
+    name: "gpt-3.5-turbo" /* GPT35Turbo */,
+    currency: "usd",
+    promptTokenCostPer1M: 0.5,
+    completionTokenCostPer1M: 1.5
+  },
+  // Reasoning models
+  {
+    name: "o1-pro" /* O1Pro */,
+    currency: "usd",
+    promptTokenCostPer1M: 150,
+    completionTokenCostPer1M: 600,
+    hasThinkingBudget: true,
+    hasShowThoughts: true,
+    isExpensive: true
+  },
+  {
+    name: "o1" /* O1 */,
+    currency: "usd",
+    promptTokenCostPer1M: 15,
+    completionTokenCostPer1M: 60,
+    hasThinkingBudget: true,
+    hasShowThoughts: true
+  },
+  {
+    name: "o3-pro" /* O3Pro */,
+    currency: "usd",
+    promptTokenCostPer1M: 20,
+    completionTokenCostPer1M: 80,
+    hasThinkingBudget: true,
+    hasShowThoughts: true,
+    isExpensive: true
+  },
+  {
+    name: "o3" /* O3 */,
+    currency: "usd",
+    promptTokenCostPer1M: 15,
+    completionTokenCostPer1M: 60,
+    hasThinkingBudget: true,
+    hasShowThoughts: true
+  },
+  {
+    name: "o3-mini" /* O3Mini */,
+    currency: "usd",
+    promptTokenCostPer1M: 1.1,
+    completionTokenCostPer1M: 4.4,
+    hasThinkingBudget: true,
+    hasShowThoughts: true
+  },
+  {
+    name: "o4-mini" /* O4Mini */,
+    currency: "usd",
+    promptTokenCostPer1M: 1.1,
+    completionTokenCostPer1M: 4.4,
+    hasThinkingBudget: true,
+    hasShowThoughts: true
+  }
+];
 
 // ai/openai/api.ts
+var isOpenAIThinkingModel = (model) => {
+  const thinkingModels = [
+    "o1" /* O1 */,
+    "o1-mini" /* O1Mini */,
+    "o3" /* O3 */,
+    "o3-mini" /* O3Mini */,
+    "o4-mini" /* O4Mini */,
+    // Pro models (string values since they're not in the regular chat enum)
+    "o1-pro",
+    "o3-pro"
+  ];
+  return thinkingModels.includes(model) || thinkingModels.includes(model);
+};
 var axAIOpenAIDefaultConfig = () => structuredClone({
   model: "gpt-4.1" /* GPT41 */,
   embedModel: "text-embedding-3-small" /* TextEmbedding3Small */,
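The per-million token prices in these tables are enough for rough cost accounting per call. The helper below is illustrative only (it is not part of the package); the numbers are taken directly from the o1-pro entry above:

```typescript
// Illustrative cost estimate from a model-info entry (o1-pro: 150 USD per 1M
// prompt tokens, 600 USD per 1M completion tokens, currency "usd").
const estimateCostUSD = (
  info: { promptTokenCostPer1M: number; completionTokenCostPer1M: number },
  promptTokens: number,
  completionTokens: number
) =>
  (promptTokens / 1e6) * info.promptTokenCostPer1M +
  (completionTokens / 1e6) * info.completionTokenCostPer1M;

// 10,000 prompt + 2,000 completion tokens on o1-pro:
// (0.01 * 150) + (0.002 * 600) = 1.5 + 1.2 = 2.7 USD
estimateCostUSD(
  { promptTokenCostPer1M: 150, completionTokenCostPer1M: 600 },
  10_000,
  2_000
);
```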
@@ -2607,20 +2728,24 @@ var AxAIOpenAIImpl = class {
     const frequencyPenalty = req.modelConfig?.frequencyPenalty ?? this.config.frequencyPenalty;
     const stream = req.modelConfig?.stream ?? this.config.stream;
     const store = this.config.store;
+    const isThinkingModel = isOpenAIThinkingModel(model);
     let reqValue = {
       model,
       messages,
       response_format: this.config?.responseFormat ? { type: this.config.responseFormat } : void 0,
       tools,
       tool_choice: toolsChoice,
-
-
-
-
+      // For thinking models, don't set these parameters as they're not supported
+      ...isThinkingModel ? {} : {
+        max_completion_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,
+        temperature: req.modelConfig?.temperature ?? this.config.temperature,
+        top_p: req.modelConfig?.topP ?? this.config.topP ?? 1,
+        n: req.modelConfig?.n ?? this.config.n,
+        presence_penalty: req.modelConfig?.presencePenalty ?? this.config.presencePenalty,
+        ...frequencyPenalty ? { frequency_penalty: frequencyPenalty } : {}
+      },
       stop: req.modelConfig?.stopSequences ?? this.config.stop,
-      presence_penalty: req.modelConfig?.presencePenalty ?? this.config.presencePenalty,
       logit_bias: this.config.logitBias,
-      ...frequencyPenalty ? { frequency_penalty: frequencyPenalty } : {},
       ...stream && this.streamingUsage ? { stream: true, stream_options: { include_usage: true } } : {},
       ...store ? { store } : {},
       ...this.config.serviceTier ? { service_tier: this.config.serviceTier } : {},
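The net effect of the spread added above is that the whole block of sampling parameters is omitted from the outgoing chat request whenever isOpenAIThinkingModel(model) is true. Illustrative request fragments (field names as they appear in this hunk, values made up):

```typescript
// Non-thinking model (e.g. "gpt-4.1"): the gated block is spread in.
const regularRequest = {
  model: "gpt-4.1",
  max_completion_tokens: 1024,
  temperature: 0.7,
  top_p: 1,
  n: 1,
  presence_penalty: 0,
  // stop, logit_bias, stream options, store, service_tier follow as before
};

// Thinking model (e.g. "o3-mini"): the spread evaluates to {}, so none of
// max_completion_tokens, temperature, top_p, n, or the penalties are sent.
const thinkingRequest = {
  model: "o3-mini",
  // stop, logit_bias, stream options, store, service_tier still apply
};
```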
@@ -4512,6 +4637,18 @@ var AxAIOllama = class extends AxAIOpenAIBase {
 };
 
 // ai/openai/responses_api.ts
+var isOpenAIResponsesThinkingModel = (model) => {
+  const thinkingModels = [
+    "o1" /* O1 */,
+    "o1-mini" /* O1Mini */,
+    "o1-pro" /* O1Pro */,
+    "o3" /* O3 */,
+    "o3-mini" /* O3Mini */,
+    "o3-pro" /* O3Pro */,
+    "o4-mini" /* O4Mini */
+  ];
+  return thinkingModels.includes(model);
+};
 var AxAIOpenAIResponsesImpl = class {
   constructor(config, streamingUsage, responsesReqUpdater) {
     this.config = config;
@@ -4667,10 +4804,37 @@ var AxAIOpenAIResponsesImpl = class {
         parameters: v.parameters ?? {}
       })
     );
-    const includeFields =
-
-
-
+    const includeFields = (
+      // | 'computer_call_output.output.image_url'
+      // | 'reasoning.encrypted_content'
+      // | 'code_interpreter_call.outputs'
+      []
+    );
+    const isThinkingModel = isOpenAIResponsesThinkingModel(model);
+    let reasoningSummary = this.config.reasoningSummary;
+    if (!config?.showThoughts) {
+      reasoningSummary = void 0;
+    } else if (!reasoningSummary) {
+      reasoningSummary = "auto";
+    }
+    let reasoningEffort = this.config.reasoningEffort;
+    if (config?.thinkingTokenBudget) {
+      switch (config.thinkingTokenBudget) {
+        case "none":
+          reasoningEffort = void 0;
+          break;
+        case "minimal":
+          reasoningEffort = "low";
+          break;
+        case "low":
+          reasoningEffort = "medium";
+          break;
+        case "medium":
+        case "high":
+        case "highest":
+          reasoningEffort = "high";
+          break;
+      }
     }
     let mutableReq = {
       model,
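The switch above folds the provider-agnostic thinkingTokenBudget option down to the effort levels the Responses API accepts: "none" clears the effort, "minimal" becomes "low", "low" becomes "medium", and "medium", "high", and "highest" all become "high"; showThoughts decides whether a reasoning summary is requested at all (defaulting to "auto" when enabled). A hedged usage sketch; the AxAIOpenAIResponses constructor arguments and the AxGen signature string are assumptions, while the option names come from this hunk:

```typescript
import { AxAIOpenAIResponses, AxGen } from "@ax-llm/ax";

// Assumed constructor shape for the Responses-API service.
const ai = new AxAIOpenAIResponses({ apiKey: process.env.OPENAI_APIKEY! });
const gen = new AxGen("question:string -> answer:string");

// Per the mapping above, this requests reasoning: { effort: "low", summary: "auto" }.
await gen.forward(ai, { question: "What is 2 + 2?" }, {
  thinkingTokenBudget: "minimal",
  showThoughts: true,
});
```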
@@ -4679,9 +4843,15 @@ var AxAIOpenAIResponsesImpl = class {
       instructions: finalInstructions,
       tools: tools?.length ? tools : void 0,
       tool_choice: req.functionCall === "none" || req.functionCall === "auto" || req.functionCall === "required" ? req.functionCall : typeof req.functionCall === "object" && req.functionCall.function ? { type: "function", name: req.functionCall.function.name } : void 0,
-
-
-
+      // For thinking models, don't set these parameters as they're not supported
+      ...isThinkingModel ? {
+        max_output_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens ?? void 0
+      } : {
+        temperature: req.modelConfig?.temperature ?? this.config.temperature ?? void 0,
+        top_p: req.modelConfig?.topP ?? this.config.topP ?? void 0,
+        presence_penalty: req.modelConfig?.presencePenalty ?? this.config.presencePenalty ?? void 0,
+        frequency_penalty: req.modelConfig?.frequencyPenalty ?? this.config.frequencyPenalty ?? void 0
+      },
       stream: req.modelConfig?.stream ?? this.config.stream ?? false,
       // Sourced from modelConfig or global config
       // Optional fields from AxAIOpenAIResponsesRequest that need to be in Mutable for initialization
@@ -4690,7 +4860,12 @@ var AxAIOpenAIResponsesImpl = class {
       metadata: void 0,
       parallel_tool_calls: this.config.parallelToolCalls,
       previous_response_id: void 0,
-
+      ...reasoningEffort ? {
+        reasoning: {
+          effort: reasoningEffort,
+          summary: reasoningSummary
+        }
+      } : {},
       service_tier: this.config.serviceTier,
       store: this.config.store,
       text: void 0,
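When an effort level survives the mapping, the spread above contributes a reasoning block to the request; otherwise it contributes nothing. Illustrative fragments of the resulting Responses request:

```typescript
// With thinkingTokenBudget: "high" and showThoughts: true:
const withReasoning = { reasoning: { effort: "high", summary: "auto" } };

// With thinkingTokenBudget: "none" (effort cleared), the spread adds nothing:
const withoutReasoning = {};
```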
@@ -5110,19 +5285,6 @@ var AxAIOpenAIResponsesImpl = class {
           ];
         }
         break;
-      case "reasoning":
-        {
-          const reasoningItem = event.item;
-          baseResult.id = event.item.id;
-          if (reasoningItem.encrypted_content) {
-            baseResult.thought = reasoningItem.encrypted_content;
-          } else if (reasoningItem.summary) {
-            baseResult.thought = reasoningItem.summary.map(
-              (s2) => typeof s2 === "object" ? JSON.stringify(s2) : s2
-            ).join("\n");
-          }
-        }
-        break;
       }
       break;
     case "response.content_part.added":
@@ -5148,16 +5310,21 @@ var AxAIOpenAIResponsesImpl = class {
         }
       ];
       break;
-    case
-
+    // case 'response.function_call_arguments.done':
+    // // Function call arguments done - don't return function calls here
+    // // The mergeFunctionCalls will handle combining name and arguments
+    // baseResult.id = event.item_id
+    // baseResult.finishReason = 'function_call'
+    // break
     case "response.reasoning_summary_text.delta":
       baseResult.id = event.item_id;
       baseResult.thought = event.delta;
       break;
-    case
-
-
-
+    // case 'response.reasoning_summary_text.done':
+    // // Reasoning summary done
+    // baseResult.id = event.item_id
+    // baseResult.thought = event.text
+    // break
     // File search tool events
     case "response.file_search_call.in_progress":
     case "response.file_search_call.searching":
@@ -5251,10 +5418,6 @@ var AxAIOpenAIResponsesImpl = class {
         baseResult.id = event.item.id;
         baseResult.finishReason = "function_call";
         break;
-      case "reasoning":
-        baseResult.id = event.item.id;
-        baseResult.finishReason = "stop";
-        break;
       }
       break;
     case "response.completed":
@@ -5382,7 +5545,7 @@ var AxAIOpenAIResponses = class extends AxAIOpenAIResponsesBase {
     if (!apiKey || apiKey === "") {
       throw new Error("OpenAI API key not set");
     }
-    modelInfo = [...
+    modelInfo = [...axModelInfoOpenAIResponses, ...modelInfo ?? []];
     const supportFor = (model) => {
       const mi = getModelInfo({
         model,
@@ -8966,7 +9129,7 @@ async function* processStreamingResponse({
       usage.push(v.modelUsage);
     }
     for (const result of v.results) {
-      if (result.content === "" && (!result.functionCalls || result.functionCalls.length === 0)) {
+      if (result.content === "" && (!result.thought || result.thought === "") && (!result.functionCalls || result.functionCalls.length === 0)) {
        continue;
       }
       const state = states.find((s2) => s2.index === result.index);
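The widened condition means a streamed chunk that carries only a thought delta (no content and no function calls) is no longer filtered out, which is what lets the reasoning-summary deltas emitted earlier in this diff reach downstream processing. An illustrative result object that was previously skipped and is now kept:

```typescript
// Thought-only streaming delta: content is empty, functionCalls absent,
// but thought is set, so the new condition no longer hits `continue`.
const result = {
  index: 0,
  content: "",
  thought: "First check the date format, then the currency field...",
  functionCalls: undefined,
};
```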
@@ -9678,7 +9841,7 @@ var toFieldType = (type) => {
     case "number":
       return "number";
     case "boolean":
-      return "boolean";
+      return "boolean (true or false)";
     case "date":
       return 'date ("YYYY-MM-DD" format)';
     case "datetime":
@@ -12836,56 +12999,494 @@ var AxDockerSession = class {
   }
 };
 
-//
-var
-
-
-
-
-
-
-  constructor({
-
-    split,
-    config,
-    options
-  }) {
-    this.baseUrl = "https://datasets-server.huggingface.co/rows";
-    this.dataset = dataset;
-    this.split = split;
-    this.config = config;
-    this.options = options;
+// flow/flow.ts
+var AxFlow = class extends AxProgramWithSignature {
+  nodes = /* @__PURE__ */ new Map();
+  flowDefinition = [];
+  nodeGenerators = /* @__PURE__ */ new Map();
+  loopStack = [];
+  stepLabels = /* @__PURE__ */ new Map();
+  branchContext = null;
+  constructor(signature = "userInput:string -> flowOutput:string") {
+    super(signature);
   }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  /**
+   * Declares a reusable computational node and its input/output signature.
+   * Returns a new AxFlow type that tracks this node in the TNodes registry.
+   *
+   * @param name - The name of the node
+   * @param signature - Signature string in the same format as AxSignature
+   * @param options - Optional program forward options (same as AxGen)
+   * @returns New AxFlow instance with updated TNodes type
+   *
+   * @example
+   * ```typescript
+   * flow.node('summarizer', 'text:string -> summary:string')
+   * flow.node('analyzer', 'text:string -> analysis:string, confidence:number', { debug: true })
+   * ```
+   */
+  node(name, signature, options) {
+    if (!signature) {
+      throw new Error(
+        `Invalid signature for node '${name}': signature cannot be empty`
+      );
     }
+    this.nodes.set(name, {
+      inputs: {},
+      outputs: {}
+    });
+    this.nodeGenerators.set(name, new AxGen(signature, options));
+    return this;
   }
-
-
-
-
-
-
-
-
-
-
-
-
+  /**
+   * Applies a synchronous transformation to the state object.
+   * Returns a new AxFlow type with the evolved state.
+   *
+   * @param transform - Function that takes the current state and returns a new state
+   * @returns New AxFlow instance with updated TState type
+   *
+   * @example
+   * ```typescript
+   * flow.map(state => ({ ...state, processedText: state.text.toLowerCase() }))
+   * ```
+   */
+  map(transform) {
+    const step = (state) => {
+      return transform(state);
+    };
+    if (this.branchContext?.currentBranchValue !== void 0) {
+      const currentBranch = this.branchContext.branches.get(
+        this.branchContext.currentBranchValue
+      ) || [];
+      currentBranch.push(step);
+      this.branchContext.branches.set(
+        this.branchContext.currentBranchValue,
+        currentBranch
+      );
+    } else {
+      this.flowDefinition.push(step);
+    }
+    return this;
   }
-
+  /**
+   * Labels a step for later reference (useful for feedback loops).
+   *
+   * @param label - The label to assign to the current step position
+   * @returns this (for chaining, no type change)
+   *
+   * @example
+   * ```typescript
+   * flow.label('retry-point')
+   * .execute('queryGen', ...)
+   * ```
+   */
+  label(label) {
+    if (this.branchContext?.currentBranchValue !== void 0) {
+      throw new Error("Cannot create labels inside branch blocks");
+    }
+    this.stepLabels.set(label, this.flowDefinition.length);
+    return this;
+  }
+  /**
+   * Executes a previously defined node with full type safety.
+   * The node name must exist in TNodes, and the mapping function is typed based on the node's signature.
+   *
+   * @param nodeName - The name of the node to execute (must exist in TNodes)
+   * @param mapping - Typed function that takes the current state and returns the input for the node
+   * @param dynamicContext - Optional object to override the AI service or options for this specific step
+   * @returns New AxFlow instance with TState augmented with the node's result
+   *
+   * @example
+   * ```typescript
+   * flow.execute('summarizer', state => ({ text: state.originalText }), { ai: cheapAI })
+   * ```
+   */
+  execute(nodeName, mapping, dynamicContext) {
+    if (!this.nodes.has(nodeName)) {
+      throw new Error(
+        `Node '${nodeName}' not found. Make sure to define it with .node() first.`
+      );
+    }
+    const nodeGenerator = this.nodeGenerators.get(nodeName);
+    if (!nodeGenerator) {
+      throw new Error(`Node generator for '${nodeName}' not found.`);
+    }
+    const step = async (state, context3) => {
+      const ai = dynamicContext?.ai ?? context3.mainAi;
+      const options = dynamicContext?.options ?? context3.mainOptions;
+      const nodeInputs = mapping(state);
+      const result = await nodeGenerator.forward(ai, nodeInputs, options);
+      return {
+        ...state,
+        [`${nodeName}Result`]: result
+      };
+    };
+    if (this.branchContext?.currentBranchValue !== void 0) {
+      const currentBranch = this.branchContext.branches.get(
+        this.branchContext.currentBranchValue
+      ) || [];
+      currentBranch.push(step);
+      this.branchContext.branches.set(
+        this.branchContext.currentBranchValue,
+        currentBranch
+      );
+    } else {
+      this.flowDefinition.push(step);
+    }
+    return this;
+  }
+  /**
+   * Starts a conditional branch based on a predicate function.
+   *
+   * @param predicate - Function that takes state and returns a value to branch on
+   * @returns this (for chaining)
+   *
+   * @example
+   * ```typescript
+   * flow.branch(state => state.qualityResult.needsMoreInfo)
+   * .when(true)
+   * .execute('queryGen', ...)
+   * .when(false)
+   * .execute('answer', ...)
+   * .merge()
+   * ```
+   */
+  branch(predicate) {
+    if (this.branchContext) {
+      throw new Error("Nested branches are not supported");
+    }
+    this.branchContext = {
+      predicate: (state) => predicate(state),
+      branches: /* @__PURE__ */ new Map(),
+      currentBranchValue: void 0
+    };
+    return this;
+  }
+  /**
+   * Defines a branch case for the current branch context.
+   *
+   * @param value - The value to match against the branch predicate result
+   * @returns this (for chaining)
+   */
+  when(value) {
+    if (!this.branchContext) {
+      throw new Error("when() called without matching branch()");
+    }
+    this.branchContext.currentBranchValue = value;
+    this.branchContext.branches.set(value, []);
+    return this;
+  }
+  /**
+   * Ends the current branch and merges all branch paths back into the main flow.
+   *
+   * @returns this (for chaining)
+   */
+  merge() {
+    if (!this.branchContext) {
+      throw new Error("merge() called without matching branch()");
+    }
+    const branchContext = this.branchContext;
+    this.branchContext = null;
+    this.flowDefinition.push(async (state, context3) => {
+      const branchValue = branchContext.predicate(state);
+      const branchSteps = branchContext.branches.get(branchValue);
+      if (!branchSteps) {
+        return state;
+      }
+      let currentState = state;
+      for (const step of branchSteps) {
+        currentState = await step(currentState, context3);
+      }
+      return currentState;
+    });
+    return this;
+  }
+  /**
+   * Executes multiple operations in parallel and merges their results.
+   * Both typed and legacy untyped branches are supported.
+   *
+   * @param branches - Array of functions that define parallel operations
+   * @returns Object with merge method for combining results
+   *
+   * @example
+   * ```typescript
+   * flow.parallel([
+   * subFlow => subFlow.execute('retrieve1', state => ({ query: state.query1 })),
+   * subFlow => subFlow.execute('retrieve2', state => ({ query: state.query2 })),
+   * subFlow => subFlow.execute('retrieve3', state => ({ query: state.query3 }))
+   * ]).merge('documents', (docs1, docs2, docs3) => [...docs1, ...docs2, ...docs3])
+   * ```
+   */
+  parallel(branches) {
+    const parallelStep = async (state, context3) => {
+      const promises = branches.map(async (branchFn) => {
+        const subContext = new AxFlowSubContextImpl(this.nodeGenerators);
+        const populatedSubContext = branchFn(
+          subContext
+        );
+        return await populatedSubContext.executeSteps(state, context3);
+      });
+      const results = await Promise.all(promises);
+      return {
+        ...state,
+        _parallelResults: results
+      };
+    };
+    this.flowDefinition.push(parallelStep);
+    return {
+      merge: (resultKey, mergeFunction) => {
+        this.flowDefinition.push((state) => {
+          const results = state._parallelResults;
+          if (!Array.isArray(results)) {
+            throw new Error("No parallel results found for merge");
+          }
+          const mergedValue = mergeFunction(...results);
+          const newState = { ...state };
+          delete newState._parallelResults;
+          newState[resultKey] = mergedValue;
+          return newState;
+        });
+        return this;
+      }
+    };
+  }
+  /**
+   * Creates a feedback loop that jumps back to a labeled step if a condition is met.
+   *
+   * @param condition - Function that returns true to trigger the feedback loop
+   * @param targetLabel - The label to jump back to
+   * @param maxIterations - Maximum number of iterations to prevent infinite loops (default: 10)
+   * @returns this (for chaining)
+   *
+   * @example
+   * ```typescript
+   * flow.label('retry-point')
+   * .execute('answer', ...)
+   * .execute('qualityCheck', ...)
+   * .feedback(state => state.qualityCheckResult.confidence < 0.7, 'retry-point')
+   * ```
+   */
+  feedback(condition, targetLabel, maxIterations = 10) {
+    if (!this.stepLabels.has(targetLabel)) {
+      throw new Error(
+        `Label '${targetLabel}' not found. Make sure to define it with .label() before the feedback point.`
+      );
+    }
+    const targetIndex = this.stepLabels.get(targetLabel);
+    const feedbackStepIndex = this.flowDefinition.length;
+    this.flowDefinition.push(async (state, context3) => {
+      let currentState = state;
+      let iterations = 1;
+      const iterationKey = `_feedback_${targetLabel}_iterations`;
+      if (typeof currentState[iterationKey] !== "number") {
+        currentState = { ...currentState, [iterationKey]: 1 };
+      }
+      while (condition(currentState) && iterations < maxIterations) {
+        iterations++;
+        currentState = { ...currentState, [iterationKey]: iterations };
+        for (let i = targetIndex; i < feedbackStepIndex; i++) {
+          const step = this.flowDefinition[i];
+          if (step) {
+            currentState = await step(currentState, context3);
+          }
+        }
+      }
+      return currentState;
+    });
+    return this;
+  }
+  /**
+   * Marks the beginning of a loop block.
+   *
+   * @param condition - Function that takes the current state and returns a boolean
+   * @returns this (for chaining)
+   *
+   * @example
+   * ```typescript
+   * flow.while(state => state.iterations < 3)
+   * .map(state => ({ ...state, iterations: (state.iterations || 0) + 1 }))
+   * .endWhile()
+   * ```
+   */
+  while(condition) {
+    const loopStartIndex = this.flowDefinition.length;
+    this.loopStack.push(loopStartIndex);
+    const placeholderStep = Object.assign(
+      (state) => state,
+      {
+        _condition: condition,
+        _isLoopStart: true
+      }
+    );
+    this.flowDefinition.push(placeholderStep);
+    return this;
+  }
+  /**
+   * Marks the end of a loop block.
+   *
+   * @returns this (for chaining)
+   */
+  endWhile() {
+    if (this.loopStack.length === 0) {
+      throw new Error("endWhile() called without matching while()");
+    }
+    const loopStartIndex = this.loopStack.pop();
+    const placeholderStep = this.flowDefinition[loopStartIndex];
+    if (!placeholderStep || !("_isLoopStart" in placeholderStep)) {
+      throw new Error("Loop start step not found or invalid");
+    }
+    const condition = placeholderStep._condition;
+    const loopBodySteps = this.flowDefinition.splice(loopStartIndex + 1);
+    this.flowDefinition[loopStartIndex] = async (state, context3) => {
+      let currentState = state;
+      while (condition(currentState)) {
+        for (const step of loopBodySteps) {
+          currentState = await step(currentState, context3);
+        }
+      }
+      return currentState;
+    };
+    return this;
+  }
+  /**
+   * Executes the flow with the given AI service and input values.
+   *
+   * @param ai - The AI service to use as the default for all steps
+   * @param values - The input values for the flow
+   * @param options - Optional forward options to use as defaults
+   * @returns Promise that resolves to the final output
+   */
+  async forward(ai, values, options) {
+    let state = { ...values };
+    const context3 = {
+      mainAi: ai,
+      mainOptions: options
+    };
+    for (const step of this.flowDefinition) {
+      state = await step(state, context3);
+    }
+    return state;
+  }
+};
+var AxFlowSubContextImpl = class {
+  constructor(nodeGenerators) {
+    this.nodeGenerators = nodeGenerators;
+  }
+  steps = [];
+  execute(nodeName, mapping, dynamicContext) {
+    const nodeGenerator = this.nodeGenerators.get(nodeName);
+    if (!nodeGenerator) {
+      throw new Error(`Node generator for '${nodeName}' not found.`);
+    }
+    this.steps.push(async (state, context3) => {
+      const ai = dynamicContext?.ai ?? context3.mainAi;
+      const options = dynamicContext?.options ?? context3.mainOptions;
+      const nodeInputs = mapping(state);
+      const result = await nodeGenerator.forward(ai, nodeInputs, options);
+      return {
+        ...state,
+        [`${nodeName}Result`]: result
+      };
+    });
+    return this;
+  }
+  map(transform) {
+    this.steps.push((state) => transform(state));
+    return this;
+  }
+  async executeSteps(initialState, context3) {
+    let currentState = initialState;
+    for (const step of this.steps) {
+      currentState = await step(currentState, context3);
+    }
+    return currentState;
+  }
+};
+var AxFlowTypedSubContextImpl = class {
+  constructor(nodeGenerators) {
+    this.nodeGenerators = nodeGenerators;
+  }
+  steps = [];
+  execute(nodeName, mapping, dynamicContext) {
+    const nodeGenerator = this.nodeGenerators.get(nodeName);
+    if (!nodeGenerator) {
+      throw new Error(`Node generator for '${nodeName}' not found.`);
+    }
+    this.steps.push(async (state, context3) => {
+      const ai = dynamicContext?.ai ?? context3.mainAi;
+      const options = dynamicContext?.options ?? context3.mainOptions;
+      const nodeInputs = mapping(state);
+      const result = await nodeGenerator.forward(ai, nodeInputs, options);
+      return {
+        ...state,
+        [`${nodeName}Result`]: result
+      };
+    });
+    return this;
+  }
+  map(transform) {
+    this.steps.push((state) => transform(state));
+    return this;
+  }
+  async executeSteps(initialState, context3) {
+    let currentState = initialState;
+    for (const step of this.steps) {
+      currentState = await step(currentState, context3);
+    }
+    return currentState;
+  }
+};
+
+// dsp/loader.ts
+var AxHFDataLoader = class {
+  rows = [];
+  baseUrl;
+  dataset;
+  split;
+  config;
+  options;
+  constructor({
+    dataset,
+    split,
+    config,
+    options
+  }) {
+    this.baseUrl = "https://datasets-server.huggingface.co/rows";
+    this.dataset = dataset;
+    this.split = split;
+    this.config = config;
+    this.options = options;
+  }
+  async fetchDataFromAPI(url) {
+    try {
+      const response = await fetch(url);
+      if (!response.ok) {
+        throw new Error(`Error fetching data: ${response.statusText}`);
+      }
+      const data = await response.json();
+      if (!data?.rows) {
+        throw new Error("Invalid data format");
+      }
+      return data.rows;
+    } catch (error) {
+      console.error("Error fetching data from API:", error);
+      throw error;
+    }
+  }
+  // https://datasets-server.huggingface.co/rows?dataset=hotpot_qa&config=distractor&split=train&offset=0&length=100
+  async loadData() {
+    const offset = this.options?.offset ?? 0;
+    const length = this.options?.length ?? 100;
+    const ds = encodeURIComponent(this.dataset);
+    const url = `${this.baseUrl}?dataset=${ds}&config=${this.config}&split=${this.split}&offset=${offset}&length=${length}`;
+    console.log("Downloading data from API.");
+    this.rows = await this.fetchDataFromAPI(url);
+    return this.rows;
+  }
+  setData(rows) {
+    this.rows = rows;
+  }
+  getData() {
     return this.rows;
   }
   async getRows({
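Taken together, the new AxFlow surface supports node definitions, execute steps, branching, parallel fan-out with merge, labeled feedback loops, and while loops. A small end-to-end sketch built only from the methods shown above; the AxAI setup and the exact signature-string grammar are assumptions consistent with the JSDoc examples, not a documented recipe:

```typescript
import { AxAI, AxFlow } from "@ax-llm/ax";

const ai = new AxAI({ name: "openai", apiKey: process.env.OPENAI_APIKEY! });

const flow = new AxFlow("userInput:string -> flowOutput:string")
  .node("summarizer", "text:string -> summary:string")
  .node("qualityCheck", "summary:string -> confidence:number")
  .label("retry-point")
  .execute("summarizer", (state) => ({ text: state.userInput }))
  .execute("qualityCheck", (state) => ({ summary: state.summarizerResult.summary }))
  // Replay the labeled span (at most 3 times) while confidence stays low.
  .feedback((state) => state.qualityCheckResult.confidence < 0.7, "retry-point", 3)
  .map((state) => ({ ...state, flowOutput: state.summarizerResult.summary }));

// forward() returns the final state object; flowOutput holds the summary.
const out = await flow.forward(ai, { userInput: "Long text to summarize..." });
```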
@@ -16251,198 +16852,6 @@ var AxEvalUtil = {
   novelF1ScoreOptimized
 };
 
-// flow/flow.ts
-var AxFlow = class extends AxProgramWithSignature {
-  nodes = /* @__PURE__ */ new Map();
-  flowDefinition = [];
-  nodeGenerators = /* @__PURE__ */ new Map();
-  loopStack = [];
-  constructor(signature = "userInput:string -> flowOutput:string") {
-    super(signature);
-  }
-  /**
-   * Declares a reusable computational node and its input/output signature.
-   *
-   * @param name - The name of the node
-   * @param signature - An object where the key is a string representation of inputs
-   * and the value is an object representing outputs
-   * @returns this (for chaining)
-   *
-   * @example
-   * ```typescript
-   * flow.node('summarizer', { 'text:string': { summary: f.string() } })
-   * ```
-   */
-  node(name, signature) {
-    const [inputSignature, outputSignature] = Object.entries(signature)[0] ?? [
-      "",
-      {}
-    ];
-    if (!inputSignature || !outputSignature) {
-      throw new Error(
-        `Invalid signature for node '${name}': signature must have at least one input->output mapping`
-      );
-    }
-    const outputFields = Object.entries(outputSignature).map(([key, value]) => {
-      if (typeof value === "object" && value !== null && "type" in value) {
-        const fieldType = value;
-        let fieldString = `${key}:`;
-        if (fieldType.isOptional) {
-          const colonIndex = fieldString.lastIndexOf(":");
-          fieldString = fieldString.slice(0, colonIndex) + "?" + fieldString.slice(colonIndex);
-        }
-        if (fieldType.isInternal) {
-          const colonIndex = fieldString.lastIndexOf(":");
-          fieldString = fieldString.slice(0, colonIndex) + "!" + fieldString.slice(colonIndex);
-        }
-        fieldString += fieldType.type;
-        if (fieldType.isArray) {
-          fieldString += "[]";
-        }
-        if (fieldType.type === "class" && fieldType.options) {
-          fieldString += ` "${fieldType.options.join(", ")}"`;
-        }
-        if (fieldType.description && (fieldType.type !== "class" || !fieldType.options)) {
-          fieldString += ` "${fieldType.description}"`;
-        }
-        return fieldString;
-      }
-      return `${key}:string`;
-    }).join(", ");
-    const signatureString = `${inputSignature} -> ${outputFields}`;
-    this.nodes.set(name, {
-      inputs: { [inputSignature]: true },
-      outputs: outputSignature
-    });
-    this.nodeGenerators.set(name, new AxGen(signatureString));
-    return this;
-  }
-  /**
-   * Applies a synchronous transformation to the state object.
-   *
-   * @param transform - Function that takes the current state and returns a new state
-   * @returns this (for chaining)
-   *
-   * @example
-   * ```typescript
-   * flow.map(state => ({ ...state, processedText: state.text.toLowerCase() }))
-   * ```
-   */
-  map(transform) {
-    this.flowDefinition.push((state) => {
-      return transform(state);
-    });
-    return this;
-  }
-  /**
-   * Executes a previously defined node.
-   *
-   * @param nodeName - The name of the node to execute (must exist in the nodes map)
-   * @param mapping - Function that takes the current state and returns the input object required by the node
-   * @param dynamicContext - Optional object to override the AI service or options for this specific step
-   * @returns this (for chaining)
-   *
-   * @example
-   * ```typescript
-   * flow.execute('summarizer', state => ({ text: state.originalText }), { ai: cheapAI })
-   * ```
-   */
-  execute(nodeName, mapping, dynamicContext) {
-    if (!this.nodes.has(nodeName)) {
-      throw new Error(
-        `Node '${nodeName}' not found. Make sure to define it with .node() first.`
-      );
-    }
-    const nodeGenerator = this.nodeGenerators.get(nodeName);
-    if (!nodeGenerator) {
-      throw new Error(`Node generator for '${nodeName}' not found.`);
-    }
-    this.flowDefinition.push(async (state, context3) => {
-      const ai = dynamicContext?.ai ?? context3.mainAi;
-      const options = dynamicContext?.options ?? context3.mainOptions;
-      const nodeInputs = mapping(state);
-      const result = await nodeGenerator.forward(ai, nodeInputs, options);
-      return {
-        ...state,
-        [`${nodeName}Result`]: result
-      };
-    });
-    return this;
-  }
-  /**
-   * Marks the beginning of a loop block.
-   *
-   * @param condition - Function that takes the current state and returns a boolean
-   * @returns this (for chaining)
-   *
-   * @example
-   * ```typescript
-   * flow.while(state => state.iterations < 3)
-   * .map(state => ({ ...state, iterations: (state.iterations || 0) + 1 }))
-   * .endWhile()
-   * ```
-   */
-  while(condition) {
-    const loopStartIndex = this.flowDefinition.length;
-    this.loopStack.push(loopStartIndex);
-    const placeholderStep = Object.assign(
-      (state) => state,
-      {
-        _condition: condition,
-        _isLoopStart: true
-      }
-    );
-    this.flowDefinition.push(placeholderStep);
-    return this;
-  }
-  /**
-   * Marks the end of a loop block.
-   *
-   * @returns this (for chaining)
-   */
-  endWhile() {
-    if (this.loopStack.length === 0) {
-      throw new Error("endWhile() called without matching while()");
-    }
-    const loopStartIndex = this.loopStack.pop();
-    const placeholderStep = this.flowDefinition[loopStartIndex];
-    if (!placeholderStep || !("_isLoopStart" in placeholderStep)) {
-      throw new Error("Loop start step not found or invalid");
-    }
-    const condition = placeholderStep._condition;
-    const loopBodySteps = this.flowDefinition.splice(loopStartIndex + 1);
-    this.flowDefinition[loopStartIndex] = async (state, context3) => {
-      let currentState = state;
-      while (condition(currentState)) {
-        for (const step of loopBodySteps) {
-          currentState = await step(currentState, context3);
-        }
-      }
-      return currentState;
-    };
-    return this;
-  }
-  /**
-   * Executes the flow with the given AI service and input values.
-   *
-   * @param ai - The AI service to use as the default for all steps
-   * @param values - The input values for the flow
-   * @param options - Optional forward options to use as defaults
-   * @returns Promise that resolves to the final output
-   */
-  async forward(ai, values, options) {
-    let state = { ...values };
-    const context3 = {
-      mainAi: ai,
-      mainOptions: options
-    };
-    for (const step of this.flowDefinition) {
-      state = await step(state, context3);
-    }
-    return state;
-  }
-};
-
 // ../../node_modules/uuid/dist/esm-node/rng.js
 import crypto4 from "crypto";
 var rnds8Pool = new Uint8Array(256);
@@ -17012,6 +17421,7 @@ export {
   AxEmbeddingAdapter,
   AxEvalUtil,
   AxFlow,
+  AxFlowTypedSubContextImpl,
   AxFunctionError,
   AxFunctionProcessor,
   AxGen,
@@ -17089,6 +17499,7 @@ export {
   axModelInfoHuggingFace,
   axModelInfoMistral,
   axModelInfoOpenAI,
+  axModelInfoOpenAIResponses,
   axModelInfoReka,
   axModelInfoTogether,
   axSpanAttributes,