@ax-llm/ax 11.0.37 → 11.0.38
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.cjs +87 -7
- package/index.cjs.map +1 -1
- package/index.d.cts +99 -77
- package/index.d.ts +99 -77
- package/index.js +87 -7
- package/index.js.map +1 -1
- package/package.json +1 -1
package/index.js
CHANGED
@@ -754,6 +754,9 @@ var AxBaseAI = class {
   modelUsage;
   embedModelUsage;
   defaults;
+  lastUsedModelConfig;
+  lastUsedChatModel;
+  lastUsedEmbedModel;
   apiURL;
   name;
   id;
@@ -843,6 +846,15 @@ var AxBaseAI = class {
   getFeatures(model) {
     return typeof this.supportFor === "function" ? this.supportFor(model ?? this.defaults.model) : this.supportFor;
   }
+  getLastUsedChatModel() {
+    return this.lastUsedChatModel;
+  }
+  getLastUsedEmbedModel() {
+    return this.lastUsedEmbedModel;
+  }
+  getLastUsedModelConfig() {
+    return this.lastUsedModelConfig;
+  }
   // Method to calculate percentiles
   calculatePercentile(samples, percentile) {
     if (samples.length === 0) return 0;
@@ -965,6 +977,8 @@ var AxBaseAI = class {
       functions,
       modelConfig
     };
+    this.lastUsedChatModel = model;
+    this.lastUsedModelConfig = modelConfig;
     const fn = async () => {
       const [apiConfig, reqValue] = this.aiImpl.createChatReq(
         req,
@@ -1095,6 +1109,7 @@ var AxBaseAI = class {
       ...embedReq,
       embedModel
     };
+    this.lastUsedEmbedModel = embedModel;
     const fn = async () => {
       const [apiConfig, reqValue] = this.aiImpl.createEmbedReq(req);
       const res2 = await apiCall(
@@ -3788,6 +3803,15 @@ var AxAI = class {
   getModelList() {
     return this.ai.getModelList();
   }
+  getLastUsedChatModel() {
+    return this.ai.getLastUsedChatModel();
+  }
+  getLastUsedEmbedModel() {
+    return this.ai.getLastUsedEmbedModel();
+  }
+  getLastUsedModelConfig() {
+    return this.ai.getLastUsedModelConfig();
+  }
   getMetrics() {
     return this.ai.getMetrics();
   }
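
The hunks above add three accessors (getLastUsedChatModel, getLastUsedEmbedModel, getLastUsedModelConfig) to AxBaseAI and surface them on the AxAI wrapper, recording the model and model config at the point a chat or embed request is built. A minimal usage sketch follows; the AxAI constructor options and chat request shape shown here are assumptions for illustration, not part of this diff:

    import { AxAI } from "@ax-llm/ax";

    async function demo(): Promise<void> {
      // Hypothetical provider/options; substitute whatever you already pass to AxAI.
      const ai = new AxAI({ name: "openai", apiKey: process.env.OPENAI_APIKEY ?? "" });

      await ai.chat({ chatPrompt: [{ role: "user", content: "Hello" }] });

      // New in 11.0.38: inspect what the previous call actually used.
      console.log(ai.getLastUsedChatModel());   // e.g. "gpt-4o-mini" (example value)
      console.log(ai.getLastUsedModelConfig()); // e.g. { maxTokens: ..., stream: ... }
    }
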
@@ -6359,7 +6383,10 @@ var AxGen = class extends AxProgramWithSignature {
         await assertAssertions(this.asserts, values);
       }
       if (result.finishReason === "length") {
-        throw new Error(
+        throw new Error(
+          `Max tokens reached before completion
+Content: ${content}`
+        );
       }
     }
     const funcs = parseFunctionCalls(ai, functionCalls, values, model);
@@ -6454,7 +6481,10 @@ var AxGen = class extends AxProgramWithSignature {
         }
       }
       if (result.finishReason === "length") {
-        throw new Error(
+        throw new Error(
+          `Max tokens reached before completion
+Content: ${result.content}`
+        );
       }
     }
     const publicValues = { ...values };
@@ -6519,7 +6549,7 @@ var AxGen = class extends AxProgramWithSignature {
           err = e;
         } else if (e instanceof AxAIServiceStreamTerminatedError) {
         } else {
-          throw e;
+          throw enhanceError(e, ai);
         }
         if (errorFields) {
           handleValidationError(
@@ -6534,7 +6564,7 @@ var AxGen = class extends AxProgramWithSignature {
       }
       throw new Error(`Unable to fix validation error: ${err?.toString()}`);
     }
-    throw new Error(`Max steps reached: ${maxSteps}`);
+    throw enhanceError(new Error(`Max steps reached: ${maxSteps}`), ai);
   }
   shouldContinueSteps(lastMemItem, stopFunction) {
     const stopFunctionExecuted = stopFunction && this.functionsExecuted.has(stopFunction);
@@ -6605,6 +6635,20 @@ var AxGen = class extends AxProgramWithSignature {
     });
   }
 };
+function enhanceError(e, ai) {
+  const originalError = e instanceof Error ? e : new Error(String(e));
+  const model = ai.getLastUsedChatModel();
+  const modelConfig = ai.getLastUsedModelConfig();
+  const details = [
+    `model=${model}`,
+    `maxTokens=${modelConfig?.maxTokens ?? "N/A"}`,
+    `streaming=${modelConfig?.stream ?? false}`
+  ].join(", ");
+  const enhancedError = new Error();
+  enhancedError.stack = `Generate Failed: ${details}
+${originalError.stack}`;
+  return enhancedError;
+}
 
 // prompts/agent.ts
 function processChildAgentFunction(childFunction, parentValues, parentInputKeys, modelList, options) {
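
The new enhanceError helper wraps failures thrown inside AxGen so that the last-used model details are prepended to the stack (Generate Failed: model=..., maxTokens=..., streaming=...). A sketch of what a caller might observe; the AxGen signature string and forward() call below are assumptions for illustration:

    import { AxAI, AxGen } from "@ax-llm/ax";

    async function demoError(): Promise<void> {
      const ai = new AxAI({ name: "openai", apiKey: process.env.OPENAI_APIKEY ?? "" });
      const gen = new AxGen("question:string -> answer:string");
      try {
        await gen.forward(ai, { question: "..." });
      } catch (err) {
        // Stack is now prefixed with something like:
        // Generate Failed: model=gpt-4o-mini, maxTokens=1024, streaming=false
        console.error((err as Error).stack);
      }
    }
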
@@ -6985,6 +7029,15 @@ var AxBalancer = class _AxBalancer {
     this.maxBackoffMs = options?.maxBackoffMs ?? 32e3;
     this.maxRetries = options?.maxRetries ?? 3;
   }
+  getLastUsedChatModel() {
+    return this.currentService.getLastUsedChatModel();
+  }
+  getLastUsedEmbedModel() {
+    return this.currentService.getLastUsedEmbedModel();
+  }
+  getLastUsedModelConfig() {
+    return this.currentService.getLastUsedModelConfig();
+  }
   /**
    * Service comparator that respects the input order of services.
    */
@@ -9149,6 +9202,15 @@ var AxMockAIService = class {
       embed: { count: 0, rate: 0, total: 0 }
     }
   };
+  getLastUsedChatModel() {
+    throw new Error("Method not implemented.");
+  }
+  getLastUsedEmbedModel() {
+    throw new Error("Method not implemented.");
+  }
+  getLastUsedModelConfig() {
+    throw new Error("Method not implemented.");
+  }
   getName() {
     return this.config.name ?? "mock-ai-service";
   }
@@ -11058,6 +11120,7 @@ var AxMCPStdioTransport = class {
 // ai/multiservice.ts
 var AxMultiServiceRouter = class {
   options;
+  lastUsedService;
   services = /* @__PURE__ */ new Map();
   /**
    * Constructs a new multi-service router.
@@ -11116,6 +11179,15 @@ var AxMultiServiceRouter = class {
       }
     }
   }
+  getLastUsedChatModel() {
+    return this.lastUsedService?.getLastUsedChatModel();
+  }
+  getLastUsedEmbedModel() {
+    return this.lastUsedService?.getLastUsedEmbedModel();
+  }
+  getLastUsedModelConfig() {
+    return this.lastUsedService?.getLastUsedModelConfig();
+  }
   /**
    * Delegates the chat call to the service matching the provided model key.
    */
@@ -11128,6 +11200,7 @@ var AxMultiServiceRouter = class {
     if (!item) {
       throw new Error(`No service found for model key: ${modelKey}`);
     }
+    this.lastUsedService = item.service;
     if (!item.model) {
       const { model, ...reqWithoutModel } = req;
       return await item.service.chat(reqWithoutModel, options);
@@ -11146,6 +11219,7 @@ var AxMultiServiceRouter = class {
     if (!item) {
       throw new Error(`No service found for embed model key: ${embedModelKey}`);
     }
+    this.lastUsedService = item.service;
     if (!item.model) {
       const { embedModel, ...reqWithoutEmbedModel } = req;
       return await item.service.embed(reqWithoutEmbedModel, options);
@@ -11200,11 +11274,17 @@ var AxMultiServiceRouter = class {
    * or falls back to the first service if none has been used.
    */
   getMetrics() {
-
-    if (!
+    let serviceInstance = this.lastUsedService;
+    if (!serviceInstance) {
+      const firstServiceEntry = this.services.values().next().value;
+      if (firstServiceEntry) {
+        serviceInstance = "service" in firstServiceEntry ? firstServiceEntry.service : firstServiceEntry;
+      }
+    }
+    if (!serviceInstance) {
       throw new Error("No service available to get metrics.");
     }
-    return
+    return serviceInstance.getMetrics();
   }
   /**
    * Sets options on all underlying services.