@ax-llm/ax 11.0.58 → 11.0.60
This diff shows the content of publicly released package versions as they appear in their public registries and is provided for informational purposes only.
- package/index.cjs +66 -61
- package/index.cjs.map +1 -1
- package/index.d.cts +231 -241
- package/index.d.ts +231 -241
- package/index.js +65 -61
- package/index.js.map +1 -1
- package/package.json +1 -1
package/index.js CHANGED

@@ -1036,6 +1036,9 @@ var AxBaseAI = class {
           `Model ${model} does not support thinkingTokenBudget.`
         );
       }
+      if (options?.showThoughts && !this.getFeatures(model).hasShowThoughts) {
+        throw new Error(`Model ${model} does not support showThoughts.`);
+      }
       modelConfig.stream = (options?.stream !== void 0 ? options.stream : modelConfig.stream) ?? true;
       const canStream = this.getFeatures(model).streaming;
       if (!canStream) {
@@ -1144,13 +1147,6 @@ var AxBaseAI = class {
     const wrappedRespFn = (state) => (resp) => {
       const res2 = respFn(resp, state);
       res2.sessionId = options?.sessionId;
-      if (options?.hideThought) {
-        res2.results.forEach((result) => {
-          if (result.thought) {
-            result.thought = void 0;
-          }
-        });
-      }
       if (!res2.modelUsage) {
         res2.modelUsage = {
           ai: this.name,
@@ -1189,13 +1185,6 @@ var AxBaseAI = class {
     }
     const res = this.aiImpl.createChatResp(rv);
     res.sessionId = options?.sessionId;
-    if (options?.hideThought) {
-      res.results.forEach((result) => {
-        if (result.thought) {
-          result.thought = void 0;
-        }
-      });
-    }
     if (!res.modelUsage) {
       const tokenUsage = this.aiImpl.getTokenUsage();
       if (tokenUsage) {
@@ -1416,7 +1405,14 @@ function setChatResponseEvents(res, span, excludeContentFromTrace) {
   if (!res.results) {
     return;
   }
-  for (
+  for (let index = 0; index < res.results.length; index++) {
+    const result = res.results[index];
+    if (!result) {
+      continue;
+    }
+    if (!result.content && !result.thought && !result.functionCalls?.length && !result.finishReason) {
+      continue;
+    }
     const toolCalls = result.functionCalls?.map((call) => {
       return {
         id: call.id,
@@ -3331,6 +3327,9 @@ var AxAIGoogleGeminiImpl = class {
         break;
       }
     }
+    if (config.showThoughts !== void 0) {
+      thinkingConfig.includeThoughts = config.showThoughts;
+    }
     const generationConfig = {
       maxOutputTokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,
       temperature: req.modelConfig?.temperature ?? this.config.temperature,
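For the Gemini adapter, the practical effect is that `thinkingConfig.includeThoughts` is now forwarded into `generationConfig`. A minimal sketch of the assumed request shape; only `includeThoughts` is taken from the hunk above, the other values are illustrative:

```ts
// Illustrative sketch (not from the package) of the Gemini generationConfig
// once showThoughts is enabled; values other than includeThoughts are made up.
const generationConfig = {
  maxOutputTokens: 2048,
  temperature: 0.7,
  thinkingConfig: {
    includeThoughts: true, // driven by config.showThoughts per the hunk above
  },
};
```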
@@ -4119,7 +4118,7 @@ var AxAIOpenAIResponsesImpl = class {
     }
     return items;
   }
-  createChatReq(req,
+  createChatReq(req, config) {
     const model = req.model;
     const apiConfig = { name: "/responses" };
     let instructionsFromPrompt = null;
@@ -4142,6 +4141,10 @@ var AxAIOpenAIResponsesImpl = class {
         parameters: v.parameters ?? {}
       })
     );
+    const includeFields = [];
+    if (config.showThoughts) {
+      includeFields.push("reasoning.encrypted_content");
+    }
     let mutableReq = {
       model,
       input: "",
@@ -4156,7 +4159,7 @@ var AxAIOpenAIResponsesImpl = class {
       // Sourced from modelConfig or global config
       // Optional fields from AxAIOpenAIResponsesRequest that need to be in Mutable for initialization
       background: void 0,
-      include: void 0,
+      include: includeFields.length > 0 ? includeFields : void 0,
       metadata: void 0,
       parallel_tool_calls: this.config.parallelToolCalls,
       previous_response_id: void 0,
@@ -4229,9 +4232,13 @@ var AxAIOpenAIResponsesImpl = class {
           break;
         case "reasoning":
           currentResult.id = item.id;
-
-
-
+          if (item.encrypted_content) {
+            currentResult.thought = item.encrypted_content;
+          } else {
+            currentResult.thought = item.summary.map(
+              (s) => typeof s === "object" ? JSON.stringify(s) : s
+            ).join("\n");
+          }
           break;
         case "file_search_call":
           currentResult.id = item.id;
@@ -4549,7 +4556,9 @@ var AxAIOpenAIResponsesImpl = class {
         {
           const reasoningItem = event.item;
           baseResult.id = event.item.id;
-          if (reasoningItem.
+          if (reasoningItem.encrypted_content) {
+            baseResult.thought = reasoningItem.encrypted_content;
+          } else if (reasoningItem.summary) {
             baseResult.thought = reasoningItem.summary.map(
               (s) => typeof s === "object" ? JSON.stringify(s) : s
             ).join("\n");
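Taken together, the Responses-API hunks mean that when showThoughts is enabled the request asks for encrypted reasoning content, and both the non-streaming and streaming parsers prefer it over the summary when populating `thought`. A hypothetical request body; the `include` value comes from the hunks above, everything else is a placeholder:

```ts
// Hypothetical OpenAI Responses API request body illustrating the include change.
const requestBody = {
  model: "o4-mini",        // placeholder model name
  input: "…",              // placeholder input
  include: ["reasoning.encrypted_content"], // only set when showThoughts is true
};
```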
@@ -5994,24 +6003,11 @@ ${outputFields}`);
         );
         messageContent = userMsgParts.map((part) => part.type === "text" ? part.text : "").join("").trim();
       } else if (message.role === "assistant") {
-        const
-
-
-
-
-          if (value !== void 0 && value !== null && (typeof value === "string" ? value !== "" : true)) {
-            const renderedValue = processValue(field, value);
-            assistantContentParts.push(`${field.name}: ${renderedValue}`);
-          } else {
-            const isThoughtField = field.name === this.thoughtFieldName;
-            if (!field.isOptional && !field.isInternal && !isThoughtField) {
-              throw new Error(
-                `Value for output field '${field.name}' ('${field.title}') is required in assistant message history but was not found or was empty.`
-              );
-            }
-          }
-        }
-        messageContent = assistantContentParts.join("\n");
+        const assistantMsgParts = this.renderInputFields(
+          message.values
+          // Cast message.values (AxGenIn) to T (which extends AxGenIn)
+        );
+        messageContent = assistantMsgParts.map((part) => part.type === "text" ? part.text : "").join("").trim();
       }
       if (messageContent) {
         if (lastRole === message.role && userMessages.length > 0) {
@@ -8000,7 +7996,8 @@ var AxGen = class extends AxProgramWithSignature {
       stream,
       functions: _functions,
       functionCall: _functionCall,
-      thinkingTokenBudget
+      thinkingTokenBudget,
+      showThoughts
     } = options ?? {};
     const chatPrompt = mem?.history(sessionId) ?? [];
     if (chatPrompt.length === 0) {
@@ -8026,6 +8023,7 @@ var AxGen = class extends AxProgramWithSignature {
         stream,
         debug: false,
         thinkingTokenBudget,
+        showThoughts,
         traceContext,
         abortSignal: options?.abortSignal
       }
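The net effect of the AxGen changes is that showThoughts becomes a forward option that is passed through to the underlying chat call, and is rejected early when the model lacks the hasShowThoughts feature (see the AxBaseAI hunk at the top). A minimal usage sketch, assuming the usual AxAI/AxGen entry points from "@ax-llm/ax"; the provider, model defaults, and signature are illustrative:

```ts
import { AxAI, AxGen } from "@ax-llm/ax";

// Assumed setup: any provider whose selected model reports hasShowThoughts.
const ai = new AxAI({
  name: "google-gemini",
  apiKey: process.env.GOOGLE_APIKEY!,
});

const gen = new AxGen("question:string -> answer:string");

// showThoughts surfaces the model's reasoning on the result; per the feature
// gate above, it throws for models that do not support it.
const res = await gen.forward(
  ai,
  { question: "Why is the sky blue?" },
  { showThoughts: true }
);
console.log(res.answer);
```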
@@ -8440,6 +8438,7 @@ Content: ${result.content}`
       ...funcNames ? { provided_functions: funcNames } : {},
       ...options?.model ? { model: options.model } : {},
       ...options?.thinkingTokenBudget ? { thinking_token_budget: options.thinkingTokenBudget } : {},
+      ...options?.showThoughts ? { show_thoughts: options.showThoughts } : {},
       ...options?.maxSteps ? { max_steps: options.maxSteps } : {},
       ...options?.maxRetries ? { max_retries: options.maxRetries } : {},
       ...options?.fastFail ? { fast_fail: options.fastFail } : {}
@@ -10051,6 +10050,23 @@ var getTopInPercent = (entries, percent = 0.1) => {
   return sortedEntries.slice(0, topTenPercentCount);
 };

+// docs/rewriter.ts
+var AxDefaultQueryRewriter = class extends AxGen {
+  constructor(options) {
+    const signature = `"You are a query rewriter assistant tasked with rewriting a given query to improve its clarity, specificity, and relevance. Your role involves analyzing the query to identify any ambiguities, generalizations, or irrelevant information and then rephrasing it to make it more focused and precise. The rewritten query should be concise, easy to understand, and directly related to the original query. Output only the rewritten query."
+    query: string -> rewrittenQuery: string`;
+    super(signature, options);
+  }
+};
+var AxRewriter = class extends AxGen {
+  constructor(options) {
+    super(
+      '"Rewrite a given text to be clear and concise" original -> rewritten "improved text"',
+      options
+    );
+  }
+};
+
 // funcs/docker.ts
 var AxDockerSession = class {
   apiUrl;
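AxDefaultQueryRewriter moves earlier in the bundle (it is removed from its old location in a later hunk), and a new AxRewriter generator is added and exported at the bottom of this diff. A hypothetical usage sketch; the field names original/rewritten come from the signature above, while the provider setup is illustrative:

```ts
import { AxAI, AxRewriter } from "@ax-llm/ax";

const ai = new AxAI({ name: "openai", apiKey: process.env.OPENAI_APIKEY! });

// AxRewriter is an AxGen over: original -> rewritten "improved text"
const rewriter = new AxRewriter();
const res = await rewriter.forward(ai, {
  original: "this sentence are needing to be more clearer and short",
});
console.log(res.rewritten);
```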
@@ -11684,15 +11700,6 @@ var AxChainOfThought = class extends AxGen {
   }
 };

-// docs/rewriter.ts
-var AxDefaultQueryRewriter = class extends AxGen {
-  constructor(options) {
-    const signature = `"You are a query rewriter assistant tasked with rewriting a given query to improve its clarity, specificity, and relevance. Your role involves analyzing the query to identify any ambiguities, generalizations, or irrelevant information and then rephrasing it to make it more focused and precise. The rewritten query should be concise, easy to understand, and directly related to the original query. Output only the rewritten query."
-    query: string -> rewrittenQuery: string`;
-    super(signature, options);
-  }
-};
-
 // dsp/strutil.ts
 var trimNonAlphaNum = (str) => {
   return str.replace(/^\W+|\W+$/g, "");
@@ -13526,20 +13533,16 @@ var AxRAG = class extends AxChainOfThought {
     this.register(this.genQuery);
   }
   async forward(ai, { question }, options) {
+    let hop = 0;
     let context3 = [];
-
-    const
-
-
-
-      question
-    },
-    options
-    );
-    const val = await this.queryFn(query);
-    context3 = AxStringUtil.dedup([...context3, val]);
+    while (hop < this.maxHops) {
+      const query = await this.genQuery.forward(ai, { context: context3, question });
+      const queryResult = await this.queryFn(query.query);
+      context3 = AxStringUtil.dedup([...context3, queryResult]);
+      hop++;
+    }
     }
-
+    const res = await super.forward(ai, { context: context3, question }, options);
+    return res;
   }
 };
 export {
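The AxRAG.forward rewrite restores the multi-hop retrieval loop: it calls genQuery and queryFn once per hop up to maxHops, deduplicates the accumulated context, and only then delegates to the chain-of-thought parent. A usage sketch under the assumption that the constructor still takes a query function plus an options object with maxHops:

```ts
import { AxAI, AxRAG } from "@ax-llm/ax";

const ai = new AxAI({ name: "openai", apiKey: process.env.OPENAI_APIKEY! });

// Stand-in retrieval function; a real one would query a vector store.
const fetchFromVectorDb = async (query: string) => `snippets matching: ${query}`;

const rag = new AxRAG(fetchFromVectorDb, { maxHops: 2 });
const res = await rag.forward(ai, {
  question: "Why does water boil at a lower temperature at altitude?",
});
console.log(res.answer);
```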
@@ -13629,6 +13632,7 @@ export {
   AxPromptTemplate,
   AxRAG,
   AxRateLimiterTokenUsage,
+  AxRewriter,
   AxSignature,
   AxSimpleClassifier,
   AxSimpleClassifierClass,