@pensar/apex 0.0.37-canary.0 → 0.0.39-canary.2f181ec5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +29 -1
- package/build/benchmark.js +121 -22
- package/build/index.js +180 -62
- package/build/pentest.js +121 -22
- package/build/quicktest.js +90 -16
- package/build/swarm.js +90 -16
- package/package.json +1 -1
package/build/index.js
CHANGED
@@ -27849,24 +27849,40 @@ function useAgent() {
 }
 function AgentProvider({ children }) {
   const [model, setModel] = import_react10.useState(AVAILABLE_MODELS[0]);
-  const [
+  const [tokenUsage, setTokenUsage] = import_react10.useState({
+    inputTokens: 0,
+    outputTokens: 0,
+    totalTokens: 0
+  });
+  const [hasExecuted, setHasExecuted] = import_react10.useState(false);
   const [thinking, setThinking] = import_react10.useState(false);
   const [isExecuting, setIsExecuting] = import_react10.useState(false);
-  const
-
-
+  const addTokenUsage = import_react10.useCallback((input, output) => {
+    setHasExecuted(true);
+    setTokenUsage((prev) => ({
+      inputTokens: prev.inputTokens + input,
+      outputTokens: prev.outputTokens + output,
+      totalTokens: prev.totalTokens + input + output
+    }));
+  }, []);
+  const resetTokenUsage = import_react10.useCallback(() => {
+    setHasExecuted(false);
+    setTokenUsage({ inputTokens: 0, outputTokens: 0, totalTokens: 0 });
+  }, []);
+  const contextValue = import_react10.useMemo(() => ({
+    model,
+    setModel,
+    tokenUsage,
+    addTokenUsage,
+    resetTokenUsage,
+    hasExecuted,
+    thinking,
+    setThinking,
+    isExecuting,
+    setIsExecuting
+  }), [model, tokenUsage, hasExecuted, thinking, isExecuting, addTokenUsage, resetTokenUsage]);
   return /* @__PURE__ */ import_jsx_dev_runtime2.jsxDEV(AgentContext.Provider, {
-    value:
-    model,
-    setModel,
-    tokenCount,
-    setTokenCount,
-    addTokens,
-    thinking,
-    setThinking,
-    isExecuting,
-    setIsExecuting
-    },
+    value: contextValue,
     children
   }, undefined, false, undefined, this);
 }
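The hunk above replaces the old single `tokenCount` counter with a structured `tokenUsage` record, adds `addTokenUsage`/`resetTokenUsage` callbacks, and memoizes the provider value. A minimal standalone sketch of the same pattern in plain React follows; the provider/hook names and the `TokenUsage` type are illustrative stand-ins, not the package's actual exports.

```ts
import { createContext, createElement, useCallback, useContext, useMemo, useState } from "react";
import type { ReactNode } from "react";

// Illustrative shape; mirrors the inputTokens/outputTokens/totalTokens record in the diff.
interface TokenUsage {
  inputTokens: number;
  outputTokens: number;
  totalTokens: number;
}

interface TokenUsageContextValue {
  tokenUsage: TokenUsage;
  hasExecuted: boolean;
  addTokenUsage: (input: number, output: number) => void;
  resetTokenUsage: () => void;
}

const EMPTY_USAGE: TokenUsage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 };
const TokenUsageContext = createContext<TokenUsageContextValue | null>(null);

export function TokenUsageProvider({ children }: { children: ReactNode }) {
  const [tokenUsage, setTokenUsage] = useState<TokenUsage>(EMPTY_USAGE);
  const [hasExecuted, setHasExecuted] = useState(false);

  // Accumulate usage reported by each model step; identity kept stable with useCallback.
  const addTokenUsage = useCallback((input: number, output: number) => {
    setHasExecuted(true);
    setTokenUsage((prev) => ({
      inputTokens: prev.inputTokens + input,
      outputTokens: prev.outputTokens + output,
      totalTokens: prev.totalTokens + input + output,
    }));
  }, []);

  // Clear the counters at the start of a new run.
  const resetTokenUsage = useCallback(() => {
    setHasExecuted(false);
    setTokenUsage(EMPTY_USAGE);
  }, []);

  // Memoize the context value so consumers re-render only when something actually changes.
  const value = useMemo(
    () => ({ tokenUsage, hasExecuted, addTokenUsage, resetTokenUsage }),
    [tokenUsage, hasExecuted, addTokenUsage, resetTokenUsage]
  );

  return createElement(TokenUsageContext.Provider, { value }, children);
}

export function useTokenUsage(): TokenUsageContextValue {
  const ctx = useContext(TokenUsageContext);
  if (!ctx) throw new Error("useTokenUsage must be used inside TokenUsageProvider");
  return ctx;
}
```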
@@ -27940,7 +27956,7 @@ function Footer({
   showExitWarning = false
 }) {
   cwd = "~" + cwd.split(os2.homedir()).pop() || "";
-  const { model,
+  const { model, tokenUsage, hasExecuted, thinking, isExecuting } = useAgent();
   const hotkeys = isExecuting ? [{ key: "Ctrl+C", label: "Stop Execution" }] : [{ key: "Ctrl+C", label: "Clear/Exit" }];
   return /* @__PURE__ */ import_jsx_dev_runtime2.jsxDEV("box", {
     flexDirection: "row",
@@ -28006,27 +28022,21 @@ function Footer({
   }, undefined, true, undefined, this);
 }
 function AgentStatus() {
-  const {
+  const { tokenUsage, hasExecuted, thinking, isExecuting } = useAgent();
   return /* @__PURE__ */ import_jsx_dev_runtime2.jsxDEV("box", {
     flexDirection: "row",
     gap: 1,
     children: [
-
+      hasExecuted && /* @__PURE__ */ import_jsx_dev_runtime2.jsxDEV(import_jsx_dev_runtime2.Fragment, {
        children: [
          /* @__PURE__ */ import_jsx_dev_runtime2.jsxDEV("box", {
            border: ["right"],
            borderColor: "green"
          }, undefined, false, undefined, this),
          /* @__PURE__ */ import_jsx_dev_runtime2.jsxDEV("text", {
-           fg: "
-           children:
-
-           /* @__PURE__ */ import_jsx_dev_runtime2.jsxDEV("span", {
-             fg: "white",
-             children: formatTokenCount(tokenCount)
-           }, undefined, false, undefined, this)
-           ]
-         }, undefined, true, undefined, this),
+           fg: "white",
+           children: `↓${formatTokenCount(tokenUsage.inputTokens)} ↑${formatTokenCount(tokenUsage.outputTokens)} Σ${formatTokenCount(tokenUsage.totalTokens)}`
+         }, undefined, false, undefined, this),
          /* @__PURE__ */ import_jsx_dev_runtime2.jsxDEV(ContextProgress, {
            width: 10
          }, undefined, false, undefined, this)
@@ -28048,10 +28058,11 @@ function AgentStatus() {
   }, undefined, true, undefined, this);
 }
 function ContextProgress({ width }) {
-  const { model,
-
-  if (!thinking)
+  const { model, tokenUsage, thinking } = useAgent();
+  if (!thinking || tokenUsage.totalTokens === 0)
     return null;
+  const contextLength = model.contextLength ?? 200000;
+  const contextProgress = Math.max(0, Math.min(100, Number((tokenUsage.totalTokens / contextLength * 100).toFixed(2))));
   return /* @__PURE__ */ import_jsx_dev_runtime2.jsxDEV(ProgressBar, {
     value: contextProgress,
     width
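In `ContextProgress`, the bar value is the share of the model's context window consumed so far, clamped to 0-100 and rounded to two decimals, with a 200000-token fallback when the model does not declare a context length. The same calculation in isolation (the function name is illustrative):

```ts
// Fraction of the context window consumed, as a percentage clamped to [0, 100].
function contextUsagePercent(totalTokens: number, contextLength: number = 200_000): number {
  const pct = (totalTokens / contextLength) * 100;
  return Math.max(0, Math.min(100, Number(pct.toFixed(2))));
}

// e.g. contextUsagePercent(50_000) === 25, contextUsagePercent(300_000) === 100
```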
@@ -65017,11 +65028,38 @@ async function summarizeConversation(messages, opts, model) {
       content: `Summarize this conversation to pass to another agent. This was the system prompt: ${opts.system} `
     }
   ];
-  const { text: summary } = await generateText({
+  const { text: summary, usage: summaryUsage } = await generateText({
     model,
     system: `You are a helpful assistant that summarizes conversations to pass to another agent. Review the conversation and system prompt at the end provided by the user.`,
     messages: summarizedMessages
   });
+  if (opts.onStepFinish && summaryUsage) {
+    opts.onStepFinish({
+      text: "",
+      reasoning: undefined,
+      reasoningDetails: [],
+      files: [],
+      sources: [],
+      toolCalls: [],
+      toolResults: [],
+      finishReason: "stop",
+      usage: {
+        inputTokens: summaryUsage.inputTokens ?? 0,
+        outputTokens: summaryUsage.outputTokens ?? 0,
+        totalTokens: summaryUsage.totalTokens ?? 0
+      },
+      warnings: [],
+      request: {},
+      response: {
+        id: "summarization",
+        timestamp: new Date,
+        modelId: ""
+      },
+      providerMetadata: undefined,
+      stepType: "initial",
+      isContinued: false
+    });
+  }
   const originalLength = typeof opts.prompt === "string" ? opts.prompt.length : 0;
   const enhancedPrompt = originalLength > 1e5 ? `Context: The previous conversation contained very long content that was summarized.
 
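This hunk and the tool-repair hunk further down use the same trick: usage from an out-of-band `generateText`/`generateObject` call is surfaced by synthesizing a step result whose only meaningful field is `usage`, so the existing `onStepFinish` plumbing picks it up. A hedged sketch of a helper for that pattern; the field names follow the objects built in the diff, and the exact step type expected by the AI SDK's `onStepFinish` is simplified here.

```ts
// Illustrative token-usage shape, matching the usage objects built in the diff.
interface TokenUsageTotals {
  inputTokens: number;
  outputTokens: number;
  totalTokens: number;
}

// Build a "step" whose only purpose is to surface token usage from an
// out-of-band model call (summarization, tool-argument repair, ...).
// The surrounding fields are intentionally empty/neutral, as in the diff.
function usageOnlyStep(id: string, usage: Partial<TokenUsageTotals>) {
  return {
    text: "",
    reasoning: undefined,
    reasoningDetails: [],
    files: [],
    sources: [],
    toolCalls: [],
    toolResults: [],
    finishReason: "stop" as const,
    usage: {
      inputTokens: usage.inputTokens ?? 0,
      outputTokens: usage.outputTokens ?? 0,
      totalTokens: usage.totalTokens ?? 0,
    },
    warnings: [],
    request: {},
    response: { id, timestamp: new Date(), modelId: "" },
    providerMetadata: undefined,
    stepType: "initial" as const,
    isContinued: false,
  };
}

// Usage (hypothetical): opts.onStepFinish?.(usageOnlyStep("summarization", summaryUsage));
```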
@@ -65184,6 +65222,7 @@ function streamResponse(opts) {
   } = opts;
   const messagesContainer = { current: messages || [] };
   const providerModel = getProviderModel(model, authConfig);
+  let rateLimitRetryCount = 0;
   try {
     const response = streamText({
       model: providerModel,
@@ -65197,6 +65236,16 @@ function streamResponse(opts) {
         messagesContainer.current = opts2.messages;
         return;
       },
+      onError: async ({ error: error46 }) => {
+        if (error46.message.toLowerCase().includes("too many tokens") || error46.message.toLowerCase().includes("overloaded")) {
+          rateLimitRetryCount++;
+          await new Promise((resolve4) => setTimeout(resolve4, 1000 * rateLimitRetryCount));
+          if (rateLimitRetryCount < 20) {
+            return;
+          }
+        }
+        throw error46;
+      },
       onStepFinish,
       abortSignal,
       activeTools,
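The new `onError` handler waits 1 s, then 2 s, then 3 s, and so on when the provider reports a token-rate or overload error, and only rethrows after 20 attempts; returning from the callback swallows the error for that attempt rather than re-issuing the request itself. A standalone sketch of the same linear-backoff idea applied to a discrete call (the helper name is illustrative):

```ts
// Retry an async operation with linear backoff when the error looks like a
// rate-limit/overload response; rethrow anything else, or give up after maxRetries.
async function withLinearBackoff<T>(
  run: () => Promise<T>,
  maxRetries = 20,
  baseDelayMs = 1000
): Promise<T> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await run();
    } catch (err) {
      const msg = err instanceof Error ? err.message.toLowerCase() : String(err);
      const retryable = msg.includes("too many tokens") || msg.includes("overloaded");
      if (!retryable || attempt >= maxRetries) throw err;
      // Wait attempt * baseDelayMs before trying again (1s, 2s, 3s, ...).
      await new Promise((resolve) => setTimeout(resolve, baseDelayMs * attempt));
    }
  }
}
```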
@@ -65215,7 +65264,7 @@ function streamResponse(opts) {
             throw new Error(`Tool ${toolCall.toolName} not found or has no schema`);
           }
           const jsonSchema2 = inputSchema({ toolName: toolCall.toolName });
-          const { object: repairedArgs } = await generateObject({
+          const { object: repairedArgs, usage: repairUsage } = await generateObject({
             model: providerModel,
             schema: tool2.inputSchema,
             prompt: [
@@ -65228,6 +65277,33 @@ function streamResponse(opts) {
             ].join(`
 `)
           });
+          if (onStepFinish && repairUsage) {
+            onStepFinish({
+              text: "",
+              reasoning: undefined,
+              reasoningDetails: [],
+              files: [],
+              sources: [],
+              toolCalls: [],
+              toolResults: [],
+              finishReason: "stop",
+              usage: {
+                inputTokens: repairUsage.inputTokens ?? 0,
+                outputTokens: repairUsage.outputTokens ?? 0,
+                totalTokens: repairUsage.totalTokens ?? 0
+              },
+              warnings: [],
+              request: {},
+              response: {
+                id: "tool-repair",
+                timestamp: new Date,
+                modelId: ""
+              },
+              providerMetadata: undefined,
+              stepType: "initial",
+              isContinued: false
+            });
+          }
           return { ...toolCall, input: JSON.stringify(repairedArgs) };
         } catch (repairError) {
           if (!silent) {
@@ -65254,9 +65330,9 @@ function streamResponse(opts) {
   }
 }
 async function generateObjectResponse(opts) {
-  const { model, schema, prompt, system, maxTokens, temperature, authConfig } = opts;
+  const { model, schema, prompt, system, maxTokens, temperature, authConfig, onTokenUsage } = opts;
   const providerModel = getProviderModel(model, authConfig);
-  const { object: object3 } = await generateObject({
+  const { object: object3, usage } = await generateObject({
     model: providerModel,
     schema,
     prompt,
@@ -65264,6 +65340,9 @@ async function generateObjectResponse(opts) {
     maxTokens,
     temperature
   });
+  if (onTokenUsage && usage) {
+    onTokenUsage(usage.inputTokens ?? 0, usage.outputTokens ?? 0);
+  }
   return object3;
 }
 // src/core/agent/pentestAgent/prompts.ts
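`generateObjectResponse` now takes an optional `onTokenUsage(inputTokens, outputTokens)` callback, and the pentest helpers below (`generateTestStrategy`, `generatePayload`, `analyzeResponse`) simply thread the same callback through to it. A minimal sketch of that callback contract, kept independent of the AI SDK's concrete types (all names here are illustrative):

```ts
// Callback contract threaded through the helpers in this diff.
type OnTokenUsage = (inputTokens: number, outputTokens: number) => void;

interface StructuredCallResult<T> {
  object: T;
  usage?: { inputTokens?: number; outputTokens?: number };
}

// Hypothetical wrapper: run a structured-output call, report its usage, return the object.
async function withTokenUsage<T>(
  call: () => Promise<StructuredCallResult<T>>,
  onTokenUsage?: OnTokenUsage
): Promise<T> {
  const { object, usage } = await call();
  if (onTokenUsage && usage) {
    onTokenUsage(usage.inputTokens ?? 0, usage.outputTokens ?? 0);
  }
  return object;
}

// e.g. const payload = await withTokenUsage(() => generateObject({ /* ... */ }), onTokenUsage);
```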
@@ -68645,7 +68724,7 @@ Example workflow:
     execute: async (params) => recordTestResultCore(session, params)
   });
 }
-async function generateTestStrategy(params, model) {
+async function generateTestStrategy(params, model, onTokenUsage) {
   const prompt = `You are a penetration testing expert. Generate a concise testing strategy:
 
 Attack Type: ${params.knowledge.name}
@@ -68671,12 +68750,15 @@ Be tactical and specific.`;
       model: providerModel,
       prompt
     });
+    if (onTokenUsage && result.usage) {
+      onTokenUsage(result.usage.inputTokens ?? 0, result.usage.outputTokens ?? 0);
+    }
     return result.text;
   } catch (error46) {
     return params.knowledge.adaptiveStrategy;
   }
 }
-async function generatePayload(params, model) {
+async function generatePayload(params, model, onTokenUsage) {
   const prompt = `Generate ONE ${params.knowledge.name} payload for testing.
 
 Techniques:
@@ -68700,7 +68782,8 @@ Generate ONE specific payload. Return ONLY JSON:
     const result = await generateObjectResponse({
       model,
       schema: PayloadSchema,
-      prompt
+      prompt,
+      onTokenUsage
     });
     return result;
   } catch (error46) {
@@ -68713,7 +68796,7 @@ Generate ONE specific payload. Return ONLY JSON:
     technique: technique.name
   };
 }
-async function analyzeResponse(params, model) {
+async function analyzeResponse(params, model, onTokenUsage) {
   const prompt = `Analyze this security test response:
 
 Attack: ${params.knowledge.name}
@@ -68741,7 +68824,8 @@ Analyze: Is this vulnerable? Return ONLY JSON:
     const result = await generateObjectResponse({
       model,
      schema: AnalysisSchema,
-      prompt
+      prompt,
+      onTokenUsage
     });
     return result;
   } catch (error46) {
@@ -68760,7 +68844,7 @@ Analyze: Is this vulnerable? Return ONLY JSON:
     suggestedNextTest: "Try alternative payload or technique"
   };
 }
-function createSmartTestTool(session, model) {
+function createSmartTestTool(session, model, onTokenUsage) {
   return tool({
     name: "test_parameter",
     description: `Intelligently test a parameter for a vulnerability using AI-powered adaptive testing.
@@ -68830,7 +68914,7 @@ test_parameter({
         parameter,
         endpoint,
         context
-      }, model);
+      }, model, onTokenUsage);
       console.log(`Strategy: ${strategy}`);
       const results = [];
       let vulnerable = false;
@@ -68843,7 +68927,7 @@ test_parameter({
         context: { ...context, parameter, endpoint },
         previousResults: results,
         round
-      }, model);
+      }, model, onTokenUsage);
       console.log(` Payload: ${payloadData.payload}`);
       console.log(` Reasoning: ${payloadData.reasoning}`);
       let response;
@@ -68872,7 +68956,7 @@ test_parameter({
        attackType,
        knowledge,
        previousResults: results
-      }, model);
+      }, model, onTokenUsage);
      console.log(` Analysis: ${analysis.reasoning}`);
      console.log(` Vulnerable: ${analysis.vulnerable} (confidence: ${analysis.confidence})`);
      results.push({
@@ -69322,7 +69406,7 @@ function wrapCommandWithHeaders(command, headers) {
   }
   return wrapped;
 }
-function createPentestTools(session, model, toolOverride) {
+function createPentestTools(session, model, toolOverride, onTokenUsage) {
   const offensiveHeaders = getOffensiveHeaders(session);
   const rateLimiter = session._rateLimiter;
   const executeCommand = tool({
@@ -69496,7 +69580,7 @@ COMMON TESTING PATTERNS:
     http_request: httpRequest,
     document_finding: createDocumentFindingTool(session),
     record_test_result: createRecordTestResultTool(session),
-    test_parameter: createSmartTestTool(session, model || "claude-sonnet-4-20250514"),
+    test_parameter: createSmartTestTool(session, model || "claude-sonnet-4-20250514", onTokenUsage),
     check_testing_coverage: createCheckTestingCoverageTool(session),
     validate_completeness: createValidateCompletenessTool(session),
     enumerate_endpoints: createEnumerateEndpointsTool(session),
@@ -70555,6 +70639,7 @@ function runAgent(opts) {
     objective,
     model,
     onStepFinish,
+    onToolTokenUsage,
     abortSignal,
     silent,
     authConfig,
@@ -70574,7 +70659,7 @@ function runAgent(opts) {
     analyze_scan,
     scratchpad,
     generate_report
-  } = createPentestTools(session, undefined, toolOverride);
+  } = createPentestTools(session, undefined, toolOverride, onToolTokenUsage);
   const document_finding = tool({
     name: "document_finding",
     description: `Document a security finding with severity, impact, and remediation guidance.
@@ -72340,8 +72425,8 @@ function PentestAgentDisplay() {
   const { closePentest } = useCommand();
   const {
     model,
-
-
+    addTokenUsage,
+    resetTokenUsage,
     setThinking,
     isExecuting,
     setIsExecuting
@@ -72506,6 +72591,7 @@ function PentestAgentDisplay() {
     setHasStarted(true);
     setThinking(true);
     setIsExecuting(true);
+    resetTokenUsage();
     const controller = new AbortController;
     setAbortController(controller);
     try {
@@ -72516,8 +72602,12 @@ function PentestAgentDisplay() {
       abortSignal: controller.signal,
       sessionConfig,
       onStepFinish: ({ usage }) => {
-        const
-
+        const inputTokens = usage.inputTokens ?? 0;
+        const outputTokens = usage.outputTokens ?? 0;
+        addTokenUsage(inputTokens, outputTokens);
+      },
+      onToolTokenUsage: (inputTokens, outputTokens) => {
+        addTokenUsage(inputTokens, outputTokens);
       }
     });
     setSessionPath(result.session.rootPath);
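On the UI side, both streams of usage now land in the same accumulator: per-step usage arrives through `onStepFinish`, and usage from nested tool-internal model calls arrives through `onToolTokenUsage`. A compact sketch of that wiring; the `runPentestAgent` name and the options shape are illustrative stand-ins for the agent entry point used in the diff.

```ts
type AddTokenUsage = (inputTokens: number, outputTokens: number) => void;

interface StepUsage {
  inputTokens?: number;
  outputTokens?: number;
}

// Both callbacks feed the same accumulator, so the footer's ↓/↑/Σ counters
// also include model calls made from inside tools.
function buildUsageCallbacks(addTokenUsage: AddTokenUsage) {
  return {
    onStepFinish: ({ usage }: { usage: StepUsage }) => {
      addTokenUsage(usage.inputTokens ?? 0, usage.outputTokens ?? 0);
    },
    onToolTokenUsage: (inputTokens: number, outputTokens: number) => {
      addTokenUsage(inputTokens, outputTokens);
    },
  };
}

// e.g. runPentestAgent({ ...buildUsageCallbacks(addTokenUsage), abortSignal: controller.signal });
```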
@@ -72535,7 +72625,6 @@ Path: ${result.session.rootPath}`,
     for await (const delta of result.fullStream) {
       if (delta.type === "text-delta") {
         currentAssistantText += delta.text;
-        addTokens(1);
         const lastMessage = allMessages[allMessages.length - 1];
         if (lastMessage && lastMessage.role === "assistant") {
           lastMessage.content = currentAssistantText;
@@ -74827,7 +74916,7 @@ If resuming from summarization, review the assets in the session assets folder a
 import { join as join5 } from "path";
 import { writeFileSync as writeFileSync5, mkdirSync as mkdirSync5, existsSync as existsSync10 } from "fs";
 function runAgent2(opts) {
-  const { target, model, onStepFinish, abortSignal } = opts;
+  const { target, model, onStepFinish, onToolTokenUsage, abortSignal } = opts;
   const session = opts.session || createSession(target);
   const subagentId = `attack-surface-${nanoid3(6)}`;
   console.log(`Created attack surface session: ${session.id}`);
@@ -74836,7 +74925,7 @@ function runAgent2(opts) {
   if (!existsSync10(assetsPath)) {
     mkdirSync5(assetsPath, { recursive: true });
   }
-  const { analyze_scan, execute_command, http_request } = createPentestTools(session, model);
+  const { analyze_scan, execute_command, http_request } = createPentestTools(session, model, undefined, onToolTokenUsage);
   const document_asset = tool({
     name: "document_asset",
     description: `Document a discovered asset during attack surface analysis.
@@ -75133,13 +75222,14 @@ function runAgent3(opts) {
     onSubagentSpawn,
     onSubagentMessage,
     onSubagentComplete,
+    onSubagentTokenUsage,
     session: sessionProp
   } = opts;
   const session = sessionProp || createSession(target, undefined, undefined, sessionConfig);
   const logger = new Logger(session);
   logger.log(`Created thorough pentest session: ${session.id}`);
   logger.log(`Session path: ${session.rootPath}`);
-  const tools2 = createOrchestratorTools(session, model, abortSignal, onSubagentSpawn, onSubagentMessage, onSubagentComplete, logger);
+  const tools2 = createOrchestratorTools(session, model, abortSignal, onSubagentSpawn, onSubagentMessage, onSubagentComplete, onSubagentTokenUsage, logger);
   const enhancedPrompt = `
 TARGET: ${target}
 
@@ -75175,7 +75265,7 @@ Begin by using the get_attack_surface tool to map the complete attack surface of
   streamResult.session = session;
   return { streamResult, session };
 }
-function createOrchestratorTools(session, model, abortSignal, onSubagentSpawn, onSubagentMessage, onSubagentComplete, logger) {
+function createOrchestratorTools(session, model, abortSignal, onSubagentSpawn, onSubagentMessage, onSubagentComplete, onSubagentTokenUsage, logger) {
   const getAttackSurface = tool({
     name: "get_attack_surface",
     description: `Run the attack surface analysis agent to discover all assets and identify targets.
@@ -75207,7 +75297,19 @@ Use this as the FIRST step in your thorough penetration test.`,
         target,
         objective,
         model,
-        abortSignal
+        abortSignal,
+        onStepFinish: ({ usage }) => {
+          if (onSubagentTokenUsage) {
+            const inputTokens = usage.inputTokens ?? 0;
+            const outputTokens = usage.outputTokens ?? 0;
+            onSubagentTokenUsage(subagentId, inputTokens, outputTokens);
+          }
+        },
+        onToolTokenUsage: (inputTokens, outputTokens) => {
+          if (onSubagentTokenUsage) {
+            onSubagentTokenUsage(subagentId, inputTokens, outputTokens);
+          }
+        }
       });
       const allMessages = [];
       let currentAssistantText = "";
@@ -75382,7 +75484,19 @@ You can spawn multiple agents in parallel - they will run concurrently.`,
         target: targetInfo.target,
         objective: targetInfo.objective,
         model,
-        abortSignal
+        abortSignal,
+        onStepFinish: ({ usage }) => {
+          if (onSubagentTokenUsage) {
+            const inputTokens = usage.inputTokens ?? 0;
+            const outputTokens = usage.outputTokens ?? 0;
+            onSubagentTokenUsage(subagentId, inputTokens, outputTokens);
+          }
+        },
+        onToolTokenUsage: (inputTokens, outputTokens) => {
+          if (onSubagentTokenUsage) {
+            onSubagentTokenUsage(subagentId, inputTokens, outputTokens);
+          }
+        }
       });
       const allMessages = [];
       let currentAssistantText = "";
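Inside the orchestrator, every spawned subagent reports both its step usage and its nested tool usage through a single `onSubagentTokenUsage(subagentId, input, output)` callback; in the diff the UI hook simply forwards those numbers to `addTokenUsage` and ignores the id. The sketch below additionally keeps a per-subagent breakdown, which is an illustrative extension, not something the diff itself does.

```ts
type SubagentUsageHandler = (subagentId: string, inputTokens: number, outputTokens: number) => void;

// Aggregate per-subagent usage while also rolling everything into the session total.
function createSubagentUsageTracker(report: (input: number, output: number) => void) {
  const perSubagent = new Map<string, { inputTokens: number; outputTokens: number }>();

  const onSubagentTokenUsage: SubagentUsageHandler = (subagentId, inputTokens, outputTokens) => {
    const prev = perSubagent.get(subagentId) ?? { inputTokens: 0, outputTokens: 0 };
    perSubagent.set(subagentId, {
      inputTokens: prev.inputTokens + inputTokens,
      outputTokens: prev.outputTokens + outputTokens,
    });
    report(inputTokens, outputTokens); // forward to the shared accumulator (addTokenUsage in the diff)
  };

  return { onSubagentTokenUsage, perSubagent };
}

// e.g. const { onSubagentTokenUsage } = createSubagentUsageTracker(addTokenUsage);
```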
@@ -75709,8 +75823,8 @@ function useThoroughPentestAgent() {
   const [abortController, setAbortController] = import_react22.useState(null);
   const {
     model,
-
-
+    addTokenUsage,
+    resetTokenUsage,
     setThinking,
     isExecuting,
     setIsExecuting
@@ -75738,6 +75852,7 @@ function useThoroughPentestAgent() {
     setHasStarted(true);
     setThinking(true);
     setIsExecuting(true);
+    resetTokenUsage();
     const controller = new AbortController;
     setAbortController(controller);
     try {
@@ -75747,8 +75862,12 @@ function useThoroughPentestAgent() {
       abortSignal: controller.signal,
       sessionConfig,
       onStepFinish: ({ usage }) => {
-        const
-
+        const inputTokens = usage.inputTokens ?? 0;
+        const outputTokens = usage.outputTokens ?? 0;
+        addTokenUsage(inputTokens, outputTokens);
+      },
+      onSubagentTokenUsage: (subagentId, input, output) => {
+        addTokenUsage(input, output);
       },
       onSubagentSpawn: (subagentInfo) => {
         setSubagents((prev) => [
@@ -75859,7 +75978,6 @@ Mode: Pentest (Orchestrator)`,
     await consumeStream2(result, {
       onTextDelta: (delta) => {
         currentAssistantText += delta.text;
-        addTokens(1);
         const lastMessage = allMessages[allMessages.length - 1];
         if (lastMessage && lastMessage.role === "assistant") {
           lastMessage.content = currentAssistantText;