@pensar/apex 0.0.36-canary.0 → 0.0.39-canary.2f181ec5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/build/pentest.js CHANGED
@@ -39513,6 +39513,12 @@ var OPENROUTER_MODELS = [
  provider: "openrouter",
  contextLength: 64000
  },
+ {
+ id: "mistralai/mistral-large-2512",
+ name: "Mistral Large 3 2512",
+ provider: "openrouter",
+ contextLength: 262144
+ },
  {
  id: "moonshotai/kimi-k2-thinking",
  name: "Kimi K2 Thinking",
@@ -40114,11 +40120,38 @@ async function summarizeConversation(messages, opts, model)
  content: `Summarize this conversation to pass to another agent. This was the system prompt: ${opts.system} `
  }
  ];
- const { text: summary } = await generateText({
+ const { text: summary, usage: summaryUsage } = await generateText({
  model,
  system: `You are a helpful assistant that summarizes conversations to pass to another agent. Review the conversation and system prompt at the end provided by the user.`,
  messages: summarizedMessages
  });
+ if (opts.onStepFinish && summaryUsage) {
+ opts.onStepFinish({
+ text: "",
+ reasoning: undefined,
+ reasoningDetails: [],
+ files: [],
+ sources: [],
+ toolCalls: [],
+ toolResults: [],
+ finishReason: "stop",
+ usage: {
+ inputTokens: summaryUsage.inputTokens ?? 0,
+ outputTokens: summaryUsage.outputTokens ?? 0,
+ totalTokens: summaryUsage.totalTokens ?? 0
+ },
+ warnings: [],
+ request: {},
+ response: {
+ id: "summarization",
+ timestamp: new Date,
+ modelId: ""
+ },
+ providerMetadata: undefined,
+ stepType: "initial",
+ isContinued: false
+ });
+ }
  const originalLength = typeof opts.prompt === "string" ? opts.prompt.length : 0;
  const enhancedPrompt = originalLength > 1e5 ? `Context: The previous conversation contained very long content that was summarized.
 
@@ -40281,6 +40314,7 @@ function streamResponse(opts) {
  } = opts;
  const messagesContainer = { current: messages || [] };
  const providerModel = getProviderModel(model, authConfig);
+ let rateLimitRetryCount = 0;
  try {
  const response = streamText({
  model: providerModel,
@@ -40294,6 +40328,16 @@ function streamResponse(opts) {
  messagesContainer.current = opts2.messages;
  return;
  },
+ onError: async ({ error: error46 }) => {
+ if (error46.message.toLowerCase().includes("too many tokens") || error46.message.toLowerCase().includes("overloaded")) {
+ rateLimitRetryCount++;
+ await new Promise((resolve2) => setTimeout(resolve2, 1000 * rateLimitRetryCount));
+ if (rateLimitRetryCount < 20) {
+ return;
+ }
+ }
+ throw error46;
+ },
  onStepFinish,
  abortSignal,
  activeTools,
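
Note on the hunk above: the new onError handler only retries when the error message contains "too many tokens" or "overloaded"; it waits 1000 * rateLimitRetryCount milliseconds (a linear backoff of 1s, 2s, 3s, ...) and swallows the error for up to 19 retries before rethrowing. In the diff this lives inside streamText's onError callback; below is a minimal, self-contained sketch of the same backoff shape wrapped around a generic async operation. The withRateLimitRetry name is an illustrative assumption, not an exported API; the matched substrings and the 20-attempt cap follow the diff.

// Sketch only: linear-backoff retry mirroring the onError logic above.
async function withRateLimitRetry<T>(operation: () => Promise<T>): Promise<T> {
  let retryCount = 0;
  for (;;) {
    try {
      return await operation();
    } catch (error) {
      const message = error instanceof Error ? error.message.toLowerCase() : "";
      const retryable = message.includes("too many tokens") || message.includes("overloaded");
      retryCount++;
      if (!retryable || retryCount >= 20) {
        throw error;
      }
      // 1000 * retryCount ms: 1s, 2s, 3s, ... as in the diff.
      await new Promise((resolve) => setTimeout(resolve, 1000 * retryCount));
    }
  }
}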
@@ -40312,7 +40356,7 @@ function streamResponse(opts) {
  throw new Error(`Tool ${toolCall.toolName} not found or has no schema`);
  }
  const jsonSchema2 = inputSchema({ toolName: toolCall.toolName });
- const { object: repairedArgs } = await generateObject({
+ const { object: repairedArgs, usage: repairUsage } = await generateObject({
  model: providerModel,
  schema: tool2.inputSchema,
  prompt: [
@@ -40325,6 +40369,33 @@ function streamResponse(opts) {
  ].join(`
  `)
  });
+ if (onStepFinish && repairUsage) {
+ onStepFinish({
+ text: "",
+ reasoning: undefined,
+ reasoningDetails: [],
+ files: [],
+ sources: [],
+ toolCalls: [],
+ toolResults: [],
+ finishReason: "stop",
+ usage: {
+ inputTokens: repairUsage.inputTokens ?? 0,
+ outputTokens: repairUsage.outputTokens ?? 0,
+ totalTokens: repairUsage.totalTokens ?? 0
+ },
+ warnings: [],
+ request: {},
+ response: {
+ id: "tool-repair",
+ timestamp: new Date,
+ modelId: ""
+ },
+ providerMetadata: undefined,
+ stepType: "initial",
+ isContinued: false
+ });
+ }
  return { ...toolCall, input: JSON.stringify(repairedArgs) };
  } catch (repairError) {
  if (!silent) {
@@ -40351,9 +40422,9 @@ function streamResponse(opts) {
  }
  }
  async function generateObjectResponse(opts) {
- const { model, schema, prompt, system, maxTokens, temperature, authConfig } = opts;
+ const { model, schema, prompt, system, maxTokens, temperature, authConfig, onTokenUsage } = opts;
  const providerModel = getProviderModel(model, authConfig);
- const { object: object3 } = await generateObject({
+ const { object: object3, usage } = await generateObject({
  model: providerModel,
  schema,
  prompt,
@@ -40361,6 +40432,9 @@ async function generateObjectResponse(opts) {
  maxTokens,
  temperature
  });
+ if (onTokenUsage && usage) {
+ onTokenUsage(usage.inputTokens ?? 0, usage.outputTokens ?? 0);
+ }
  return object3;
  }
  // src/core/agent/thoroughPentestAgent/prompts.ts
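
The hunks in this region thread token accounting through two channels: the summarization and tool-call-repair calls now report their usage as synthetic onStepFinish steps (response.id "summarization" or "tool-repair", empty text, counts defaulted with ?? 0), and generateObjectResponse gains an optional onTokenUsage(inputTokens, outputTokens) callback. A minimal consumer-side sketch of tallying both channels follows; the accumulator and type names are assumptions for illustration, not part of the package.

// Illustrative accumulator; only the usage field shape and the
// (inputTokens, outputTokens) callback signature come from the diff above.
interface StepUsage {
  inputTokens: number;
  outputTokens: number;
  totalTokens: number;
}

const totals: StepUsage = { inputTokens: 0, outputTokens: 0, totalTokens: 0 };

// Channel 1: synthetic steps such as "summarization" and "tool-repair" arrive
// through the ordinary onStepFinish callback and can be summed like any other step.
function onStepFinish(step: { usage: StepUsage }): void {
  totals.inputTokens += step.usage.inputTokens;
  totals.outputTokens += step.usage.outputTokens;
  totals.totalTokens += step.usage.totalTokens;
}

// Channel 2: generateObjectResponse now invokes onTokenUsage directly.
function onTokenUsage(inputTokens: number, outputTokens: number): void {
  totals.inputTokens += inputTokens;
  totals.outputTokens += outputTokens;
  totals.totalTokens += inputTokens + outputTokens;
}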
@@ -44911,6 +44985,7 @@ function runAgent(opts) {
  objective,
  model,
  onStepFinish,
+ onToolTokenUsage,
  abortSignal,
  silent,
  authConfig,
@@ -44930,7 +45005,7 @@ function runAgent(opts) {
  analyze_scan,
  scratchpad,
  generate_report
- } = createPentestTools(session, undefined, toolOverride);
+ } = createPentestTools(session, undefined, toolOverride, onToolTokenUsage);
  const document_finding = tool({
  name: "document_finding",
  description: `Document a security finding with severity, impact, and remediation guidance.
@@ -46761,7 +46836,7 @@ Example workflow:
  execute: async (params) => recordTestResultCore(session, params)
  });
  }
- async function generateTestStrategy(params, model) {
+ async function generateTestStrategy(params, model, onTokenUsage) {
  const prompt = `You are a penetration testing expert. Generate a concise testing strategy:
 
  Attack Type: ${params.knowledge.name}
@@ -46787,12 +46862,15 @@ Be tactical and specific.`;
  model: providerModel,
  prompt
  });
+ if (onTokenUsage && result.usage) {
+ onTokenUsage(result.usage.inputTokens ?? 0, result.usage.outputTokens ?? 0);
+ }
  return result.text;
  } catch (error46) {
  return params.knowledge.adaptiveStrategy;
  }
  }
- async function generatePayload(params, model) {
+ async function generatePayload(params, model, onTokenUsage) {
  const prompt = `Generate ONE ${params.knowledge.name} payload for testing.
 
  Techniques:
@@ -46816,7 +46894,8 @@ Generate ONE specific payload. Return ONLY JSON:
  const result = await generateObjectResponse({
  model,
  schema: PayloadSchema,
- prompt
+ prompt,
+ onTokenUsage
  });
  return result;
  } catch (error46) {
@@ -46829,7 +46908,7 @@ Generate ONE specific payload. Return ONLY JSON:
  technique: technique.name
  };
  }
- async function analyzeResponse(params, model) {
+ async function analyzeResponse(params, model, onTokenUsage) {
  const prompt = `Analyze this security test response:
 
  Attack: ${params.knowledge.name}
@@ -46857,7 +46936,8 @@ Analyze: Is this vulnerable? Return ONLY JSON:
  const result = await generateObjectResponse({
  model,
  schema: AnalysisSchema,
- prompt
+ prompt,
+ onTokenUsage
  });
  return result;
  } catch (error46) {
@@ -46876,7 +46956,7 @@ Analyze: Is this vulnerable? Return ONLY JSON:
  suggestedNextTest: "Try alternative payload or technique"
  };
  }
- function createSmartTestTool(session, model) {
+ function createSmartTestTool(session, model, onTokenUsage) {
  return tool({
  name: "test_parameter",
  description: `Intelligently test a parameter for a vulnerability using AI-powered adaptive testing.
@@ -46946,7 +47026,7 @@ test_parameter({
  parameter,
  endpoint,
  context
- }, model);
+ }, model, onTokenUsage);
  console.log(`Strategy: ${strategy}`);
  const results = [];
  let vulnerable = false;
@@ -46959,7 +47039,7 @@ test_parameter({
  context: { ...context, parameter, endpoint },
  previousResults: results,
  round
- }, model);
+ }, model, onTokenUsage);
  console.log(` Payload: ${payloadData.payload}`);
  console.log(` Reasoning: ${payloadData.reasoning}`);
  let response;
@@ -46988,7 +47068,7 @@ test_parameter({
  attackType,
  knowledge,
  previousResults: results
- }, model);
+ }, model, onTokenUsage);
  console.log(` Analysis: ${analysis.reasoning}`);
  console.log(` Vulnerable: ${analysis.vulnerable} (confidence: ${analysis.confidence})`);
  results.push({
@@ -47438,7 +47518,7 @@ function wrapCommandWithHeaders(command, headers) {
  }
  return wrapped;
  }
- function createPentestTools(session, model, toolOverride) {
+ function createPentestTools(session, model, toolOverride, onTokenUsage) {
  const offensiveHeaders = getOffensiveHeaders(session);
  const rateLimiter = session._rateLimiter;
  const executeCommand = tool({
@@ -47612,7 +47692,7 @@ COMMON TESTING PATTERNS:
  http_request: httpRequest,
  document_finding: createDocumentFindingTool(session),
  record_test_result: createRecordTestResultTool(session),
- test_parameter: createSmartTestTool(session, model || "claude-sonnet-4-20250514"),
+ test_parameter: createSmartTestTool(session, model || "claude-sonnet-4-20250514", onTokenUsage),
  check_testing_coverage: createCheckTestingCoverageTool(session),
  validate_completeness: createValidateCompletenessTool(session),
  enumerate_endpoints: createEnumerateEndpointsTool(session),
@@ -47628,7 +47708,7 @@ COMMON TESTING PATTERNS:
  import { join as join5 } from "path";
  import { writeFileSync as writeFileSync5, mkdirSync as mkdirSync5, existsSync as existsSync7 } from "fs";
  function runAgent2(opts) {
- const { target, model, onStepFinish, abortSignal } = opts;
+ const { target, model, onStepFinish, onToolTokenUsage, abortSignal } = opts;
  const session = opts.session || createSession(target);
  const subagentId = `attack-surface-${nanoid3(6)}`;
  console.log(`Created attack surface session: ${session.id}`);
@@ -47637,7 +47717,7 @@ function runAgent2(opts) {
  if (!existsSync7(assetsPath)) {
  mkdirSync5(assetsPath, { recursive: true });
  }
- const { analyze_scan, execute_command, http_request } = createPentestTools(session, model);
+ const { analyze_scan, execute_command, http_request } = createPentestTools(session, model, undefined, onToolTokenUsage);
  const document_asset = tool({
  name: "document_asset",
  description: `Document a discovered asset during attack surface analysis.
@@ -47934,13 +48014,14 @@ function runAgent3(opts) {
  onSubagentSpawn,
  onSubagentMessage,
  onSubagentComplete,
+ onSubagentTokenUsage,
  session: sessionProp
  } = opts;
  const session = sessionProp || createSession(target, undefined, undefined, sessionConfig);
  const logger = new Logger(session);
  logger.log(`Created thorough pentest session: ${session.id}`);
  logger.log(`Session path: ${session.rootPath}`);
- const tools2 = createOrchestratorTools(session, model, abortSignal, onSubagentSpawn, onSubagentMessage, onSubagentComplete, logger);
+ const tools2 = createOrchestratorTools(session, model, abortSignal, onSubagentSpawn, onSubagentMessage, onSubagentComplete, onSubagentTokenUsage, logger);
  const enhancedPrompt = `
  TARGET: ${target}
 
@@ -47976,7 +48057,7 @@ Begin by using the get_attack_surface tool to map the complete attack surface of
  streamResult.session = session;
  return { streamResult, session };
  }
- function createOrchestratorTools(session, model, abortSignal, onSubagentSpawn, onSubagentMessage, onSubagentComplete, logger) {
+ function createOrchestratorTools(session, model, abortSignal, onSubagentSpawn, onSubagentMessage, onSubagentComplete, onSubagentTokenUsage, logger) {
  const getAttackSurface = tool({
  name: "get_attack_surface",
  description: `Run the attack surface analysis agent to discover all assets and identify targets.
@@ -48008,7 +48089,19 @@ Use this as the FIRST step in your thorough penetration test.`,
  target,
  objective,
  model,
- abortSignal
+ abortSignal,
+ onStepFinish: ({ usage }) => {
+ if (onSubagentTokenUsage) {
+ const inputTokens = usage.inputTokens ?? 0;
+ const outputTokens = usage.outputTokens ?? 0;
+ onSubagentTokenUsage(subagentId, inputTokens, outputTokens);
+ }
+ },
+ onToolTokenUsage: (inputTokens, outputTokens) => {
+ if (onSubagentTokenUsage) {
+ onSubagentTokenUsage(subagentId, inputTokens, outputTokens);
+ }
+ }
  });
  const allMessages = [];
  let currentAssistantText = "";
@@ -48183,7 +48276,19 @@ You can spawn multiple agents in parallel - they will run concurrently.`,
  target: targetInfo.target,
  objective: targetInfo.objective,
  model,
- abortSignal
+ abortSignal,
+ onStepFinish: ({ usage }) => {
+ if (onSubagentTokenUsage) {
+ const inputTokens = usage.inputTokens ?? 0;
+ const outputTokens = usage.outputTokens ?? 0;
+ onSubagentTokenUsage(subagentId, inputTokens, outputTokens);
+ }
+ },
+ onToolTokenUsage: (inputTokens, outputTokens) => {
+ if (onSubagentTokenUsage) {
+ onSubagentTokenUsage(subagentId, inputTokens, outputTokens);
+ }
+ }
  });
  const allMessages = [];
  let currentAssistantText = "";
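
In the orchestrator hunks above, both get_attack_surface and the spawned pentest subagents now forward usage to onSubagentTokenUsage(subagentId, inputTokens, outputTokens), fed from each subagent's per-step usage as well as from the new onToolTokenUsage channel. One plausible way a caller could aggregate those reports per subagent is sketched below; the Map-based tracker is an assumption for illustration, not package code.

// Hypothetical per-subagent tally keyed by subagentId.
const usageBySubagent = new Map<string, { inputTokens: number; outputTokens: number }>();

function onSubagentTokenUsage(subagentId: string, inputTokens: number, outputTokens: number): void {
  const current = usageBySubagent.get(subagentId) ?? { inputTokens: 0, outputTokens: 0 };
  current.inputTokens += inputTokens;
  current.outputTokens += outputTokens;
  usageBySubagent.set(subagentId, current);
}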
@@ -39513,6 +39513,12 @@ var OPENROUTER_MODELS = [
  provider: "openrouter",
  contextLength: 64000
  },
+ {
+ id: "mistralai/mistral-large-2512",
+ name: "Mistral Large 3 2512",
+ provider: "openrouter",
+ contextLength: 262144
+ },
  {
  id: "moonshotai/kimi-k2-thinking",
  name: "Kimi K2 Thinking",
@@ -40114,11 +40120,38 @@ async function summarizeConversation(messages, opts, model)
  content: `Summarize this conversation to pass to another agent. This was the system prompt: ${opts.system} `
  }
  ];
- const { text: summary } = await generateText({
+ const { text: summary, usage: summaryUsage } = await generateText({
  model,
  system: `You are a helpful assistant that summarizes conversations to pass to another agent. Review the conversation and system prompt at the end provided by the user.`,
  messages: summarizedMessages
  });
+ if (opts.onStepFinish && summaryUsage) {
+ opts.onStepFinish({
+ text: "",
+ reasoning: undefined,
+ reasoningDetails: [],
+ files: [],
+ sources: [],
+ toolCalls: [],
+ toolResults: [],
+ finishReason: "stop",
+ usage: {
+ inputTokens: summaryUsage.inputTokens ?? 0,
+ outputTokens: summaryUsage.outputTokens ?? 0,
+ totalTokens: summaryUsage.totalTokens ?? 0
+ },
+ warnings: [],
+ request: {},
+ response: {
+ id: "summarization",
+ timestamp: new Date,
+ modelId: ""
+ },
+ providerMetadata: undefined,
+ stepType: "initial",
+ isContinued: false
+ });
+ }
  const originalLength = typeof opts.prompt === "string" ? opts.prompt.length : 0;
  const enhancedPrompt = originalLength > 1e5 ? `Context: The previous conversation contained very long content that was summarized.
 
@@ -40266,6 +40299,7 @@ function streamResponse(opts) {
  } = opts;
  const messagesContainer = { current: messages || [] };
  const providerModel = getProviderModel(model, authConfig);
+ let rateLimitRetryCount = 0;
  try {
  const response = streamText({
  model: providerModel,
@@ -40279,6 +40313,16 @@ function streamResponse(opts) {
  messagesContainer.current = opts2.messages;
  return;
  },
+ onError: async ({ error: error46 }) => {
+ if (error46.message.toLowerCase().includes("too many tokens") || error46.message.toLowerCase().includes("overloaded")) {
+ rateLimitRetryCount++;
+ await new Promise((resolve2) => setTimeout(resolve2, 1000 * rateLimitRetryCount));
+ if (rateLimitRetryCount < 20) {
+ return;
+ }
+ }
+ throw error46;
+ },
  onStepFinish,
  abortSignal,
  activeTools,
@@ -40297,7 +40341,7 @@ function streamResponse(opts) {
  throw new Error(`Tool ${toolCall.toolName} not found or has no schema`);
  }
  const jsonSchema2 = inputSchema({ toolName: toolCall.toolName });
- const { object: repairedArgs } = await generateObject({
+ const { object: repairedArgs, usage: repairUsage } = await generateObject({
  model: providerModel,
  schema: tool2.inputSchema,
  prompt: [
@@ -40310,6 +40354,33 @@ function streamResponse(opts) {
  ].join(`
  `)
  });
+ if (onStepFinish && repairUsage) {
+ onStepFinish({
+ text: "",
+ reasoning: undefined,
+ reasoningDetails: [],
+ files: [],
+ sources: [],
+ toolCalls: [],
+ toolResults: [],
+ finishReason: "stop",
+ usage: {
+ inputTokens: repairUsage.inputTokens ?? 0,
+ outputTokens: repairUsage.outputTokens ?? 0,
+ totalTokens: repairUsage.totalTokens ?? 0
+ },
+ warnings: [],
+ request: {},
+ response: {
+ id: "tool-repair",
+ timestamp: new Date,
+ modelId: ""
+ },
+ providerMetadata: undefined,
+ stepType: "initial",
+ isContinued: false
+ });
+ }
  return { ...toolCall, input: JSON.stringify(repairedArgs) };
  } catch (repairError) {
  if (!silent) {
@@ -40336,9 +40407,9 @@ function streamResponse(opts) {
  }
  }
  async function generateObjectResponse(opts) {
- const { model, schema, prompt, system, maxTokens, temperature, authConfig } = opts;
+ const { model, schema, prompt, system, maxTokens, temperature, authConfig, onTokenUsage } = opts;
  const providerModel = getProviderModel(model, authConfig);
- const { object: object3 } = await generateObject({
+ const { object: object3, usage } = await generateObject({
  model: providerModel,
  schema,
  prompt,
@@ -40346,6 +40417,9 @@ async function generateObjectResponse(opts) {
  maxTokens,
  temperature
  });
+ if (onTokenUsage && usage) {
+ onTokenUsage(usage.inputTokens ?? 0, usage.outputTokens ?? 0);
+ }
  return object3;
  }
  // src/core/agent/pentestAgent/prompts.ts
@@ -43697,7 +43771,7 @@ Example workflow:
  execute: async (params) => recordTestResultCore(session, params)
  });
  }
- async function generateTestStrategy(params, model) {
+ async function generateTestStrategy(params, model, onTokenUsage) {
  const prompt = `You are a penetration testing expert. Generate a concise testing strategy:
 
  Attack Type: ${params.knowledge.name}
@@ -43723,12 +43797,15 @@ Be tactical and specific.`;
  model: providerModel,
  prompt
  });
+ if (onTokenUsage && result.usage) {
+ onTokenUsage(result.usage.inputTokens ?? 0, result.usage.outputTokens ?? 0);
+ }
  return result.text;
  } catch (error46) {
  return params.knowledge.adaptiveStrategy;
  }
  }
- async function generatePayload(params, model) {
+ async function generatePayload(params, model, onTokenUsage) {
  const prompt = `Generate ONE ${params.knowledge.name} payload for testing.
 
  Techniques:
@@ -43752,7 +43829,8 @@ Generate ONE specific payload. Return ONLY JSON:
  const result = await generateObjectResponse({
  model,
  schema: PayloadSchema,
- prompt
+ prompt,
+ onTokenUsage
  });
  return result;
  } catch (error46) {
@@ -43765,7 +43843,7 @@ Generate ONE specific payload. Return ONLY JSON:
  technique: technique.name
  };
  }
- async function analyzeResponse(params, model) {
+ async function analyzeResponse(params, model, onTokenUsage) {
  const prompt = `Analyze this security test response:
 
  Attack: ${params.knowledge.name}
@@ -43793,7 +43871,8 @@ Analyze: Is this vulnerable? Return ONLY JSON:
  const result = await generateObjectResponse({
  model,
  schema: AnalysisSchema,
- prompt
+ prompt,
+ onTokenUsage
  });
  return result;
  } catch (error46) {
@@ -43812,7 +43891,7 @@ Analyze: Is this vulnerable? Return ONLY JSON:
  suggestedNextTest: "Try alternative payload or technique"
  };
  }
- function createSmartTestTool(session, model) {
+ function createSmartTestTool(session, model, onTokenUsage) {
  return tool({
  name: "test_parameter",
  description: `Intelligently test a parameter for a vulnerability using AI-powered adaptive testing.
@@ -43882,7 +43961,7 @@ test_parameter({
  parameter,
  endpoint,
  context
- }, model);
+ }, model, onTokenUsage);
  console.log(`Strategy: ${strategy}`);
  const results = [];
  let vulnerable = false;
@@ -43895,7 +43974,7 @@ test_parameter({
  context: { ...context, parameter, endpoint },
  previousResults: results,
  round
- }, model);
+ }, model, onTokenUsage);
  console.log(` Payload: ${payloadData.payload}`);
  console.log(` Reasoning: ${payloadData.reasoning}`);
  let response;
@@ -43924,7 +44003,7 @@ test_parameter({
  attackType,
  knowledge,
  previousResults: results
- }, model);
+ }, model, onTokenUsage);
  console.log(` Analysis: ${analysis.reasoning}`);
  console.log(` Vulnerable: ${analysis.vulnerable} (confidence: ${analysis.confidence})`);
  results.push({
@@ -44374,7 +44453,7 @@ function wrapCommandWithHeaders(command, headers) {
  }
  return wrapped;
  }
- function createPentestTools(session, model, toolOverride) {
+ function createPentestTools(session, model, toolOverride, onTokenUsage) {
  const offensiveHeaders = getOffensiveHeaders(session);
  const rateLimiter = session._rateLimiter;
  const executeCommand = tool({
@@ -44548,7 +44627,7 @@ COMMON TESTING PATTERNS:
  http_request: httpRequest,
  document_finding: createDocumentFindingTool(session),
  record_test_result: createRecordTestResultTool(session),
- test_parameter: createSmartTestTool(session, model || "claude-sonnet-4-20250514"),
+ test_parameter: createSmartTestTool(session, model || "claude-sonnet-4-20250514", onTokenUsage),
  check_testing_coverage: createCheckTestingCoverageTool(session),
  validate_completeness: createValidateCompletenessTool(session),
  enumerate_endpoints: createEnumerateEndpointsTool(session),
@@ -45600,6 +45679,7 @@ function runAgent(opts) {
  objective,
  model,
  onStepFinish,
+ onToolTokenUsage,
  abortSignal,
  silent,
  authConfig,
@@ -45619,7 +45699,7 @@ function runAgent(opts) {
  analyze_scan,
  scratchpad,
  generate_report
- } = createPentestTools(session, undefined, toolOverride);
+ } = createPentestTools(session, undefined, toolOverride, onToolTokenUsage);
  const document_finding = tool({
  name: "document_finding",
  description: `Document a security finding with severity, impact, and remediation guidance.