pentesting 0.2.2 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +14 -17
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -1374,15 +1374,12 @@ const { chromium } = require('playwright');
 }
 
 // src/config/constants.ts
-var APP_VERSION = "0.2.2";
+var APP_VERSION = "0.2.3";
 var APP_DESCRIPTION = "Autonomous Penetration Testing AI Agent";
 var LLM_API_KEY = process.env.PENTEST_API_KEY || process.env.ANTHROPIC_API_KEY || "";
-var LLM_BASE_URL = process.env.PENTEST_BASE_URL ||
+var LLM_BASE_URL = process.env.PENTEST_BASE_URL || void 0;
 var LLM_MODEL = process.env.PENTEST_MODEL || "claude-sonnet-4-20250514";
 var LLM_MAX_TOKENS = parseInt(process.env.PENTEST_MAX_TOKENS || "16384", 10);
-var CLAUDE_MODEL = LLM_MODEL;
-var CLAUDE_MAX_TOKENS = LLM_MAX_TOKENS;
-var ANTHROPIC_BASE_URL = LLM_BASE_URL;
 var AGENT_CONFIG = {
   maxIterations: 200,
   maxToolCallsPerIteration: 10,
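For context, a minimal sketch (not part of the package) of how the 0.2.3 constants above resolve at load time; the assignments simply restate the added lines in this hunk:

// Sketch of the constant resolution shown in the hunk above (JavaScript).
// Nothing here is new behavior; it mirrors the added lines for readability.
const LLM_API_KEY = process.env.PENTEST_API_KEY || process.env.ANTHROPIC_API_KEY || "";
const LLM_BASE_URL = process.env.PENTEST_BASE_URL || void 0; // undefined, not "", when unset
const LLM_MODEL = process.env.PENTEST_MODEL || "claude-sonnet-4-20250514";
const LLM_MAX_TOKENS = parseInt(process.env.PENTEST_MAX_TOKENS || "16384", 10);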
@@ -1889,8 +1886,8 @@ async function compactHistory(client, messages, keepRecent = 4) {
     return `[${msg.role.toUpperCase()}]: ${content}`;
   }).join("\n\n");
   const response = await client.messages.create({
-    model:
-    max_tokens:
+    model: LLM_MODEL,
+    max_tokens: LLM_MAX_TOKENS,
     system: COMPACTION_PROMPT,
     messages: [{
       role: "user",
@@ -2983,8 +2980,8 @@ var AutonomousHackingAgent = class extends EventEmitter3 {
   constructor(apiKey, config) {
     super();
    this.client = new Anthropic({
-      apiKey: apiKey || LLM_API_KEY || process.env.
-      baseURL:
+      apiKey: apiKey || LLM_API_KEY || process.env.PENTEST_API_KEY,
+      baseURL: LLM_BASE_URL
    });
    this.config = { ...AGENT_CONFIG, ...config };
    this.tools = ALL_TOOLS;
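The remaining hunks all make the same substitution: partially hard-coded model fields on messages.create calls are replaced with the shared LLM_MODEL / LLM_MAX_TOKENS constants. A hedged sketch of the pattern those call sites converge on, assuming the bundled Anthropic class is the @anthropic-ai/sdk client (the constructor shape in the hunk above matches it); the demo function and placeholder prompt are illustrative only:

// Sketch only: how the constructor change above plus the constants hunk
// combine into one env-driven Anthropic request.
const { Anthropic } = require("@anthropic-ai/sdk");

const client = new Anthropic({
  apiKey: process.env.PENTEST_API_KEY,              // LLM_API_KEY in the bundle
  baseURL: process.env.PENTEST_BASE_URL || void 0   // LLM_BASE_URL; undefined keeps the SDK default
});

async function demo() {
  // Same shape as the messages.create call sites updated in the hunks below.
  const response = await client.messages.create({
    model: process.env.PENTEST_MODEL || "claude-sonnet-4-20250514",      // LLM_MODEL
    max_tokens: parseInt(process.env.PENTEST_MAX_TOKENS || "16384", 10), // LLM_MAX_TOKENS
    messages: [{ role: "user", content: "ping" }]                        // placeholder content
  });
  console.log(response.stop_reason);
}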
@@ -3238,7 +3235,7 @@ Current situation:
 What went wrong and what different approach should be tried?
 `;
    const response = await this.client.messages.create({
-      model:
+      model: LLM_MODEL,
      max_tokens: 4096,
      messages: [{ role: "user", content: reflectionPrompt }]
    });
@@ -3386,8 +3383,8 @@ Goal: Deep penetration to obtain root/system privileges, extract internal data,
    }
    const response = await withRetry(
      () => this.client.messages.create({
-        model:
-        max_tokens:
+        model: LLM_MODEL,
+        max_tokens: LLM_MAX_TOKENS,
        system: systemPrompt,
        tools: this.tools,
        messages
@@ -3674,8 +3671,8 @@ ${this.state.findings.filter((f) => f.severity !== "info").map((f) => `- Address
    try {
      const systemPrompt = this.buildContextualPrompt();
      const response = await this.client.messages.create({
-        model:
-        max_tokens:
+        model: LLM_MODEL,
+        max_tokens: LLM_MAX_TOKENS,
        system: systemPrompt,
        messages: this.state.history,
        tools: this.tools
@@ -3707,8 +3704,8 @@ ${this.state.findings.filter((f) => f.severity !== "info").map((f) => `- Address
      }
      if (hasToolCalls && response.stop_reason === "tool_use") {
        const followUp = await this.client.messages.create({
-          model:
-          max_tokens:
+          model: LLM_MODEL,
+          max_tokens: LLM_MAX_TOKENS,
          system: systemPrompt,
          messages: this.state.history,
          tools: this.tools
@@ -4765,7 +4762,7 @@ ${chalk.hex(THEME.status.warning)("Examples:")}
 
 ${chalk.hex(THEME.status.warning)("Environment:")}
 
-${chalk.hex(THEME.text.accent)("
+${chalk.hex(THEME.text.accent)("PENTEST_API_KEY")} Required - LLM API key
 ${chalk.hex(THEME.text.accent)("PENTEST_MODEL")} Optional - Model override
 
 ${chalk.hex(THEME.text.muted)("For ethical hacking and authorized testing only.")}