@pensar/apex 1.0.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/assets/wordlists/LICENSE +21 -0
  2. package/assets/wordlists/README.md +36 -0
  3. package/assets/wordlists/common.txt +4751 -0
  4. package/assets/wordlists/large.txt +29999 -0
  5. package/assets/wordlists/tiny.txt +227 -0
  6. package/build/agent-0jmzw6zx.js +18 -0
  7. package/build/{agent-95ysppvr.js → agent-wmynfx37.js} +10 -9
  8. package/build/{auth-h84w23gx.js → auth-p2n15nfp.js} +4 -4
  9. package/build/{authentication-3m2qm7ym.js → authentication-ngxxzcvc.js} +11 -11
  10. package/build/blackboxAgent-v698p7e4.js +18 -0
  11. package/build/{blackboxPentest-kn3y84jf.js → blackboxPentest-7jvcbz3f.js} +15 -15
  12. package/build/{cli-r8cerdwk.js → cli-03z6pswp.js} +1 -1
  13. package/build/{cli-abkgxjcc.js → cli-06q6sz4x.js} +2 -2
  14. package/build/{cli-dfth2beg.js → cli-1tv4x6xh.js} +8 -8
  15. package/build/{cli-ey40xb9a.js → cli-1xdc0keq.js} +1 -1
  16. package/build/{cli-qcsv2e9h.js → cli-5xfjvm8j.js} +1 -1
  17. package/build/{cli-wqeja2k6.js → cli-6negm843.js} +1 -1
  18. package/build/{cli-x5t1x7ts.js → cli-ahmpjgg6.js} +1 -1
  19. package/build/{cli-j4qm285k.js → cli-ch1yfrj1.js} +2 -2
  20. package/build/{cli-dsybj1jp.js → cli-d5mcmzp1.js} +1 -1
  21. package/build/{cli-etrmgpa5.js → cli-gs7zy230.js} +5 -5
  22. package/build/{cli-awjwsbrz.js → cli-mazg4ajq.js} +2 -2
  23. package/build/{cli-mxj8tz9b.js → cli-nwcvgx5m.js} +51 -14
  24. package/build/{cli-r4jzb7aj.js → cli-st6vsbzv.js} +2 -1
  25. package/build/{cli-qvq41y3z.js → cli-t1nkahx2.js} +1 -1
  26. package/build/cli-tp1tqn3k.js +184 -0
  27. package/build/{cli-yz3dzpxd.js → cli-tyrzasca.js} +351 -104
  28. package/build/{cli-sgzbqavm.js → cli-yvnb3k0x.js} +1 -1
  29. package/build/cli.js +28 -27
  30. package/build/{doctor-b7612pzw.js → doctor-8tva8j99.js} +1 -1
  31. package/build/{fixes-5f9xv4yx.js → fixes-p4e3bjcg.js} +4 -4
  32. package/build/{index-b7e18f8m.js → index-esgrht7q.js} +7 -7
  33. package/build/{index-d86fgcjm.js → index-gpvx8y17.js} +4 -4
  34. package/build/{index-h5r11f3q.js → index-m6gw4113.js} +2 -2
  35. package/build/{index-0qfckx3a.js → index-ywrq8mhc.js} +1555 -1449
  36. package/build/{issues-dy4rrtr2.js → issues-8had86x1.js} +4 -4
  37. package/build/{logs-d3sjx7vk.js → logs-3fpd8xq8.js} +4 -4
  38. package/build/pentest-4ty38pt8.js +28 -0
  39. package/build/{pentests-vwekf0zm.js → pentests-tqv6zrqy.js} +4 -4
  40. package/build/{projects-se9jgfb0.js → projects-eh2g7061.js} +4 -4
  41. package/build/{targetedPentest-mhkts702.js → targetedPentest-60td56me.js} +10 -9
  42. package/build/{threatModel-gdvgv7pc.js → threatModel-v46xygtn.js} +10 -9
  43. package/build/{uninstall-11a98j87.js → uninstall-vz6jzt86.js} +1 -1
  44. package/build/{utils-zvr7bcyw.js → utils-8yqe12jr.js} +1 -1
  45. package/package.json +2 -1
  46. package/build/agent-e3r90w2x.js +0 -17
  47. package/build/blackboxAgent-4t68wah3.js +0 -18
  48. package/build/cli-6gtnyaqf.js +0 -109
  49. package/build/pentest-6ctf263k.js +0 -28
@@ -14,33 +14,33 @@ import {
  updateManifestEntryStatus,
  writeAgentManifest,
  writeExecutionMetrics
- } from "./cli-abkgxjcc.js";
+ } from "./cli-06q6sz4x.js";
  import {
  TargetedPentestAgent,
  buildPentestSystemPrompt
- } from "./cli-j4qm285k.js";
+ } from "./cli-ch1yfrj1.js";
  import {
  BlackboxAttackSurfaceAgent
- } from "./cli-etrmgpa5.js";
+ } from "./cli-gs7zy230.js";
  import {
  createThreatModelPrompt
  } from "./cli-fw5r7pfj.js";
  import {
  CodeAgent
- } from "./cli-awjwsbrz.js";
+ } from "./cli-mazg4ajq.js";
  import {
  EndpointSchema
- } from "./cli-ey40xb9a.js";
+ } from "./cli-1xdc0keq.js";
  import {
  FindingsRegistry,
  OffensiveSecurityAgent,
  PLAN_MODE_TOOL_NAMES
- } from "./cli-yz3dzpxd.js";
+ } from "./cli-tyrzasca.js";
  import {
  exports_external,
  hasToolCall,
  init_zod
- } from "./cli-dfth2beg.js";
+ } from "./cli-1tv4x6xh.js";

  // src/core/workflows/pentest.ts
  import { existsSync as existsSync3, readdirSync as readdirSync2, readFileSync as readFileSync3, writeFileSync as writeFileSync2 } from "fs";
@@ -150,6 +150,16 @@ Use this to document each application/service you identify. Persists a JSON reco
  ## document_endpoint
  **This is your primary output tool for endpoints.** Use it to document every endpoint you discover. Each call persists a JSON record to the session's endpoints directory, organized by app.

+ **HARD RULE — call this tool DIRECTLY, one route at a time.** The moment you have enough information about a route to document it, your very next tool call must be \`document_endpoint\` for that route. Do not defer. Do not batch. Do not collect routes into a list to "process later."
+
+ **You MUST NOT, under any circumstances:**
+ - Build a manifest, JSON file, list, or array of routes to document later (e.g. \`cat > /tmp/pages.json << EOF [...] EOF\`).
+ - Write a shell, Python, or any other script whose purpose is to generate \`document_endpoint\` tool calls.
+ - Use a single message to "summarize all the routes I'll document" before documenting them.
+ - Stop documentation early because you "have enough" or it's "getting repetitive." If you discovered N routes, you must produce N \`document_endpoint\` calls.
+
+ These patterns silently truncate at output-token limits and routes get dropped. The only correct workflow is: discover a route → call \`document_endpoint\` for it → discover the next route → call \`document_endpoint\` for it → ... until every route is documented. Repetition is expected and required.
+
  **CRITICAL — endpoint documentation rules:**
  - **One entry per unique route path.** Do NOT create separate entries for different HTTP methods on the same path. If \`/api/users\` supports GET, POST, and DELETE, that is ONE entry with \`method: ["GET", "POST", "DELETE"]\`.
  - **Use \`method: "PAGE"\`** for web pages and views (non-API routes).
@@ -168,9 +178,9 @@ When your objective includes structured output, call \`response\` with your fina
  1. **Orient first** — list files and read key entry points to understand the structure.
  2. **Ignore submodules** — check for a \`.gitmodules\` file or run \`git submodule status\`. Any directories that are git submodules are external dependencies and must be **completely excluded** from your analysis.
  3. **Search, then read** — use grep to locate what you need, then read the relevant files.
- 4. **Document as you go** — call document_app for apps and document_endpoint for every endpoint you discover. Don't batch them up.
+ 4. **Document each item the instant you discover it** — every \`document_app\` / \`document_endpoint\` call must be made directly, one item per call, immediately after you identify it. Never collect items into a manifest, JSON file, or batch script. If you find yourself thinking "let me list all of these and then document them," stop — that pattern silently drops items when output tokens run out.
  5. **Follow the trail** — trace through imports, function calls, and references to build full understanding.
- 6. **Be thorough** — don't stop at the first match. Cover everything relevant to the objective.
+ 6. **Be thorough** — don't stop at the first match. Cover everything relevant to the objective. Repetitive \`document_endpoint\` calls are expected; do not summarize, deduplicate, or shortcut them.
  `;
  var AppInfoSchema = exports_external.object({
  name: exports_external.string().describe("Application or service name"),
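
The added prompt text above mandates a strict one-tool-call-per-route loop. As a rough illustration only: the sketch below shows that loop in TypeScript. The tool names `document_endpoint` / `response` and the `method`, `authRequired`, and `riskLevel` fields appear in this diff; the `path` field name, the `DiscoveredRoute` type, and the `callTool` plumbing are assumptions made for the example, not the package's real API.

```ts
type RiskLevel = "CRITICAL" | "HIGH" | "MEDIUM" | "LOW";

// Hypothetical shape of one discovered route (field name `path` is assumed).
type DiscoveredRoute = {
  path: string;
  method: string[] | "PAGE"; // consolidated HTTP methods, or "PAGE" for views
  authRequired: boolean;
  riskLevel: RiskLevel;
};

async function documentRoutes(
  routes: AsyncIterable<DiscoveredRoute>,
  callTool: (name: string, args: unknown) => Promise<void>, // assumed tool-call hook
): Promise<number> {
  let documented = 0;
  // One document_endpoint call per route, issued as soon as the route is known —
  // never collected into a manifest file or emitted by a generated script.
  for await (const route of routes) {
    await callTool("document_endpoint", route);
    documented += 1;
  }
  // The reported count must equal the number of document_endpoint calls made.
  await callTool("response", { summary: `Documented ${documented} endpoints` });
  return documented;
}
```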
@@ -608,8 +618,20 @@ For each page, call \`document_endpoint\` with:
  - **authRequired**: Whether the page requires authentication
  - **riskLevel**: CRITICAL for admin/auth pages, HIGH for user data, MEDIUM for general, LOW for static/public

- Be thorough—examine every route file, every page directory, every template **within \`${appInfo.location}\`**.
- When finished, call \`response\` with a summary of how many pages you documented.`;
+ ### Required workflow — NO MANIFESTS, NO BATCHING
+ **You MUST call \`document_endpoint\` directly, one page at a time, the moment you identify a route.** It is a hard error to:
+ - Build a JSON file, array, or list of pages-to-document and then "process" it (e.g. \`cat > /tmp/pages.json << EOF [...] EOF\`).
+ - Write a Python or shell script that emits \`document_endpoint\` calls.
+ - Defer documentation until "the end" or until "you have the full picture."
+ - Stop early because the calls feel repetitive or because you've documented "the important ones."
+
+ These patterns hit per-message output-token limits and silently drop pages — usually the alphabetically-later ones. The only correct loop is: identify route → call \`document_endpoint\` → identify next route → call \`document_endpoint\` → ...
+
+ You may use \`list_files\`, \`grep\`, or \`execute_command\` (e.g. \`find ... -name page.tsx\`) to **enumerate** the routes that exist. That enumeration step is fine and encouraged. What is not allowed is using a script to **emit the documentation calls themselves** — those must come directly from you, one tool call per route.
+
+ Be thorough — examine every route file, every page directory, every template **within \`${appInfo.location}\`**. Every page surfaced by your enumeration must result in its own \`document_endpoint\` call. Repetitive calls are expected; do not summarize, deduplicate to "interesting" routes, or skip any.
+
+ When finished, call \`response\` with a summary of how many pages you documented. The reported count must equal the number of successful \`document_endpoint\` calls you made.`;
  }
  function buildApiEndpointsDiscoveryObjective(codebasePath, appInfo) {
  return `# Find All API Endpoints in ${appInfo.name}
@@ -657,8 +679,20 @@ For each **unique route path**, call \`document_endpoint\` with:

  **IMPORTANT — Method consolidation for document_endpoint:** When using the \`document_endpoint\` tool, do NOT create separate entries for different HTTP methods on the same route path. For example, if \`/api/users\` supports GET, POST, and DELETE, document it as ONE entry with \`method: ["GET", "POST", "DELETE"]\` and include pentest objectives covering all methods.

- Be thorough—trace through all route registrations, middleware chains, and controller files **within \`${appInfo.location}\`**.
- When finished, call \`response\` with a summary of how many endpoints you documented.`;
+ ### Required workflow — NO MANIFESTS, NO BATCHING
+ **You MUST call \`document_endpoint\` directly, one route at a time, the moment you identify it.** It is a hard error to:
+ - Build a JSON file, array, or list of endpoints-to-document and then "process" it (e.g. \`cat > /tmp/endpoints.json << EOF [...] EOF\`).
+ - Write a Python or shell script that emits \`document_endpoint\` calls.
+ - Defer documentation until you've "mapped everything out."
+ - Stop early because the calls feel repetitive or because you've covered "the important ones."
+
+ These patterns hit per-message output-token limits and silently drop endpoints — usually the alphabetically-later ones. The only correct loop is: identify route → call \`document_endpoint\` → identify next route → call \`document_endpoint\` → ...
+
+ You may use \`list_files\`, \`grep\`, or \`execute_command\` to **enumerate** routes (e.g. extracting all route registrations into a list to read). That enumeration step is fine. What is not allowed is using a script to **emit the documentation calls themselves** — those must come directly from you, one tool call per unique route path.
+
+ Be thorough — trace through all route registrations, middleware chains, and controller files **within \`${appInfo.location}\`**. Every unique route path your enumeration surfaces must result in its own \`document_endpoint\` call.
+
+ When finished, call \`response\` with a summary of how many endpoints you documented. The reported count must equal the number of successful \`document_endpoint\` calls you made.`;
  }
  function buildCloudResourceEndpointsObjective(codebasePath, appInfo, environments) {
  const envNote = environments?.length ? `
@@ -717,7 +751,10 @@ For each entry point, call \`document_endpoint\` with:
  - **authRequired**: Whether external access requires authentication
  - **riskLevel**: CRITICAL for publicly accessible storage with write access or sensitive data, HIGH for resources with broad IAM permissions, MEDIUM for internal resources, LOW for read-only public assets

- When finished, call \`response\` with a summary of how many entry points you documented.`;
+ ### Required workflow — NO MANIFESTS, NO BATCHING
+ **You MUST call \`document_endpoint\` directly, one entry point at a time, the moment you identify it.** Do not build a JSON file or list of entry points to "process later," and do not write a script that emits \`document_endpoint\` calls. Those patterns hit per-message output-token limits and silently drop entries. The only correct loop is: identify entry point → call \`document_endpoint\` → identify next → call \`document_endpoint\` → ...
+
+ When finished, call \`response\` with a summary of how many entry points you documented. The reported count must equal the number of successful \`document_endpoint\` calls you made.`;
  }

  // src/core/agents/specialized/pentest/planPrompt.ts
@@ -3,7 +3,7 @@ import { spawnSync } from "child_process";
  // package.json
  var package_default = {
  name: "@pensar/apex",
- version: "1.0.0",
+ version: "1.1.0",
  description: "AI-powered penetration testing CLI tool with terminal UI",
  module: "src/tui/index.tsx",
  main: "build/cli.js",
@@ -18,6 +18,7 @@ var package_default = {
  files: [
  "build",
  "bin",
+ "assets",
  "pensar.svg",
  "LICENSE"
  ],
@@ -3,7 +3,7 @@ import {
  ensureValidToken,
  getPensarApiUrl,
  getPensarGatewayUrl
- } from "./cli-qcsv2e9h.js";
+ } from "./cli-5xfjvm8j.js";

  // src/core/auth/signing.ts
  import { createHmac, createHash, randomUUID } from "crypto";
@@ -0,0 +1,184 @@
+ // src/core/agents/specialized/utils.ts
+ import { readFileSync as readFileSync2, existsSync as existsSync2 } from "fs";
+ import { execSync } from "child_process";
+
+ // src/core/assets/wordlists.ts
+ import { existsSync, readFileSync, statSync } from "fs";
+ import { dirname, join, resolve } from "path";
+ import { fileURLToPath } from "url";
+ var RELATIVE = {
+ tiny: "assets/wordlists/tiny.txt",
+ common: "assets/wordlists/common.txt",
+ large: "assets/wordlists/large.txt"
+ };
+ var cached;
+ function findPackageRoot(start) {
+ let dir = start;
+ while (true) {
+ const pkgPath = join(dir, "package.json");
+ if (existsSync(pkgPath)) {
+ try {
+ const pkg = JSON.parse(readFileSync(pkgPath, "utf8"));
+ if (pkg.name === "@pensar/apex" && existsSync(join(dir, "assets", "wordlists"))) {
+ return dir;
+ }
+ } catch {}
+ }
+ const parent = dirname(dir);
+ if (parent === dir)
+ return null;
+ dir = parent;
+ }
+ }
+ function getBundledWordlists() {
+ if (cached !== undefined)
+ return cached;
+ const here = dirname(fileURLToPath(import.meta.url));
+ const root = findPackageRoot(here);
+ if (root === null) {
+ cached = null;
+ return cached;
+ }
+ const paths = {
+ tiny: resolve(root, RELATIVE.tiny),
+ common: resolve(root, RELATIVE.common),
+ large: resolve(root, RELATIVE.large)
+ };
+ for (const p of Object.values(paths)) {
+ if (!existsSync(p) || statSync(p).size === 0) {
+ cached = null;
+ return cached;
+ }
+ }
+ cached = paths;
+ return cached;
+ }
+
+ // src/core/agents/specialized/utils.ts
+ function readOsRelease() {
+ try {
+ const content = readFileSync2("/etc/os-release", "utf8");
+ const lines = content.split(/\r?\n/);
+ const map = {};
+ for (const line of lines) {
+ const idx = line.indexOf("=");
+ if (idx === -1)
+ continue;
+ const key = line.slice(0, idx);
+ let value = line.slice(idx + 1);
+ if (value.startsWith('"') && value.endsWith('"')) {
+ value = value.slice(1, -1);
+ }
+ map[key] = value;
+ }
+ return map;
+ } catch {
+ return {};
+ }
+ }
+ function detectDocker() {
+ try {
+ if (existsSync2("/.dockerenv"))
+ return true;
+ } catch {}
+ try {
+ const cgroup = readFileSync2("/proc/1/cgroup", "utf8");
+ if (/docker|containerd|kubepods/i.test(cgroup))
+ return true;
+ } catch {}
+ return false;
+ }
+ function toolExists(commandName) {
+ try {
+ execSync(`command -v ${commandName} >/dev/null 2>&1`, {
+ stdio: "ignore",
+ shell: "/bin/bash"
+ });
+ return true;
+ } catch {
+ try {
+ execSync(`which ${commandName} >/dev/null 2>&1`, {
+ stdio: "ignore",
+ shell: "/bin/bash"
+ });
+ return true;
+ } catch {
+ return false;
+ }
+ }
+ }
+ function detectEnvironment() {
+ const osRelease = readOsRelease();
+ const prettyName = osRelease["PRETTY_NAME"];
+ const id = osRelease["ID"]?.toLowerCase();
+ const idLike = osRelease["ID_LIKE"];
+ const isKali = Boolean(id && /kali/.test(id) || prettyName && /kali/i.test(prettyName));
+ const isDocker = detectDocker();
+ const toolsToCheck = [
+ "nmap",
+ "gobuster",
+ "sqlmap",
+ "nikto",
+ "hydra",
+ "john",
+ "hashcat",
+ "tcpdump",
+ "tshark",
+ "nc",
+ "socat",
+ "curl",
+ "wget",
+ "git"
+ ];
+ const availableTools = [];
+ const missingTools = [];
+ for (const tool of toolsToCheck) {
+ (toolExists(tool) ? availableTools : missingTools).push(tool);
+ }
+ return { isDocker, isKali, prettyName, idLike, availableTools, missingTools };
+ }
+ function buildBundledAssetsBlock() {
+ const wordlists = getBundledWordlists();
+ if (wordlists === null)
+ return null;
+ return `[BUNDLED ASSETS]
+ This block is your authoritative inventory of wordlist assets shipped with the CLI. When the user asks what wordlists / assets / capabilities you have, answer directly from the entries below — do NOT probe the filesystem (\`ls /usr/share/wordlists\`, \`which gobuster\`, \`find / -name wordlists\`, etc.). Those paths are not where these live; the inventory is here.
+
+ TINY_WORDLIST=${wordlists.tiny} (~200 entries — smoke checks / time-pressured runs)
+ DEFAULT_WORDLIST=${wordlists.common} (~4.7k entries — normal recon, the default)
+ LARGE_WORDLIST=${wordlists.large} (~30k entries — escalation only)
+
+ These paths can be passed as \`-w\` arguments to gobuster/ffuf/dirb/wfuzz/dirsearch, OR iterated line-by-line in shell loops and \`http_request\` scripts. Their presence is NOT a reason to run a wordlist-based tool; choose techniques based on the task.
+
+ If you do invoke a wordlist-based tool: default to DEFAULT_WORDLIST. Use TINY only under explicit time pressure or for a first-pass smoke probe. Use LARGE only after DEFAULT finishes and the target still looks under-mapped, or when the user explicitly asked for a deeper scan. Do NOT chain tiers automatically. Do NOT assume /usr/share/wordlists/* exists — it is missing on macOS, Alpine, most Docker images, and CI.
+ [/BUNDLED ASSETS]`;
+ }
+ function detectOSAndEnhancePrompt(prompt) {
+ try {
+ const env = detectEnvironment();
+ const lines = [];
+ lines.push("[ENV CONTEXT]");
+ lines.push(`OS: ${env.prettyName ?? process.platform} | InDocker: ${env.isDocker ? "yes" : "no"} | Kali: ${env.isKali ? "yes" : "no"}`);
+ if (env.availableTools.length > 0) {
+ lines.push(`Tools available: ${env.availableTools.sort().join(", ")}`);
+ }
+ if (env.missingTools.length > 0) {
+ lines.push(`Tools missing: ${env.missingTools.sort().join(", ")}`);
+ }
+ lines.push("[/ENV CONTEXT]");
+ const bundledAssetsBlock = buildBundledAssetsBlock();
+ if (bundledAssetsBlock !== null) {
+ lines.push("");
+ lines.push(bundledAssetsBlock);
+ }
+ lines.push("");
+ return `${lines.join(`
+ `)}
+ ${prompt}`;
+ } catch (error) {
+ console.error("Error detecting environment:", error);
+ return prompt;
+ }
+ }
+
+ export { toolExists, detectOSAndEnhancePrompt };
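
The new chunk above exports `toolExists` and `detectOSAndEnhancePrompt`. A minimal consumer sketch follows, assuming the build chunk is imported directly (which is how other build files in this diff reference it); the `basePrompt` string is a placeholder, not part of the package.

```ts
import { toolExists, detectOSAndEnhancePrompt } from "./cli-tp1tqn3k.js";

const basePrompt = "You are an offensive security agent..."; // placeholder prompt

// Prepends the [ENV CONTEXT] block (OS, Docker, Kali, tool availability) and,
// when the bundled wordlists resolve on disk, the [BUNDLED ASSETS] block.
const enhanced = detectOSAndEnhancePrompt(basePrompt);

// toolExists shells out to `command -v` (falling back to `which`) to check
// whether a binary is on PATH before it is reported as available.
if (!toolExists("gobuster")) {
  console.warn("gobuster not found on PATH");
}

console.log(enhanced.split("\n").slice(0, 6).join("\n"));
```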