agileflow 3.4.2 → 3.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +5 -0
  2. package/README.md +2 -2
  3. package/lib/drivers/claude-driver.ts +1 -1
  4. package/lib/lazy-require.js +1 -1
  5. package/package.json +1 -1
  6. package/scripts/agent-loop.js +290 -230
  7. package/scripts/check-sessions.js +116 -0
  8. package/scripts/lib/quality-gates.js +35 -8
  9. package/scripts/lib/signal-detectors.js +0 -13
  10. package/scripts/lib/team-events.js +1 -1
  11. package/scripts/lib/tmux-audit-monitor.js +2 -1
  12. package/src/core/commands/ads/audit.md +19 -3
  13. package/src/core/commands/code/accessibility.md +22 -6
  14. package/src/core/commands/code/api.md +22 -6
  15. package/src/core/commands/code/architecture.md +22 -6
  16. package/src/core/commands/code/completeness.md +22 -6
  17. package/src/core/commands/code/legal.md +22 -6
  18. package/src/core/commands/code/logic.md +22 -6
  19. package/src/core/commands/code/performance.md +22 -6
  20. package/src/core/commands/code/security.md +22 -6
  21. package/src/core/commands/code/test.md +22 -6
  22. package/src/core/commands/ideate/features.md +5 -4
  23. package/src/core/commands/ideate/new.md +8 -7
  24. package/src/core/commands/seo/audit.md +21 -5
  25. package/lib/claude-cli-bridge.js +0 -215
  26. package/lib/dashboard-automations.js +0 -130
  27. package/lib/dashboard-git.js +0 -254
  28. package/lib/dashboard-inbox.js +0 -64
  29. package/lib/dashboard-protocol.js +0 -605
  30. package/lib/dashboard-server.js +0 -1296
  31. package/lib/dashboard-session.js +0 -136
  32. package/lib/dashboard-status.js +0 -72
  33. package/lib/dashboard-terminal.js +0 -354
  34. package/lib/dashboard-websocket.js +0 -88
  35. package/scripts/dashboard-serve.js +0 -336
  36. package/src/core/commands/serve.md +0 -127
  37. package/tools/cli/commands/serve.js +0 -492
@@ -0,0 +1,116 @@
1
+ #!/usr/bin/env node
2
+
3
+ /**
4
+ * check-sessions.js - Top-level entry point for monitoring tmux audit sessions
5
+ *
6
+ * Thin wrapper around lib/tmux-audit-monitor.js for easy discovery and calling.
7
+ *
8
+ * Subcommands:
9
+ * list - List all active audit traces
10
+ * status <trace_id> - Check progress of a specific audit trace
11
+ * wait <trace_id> [--timeout=1800] - Block until audit trace completes
12
+ * collect <trace_id> - Collect findings from completed analyzers
13
+ * retry <trace_id> [--analyzer=key] - Retry stalled analyzers
14
+ * kill <trace_id> [--keep-files] - Clean shutdown
15
+ *
16
+ * All output is JSON to stdout. Progress goes to stderr.
17
+ *
18
+ * Usage:
19
+ * node .agileflow/scripts/check-sessions.js list
20
+ * node .agileflow/scripts/check-sessions.js status abc123ef
21
+ * node .agileflow/scripts/check-sessions.js wait abc123ef --timeout=600
22
+ */
23
+
24
+ const USAGE = `Usage: check-sessions.js <subcommand> [trace_id] [options]
25
+
26
+ Subcommands:
27
+ list List all active audit traces
28
+ status <trace_id> Check progress of a specific audit trace
29
+ wait <trace_id> [--timeout=N] Block until audit trace completes (default: 1800s)
30
+ collect <trace_id> Collect findings from completed analyzers
31
+ retry <trace_id> [--analyzer=X] Retry stalled analyzers
32
+ kill <trace_id> [--keep-files] Clean shutdown of audit sessions
33
+
34
+ Options:
35
+ --timeout=N Seconds to wait before timeout (default: 1800)
36
+ --poll=N Seconds between status checks (default: 5)
37
+ --analyzer=KEY Retry specific analyzer only
38
+ --model=MODEL Model override for retry
39
+ --keep-files Don't remove sentinel files on kill
40
+ --help Show this help message`;
41
+
42
+ function jsonOut(obj) {
43
+ console.log(JSON.stringify(obj));
44
+ }
45
+
46
+ if (require.main === module) {
47
+ const args = process.argv.slice(2);
48
+
49
+ if (args.length === 0 || args.includes('--help') || args.includes('-h')) {
50
+ console.error(USAGE);
51
+ process.exit(args.length === 0 ? 1 : 0);
52
+ }
53
+
54
+ // Delegate to tmux-audit-monitor.js
55
+ let monitor;
56
+ try {
57
+ monitor = require('./lib/tmux-audit-monitor');
58
+ } catch (err) {
59
+ jsonOut({ ok: false, error: `Failed to load tmux-audit-monitor: ${err.message}` });
60
+ process.exit(1);
61
+ }
62
+
63
+ const subcommand = args[0];
64
+ const traceId = args[1];
65
+ const restArgs = args.slice(2);
66
+ const rootDir = process.cwd();
67
+ const opts = monitor.parseSubcommandArgs(restArgs);
68
+
69
+ // Validate traceId for commands that require it
70
+ const needsTraceId = ['status', 'wait', 'collect', 'retry', 'kill'];
71
+ if (needsTraceId.includes(subcommand) && !traceId) {
72
+ jsonOut({ ok: false, error: 'trace_id required' });
73
+ process.exit(1);
74
+ }
75
+
76
+ try {
77
+ switch (subcommand) {
78
+ case 'status':
79
+ monitor.cmdStatus(rootDir, traceId);
80
+ break;
81
+
82
+ case 'wait':
83
+ // Not awaited — event loop stays alive via sleep() timer inside cmdWait
84
+ monitor.cmdWait(rootDir, traceId, opts.timeout, opts.poll).catch(err => {
85
+ jsonOut({ ok: false, error: err.message });
86
+ process.exit(1);
87
+ });
88
+ break;
89
+
90
+ case 'collect':
91
+ monitor.cmdCollect(rootDir, traceId);
92
+ break;
93
+
94
+ case 'retry':
95
+ monitor.cmdRetry(rootDir, traceId, opts.analyzer, opts.model);
96
+ break;
97
+
98
+ case 'kill':
99
+ monitor.cmdKill(rootDir, traceId, opts.keepFiles);
100
+ break;
101
+
102
+ case 'list':
103
+ monitor.cmdList(rootDir);
104
+ break;
105
+
106
+ default:
107
+ jsonOut({ ok: false, error: `Unknown subcommand: ${subcommand}` });
108
+ process.exit(1);
109
+ }
110
+ } catch (err) {
111
+ jsonOut({ ok: false, error: err.message });
112
+ process.exit(1);
113
+ }
114
+ }
115
+
116
+ module.exports = { USAGE };
@@ -20,6 +20,7 @@
20
20
  const { spawnSync } = require('child_process');
21
21
  const fs = require('fs');
22
22
  const path = require('path');
23
+ const { buildSpawnArgs } = require('../../lib/validate-commands');
23
24
 
24
25
  // ============================================================================
25
26
  // Constants
@@ -206,20 +207,45 @@ function executeGate(gate, options = {}) {
206
207
  }
207
208
 
208
209
  try {
209
- const result = spawnSync('sh', ['-c', gate.command], {
210
- cwd,
211
- env,
212
- timeout: gate.timeout,
213
- encoding: 'utf8',
214
- maxBuffer: 10 * 1024 * 1024, // 10MB
215
- });
210
+ // Try safe spawn first (no shell injection possible)
211
+ const validation = buildSpawnArgs(gate.command, { strict: false });
212
+ let result;
213
+ if (validation.ok) {
214
+ result = spawnSync(validation.data.file, validation.data.args, {
215
+ cwd,
216
+ env,
217
+ timeout: gate.timeout,
218
+ encoding: 'utf8',
219
+ shell: false,
220
+ maxBuffer: 10 * 1024 * 1024, // 10MB
221
+ });
222
+ // If spawn failed (e.g., shell builtin, command not found), fall back to shell
223
+ if (result.error) {
224
+ result = spawnSync('sh', ['-c', gate.command], {
225
+ cwd,
226
+ env,
227
+ timeout: gate.timeout,
228
+ encoding: 'utf8',
229
+ maxBuffer: 10 * 1024 * 1024, // 10MB
230
+ });
231
+ }
232
+ } else {
233
+ // Fallback for complex commands (pipes, shell builtins, etc.)
234
+ result = spawnSync('sh', ['-c', gate.command], {
235
+ cwd,
236
+ env,
237
+ timeout: gate.timeout,
238
+ encoding: 'utf8',
239
+ maxBuffer: 10 * 1024 * 1024, // 10MB
240
+ });
241
+ }
216
242
 
217
243
  const duration = Date.now() - startTime;
218
244
 
219
245
  // Check exit code
220
246
  if (result.status === 0) {
221
247
  // Check threshold if applicable
222
- if (gate.type === GATE_TYPES.COVERAGE && gate.threshold) {
248
+ if (gate.type === GATE_TYPES.COVERAGE && gate.threshold != null) {
223
249
  const coverage = parseCoverageOutput(result.stdout + result.stderr);
224
250
  if (coverage !== null && coverage < gate.threshold) {
225
251
  return {
@@ -320,6 +346,7 @@ function executeGates(gates, options = {}) {
320
346
  * @returns {number|null} Coverage percentage or null
321
347
  */
322
348
  function parseCoverageOutput(output) {
349
+ if (!output || typeof output !== 'string') return null;
323
350
  // Common coverage patterns
324
351
  const patterns = [
325
352
  /All files[^|]*\|[^|]*\|\s*([\d.]+)/,
@@ -523,18 +523,6 @@ const FEATURE_DETECTORS = {
523
523
  return null;
524
524
  },
525
525
 
526
- serve: signals => {
527
- const { metadata } = signals;
528
- const dashboardEnabled = metadata?.features?.dashboard?.enabled;
529
- if (!dashboardEnabled) return null;
530
- return recommend('serve', {
531
- priority: 'low',
532
- trigger: 'Dashboard server available',
533
- action: 'offer',
534
- phase: 'implementation',
535
- });
536
- },
537
-
538
526
  'ac-verify': signals => {
539
527
  const { story, tests } = signals;
540
528
  if (!story || story.status !== 'in-progress') return null;
@@ -767,7 +755,6 @@ const PHASE_MAP = {
767
755
  'maintain',
768
756
  'packages',
769
757
  'deploy',
770
- 'serve',
771
758
  ],
772
759
  'post-impl': [
773
760
  'ac-verify',
@@ -482,7 +482,7 @@ function saveAggregatedMetrics(rootDir, metrics) {
482
482
  fs.writeFileSync(sessionStatePath, JSON.stringify(state, null, 2) + '\n');
483
483
  }
484
484
 
485
- // Notify listeners (e.g., dashboard server) that metrics were saved
485
+ // Notify listeners that metrics were saved
486
486
  try {
487
487
  teamMetricsEmitter.emit('metrics_saved', { trace_id: metrics.trace_id });
488
488
  } catch (_) {
@@ -520,7 +520,8 @@ function parseSubcommandArgs(args) {
520
520
  const val = arg.split('=')[1];
521
521
  if (val) parsed.analyzer = val;
522
522
  } else if (arg.startsWith('--model=')) {
523
- parsed.model = arg.split('=')[1];
523
+ const val = arg.split('=')[1];
524
+ if (val) parsed.model = val;
524
525
  } else if (arg === '--keep-files') {
525
526
  parsed.keepFiles = true;
526
527
  }
@@ -12,6 +12,8 @@ compact_context:
12
12
  - "MUST detect industry type before deploying analyzers"
13
13
  - "Pass all analyzer outputs to ads-consensus for final report"
14
14
  - "Quality Gates: No optimization without tracking, 3x Kill Rule, Broad Match needs Smart Bidding"
15
+ - "DEPTH GATE: ultradeep/extreme MUST spawn tmux sessions via spawn-audit-sessions.js — NEVER deploy in-process"
16
+ - "Use check-sessions.js to monitor spawned tmux sessions — NEVER write custom polling scripts"
15
17
  state_fields:
16
18
  - depth
17
19
  - platforms
@@ -112,7 +114,7 @@ PLATFORMS = all detected (default) or comma-separated list
112
114
  Parse the JSON output to get `traceId`. Example: `{"ok":true,"traceId":"abc123ef",...}`
113
115
  4. Wait for all analyzers to complete:
114
116
  ```bash
115
- node .agileflow/scripts/lib/tmux-audit-monitor.js wait TRACE_ID --timeout=1800
117
+ node .agileflow/scripts/check-sessions.js wait TRACE_ID --timeout=1800
116
118
  ```
117
119
  - Exit 0 = all complete (JSON results on stdout)
118
120
  - Exit 1 = timeout (partial results on stdout, `missing` array shows what's left)
@@ -131,7 +133,7 @@ Partition-based multi-agent audit. Instead of auditing all platforms together, e
131
133
  ```bash
132
134
  node .agileflow/scripts/spawn-audit-sessions.js --audit=ads --target=account-data --depth=extreme --partitions=google,meta,linkedin --model=MODEL --json
133
135
  ```
134
- 5. Wait and collect results (same as ultradeep - use tmux-audit-monitor.js)
136
+ 5. Wait and collect results (same as ultradeep - use check-sessions.js)
135
137
  6. Run consensus on combined results from all platform partitions
136
138
 
137
139
  **PARTITIONS argument** (only used with DEPTH=extreme):
@@ -141,6 +143,20 @@ Partition-based multi-agent audit. Instead of auditing all platforms together, e
141
143
  | `PARTITIONS=3` | AI merges into 3 platform groups |
142
144
  | `PARTITIONS=google,meta` | Use these exact platforms |
143
145
 
146
+ ---
147
+
148
+ ### DEPTH ROUTING GATE
149
+
150
+ | DEPTH | Route |
151
+ |-------|-------|
152
+ | `quick` or `deep` | Continue to STEP 2 below |
153
+ | `ultradeep` | STOP. Follow ULTRADEEP instructions above. Do NOT proceed to STEP 2. |
154
+ | `extreme` | STOP. Follow EXTREME instructions above. Do NOT proceed to STEP 2. |
155
+
156
+ **CRITICAL**: STEPs 2-4 are for `quick`/`deep` ONLY. For `ultradeep`/`extreme`, the analyzers run in separate tmux sessions — NOT in-process via Task calls. If you deploy Task calls for ultradeep/extreme, you are doing it wrong. Follow the spawn-audit-sessions.js workflow above, then skip to the consensus step with the collected results.
157
+
158
+ ---
159
+
144
160
  Accept data in any format:
145
161
  - **Pasted CSV/text** - Parse columns and metrics
146
162
  - **File path** - Read CSV/JSON export files
@@ -170,7 +186,7 @@ From the account data, identify:
170
186
  - **Active platforms**: Which ad platforms have campaigns
171
187
  - **Account maturity**: New (< 3 months), Growing (3-12 months), Mature (12+ months)
172
188
 
173
- ### STEP 3: Deploy 6 Analyzers in Parallel
189
+ ### STEP 3: Deploy 6 Analyzers in Parallel (quick/deep ONLY)
174
190
 
175
191
  **CRITICAL**: Deploy ALL 6 analyzers in a SINGLE message with multiple Task calls.
176
192
 
@@ -10,6 +10,8 @@ compact_context:
10
10
  - "CRITICAL: Severity scale: BLOCKER | MAJOR | MINOR | ENHANCEMENT"
11
11
  - "MUST parse arguments: TARGET (file/dir), DEPTH (quick/deep/ultradeep), FOCUS (semantic|aria|visual|keyboard|forms|all)"
12
12
  - "Pass consensus all analyzer outputs, let it synthesize the final report"
13
+ - "DEPTH GATE: ultradeep/extreme MUST spawn tmux sessions via spawn-audit-sessions.js — NEVER deploy in-process"
14
+ - "Use check-sessions.js to monitor spawned tmux sessions — NEVER write custom polling scripts"
13
15
  state_fields:
14
16
  - target_path
15
17
  - depth
@@ -70,7 +72,7 @@ Deploy multiple specialized WCAG accessibility analyzers in parallel to find a11
70
72
  | Argument | Values | Default | Description |
71
73
  |----------|--------|---------|-------------|
72
74
  | TARGET | file/directory | `.` | What to analyze |
73
- | DEPTH | quick, deep, ultradeep | quick | quick = focus on BLOCKER/MAJOR, deep = all severities, ultradeep = separate tmux |
75
+ | DEPTH | quick, deep, ultradeep, extreme | quick | quick = focus on BLOCKER/MAJOR, deep = all severities, ultradeep = separate tmux, extreme = partitioned tmux |
74
76
  | FOCUS | semantic,aria,visual,keyboard,forms,all | all | Which analyzers to deploy |
75
77
  | MODEL | haiku, sonnet, opus | haiku | Model for analyzer subagents |
76
78
 
@@ -116,12 +118,12 @@ FOCUS = all (default) or comma-separated list
116
118
  Parse the JSON output to get `traceId`. Example: `{"ok":true,"traceId":"abc123ef",...}`
117
119
  4. Wait for all analyzers to complete:
118
120
  ```bash
119
- node .agileflow/scripts/lib/tmux-audit-monitor.js wait TRACE_ID --timeout=1800
121
+ node .agileflow/scripts/check-sessions.js wait TRACE_ID --timeout=1800
120
122
  ```
121
123
  - Exit 0 = all complete (JSON results on stdout)
122
124
  - Exit 1 = timeout (partial results on stdout, `missing` array shows what's left)
123
- - To check progress without blocking: `node .agileflow/scripts/lib/tmux-audit-monitor.js status TRACE_ID`
124
- - To retry stalled analyzers: `node .agileflow/scripts/lib/tmux-audit-monitor.js retry TRACE_ID`
125
+ - To check progress without blocking: `node .agileflow/scripts/check-sessions.js status TRACE_ID`
126
+ - To retry stalled analyzers: `node .agileflow/scripts/check-sessions.js retry TRACE_ID`
125
127
  5. Parse `results` array from the JSON output. Pass all findings to consensus coordinator (same as deep mode).
126
128
  6. If tmux unavailable (spawn exits code 2), fall back to `DEPTH=deep` with warning
127
129
 
@@ -138,7 +140,7 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
138
140
  ```bash
139
141
  node .agileflow/scripts/spawn-audit-sessions.js --audit=accessibility --target=TARGET --depth=extreme --partitions=dir1,dir2,dir3 --model=MODEL --json
140
142
  ```
141
- 4. Wait and collect results (same as ultradeep - use tmux-audit-monitor.js)
143
+ 4. Wait and collect results (same as ultradeep - use check-sessions.js)
142
144
  5. Run consensus on combined results from all partitions
143
145
 
144
146
  **PARTITIONS argument** (only used with DEPTH=extreme):
@@ -148,7 +150,21 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
148
150
  | `PARTITIONS=5` | AI creates exactly 5 partitions |
149
151
  | `PARTITIONS=src/auth,src/api,lib` | Use these exact directories |
150
152
 
151
- ### STEP 2: Deploy Analyzers in Parallel
153
+ ---
154
+
155
+ ### DEPTH ROUTING GATE
156
+
157
+ | DEPTH | Route |
158
+ |-------|-------|
159
+ | `quick` or `deep` | Continue to STEP 2 below |
160
+ | `ultradeep` | STOP. Follow ULTRADEEP instructions above. Do NOT proceed to STEP 2. |
161
+ | `extreme` | STOP. Follow EXTREME instructions above. Do NOT proceed to STEP 2. |
162
+
163
+ **CRITICAL**: STEP 2 is for `quick`/`deep` ONLY. For `ultradeep`/`extreme`, the analyzers run in separate tmux sessions — NOT in-process via Task calls. If you deploy Task calls for ultradeep/extreme, you are doing it wrong. Follow the spawn-audit-sessions.js workflow above, then skip to the consensus step with the collected results.
164
+
165
+ ---
166
+
167
+ ### STEP 2: Deploy Analyzers in Parallel (quick/deep ONLY)
152
168
 
153
169
  **CRITICAL**: Deploy ALL selected analyzers in a SINGLE message with multiple Task calls.
154
170
 
@@ -10,6 +10,8 @@ compact_context:
10
10
  - "CRITICAL: Severity scale: BREAKING | INCONSISTENT | GAP | POLISH"
11
11
  - "MUST parse arguments: TARGET (file/dir), DEPTH (quick/deep/ultradeep), FOCUS (conventions|errors|versioning|pagination|docs|all)"
12
12
  - "Pass consensus all analyzer outputs, let it synthesize the final report"
13
+ - "DEPTH GATE: ultradeep/extreme MUST spawn tmux sessions via spawn-audit-sessions.js — NEVER deploy in-process"
14
+ - "Use check-sessions.js to monitor spawned tmux sessions — NEVER write custom polling scripts"
13
15
  state_fields:
14
16
  - target_path
15
17
  - depth
@@ -70,7 +72,7 @@ Deploy multiple specialized API quality analyzers in parallel to assess API desi
70
72
  | Argument | Values | Default | Description |
71
73
  |----------|--------|---------|-------------|
72
74
  | TARGET | file/directory | `.` | What to analyze |
73
- | DEPTH | quick, deep, ultradeep | quick | quick = BREAKING/INCONSISTENT only, deep = all severities, ultradeep = separate tmux |
75
+ | DEPTH | quick, deep, ultradeep, extreme | quick | quick = BREAKING/INCONSISTENT only, deep = all severities, ultradeep = separate tmux, extreme = partitioned tmux |
74
76
  | FOCUS | conventions,errors,versioning,pagination,docs,all | all | Which analyzers to deploy |
75
77
  | MODEL | haiku, sonnet, opus | haiku | Model for analyzer subagents |
76
78
 
@@ -115,12 +117,12 @@ FOCUS = all (default) or comma-separated list
115
117
  Parse the JSON output to get `traceId`. Example: `{"ok":true,"traceId":"abc123ef",...}`
116
118
  4. Wait for all analyzers to complete:
117
119
  ```bash
118
- node .agileflow/scripts/lib/tmux-audit-monitor.js wait TRACE_ID --timeout=1800
120
+ node .agileflow/scripts/check-sessions.js wait TRACE_ID --timeout=1800
119
121
  ```
120
122
  - Exit 0 = all complete (JSON results on stdout)
121
123
  - Exit 1 = timeout (partial results on stdout, `missing` array shows what's left)
122
- - To check progress without blocking: `node .agileflow/scripts/lib/tmux-audit-monitor.js status TRACE_ID`
123
- - To retry stalled analyzers: `node .agileflow/scripts/lib/tmux-audit-monitor.js retry TRACE_ID`
124
+ - To check progress without blocking: `node .agileflow/scripts/check-sessions.js status TRACE_ID`
125
+ - To retry stalled analyzers: `node .agileflow/scripts/check-sessions.js retry TRACE_ID`
124
126
  5. Parse `results` array from the JSON output. Pass all findings to consensus coordinator (same as deep mode).
125
127
  6. If tmux unavailable (spawn exits code 2), fall back to `DEPTH=deep` with warning
126
128
 
@@ -137,7 +139,7 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
137
139
  ```bash
138
140
  node .agileflow/scripts/spawn-audit-sessions.js --audit=api --target=TARGET --depth=extreme --partitions=dir1,dir2,dir3 --model=MODEL --json
139
141
  ```
140
- 4. Wait and collect results (same as ultradeep - use tmux-audit-monitor.js)
142
+ 4. Wait and collect results (same as ultradeep - use check-sessions.js)
141
143
  5. Run consensus on combined results from all partitions
142
144
 
143
145
  **PARTITIONS argument** (only used with DEPTH=extreme):
@@ -147,7 +149,21 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
147
149
  | `PARTITIONS=5` | AI creates exactly 5 partitions |
148
150
  | `PARTITIONS=src/auth,src/api,lib` | Use these exact directories |
149
151
 
150
- ### STEP 2: Deploy Analyzers in Parallel
152
+ ---
153
+
154
+ ### DEPTH ROUTING GATE
155
+
156
+ | DEPTH | Route |
157
+ |-------|-------|
158
+ | `quick` or `deep` | Continue to STEP 2 below |
159
+ | `ultradeep` | STOP. Follow ULTRADEEP instructions above. Do NOT proceed to STEP 2. |
160
+ | `extreme` | STOP. Follow EXTREME instructions above. Do NOT proceed to STEP 2. |
161
+
162
+ **CRITICAL**: STEP 2 is for `quick`/`deep` ONLY. For `ultradeep`/`extreme`, the analyzers run in separate tmux sessions — NOT in-process via Task calls. If you deploy Task calls for ultradeep/extreme, you are doing it wrong. Follow the spawn-audit-sessions.js workflow above, then skip to the consensus step with the collected results.
163
+
164
+ ---
165
+
166
+ ### STEP 2: Deploy Analyzers in Parallel (quick/deep ONLY)
151
167
 
152
168
  **CRITICAL**: Deploy ALL selected analyzers in a SINGLE message with multiple Task calls.
153
169
 
@@ -10,6 +10,8 @@ compact_context:
10
10
  - "CRITICAL: Severity scale: STRUCTURAL | DEGRADED | SMELL | STYLE"
11
11
  - "MUST parse arguments: TARGET (file/dir), DEPTH (quick/deep/ultradeep), FOCUS (coupling|layering|complexity|patterns|circular|all)"
12
12
  - "Pass consensus all analyzer outputs, let it synthesize the final report"
13
+ - "DEPTH GATE: ultradeep/extreme MUST spawn tmux sessions via spawn-audit-sessions.js — NEVER deploy in-process"
14
+ - "Use check-sessions.js to monitor spawned tmux sessions — NEVER write custom polling scripts"
13
15
  state_fields:
14
16
  - target_path
15
17
  - depth
@@ -70,7 +72,7 @@ Deploy multiple specialized architecture analyzers in parallel to assess structu
70
72
  | Argument | Values | Default | Description |
71
73
  |----------|--------|---------|-------------|
72
74
  | TARGET | file/directory | `.` | What to analyze |
73
- | DEPTH | quick, deep, ultradeep | quick | quick = STRUCTURAL/DEGRADED only, deep = all severities, ultradeep = separate tmux |
75
+ | DEPTH | quick, deep, ultradeep, extreme | quick | quick = STRUCTURAL/DEGRADED only, deep = all severities, ultradeep = separate tmux, extreme = partitioned tmux |
74
76
  | FOCUS | coupling,layering,complexity,patterns,circular,all | all | Which analyzers to deploy |
75
77
  | MODEL | haiku, sonnet, opus | haiku | Model for analyzer subagents |
76
78
 
@@ -115,12 +117,12 @@ FOCUS = all (default) or comma-separated list
115
117
  Parse the JSON output to get `traceId`. Example: `{"ok":true,"traceId":"abc123ef",...}`
116
118
  4. Wait for all analyzers to complete:
117
119
  ```bash
118
- node .agileflow/scripts/lib/tmux-audit-monitor.js wait TRACE_ID --timeout=1800
120
+ node .agileflow/scripts/check-sessions.js wait TRACE_ID --timeout=1800
119
121
  ```
120
122
  - Exit 0 = all complete (JSON results on stdout)
121
123
  - Exit 1 = timeout (partial results on stdout, `missing` array shows what's left)
122
- - To check progress without blocking: `node .agileflow/scripts/lib/tmux-audit-monitor.js status TRACE_ID`
123
- - To retry stalled analyzers: `node .agileflow/scripts/lib/tmux-audit-monitor.js retry TRACE_ID`
124
+ - To check progress without blocking: `node .agileflow/scripts/check-sessions.js status TRACE_ID`
125
+ - To retry stalled analyzers: `node .agileflow/scripts/check-sessions.js retry TRACE_ID`
124
126
  5. Parse `results` array from the JSON output. Pass all findings to consensus coordinator (same as deep mode).
125
127
  6. If tmux unavailable (spawn exits code 2), fall back to `DEPTH=deep` with warning
126
128
 
@@ -137,7 +139,7 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
137
139
  ```bash
138
140
  node .agileflow/scripts/spawn-audit-sessions.js --audit=architecture --target=TARGET --depth=extreme --partitions=dir1,dir2,dir3 --model=MODEL --json
139
141
  ```
140
- 4. Wait and collect results (same as ultradeep - use tmux-audit-monitor.js)
142
+ 4. Wait and collect results (same as ultradeep - use check-sessions.js)
141
143
  5. Run consensus on combined results from all partitions
142
144
 
143
145
  **PARTITIONS argument** (only used with DEPTH=extreme):
@@ -147,7 +149,21 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
147
149
  | `PARTITIONS=5` | AI creates exactly 5 partitions |
148
150
  | `PARTITIONS=src/auth,src/api,lib` | Use these exact directories |
149
151
 
150
- ### STEP 2: Deploy Analyzers in Parallel
152
+ ---
153
+
154
+ ### DEPTH ROUTING GATE
155
+
156
+ | DEPTH | Route |
157
+ |-------|-------|
158
+ | `quick` or `deep` | Continue to STEP 2 below |
159
+ | `ultradeep` | STOP. Follow ULTRADEEP instructions above. Do NOT proceed to STEP 2. |
160
+ | `extreme` | STOP. Follow EXTREME instructions above. Do NOT proceed to STEP 2. |
161
+
162
+ **CRITICAL**: STEP 2 is for `quick`/`deep` ONLY. For `ultradeep`/`extreme`, the analyzers run in separate tmux sessions — NOT in-process via Task calls. If you deploy Task calls for ultradeep/extreme, you are doing it wrong. Follow the spawn-audit-sessions.js workflow above, then skip to the consensus step with the collected results.
163
+
164
+ ---
165
+
166
+ ### STEP 2: Deploy Analyzers in Parallel (quick/deep ONLY)
151
167
 
152
168
  **CRITICAL**: Deploy ALL selected analyzers in a SINGLE message with multiple Task calls.
153
169
 
@@ -11,6 +11,8 @@ compact_context:
11
11
  - "CRITICAL: Confidence scoring: CONFIRMED (2+ agree), LIKELY (1 with evidence), INVESTIGATE (1 weak)"
12
12
  - "MUST parse arguments: TARGET (file/dir), DEPTH (quick/deep/ultradeep), FOCUS (handlers|routes|api|stubs|state|imports|conditional|all)"
13
13
  - "Pass consensus all analyzer outputs, let it synthesize the final report"
14
+ - "DEPTH GATE: ultradeep/extreme MUST spawn tmux sessions via spawn-audit-sessions.js — NEVER deploy in-process"
15
+ - "Use check-sessions.js to monitor spawned tmux sessions — NEVER write custom polling scripts"
14
16
  state_fields:
15
17
  - target_path
16
18
  - depth
@@ -74,7 +76,7 @@ Deploy multiple specialized completeness analyzers in parallel to find forgotten
74
76
  | Argument | Values | Default | Description |
75
77
  |----------|--------|---------|-------------|
76
78
  | TARGET | file/directory | `.` | What to analyze |
77
- | DEPTH | quick, deep, ultradeep | quick | quick = core 5, deep = all 7, ultradeep = separate tmux sessions |
79
+ | DEPTH | quick, deep, ultradeep, extreme | quick | quick = core 5, deep = all 7, ultradeep = separate tmux sessions, extreme = partitioned tmux sessions |
78
80
  | FOCUS | handlers,routes,api,stubs,state,imports,conditional,all | all | Which analyzers to deploy |
79
81
  | MODEL | haiku, sonnet, opus | haiku | Model for analyzer subagents. Default preserves existing behavior. |
80
82
 
@@ -134,12 +136,12 @@ FOCUS = all (default) or comma-separated list
134
136
  Parse the JSON output to get `traceId`. Example: `{"ok":true,"traceId":"abc123ef",...}`
135
137
  4. Wait for all analyzers to complete:
136
138
  ```bash
137
- node .agileflow/scripts/lib/tmux-audit-monitor.js wait TRACE_ID --timeout=1800
139
+ node .agileflow/scripts/check-sessions.js wait TRACE_ID --timeout=1800
138
140
  ```
139
141
  - Exit 0 = all complete (JSON results on stdout)
140
142
  - Exit 1 = timeout (partial results on stdout, `missing` array shows what's left)
141
- - To check progress without blocking: `node .agileflow/scripts/lib/tmux-audit-monitor.js status TRACE_ID`
142
- - To retry stalled analyzers: `node .agileflow/scripts/lib/tmux-audit-monitor.js retry TRACE_ID`
143
+ - To check progress without blocking: `node .agileflow/scripts/check-sessions.js status TRACE_ID`
144
+ - To retry stalled analyzers: `node .agileflow/scripts/check-sessions.js retry TRACE_ID`
143
145
  5. Parse `results` array from the JSON output. Pass all findings to consensus coordinator (same as deep mode).
144
146
  6. If tmux unavailable (spawn exits code 2), fall back to `DEPTH=deep` with warning
145
147
 
@@ -156,7 +158,7 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
156
158
  ```bash
157
159
  node .agileflow/scripts/spawn-audit-sessions.js --audit=completeness --target=TARGET --depth=extreme --partitions=dir1,dir2,dir3 --model=MODEL --json
158
160
  ```
159
- 4. Wait and collect results (same as ultradeep - use tmux-audit-monitor.js)
161
+ 4. Wait and collect results (same as ultradeep - use check-sessions.js)
160
162
  5. Run consensus on combined results from all partitions
161
163
 
162
164
  **PARTITIONS argument** (only used with DEPTH=extreme):
@@ -166,7 +168,21 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
166
168
  | `PARTITIONS=5` | AI creates exactly 5 partitions |
167
169
  | `PARTITIONS=src/auth,src/api,lib` | Use these exact directories |
168
170
 
169
- ### STEP 2: Deploy Analyzers in Parallel
171
+ ---
172
+
173
+ ### DEPTH ROUTING GATE
174
+
175
+ | DEPTH | Route |
176
+ |-------|-------|
177
+ | `quick` or `deep` | Continue to STEP 2 below |
178
+ | `ultradeep` | STOP. Follow ULTRADEEP instructions above. Do NOT proceed to STEP 2. |
179
+ | `extreme` | STOP. Follow EXTREME instructions above. Do NOT proceed to STEP 2. |
180
+
181
+ **CRITICAL**: STEP 2 is for `quick`/`deep` ONLY. For `ultradeep`/`extreme`, the analyzers run in separate tmux sessions — NOT in-process via Task calls. If you deploy Task calls for ultradeep/extreme, you are doing it wrong. Follow the spawn-audit-sessions.js workflow above, then skip to the consensus step with the collected results.
182
+
183
+ ---
184
+
185
+ ### STEP 2: Deploy Analyzers in Parallel (quick/deep ONLY)
170
186
 
171
187
  **CRITICAL**: Deploy ALL selected analyzers in a SINGLE message with multiple Task calls.
172
188
 
@@ -10,6 +10,8 @@ compact_context:
10
10
  - "CRITICAL: Confidence scoring: CONFIRMED (2+ agree), LIKELY (1 with evidence), INVESTIGATE (1 weak)"
11
11
  - "MUST parse arguments: TARGET (file/dir), DEPTH (quick/deep/ultradeep), FOCUS (privacy|terms|a11y|licensing|consumer|security|ai|content|international|all)"
12
12
  - "Pass consensus all analyzer outputs, let it synthesize the final report"
13
+ - "DEPTH GATE: ultradeep/extreme MUST spawn tmux sessions via spawn-audit-sessions.js — NEVER deploy in-process"
14
+ - "Use check-sessions.js to monitor spawned tmux sessions — NEVER write custom polling scripts"
13
15
  state_fields:
14
16
  - target_path
15
17
  - depth
@@ -73,7 +75,7 @@ Deploy multiple specialized legal risk analyzers in parallel to find compliance
73
75
  | Argument | Values | Default | Description |
74
76
  |----------|--------|---------|-------------|
75
77
  | TARGET | file/directory | `.` | What to analyze |
76
- | DEPTH | quick, deep, ultradeep | quick | quick = core 5, deep = all 9, ultradeep = separate tmux sessions |
78
+ | DEPTH | quick, deep, ultradeep, extreme | quick | quick = core 5, deep = all 9, ultradeep = separate tmux sessions, extreme = partitioned tmux sessions |
77
79
  | FOCUS | privacy,terms,a11y,licensing,consumer,security,ai,content,international,all | all | Which analyzers to deploy |
78
80
  | MODEL | haiku, sonnet, opus | haiku | Model for analyzer subagents. Default preserves existing behavior. |
79
81
 
@@ -124,12 +126,12 @@ FOCUS = all (default) or comma-separated list
124
126
  Parse the JSON output to get `traceId`. Example: `{"ok":true,"traceId":"abc123ef",...}`
125
127
  4. Wait for all analyzers to complete:
126
128
  ```bash
127
- node .agileflow/scripts/lib/tmux-audit-monitor.js wait TRACE_ID --timeout=1800
129
+ node .agileflow/scripts/check-sessions.js wait TRACE_ID --timeout=1800
128
130
  ```
129
131
  - Exit 0 = all complete (JSON results on stdout)
130
132
  - Exit 1 = timeout (partial results on stdout, `missing` array shows what's left)
131
- - To check progress without blocking: `node .agileflow/scripts/lib/tmux-audit-monitor.js status TRACE_ID`
132
- - To retry stalled analyzers: `node .agileflow/scripts/lib/tmux-audit-monitor.js retry TRACE_ID`
133
+ - To check progress without blocking: `node .agileflow/scripts/check-sessions.js status TRACE_ID`
134
+ - To retry stalled analyzers: `node .agileflow/scripts/check-sessions.js retry TRACE_ID`
133
135
  5. Parse `results` array from the JSON output. Pass all findings to consensus coordinator (same as deep mode).
134
136
  6. If tmux unavailable (spawn exits code 2), fall back to `DEPTH=deep` with warning
135
137
 
@@ -146,7 +148,7 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
146
148
  ```bash
147
149
  node .agileflow/scripts/spawn-audit-sessions.js --audit=legal --target=TARGET --depth=extreme --partitions=dir1,dir2,dir3 --model=MODEL --json
148
150
  ```
149
- 4. Wait and collect results (same as ultradeep - use tmux-audit-monitor.js)
151
+ 4. Wait and collect results (same as ultradeep - use check-sessions.js)
150
152
  5. Run consensus on combined results from all partitions
151
153
 
152
154
  **PARTITIONS argument** (only used with DEPTH=extreme):
@@ -156,7 +158,21 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
156
158
  | `PARTITIONS=5` | AI creates exactly 5 partitions |
157
159
  | `PARTITIONS=src/auth,src/api,lib` | Use these exact directories |
158
160
 
159
- ### STEP 2: Deploy Analyzers in Parallel
161
+ ---
162
+
163
+ ### DEPTH ROUTING GATE
164
+
165
+ | DEPTH | Route |
166
+ |-------|-------|
167
+ | `quick` or `deep` | Continue to STEP 2 below |
168
+ | `ultradeep` | STOP. Follow ULTRADEEP instructions above. Do NOT proceed to STEP 2. |
169
+ | `extreme` | STOP. Follow EXTREME instructions above. Do NOT proceed to STEP 2. |
170
+
171
+ **CRITICAL**: STEP 2 is for `quick`/`deep` ONLY. For `ultradeep`/`extreme`, the analyzers run in separate tmux sessions — NOT in-process via Task calls. If you deploy Task calls for ultradeep/extreme, you are doing it wrong. Follow the spawn-audit-sessions.js workflow above, then skip to the consensus step with the collected results.
172
+
173
+ ---
174
+
175
+ ### STEP 2: Deploy Analyzers in Parallel (quick/deep ONLY)
160
176
 
161
177
  **CRITICAL**: Deploy ALL selected analyzers in a SINGLE message with multiple Task calls.
162
178