agileflow 3.4.1 → 3.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/CHANGELOG.md +10 -0
  2. package/README.md +2 -2
  3. package/lib/drivers/claude-driver.ts +1 -1
  4. package/lib/lazy-require.js +1 -1
  5. package/package.json +1 -1
  6. package/scripts/agent-loop.js +290 -230
  7. package/scripts/check-sessions.js +116 -0
  8. package/scripts/lib/audit-registry.js +36 -0
  9. package/scripts/lib/quality-gates.js +35 -8
  10. package/scripts/lib/signal-detectors.js +0 -13
  11. package/scripts/lib/team-events.js +1 -1
  12. package/scripts/lib/tmux-audit-monitor.js +2 -1
  13. package/scripts/lib/tmux-group-colors.js +2 -2
  14. package/scripts/spawn-audit-sessions.js +1 -1
  15. package/src/core/commands/ads/audit.md +84 -6
  16. package/src/core/commands/code/accessibility.md +22 -6
  17. package/src/core/commands/code/api.md +22 -6
  18. package/src/core/commands/code/architecture.md +22 -6
  19. package/src/core/commands/code/completeness.md +22 -6
  20. package/src/core/commands/code/legal.md +22 -6
  21. package/src/core/commands/code/logic.md +22 -6
  22. package/src/core/commands/code/performance.md +22 -6
  23. package/src/core/commands/code/security.md +22 -6
  24. package/src/core/commands/code/test.md +22 -6
  25. package/src/core/commands/ideate/features.md +5 -4
  26. package/src/core/commands/ideate/new.md +8 -7
  27. package/src/core/commands/seo/audit.md +78 -7
  28. package/lib/claude-cli-bridge.js +0 -215
  29. package/lib/dashboard-automations.js +0 -130
  30. package/lib/dashboard-git.js +0 -254
  31. package/lib/dashboard-inbox.js +0 -64
  32. package/lib/dashboard-protocol.js +0 -605
  33. package/lib/dashboard-server.js +0 -1296
  34. package/lib/dashboard-session.js +0 -136
  35. package/lib/dashboard-status.js +0 -72
  36. package/lib/dashboard-terminal.js +0 -354
  37. package/lib/dashboard-websocket.js +0 -88
  38. package/scripts/dashboard-serve.js +0 -336
  39. package/src/core/commands/serve.md +0 -127
  40. package/tools/cli/commands/serve.js +0 -492
@@ -0,0 +1,116 @@
1
+ #!/usr/bin/env node
2
+
3
+ /**
4
+ * check-sessions.js - Top-level entry point for monitoring tmux audit sessions
5
+ *
6
+ * Thin wrapper around lib/tmux-audit-monitor.js for easy discovery and calling.
7
+ *
8
+ * Subcommands:
9
+ * list - List all active audit traces
10
+ * status <trace_id> - Check progress of a specific audit trace
11
+ * wait <trace_id> [--timeout=1800] - Block until audit trace completes
12
+ * collect <trace_id> - Collect findings from completed analyzers
13
+ * retry <trace_id> [--analyzer=key] - Retry stalled analyzers
14
+ * kill <trace_id> [--keep-files] - Clean shutdown
15
+ *
16
+ * All output is JSON to stdout. Progress goes to stderr.
17
+ *
18
+ * Usage:
19
+ * node .agileflow/scripts/check-sessions.js list
20
+ * node .agileflow/scripts/check-sessions.js status abc123ef
21
+ * node .agileflow/scripts/check-sessions.js wait abc123ef --timeout=600
22
+ */
23
+
24
+ const USAGE = `Usage: check-sessions.js <subcommand> [trace_id] [options]
25
+
26
+ Subcommands:
27
+ list List all active audit traces
28
+ status <trace_id> Check progress of a specific audit trace
29
+ wait <trace_id> [--timeout=N] Block until audit trace completes (default: 1800s)
30
+ collect <trace_id> Collect findings from completed analyzers
31
+ retry <trace_id> [--analyzer=X] Retry stalled analyzers
32
+ kill <trace_id> [--keep-files] Clean shutdown of audit sessions
33
+
34
+ Options:
35
+ --timeout=N Seconds to wait before timeout (default: 1800)
36
+ --poll=N Seconds between status checks (default: 5)
37
+ --analyzer=KEY Retry specific analyzer only
38
+ --model=MODEL Model override for retry
39
+ --keep-files Don't remove sentinel files on kill
40
+ --help Show this help message`;
41
+
42
+ function jsonOut(obj) {
43
+ console.log(JSON.stringify(obj));
44
+ }
45
+
46
+ if (require.main === module) {
47
+ const args = process.argv.slice(2);
48
+
49
+ if (args.length === 0 || args.includes('--help') || args.includes('-h')) {
50
+ console.error(USAGE);
51
+ process.exit(args.length === 0 ? 1 : 0);
52
+ }
53
+
54
+ // Delegate to tmux-audit-monitor.js
55
+ let monitor;
56
+ try {
57
+ monitor = require('./lib/tmux-audit-monitor');
58
+ } catch (err) {
59
+ jsonOut({ ok: false, error: `Failed to load tmux-audit-monitor: ${err.message}` });
60
+ process.exit(1);
61
+ }
62
+
63
+ const subcommand = args[0];
64
+ const traceId = args[1];
65
+ const restArgs = args.slice(2);
66
+ const rootDir = process.cwd();
67
+ const opts = monitor.parseSubcommandArgs(restArgs);
68
+
69
+ // Validate traceId for commands that require it
70
+ const needsTraceId = ['status', 'wait', 'collect', 'retry', 'kill'];
71
+ if (needsTraceId.includes(subcommand) && !traceId) {
72
+ jsonOut({ ok: false, error: 'trace_id required' });
73
+ process.exit(1);
74
+ }
75
+
76
+ try {
77
+ switch (subcommand) {
78
+ case 'status':
79
+ monitor.cmdStatus(rootDir, traceId);
80
+ break;
81
+
82
+ case 'wait':
83
+ // Not awaited — event loop stays alive via sleep() timer inside cmdWait
84
+ monitor.cmdWait(rootDir, traceId, opts.timeout, opts.poll).catch(err => {
85
+ jsonOut({ ok: false, error: err.message });
86
+ process.exit(1);
87
+ });
88
+ break;
89
+
90
+ case 'collect':
91
+ monitor.cmdCollect(rootDir, traceId);
92
+ break;
93
+
94
+ case 'retry':
95
+ monitor.cmdRetry(rootDir, traceId, opts.analyzer, opts.model);
96
+ break;
97
+
98
+ case 'kill':
99
+ monitor.cmdKill(rootDir, traceId, opts.keepFiles);
100
+ break;
101
+
102
+ case 'list':
103
+ monitor.cmdList(rootDir);
104
+ break;
105
+
106
+ default:
107
+ jsonOut({ ok: false, error: `Unknown subcommand: ${subcommand}` });
108
+ process.exit(1);
109
+ }
110
+ } catch (err) {
111
+ jsonOut({ ok: false, error: err.message });
112
+ process.exit(1);
113
+ }
114
+ }
115
+
116
+ module.exports = { USAGE };
@@ -188,6 +188,42 @@ const AUDIT_TYPES = {
188
188
  ],
189
189
  },
190
190
 
191
+ seo: {
192
+ name: 'SEO Audit',
193
+ prefix: 'SEO',
194
+ color: '#ff9e64', // rose/orange
195
+ command: 'seo/audit',
196
+ analyzers: {
197
+ technical: { subagent_type: 'seo-analyzer-technical', label: 'Technical SEO' },
198
+ content: { subagent_type: 'seo-analyzer-content', label: 'Content Quality' },
199
+ schema: { subagent_type: 'seo-analyzer-schema', label: 'Schema Markup' },
200
+ images: { subagent_type: 'seo-analyzer-images', label: 'Image Optimization' },
201
+ performance: { subagent_type: 'seo-analyzer-performance', label: 'Core Web Vitals' },
202
+ sitemap: { subagent_type: 'seo-analyzer-sitemap', label: 'Sitemap' },
203
+ },
204
+ consensus: { subagent_type: 'seo-consensus', label: 'SEO Consensus' },
205
+ quick_analyzers: ['technical', 'content', 'schema', 'images', 'performance', 'sitemap'],
206
+ deep_analyzers: ['technical', 'content', 'schema', 'images', 'performance', 'sitemap'],
207
+ },
208
+
209
+ ads: {
210
+ name: 'Ads Audit',
211
+ prefix: 'Ads',
212
+ color: '#89ddff', // ice
213
+ command: 'ads/audit',
214
+ analyzers: {
215
+ google: { subagent_type: 'ads-audit-google', label: 'Google Ads' },
216
+ meta: { subagent_type: 'ads-audit-meta', label: 'Meta Ads' },
217
+ budget: { subagent_type: 'ads-audit-budget', label: 'Budget & Bidding' },
218
+ creative: { subagent_type: 'ads-audit-creative', label: 'Creative Quality' },
219
+ tracking: { subagent_type: 'ads-audit-tracking', label: 'Conversion Tracking' },
220
+ compliance: { subagent_type: 'ads-audit-compliance', label: 'Compliance' },
221
+ },
222
+ consensus: { subagent_type: 'ads-consensus', label: 'Ads Consensus' },
223
+ quick_analyzers: ['google', 'meta', 'budget', 'creative', 'tracking', 'compliance'],
224
+ deep_analyzers: ['google', 'meta', 'budget', 'creative', 'tracking', 'compliance'],
225
+ },
226
+
191
227
  legal: {
192
228
  name: 'Legal Risk',
193
229
  prefix: 'Legal',
@@ -20,6 +20,7 @@
20
20
  const { spawnSync } = require('child_process');
21
21
  const fs = require('fs');
22
22
  const path = require('path');
23
+ const { buildSpawnArgs } = require('../../lib/validate-commands');
23
24
 
24
25
  // ============================================================================
25
26
  // Constants
@@ -206,20 +207,45 @@ function executeGate(gate, options = {}) {
206
207
  }
207
208
 
208
209
  try {
209
- const result = spawnSync('sh', ['-c', gate.command], {
210
- cwd,
211
- env,
212
- timeout: gate.timeout,
213
- encoding: 'utf8',
214
- maxBuffer: 10 * 1024 * 1024, // 10MB
215
- });
210
+ // Try safe spawn first (no shell injection possible)
211
+ const validation = buildSpawnArgs(gate.command, { strict: false });
212
+ let result;
213
+ if (validation.ok) {
214
+ result = spawnSync(validation.data.file, validation.data.args, {
215
+ cwd,
216
+ env,
217
+ timeout: gate.timeout,
218
+ encoding: 'utf8',
219
+ shell: false,
220
+ maxBuffer: 10 * 1024 * 1024, // 10MB
221
+ });
222
+ // If spawn failed (e.g., shell builtin, command not found), fall back to shell
223
+ if (result.error) {
224
+ result = spawnSync('sh', ['-c', gate.command], {
225
+ cwd,
226
+ env,
227
+ timeout: gate.timeout,
228
+ encoding: 'utf8',
229
+ maxBuffer: 10 * 1024 * 1024, // 10MB
230
+ });
231
+ }
232
+ } else {
233
+ // Fallback for complex commands (pipes, shell builtins, etc.)
234
+ result = spawnSync('sh', ['-c', gate.command], {
235
+ cwd,
236
+ env,
237
+ timeout: gate.timeout,
238
+ encoding: 'utf8',
239
+ maxBuffer: 10 * 1024 * 1024, // 10MB
240
+ });
241
+ }
216
242
 
217
243
  const duration = Date.now() - startTime;
218
244
 
219
245
  // Check exit code
220
246
  if (result.status === 0) {
221
247
  // Check threshold if applicable
222
- if (gate.type === GATE_TYPES.COVERAGE && gate.threshold) {
248
+ if (gate.type === GATE_TYPES.COVERAGE && gate.threshold != null) {
223
249
  const coverage = parseCoverageOutput(result.stdout + result.stderr);
224
250
  if (coverage !== null && coverage < gate.threshold) {
225
251
  return {
@@ -320,6 +346,7 @@ function executeGates(gates, options = {}) {
320
346
  * @returns {number|null} Coverage percentage or null
321
347
  */
322
348
  function parseCoverageOutput(output) {
349
+ if (!output || typeof output !== 'string') return null;
323
350
  // Common coverage patterns
324
351
  const patterns = [
325
352
  /All files[^|]*\|[^|]*\|\s*([\d.]+)/,
@@ -523,18 +523,6 @@ const FEATURE_DETECTORS = {
523
523
  return null;
524
524
  },
525
525
 
526
- serve: signals => {
527
- const { metadata } = signals;
528
- const dashboardEnabled = metadata?.features?.dashboard?.enabled;
529
- if (!dashboardEnabled) return null;
530
- return recommend('serve', {
531
- priority: 'low',
532
- trigger: 'Dashboard server available',
533
- action: 'offer',
534
- phase: 'implementation',
535
- });
536
- },
537
-
538
526
  'ac-verify': signals => {
539
527
  const { story, tests } = signals;
540
528
  if (!story || story.status !== 'in-progress') return null;
@@ -767,7 +755,6 @@ const PHASE_MAP = {
767
755
  'maintain',
768
756
  'packages',
769
757
  'deploy',
770
- 'serve',
771
758
  ],
772
759
  'post-impl': [
773
760
  'ac-verify',
@@ -482,7 +482,7 @@ function saveAggregatedMetrics(rootDir, metrics) {
482
482
  fs.writeFileSync(sessionStatePath, JSON.stringify(state, null, 2) + '\n');
483
483
  }
484
484
 
485
- // Notify listeners (e.g., dashboard server) that metrics were saved
485
+ // Notify listeners that metrics were saved
486
486
  try {
487
487
  teamMetricsEmitter.emit('metrics_saved', { trace_id: metrics.trace_id });
488
488
  } catch (_) {
@@ -520,7 +520,8 @@ function parseSubcommandArgs(args) {
520
520
  const val = arg.split('=')[1];
521
521
  if (val) parsed.analyzer = val;
522
522
  } else if (arg.startsWith('--model=')) {
523
- parsed.model = arg.split('=')[1];
523
+ const val = arg.split('=')[1];
524
+ if (val) parsed.model = val;
524
525
  } else if (arg === '--keep-files') {
525
526
  parsed.keepFiles = true;
526
527
  }
@@ -23,8 +23,8 @@ const GROUP_PALETTE = [
23
23
  { name: 'amber', hex: '#e0af68', audit: 'test' },
24
24
  { name: 'violet', hex: '#bb9af7', audit: 'completeness' },
25
25
  { name: 'lime', hex: '#9ece6a', audit: 'legal' },
26
- { name: 'rose', hex: '#ff9e64', audit: null },
27
- { name: 'ice', hex: '#89ddff', audit: null },
26
+ { name: 'rose', hex: '#ff9e64', audit: 'seo' },
27
+ { name: 'ice', hex: '#89ddff', audit: 'ads' },
28
28
  ];
29
29
 
30
30
  /**
@@ -420,7 +420,7 @@ async function spawnAuditInTmux(options) {
420
420
 
421
421
  if (!auditType) {
422
422
  console.error(`Unknown audit type: ${options.audit}`);
423
- console.error(`Valid types: logic, security, performance, test, completeness, legal`);
423
+ console.error(`Valid types: logic, security, performance, test, completeness, legal, seo, ads`);
424
424
  process.exit(1);
425
425
  }
426
426
 
@@ -1,6 +1,6 @@
1
1
  ---
2
2
  description: Full multi-platform paid advertising audit with 6 parallel analyzers, industry detection, weighted Ads Health Score 0-100, and prioritized action plan
3
- argument-hint: "<account-data> [PLATFORMS=all]"
3
+ argument-hint: "<account-data> [DEPTH=quick|deep|ultradeep|extreme] [PLATFORMS=all]"
4
4
  compact_context:
5
5
  priority: high
6
6
  preserve_rules:
@@ -8,10 +8,14 @@ compact_context:
8
8
  - "CRITICAL: Deploy 6 analyzers IN PARALLEL in ONE message with multiple Task calls"
9
9
  - "CRITICAL: Wait for all results before running consensus (use TaskOutput with block=true)"
10
10
  - "CRITICAL: Weighted scoring - Tracking 25%, Wasted Spend 20%, Structure 15%, Creative 15%, Budget 15%, Compliance 10%"
11
+ - "MUST parse DEPTH argument: quick (default), deep, ultradeep, extreme"
11
12
  - "MUST detect industry type before deploying analyzers"
12
13
  - "Pass all analyzer outputs to ads-consensus for final report"
13
14
  - "Quality Gates: No optimization without tracking, 3x Kill Rule, Broad Match needs Smart Bidding"
15
+ - "DEPTH GATE: ultradeep/extreme MUST spawn tmux sessions via spawn-audit-sessions.js — NEVER deploy in-process"
16
+ - "Use check-sessions.js to monitor spawned tmux sessions — NEVER write custom polling scripts"
14
17
  state_fields:
18
+ - depth
15
19
  - platforms
16
20
  - industry_type
17
21
  - analyzers_deployed
@@ -27,7 +31,10 @@ Deploy 6 specialized advertising analyzers in parallel to audit ad accounts, the
27
31
  ## Quick Reference
28
32
 
29
33
  ```
30
- /agileflow:ads:audit <account-data> # Full audit (all platforms detected)
34
+ /agileflow:ads:audit <account-data> # Quick audit (all platforms detected)
35
+ /agileflow:ads:audit <account-data> DEPTH=deep # Deep audit (more thorough)
36
+ /agileflow:ads:audit <account-data> DEPTH=ultradeep # Each analyzer in its own tmux session
37
+ /agileflow:ads:audit <account-data> DEPTH=extreme # Platform-partitioned audit
31
38
  /agileflow:ads:audit <account-data> PLATFORMS=google,meta # Specific platforms only
32
39
  ```
33
40
 
@@ -74,13 +81,81 @@ Deploy 6 specialized advertising analyzers in parallel to audit ad accounts, the
74
81
  | Argument | Values | Default | Description |
75
82
  |----------|--------|---------|-------------|
76
83
  | account-data | Text, file path, or description | Required | Account data to audit |
84
+ | DEPTH | quick, deep, ultradeep, extreme | quick | quick = standard, deep = comprehensive, ultradeep = tmux sessions, extreme = platform partitions |
77
85
  | PLATFORMS | google,meta,linkedin,tiktok,microsoft,youtube | all detected | Limit to specific platforms |
78
86
 
79
87
  ---
80
88
 
81
89
  ## Step-by-Step Process
82
90
 
83
- ### STEP 1: Parse Account Data
91
+ ### STEP 1: Parse Arguments & Account Data
92
+
93
+ ```
94
+ DEPTH = quick (default), deep, ultradeep, or extreme
95
+ PLATFORMS = all detected (default) or comma-separated list
96
+ ```
97
+
98
+ **DEPTH behavior**:
99
+ - `quick` (default): Deploy all 6 analyzers in-process. Standard analysis.
100
+ - `deep`: Deploy all 6 analyzers in-process. More thorough, includes lower-priority findings.
101
+ - `ultradeep`: Spawn each analyzer as a separate Claude Code session in tmux. Requires tmux. Falls back to `deep` if tmux unavailable.
102
+ - `extreme`: Partition-based multi-agent audit. Account data is split by platform and each platform runs ALL analyzers.
103
+
104
+ **ULTRADEEP mode** (DEPTH=ultradeep):
105
+ 1. Show cost estimate:
106
+ ```bash
107
+ node .agileflow/scripts/spawn-audit-sessions.js --audit=ads --target=account-data --model=MODEL --dry-run
108
+ ```
109
+ 2. Confirm with user before launching
110
+ 3. Spawn sessions (use `--json` to capture trace ID):
111
+ ```bash
112
+ node .agileflow/scripts/spawn-audit-sessions.js --audit=ads --target=account-data --model=MODEL --json
113
+ ```
114
+ Parse the JSON output to get `traceId`. Example: `{"ok":true,"traceId":"abc123ef",...}`
115
+ 4. Wait for all analyzers to complete:
116
+ ```bash
117
+ node .agileflow/scripts/check-sessions.js wait TRACE_ID --timeout=1800
118
+ ```
119
+ - Exit 0 = all complete (JSON results on stdout)
120
+ - Exit 1 = timeout (partial results on stdout, `missing` array shows what's left)
121
+ 5. Parse `results` array from the JSON output. Pass all findings to consensus coordinator.
122
+ 6. If tmux unavailable (spawn exits code 2), fall back to `DEPTH=deep` with warning
123
+
124
+ **EXTREME mode** (DEPTH=extreme):
125
+ Partition-based multi-agent audit. Instead of auditing all platforms together, each platform runs ALL 6 analyzers independently.
126
+ 1. Identify active platforms from account data (Google, Meta, LinkedIn, TikTok, etc.)
127
+ 2. Group into partitions by platform (each platform = 1 partition)
128
+ - If user provided PARTITIONS=N (a number), merge smaller platforms into N groups
129
+ - If user provided PARTITIONS=google,meta, use those exact platforms
130
+ 3. Show the partition plan and agent count to the user, confirm before launching:
131
+ Example: "3 platforms x 6 analyzers = 18 agents. Estimated cost: $X. Proceed?"
132
+ 4. Spawn sessions with partitions:
133
+ ```bash
134
+ node .agileflow/scripts/spawn-audit-sessions.js --audit=ads --target=account-data --depth=extreme --partitions=google,meta,linkedin --model=MODEL --json
135
+ ```
136
+ 5. Wait and collect results (same as ultradeep - use check-sessions.js)
137
+ 6. Run consensus on combined results from all platform partitions
138
+
139
+ **PARTITIONS argument** (only used with DEPTH=extreme):
140
+ | Value | Behavior |
141
+ |-------|----------|
142
+ | Not set | AI decides partitions (1 per detected platform) |
143
+ | `PARTITIONS=3` | AI merges into 3 platform groups |
144
+ | `PARTITIONS=google,meta` | Use these exact platforms |
145
+
146
+ ---
147
+
148
+ ### DEPTH ROUTING GATE
149
+
150
+ | DEPTH | Route |
151
+ |-------|-------|
152
+ | `quick` or `deep` | Continue to STEP 2 below |
153
+ | `ultradeep` | STOP. Follow ULTRADEEP instructions above. Do NOT proceed to STEP 2. |
154
+ | `extreme` | STOP. Follow EXTREME instructions above. Do NOT proceed to STEP 2. |
155
+
156
+ **CRITICAL**: STEPs 2-4 are for `quick`/`deep` ONLY. For `ultradeep`/`extreme`, the analyzers run in separate tmux sessions — NOT in-process via Task calls. If you deploy Task calls for ultradeep/extreme, you are doing it wrong. Follow the spawn-audit-sessions.js workflow above, then skip to the consensus step with the collected results.
157
+
158
+ ---
84
159
 
85
160
  Accept data in any format:
86
161
  - **Pasted CSV/text** - Parse columns and metrics
@@ -111,7 +186,7 @@ From the account data, identify:
111
186
  - **Active platforms**: Which ad platforms have campaigns
112
187
  - **Account maturity**: New (< 3 months), Growing (3-12 months), Mature (12+ months)
113
188
 
114
- ### STEP 3: Deploy 6 Analyzers in Parallel
189
+ ### STEP 3: Deploy 6 Analyzers in Parallel (quick/deep ONLY)
115
190
 
116
191
  **CRITICAL**: Deploy ALL 6 analyzers in a SINGLE message with multiple Task calls.
117
192
 
@@ -355,8 +430,11 @@ After consensus completes, show summary and offer next steps:
355
430
 
356
431
  **Quick Usage**:
357
432
  ```
358
- /agileflow:ads:audit <account-data> # Full audit
359
- /agileflow:ads:audit <data> PLATFORMS=google,meta # Specific platforms
433
+ /agileflow:ads:audit <account-data> # Quick audit
434
+ /agileflow:ads:audit <account-data> DEPTH=deep # Deep audit
435
+ /agileflow:ads:audit <account-data> DEPTH=ultradeep # Tmux sessions
436
+ /agileflow:ads:audit <account-data> DEPTH=extreme # Platform partition audit
437
+ /agileflow:ads:audit <data> PLATFORMS=google,meta # Specific platforms
360
438
  ```
361
439
 
362
440
  **What It Does**: Parse data -> Detect industry -> Deploy 6 analyzers in parallel -> Consensus weights scores -> Ads Health Score 0-100 -> Prioritized action plan
@@ -10,6 +10,8 @@ compact_context:
10
10
  - "CRITICAL: Severity scale: BLOCKER | MAJOR | MINOR | ENHANCEMENT"
11
11
  - "MUST parse arguments: TARGET (file/dir), DEPTH (quick/deep/ultradeep), FOCUS (semantic|aria|visual|keyboard|forms|all)"
12
12
  - "Pass consensus all analyzer outputs, let it synthesize the final report"
13
+ - "DEPTH GATE: ultradeep/extreme MUST spawn tmux sessions via spawn-audit-sessions.js — NEVER deploy in-process"
14
+ - "Use check-sessions.js to monitor spawned tmux sessions — NEVER write custom polling scripts"
13
15
  state_fields:
14
16
  - target_path
15
17
  - depth
@@ -70,7 +72,7 @@ Deploy multiple specialized WCAG accessibility analyzers in parallel to find a11
70
72
  | Argument | Values | Default | Description |
71
73
  |----------|--------|---------|-------------|
72
74
  | TARGET | file/directory | `.` | What to analyze |
73
- | DEPTH | quick, deep, ultradeep | quick | quick = focus on BLOCKER/MAJOR, deep = all severities, ultradeep = separate tmux |
75
+ | DEPTH | quick, deep, ultradeep, extreme | quick | quick = focus on BLOCKER/MAJOR, deep = all severities, ultradeep = separate tmux, extreme = partitioned tmux |
74
76
  | FOCUS | semantic,aria,visual,keyboard,forms,all | all | Which analyzers to deploy |
75
77
  | MODEL | haiku, sonnet, opus | haiku | Model for analyzer subagents |
76
78
 
@@ -116,12 +118,12 @@ FOCUS = all (default) or comma-separated list
116
118
  Parse the JSON output to get `traceId`. Example: `{"ok":true,"traceId":"abc123ef",...}`
117
119
  4. Wait for all analyzers to complete:
118
120
  ```bash
119
- node .agileflow/scripts/lib/tmux-audit-monitor.js wait TRACE_ID --timeout=1800
121
+ node .agileflow/scripts/check-sessions.js wait TRACE_ID --timeout=1800
120
122
  ```
121
123
  - Exit 0 = all complete (JSON results on stdout)
122
124
  - Exit 1 = timeout (partial results on stdout, `missing` array shows what's left)
123
- - To check progress without blocking: `node .agileflow/scripts/lib/tmux-audit-monitor.js status TRACE_ID`
124
- - To retry stalled analyzers: `node .agileflow/scripts/lib/tmux-audit-monitor.js retry TRACE_ID`
125
+ - To check progress without blocking: `node .agileflow/scripts/check-sessions.js status TRACE_ID`
126
+ - To retry stalled analyzers: `node .agileflow/scripts/check-sessions.js retry TRACE_ID`
125
127
  5. Parse `results` array from the JSON output. Pass all findings to consensus coordinator (same as deep mode).
126
128
  6. If tmux unavailable (spawn exits code 2), fall back to `DEPTH=deep` with warning
127
129
 
@@ -138,7 +140,7 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
138
140
  ```bash
139
141
  node .agileflow/scripts/spawn-audit-sessions.js --audit=accessibility --target=TARGET --depth=extreme --partitions=dir1,dir2,dir3 --model=MODEL --json
140
142
  ```
141
- 4. Wait and collect results (same as ultradeep - use tmux-audit-monitor.js)
143
+ 4. Wait and collect results (same as ultradeep - use check-sessions.js)
142
144
  5. Run consensus on combined results from all partitions
143
145
 
144
146
  **PARTITIONS argument** (only used with DEPTH=extreme):
@@ -148,7 +150,21 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
148
150
  | `PARTITIONS=5` | AI creates exactly 5 partitions |
149
151
  | `PARTITIONS=src/auth,src/api,lib` | Use these exact directories |
150
152
 
151
- ### STEP 2: Deploy Analyzers in Parallel
153
+ ---
154
+
155
+ ### DEPTH ROUTING GATE
156
+
157
+ | DEPTH | Route |
158
+ |-------|-------|
159
+ | `quick` or `deep` | Continue to STEP 2 below |
160
+ | `ultradeep` | STOP. Follow ULTRADEEP instructions above. Do NOT proceed to STEP 2. |
161
+ | `extreme` | STOP. Follow EXTREME instructions above. Do NOT proceed to STEP 2. |
162
+
163
+ **CRITICAL**: STEP 2 is for `quick`/`deep` ONLY. For `ultradeep`/`extreme`, the analyzers run in separate tmux sessions — NOT in-process via Task calls. If you deploy Task calls for ultradeep/extreme, you are doing it wrong. Follow the spawn-audit-sessions.js workflow above, then skip to the consensus step with the collected results.
164
+
165
+ ---
166
+
167
+ ### STEP 2: Deploy Analyzers in Parallel (quick/deep ONLY)
152
168
 
153
169
  **CRITICAL**: Deploy ALL selected analyzers in a SINGLE message with multiple Task calls.
154
170
 
@@ -10,6 +10,8 @@ compact_context:
10
10
  - "CRITICAL: Severity scale: BREAKING | INCONSISTENT | GAP | POLISH"
11
11
  - "MUST parse arguments: TARGET (file/dir), DEPTH (quick/deep/ultradeep), FOCUS (conventions|errors|versioning|pagination|docs|all)"
12
12
  - "Pass consensus all analyzer outputs, let it synthesize the final report"
13
+ - "DEPTH GATE: ultradeep/extreme MUST spawn tmux sessions via spawn-audit-sessions.js — NEVER deploy in-process"
14
+ - "Use check-sessions.js to monitor spawned tmux sessions — NEVER write custom polling scripts"
13
15
  state_fields:
14
16
  - target_path
15
17
  - depth
@@ -70,7 +72,7 @@ Deploy multiple specialized API quality analyzers in parallel to assess API desi
70
72
  | Argument | Values | Default | Description |
71
73
  |----------|--------|---------|-------------|
72
74
  | TARGET | file/directory | `.` | What to analyze |
73
- | DEPTH | quick, deep, ultradeep | quick | quick = BREAKING/INCONSISTENT only, deep = all severities, ultradeep = separate tmux |
75
+ | DEPTH | quick, deep, ultradeep, extreme | quick | quick = BREAKING/INCONSISTENT only, deep = all severities, ultradeep = separate tmux, extreme = partitioned tmux |
74
76
  | FOCUS | conventions,errors,versioning,pagination,docs,all | all | Which analyzers to deploy |
75
77
  | MODEL | haiku, sonnet, opus | haiku | Model for analyzer subagents |
76
78
 
@@ -115,12 +117,12 @@ FOCUS = all (default) or comma-separated list
115
117
  Parse the JSON output to get `traceId`. Example: `{"ok":true,"traceId":"abc123ef",...}`
116
118
  4. Wait for all analyzers to complete:
117
119
  ```bash
118
- node .agileflow/scripts/lib/tmux-audit-monitor.js wait TRACE_ID --timeout=1800
120
+ node .agileflow/scripts/check-sessions.js wait TRACE_ID --timeout=1800
119
121
  ```
120
122
  - Exit 0 = all complete (JSON results on stdout)
121
123
  - Exit 1 = timeout (partial results on stdout, `missing` array shows what's left)
122
- - To check progress without blocking: `node .agileflow/scripts/lib/tmux-audit-monitor.js status TRACE_ID`
123
- - To retry stalled analyzers: `node .agileflow/scripts/lib/tmux-audit-monitor.js retry TRACE_ID`
124
+ - To check progress without blocking: `node .agileflow/scripts/check-sessions.js status TRACE_ID`
125
+ - To retry stalled analyzers: `node .agileflow/scripts/check-sessions.js retry TRACE_ID`
124
126
  5. Parse `results` array from the JSON output. Pass all findings to consensus coordinator (same as deep mode).
125
127
  6. If tmux unavailable (spawn exits code 2), fall back to `DEPTH=deep` with warning
126
128
 
@@ -137,7 +139,7 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
137
139
  ```bash
138
140
  node .agileflow/scripts/spawn-audit-sessions.js --audit=api --target=TARGET --depth=extreme --partitions=dir1,dir2,dir3 --model=MODEL --json
139
141
  ```
140
- 4. Wait and collect results (same as ultradeep - use tmux-audit-monitor.js)
142
+ 4. Wait and collect results (same as ultradeep - use check-sessions.js)
141
143
  5. Run consensus on combined results from all partitions
142
144
 
143
145
  **PARTITIONS argument** (only used with DEPTH=extreme):
@@ -147,7 +149,21 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
147
149
  | `PARTITIONS=5` | AI creates exactly 5 partitions |
148
150
  | `PARTITIONS=src/auth,src/api,lib` | Use these exact directories |
149
151
 
150
- ### STEP 2: Deploy Analyzers in Parallel
152
+ ---
153
+
154
+ ### DEPTH ROUTING GATE
155
+
156
+ | DEPTH | Route |
157
+ |-------|-------|
158
+ | `quick` or `deep` | Continue to STEP 2 below |
159
+ | `ultradeep` | STOP. Follow ULTRADEEP instructions above. Do NOT proceed to STEP 2. |
160
+ | `extreme` | STOP. Follow EXTREME instructions above. Do NOT proceed to STEP 2. |
161
+
162
+ **CRITICAL**: STEP 2 is for `quick`/`deep` ONLY. For `ultradeep`/`extreme`, the analyzers run in separate tmux sessions — NOT in-process via Task calls. If you deploy Task calls for ultradeep/extreme, you are doing it wrong. Follow the spawn-audit-sessions.js workflow above, then skip to the consensus step with the collected results.
163
+
164
+ ---
165
+
166
+ ### STEP 2: Deploy Analyzers in Parallel (quick/deep ONLY)
151
167
 
152
168
  **CRITICAL**: Deploy ALL selected analyzers in a SINGLE message with multiple Task calls.
153
169
 
@@ -10,6 +10,8 @@ compact_context:
10
10
  - "CRITICAL: Severity scale: STRUCTURAL | DEGRADED | SMELL | STYLE"
11
11
  - "MUST parse arguments: TARGET (file/dir), DEPTH (quick/deep/ultradeep), FOCUS (coupling|layering|complexity|patterns|circular|all)"
12
12
  - "Pass consensus all analyzer outputs, let it synthesize the final report"
13
+ - "DEPTH GATE: ultradeep/extreme MUST spawn tmux sessions via spawn-audit-sessions.js — NEVER deploy in-process"
14
+ - "Use check-sessions.js to monitor spawned tmux sessions — NEVER write custom polling scripts"
13
15
  state_fields:
14
16
  - target_path
15
17
  - depth
@@ -70,7 +72,7 @@ Deploy multiple specialized architecture analyzers in parallel to assess structu
70
72
  | Argument | Values | Default | Description |
71
73
  |----------|--------|---------|-------------|
72
74
  | TARGET | file/directory | `.` | What to analyze |
73
- | DEPTH | quick, deep, ultradeep | quick | quick = STRUCTURAL/DEGRADED only, deep = all severities, ultradeep = separate tmux |
75
+ | DEPTH | quick, deep, ultradeep, extreme | quick | quick = STRUCTURAL/DEGRADED only, deep = all severities, ultradeep = separate tmux, extreme = partitioned tmux |
74
76
  | FOCUS | coupling,layering,complexity,patterns,circular,all | all | Which analyzers to deploy |
75
77
  | MODEL | haiku, sonnet, opus | haiku | Model for analyzer subagents |
76
78
 
@@ -115,12 +117,12 @@ FOCUS = all (default) or comma-separated list
115
117
  Parse the JSON output to get `traceId`. Example: `{"ok":true,"traceId":"abc123ef",...}`
116
118
  4. Wait for all analyzers to complete:
117
119
  ```bash
118
- node .agileflow/scripts/lib/tmux-audit-monitor.js wait TRACE_ID --timeout=1800
120
+ node .agileflow/scripts/check-sessions.js wait TRACE_ID --timeout=1800
119
121
  ```
120
122
  - Exit 0 = all complete (JSON results on stdout)
121
123
  - Exit 1 = timeout (partial results on stdout, `missing` array shows what's left)
122
- - To check progress without blocking: `node .agileflow/scripts/lib/tmux-audit-monitor.js status TRACE_ID`
123
- - To retry stalled analyzers: `node .agileflow/scripts/lib/tmux-audit-monitor.js retry TRACE_ID`
124
+ - To check progress without blocking: `node .agileflow/scripts/check-sessions.js status TRACE_ID`
125
+ - To retry stalled analyzers: `node .agileflow/scripts/check-sessions.js retry TRACE_ID`
124
126
  5. Parse `results` array from the JSON output. Pass all findings to consensus coordinator (same as deep mode).
125
127
  6. If tmux unavailable (spawn exits code 2), fall back to `DEPTH=deep` with warning
126
128
 
@@ -137,7 +139,7 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
137
139
  ```bash
138
140
  node .agileflow/scripts/spawn-audit-sessions.js --audit=architecture --target=TARGET --depth=extreme --partitions=dir1,dir2,dir3 --model=MODEL --json
139
141
  ```
140
- 4. Wait and collect results (same as ultradeep - use tmux-audit-monitor.js)
142
+ 4. Wait and collect results (same as ultradeep - use check-sessions.js)
141
143
  5. Run consensus on combined results from all partitions
142
144
 
143
145
  **PARTITIONS argument** (only used with DEPTH=extreme):
@@ -147,7 +149,21 @@ Partition-based multi-agent audit. Instead of 1 analyzer per tmux window, the co
147
149
  | `PARTITIONS=5` | AI creates exactly 5 partitions |
148
150
  | `PARTITIONS=src/auth,src/api,lib` | Use these exact directories |
149
151
 
150
- ### STEP 2: Deploy Analyzers in Parallel
152
+ ---
153
+
154
+ ### DEPTH ROUTING GATE
155
+
156
+ | DEPTH | Route |
157
+ |-------|-------|
158
+ | `quick` or `deep` | Continue to STEP 2 below |
159
+ | `ultradeep` | STOP. Follow ULTRADEEP instructions above. Do NOT proceed to STEP 2. |
160
+ | `extreme` | STOP. Follow EXTREME instructions above. Do NOT proceed to STEP 2. |
161
+
162
+ **CRITICAL**: STEP 2 is for `quick`/`deep` ONLY. For `ultradeep`/`extreme`, the analyzers run in separate tmux sessions — NOT in-process via Task calls. If you deploy Task calls for ultradeep/extreme, you are doing it wrong. Follow the spawn-audit-sessions.js workflow above, then skip to the consensus step with the collected results.
163
+
164
+ ---
165
+
166
+ ### STEP 2: Deploy Analyzers in Parallel (quick/deep ONLY)
151
167
 
152
168
  **CRITICAL**: Deploy ALL selected analyzers in a SINGLE message with multiple Task calls.
153
169