@wazir-dev/cli 1.2.0 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +54 -44
- package/README.md +13 -13
- package/assets/demo.cast +47 -0
- package/assets/demo.gif +0 -0
- package/docs/anti-patterns/AP-23-skipping-enabled-workflows.md +28 -0
- package/docs/anti-patterns/AP-24-clarifier-deciding-scope.md +34 -0
- package/docs/concepts/architecture.md +1 -1
- package/docs/concepts/why-wazir.md +1 -1
- package/docs/readmes/INDEX.md +1 -1
- package/docs/readmes/features/expertise/README.md +1 -1
- package/docs/readmes/features/hooks/pre-compact-summary.md +1 -1
- package/docs/reference/hooks.md +1 -0
- package/docs/reference/launch-checklist.md +3 -3
- package/docs/reference/review-loop-pattern.md +3 -2
- package/docs/reference/skill-tiers.md +2 -2
- package/docs/research/2026-03-20-agents/a18fb002157904af5.txt +187 -0
- package/docs/research/2026-03-20-agents/a1d0ac79ac2f11e6f.txt +2 -0
- package/docs/research/2026-03-20-agents/a324079de037abd7c.txt +198 -0
- package/docs/research/2026-03-20-agents/a357586bccfafb0e5.txt +256 -0
- package/docs/research/2026-03-20-agents/a4365394e4d753105.txt +137 -0
- package/docs/research/2026-03-20-agents/a492af28bc52d3613.txt +136 -0
- package/docs/research/2026-03-20-agents/a4984db0b6a8eee07.txt +124 -0
- package/docs/research/2026-03-20-agents/a5b30e59d34bbb062.txt +214 -0
- package/docs/research/2026-03-20-agents/a5cf7829dab911586.txt +165 -0
- package/docs/research/2026-03-20-agents/a607157c30dd97c9e.txt +96 -0
- package/docs/research/2026-03-20-agents/a60b68b1e19d1e16b.txt +115 -0
- package/docs/research/2026-03-20-agents/a722af01c5594aba0.txt +166 -0
- package/docs/research/2026-03-20-agents/a787bdc516faa5829.txt +181 -0
- package/docs/research/2026-03-20-agents/a7c46d1bba1056ed2.txt +132 -0
- package/docs/research/2026-03-20-agents/a7e5abbab2b281a0d.txt +100 -0
- package/docs/research/2026-03-20-agents/a8dbadc66cd0d7d5a.txt +95 -0
- package/docs/research/2026-03-20-agents/a904d9f45d6b86a6d.txt +75 -0
- package/docs/research/2026-03-20-agents/a927659a942ee7f60.txt +102 -0
- package/docs/research/2026-03-20-agents/a962cb569191f7583.txt +125 -0
- package/docs/research/2026-03-20-agents/aab6decea538aac41.txt +148 -0
- package/docs/research/2026-03-20-agents/abd58b853dd938a1b.txt +295 -0
- package/docs/research/2026-03-20-agents/ac009da573eff7f65.txt +100 -0
- package/docs/research/2026-03-20-agents/ac1bc783364405e5f.txt +190 -0
- package/docs/research/2026-03-20-agents/aca5e2b57fde152a0.txt +132 -0
- package/docs/research/2026-03-20-agents/ad849b8c0a7e95b8b.txt +176 -0
- package/docs/research/2026-03-20-agents/adc2b12a4da32c962.txt +258 -0
- package/docs/research/2026-03-20-agents/af97caaaa9a80e4cb.txt +146 -0
- package/docs/research/2026-03-20-agents/afc5faceee368b3ca.txt +111 -0
- package/docs/research/2026-03-20-agents/afdb282d866e3c1e4.txt +164 -0
- package/docs/research/2026-03-20-agents/afe9d1f61c02b1e8d.txt +299 -0
- package/docs/research/2026-03-20-agents/b4hmkwril.txt +1856 -0
- package/docs/research/2026-03-20-agents/b80ptk89g.txt +1856 -0
- package/docs/research/2026-03-20-agents/bf54s1jss.txt +1150 -0
- package/docs/research/2026-03-20-agents/bhd6kq2kx.txt +1856 -0
- package/docs/research/2026-03-20-agents/bmb2fodyr.txt +988 -0
- package/docs/research/2026-03-20-agents/bmmsrij8i.txt +826 -0
- package/docs/research/2026-03-20-agents/bn4t2ywpu.txt +2175 -0
- package/docs/research/2026-03-20-agents/bu22t9f1z.txt +0 -0
- package/docs/research/2026-03-20-agents/bwvl98v2p.txt +738 -0
- package/docs/research/2026-03-20-agents/psych-a3697a7fd06eb64fd.txt +135 -0
- package/docs/research/2026-03-20-agents/psych-a37776fabc870feae.txt +123 -0
- package/docs/research/2026-03-20-agents/psych-a5b1fe05c0589efaf.txt +2 -0
- package/docs/research/2026-03-20-agents/psych-a95c15b1f29424435.txt +76 -0
- package/docs/research/2026-03-20-agents/psych-a9c26f4d9172dde7c.txt +2 -0
- package/docs/research/2026-03-20-agents/psych-aa19c69f0ca2c5ad3.txt +2 -0
- package/docs/research/2026-03-20-agents/psych-aa4e4cb70e1be5ecb.txt +95 -0
- package/docs/research/2026-03-20-agents/psych-ab5b302f26a554663.txt +102 -0
- package/docs/research/2026-03-20-deep-research-complete.md +101 -0
- package/docs/research/2026-03-20-deep-research-status.md +38 -0
- package/docs/research/2026-03-20-enforcement-research.md +107 -0
- package/expertise/antipatterns/process/ai-coding-antipatterns.md +117 -0
- package/expertise/composition-map.yaml +27 -8
- package/expertise/digests/reviewer/ai-coding-digest.md +83 -0
- package/expertise/digests/reviewer/architectural-thinking-digest.md +63 -0
- package/expertise/digests/reviewer/architecture-antipatterns-digest.md +49 -0
- package/expertise/digests/reviewer/code-smells-digest.md +53 -0
- package/expertise/digests/reviewer/coupling-cohesion-digest.md +54 -0
- package/expertise/digests/reviewer/ddd-digest.md +60 -0
- package/expertise/digests/reviewer/dependency-risk-digest.md +40 -0
- package/expertise/digests/reviewer/error-handling-digest.md +55 -0
- package/expertise/digests/reviewer/review-methodology-digest.md +49 -0
- package/exports/hosts/claude/.claude/commands/learn.md +61 -8
- package/exports/hosts/claude/.claude/commands/plan-review.md +3 -1
- package/exports/hosts/claude/.claude/commands/verify.md +30 -1
- package/exports/hosts/claude/.claude/settings.json +7 -6
- package/exports/hosts/claude/export.manifest.json +8 -5
- package/exports/hosts/claude/host-package.json +3 -0
- package/exports/hosts/codex/export.manifest.json +8 -5
- package/exports/hosts/codex/host-package.json +3 -0
- package/exports/hosts/cursor/.cursor/hooks.json +6 -6
- package/exports/hosts/cursor/export.manifest.json +8 -5
- package/exports/hosts/cursor/host-package.json +3 -0
- package/exports/hosts/gemini/export.manifest.json +8 -5
- package/exports/hosts/gemini/host-package.json +3 -0
- package/hooks/definitions/pretooluse_dispatcher.yaml +26 -0
- package/hooks/definitions/pretooluse_pipeline_guard.yaml +22 -0
- package/hooks/definitions/stop_pipeline_gate.yaml +22 -0
- package/hooks/hooks.json +7 -6
- package/hooks/pretooluse-dispatcher +84 -0
- package/hooks/pretooluse-pipeline-guard +9 -0
- package/hooks/stop-pipeline-gate +9 -0
- package/llms-full.txt +48 -18
- package/package.json +2 -3
- package/schemas/decision.schema.json +15 -0
- package/schemas/hook.schema.json +4 -1
- package/schemas/phase-report.schema.json +9 -0
- package/skills/TEMPLATE-3-ZONE.md +160 -0
- package/skills/brainstorming/SKILL.md +137 -21
- package/skills/clarifier/SKILL.md +364 -53
- package/skills/claude-cli/SKILL.md +91 -12
- package/skills/codex-cli/SKILL.md +91 -12
- package/skills/debugging/SKILL.md +133 -38
- package/skills/design/SKILL.md +173 -37
- package/skills/dispatching-parallel-agents/SKILL.md +129 -31
- package/skills/executing-plans/SKILL.md +113 -25
- package/skills/executor/SKILL.md +252 -21
- package/skills/finishing-a-development-branch/SKILL.md +107 -18
- package/skills/gemini-cli/SKILL.md +91 -12
- package/skills/humanize/SKILL.md +92 -13
- package/skills/init-pipeline/SKILL.md +90 -18
- package/skills/prepare-next/SKILL.md +93 -24
- package/skills/receiving-code-review/SKILL.md +90 -16
- package/skills/requesting-code-review/SKILL.md +100 -24
- package/skills/requesting-code-review/code-reviewer.md +29 -17
- package/skills/reviewer/SKILL.md +270 -57
- package/skills/run-audit/SKILL.md +92 -15
- package/skills/scan-project/SKILL.md +93 -14
- package/skills/self-audit/SKILL.md +133 -39
- package/skills/skill-research/SKILL.md +275 -0
- package/skills/subagent-driven-development/SKILL.md +129 -30
- package/skills/subagent-driven-development/code-quality-reviewer-prompt.md +30 -2
- package/skills/subagent-driven-development/implementer-prompt.md +40 -27
- package/skills/subagent-driven-development/spec-reviewer-prompt.md +25 -12
- package/skills/tdd/SKILL.md +125 -20
- package/skills/using-git-worktrees/SKILL.md +118 -28
- package/skills/using-skills/SKILL.md +116 -29
- package/skills/verification/SKILL.md +160 -17
- package/skills/wazir/SKILL.md +750 -120
- package/skills/writing-plans/SKILL.md +134 -28
- package/skills/writing-skills/SKILL.md +91 -13
- package/skills/writing-skills/anthropic-best-practices.md +104 -64
- package/skills/writing-skills/persuasion-principles.md +100 -34
- package/tooling/src/capture/command.js +46 -2
- package/tooling/src/capture/decision.js +40 -0
- package/tooling/src/capture/store.js +33 -0
- package/tooling/src/capture/user-input.js +66 -0
- package/tooling/src/checks/security-sensitivity.js +69 -0
- package/tooling/src/cli.js +28 -26
- package/tooling/src/config/depth-table.js +60 -0
- package/tooling/src/export/compiler.js +7 -8
- package/tooling/src/guards/guardrail-functions.js +131 -0
- package/tooling/src/guards/phase-prerequisite-guard.js +97 -3
- package/tooling/src/hooks/pretooluse-dispatcher.js +300 -0
- package/tooling/src/hooks/pretooluse-pipeline-guard.js +141 -0
- package/tooling/src/hooks/stop-pipeline-gate.js +92 -0
- package/tooling/src/init/auto-detect.js +0 -2
- package/tooling/src/init/command.js +3 -95
- package/tooling/src/learn/pipeline.js +177 -0
- package/tooling/src/state/db.js +251 -2
- package/tooling/src/state/pipeline-state.js +262 -0
- package/tooling/src/status/command.js +6 -1
- package/tooling/src/verify/proof-collector.js +299 -0
- package/wazir.manifest.yaml +3 -0
- package/workflows/learn.md +61 -8
- package/workflows/plan-review.md +3 -1
- package/workflows/verify.md +30 -1
package/tooling/src/state/db.js
CHANGED
|
@@ -11,6 +11,21 @@ function hashDescription(description) {
|
|
|
11
11
|
return crypto.createHash('sha256').update(description).digest('hex');
|
|
12
12
|
}
|
|
13
13
|
|
|
14
|
+
/**
 * Normalize a finding description for clustering.
 * Strips file paths, line numbers, identifiers to produce a canonical pattern.
 */
function canonicalizeFindingText(description) {
  // Ordered substitution rules. Paths are replaced first so their digits and
  // letters are not consumed by the later line-number and hash rules.
  const rules = [
    [/[a-zA-Z0-9_\-./]+\.[a-zA-Z]{1,4}(:\d+)?/g, '<FILE>'],
    [/line \d+/gi, 'line <N>'],
    [/['"`][\w.]+['"`]/g, '<ID>'],
    [/[0-9a-f]{7,40}/gi, '<HASH>'],
    [/\s+/g, ' '],
  ];
  const substituted = rules.reduce(
    (text, [pattern, placeholder]) => text.replace(pattern, placeholder),
    description,
  );
  // Case-fold last so clustering is insensitive to wording case.
  return substituted.trim().toLowerCase();
}
|
|
28
|
+
|
|
14
29
|
function ensureStateSchema(db) {
|
|
15
30
|
db.exec(`
|
|
16
31
|
CREATE TABLE IF NOT EXISTS learnings (
|
|
@@ -67,7 +82,54 @@ function ensureStateSchema(db) {
|
|
|
67
82
|
CREATE INDEX IF NOT EXISTS idx_findings_finding_hash ON findings(finding_hash);
|
|
68
83
|
CREATE INDEX IF NOT EXISTS idx_audit_history_run_id ON audit_history(run_id);
|
|
69
84
|
CREATE INDEX IF NOT EXISTS idx_usage_aggregate_run_id ON usage_aggregate(run_id);
|
|
85
|
+
|
|
86
|
+
CREATE TABLE IF NOT EXISTS finding_clusters (
|
|
87
|
+
id TEXT PRIMARY KEY,
|
|
88
|
+
canonical_hash TEXT NOT NULL,
|
|
89
|
+
category TEXT NOT NULL,
|
|
90
|
+
pattern_description TEXT NOT NULL,
|
|
91
|
+
finding_hashes TEXT NOT NULL DEFAULT '[]',
|
|
92
|
+
run_ids TEXT NOT NULL DEFAULT '[]',
|
|
93
|
+
occurrence_count INTEGER DEFAULT 1,
|
|
94
|
+
distinct_runs INTEGER DEFAULT 1,
|
|
95
|
+
first_seen TEXT NOT NULL DEFAULT (datetime('now')),
|
|
96
|
+
last_seen TEXT NOT NULL DEFAULT (datetime('now')),
|
|
97
|
+
status TEXT NOT NULL DEFAULT 'tally' CHECK(status IN ('tally','candidate','promoted','active','demoted')),
|
|
98
|
+
promoted_at TEXT,
|
|
99
|
+
antipattern_id TEXT
|
|
100
|
+
);
|
|
101
|
+
|
|
102
|
+
CREATE TABLE IF NOT EXISTS antipattern_candidates (
|
|
103
|
+
id TEXT PRIMARY KEY,
|
|
104
|
+
cluster_id TEXT NOT NULL REFERENCES finding_clusters(id),
|
|
105
|
+
title TEXT NOT NULL,
|
|
106
|
+
description TEXT NOT NULL,
|
|
107
|
+
detection_signal TEXT NOT NULL,
|
|
108
|
+
severity TEXT NOT NULL CHECK(severity IN ('critical','high','medium','low')),
|
|
109
|
+
scope_roles TEXT DEFAULT 'reviewer',
|
|
110
|
+
scope_stacks TEXT DEFAULT 'all',
|
|
111
|
+
evidence_runs TEXT NOT NULL DEFAULT '[]',
|
|
112
|
+
evidence_count INTEGER DEFAULT 0,
|
|
113
|
+
status TEXT NOT NULL DEFAULT 'proposed' CHECK(status IN ('proposed','accepted','rejected','expired')),
|
|
114
|
+
proposed_at TEXT NOT NULL DEFAULT (datetime('now')),
|
|
115
|
+
reviewed_at TEXT,
|
|
116
|
+
expires_at TEXT
|
|
117
|
+
);
|
|
118
|
+
|
|
119
|
+
CREATE INDEX IF NOT EXISTS idx_finding_clusters_status ON finding_clusters(status);
|
|
120
|
+
CREATE INDEX IF NOT EXISTS idx_finding_clusters_canonical_hash ON finding_clusters(canonical_hash);
|
|
121
|
+
CREATE INDEX IF NOT EXISTS idx_antipattern_candidates_status ON antipattern_candidates(status);
|
|
70
122
|
`);
|
|
123
|
+
|
|
124
|
+
// Safe migration: add category column to findings if it doesn't exist
|
|
125
|
+
try {
|
|
126
|
+
db.exec(`ALTER TABLE findings ADD COLUMN category TEXT DEFAULT ''`);
|
|
127
|
+
} catch (_) {
|
|
128
|
+
// Column already exists — ignore
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
// Index on findings.category (must run after migration adds the column)
|
|
132
|
+
db.exec(`CREATE INDEX IF NOT EXISTS idx_findings_category ON findings(category)`);
|
|
71
133
|
}
|
|
72
134
|
|
|
73
135
|
// ---------------------------------------------------------------------------
|
|
@@ -171,10 +233,11 @@ export function insertFinding(db, record) {
|
|
|
171
233
|
const id = crypto.randomUUID();
|
|
172
234
|
const findingHash = record.finding_hash ?? hashDescription(record.description);
|
|
173
235
|
const createdAt = new Date().toISOString();
|
|
236
|
+
const category = record.category || '';
|
|
174
237
|
|
|
175
238
|
db.prepare(`
|
|
176
|
-
INSERT INTO findings (id, run_id, phase, source, severity, description, finding_hash, created_at)
|
|
177
|
-
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
|
239
|
+
INSERT INTO findings (id, run_id, phase, source, severity, description, finding_hash, category, created_at)
|
|
240
|
+
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
178
241
|
`).run(
|
|
179
242
|
id,
|
|
180
243
|
record.run_id,
|
|
@@ -183,9 +246,19 @@ export function insertFinding(db, record) {
|
|
|
183
246
|
record.severity,
|
|
184
247
|
record.description,
|
|
185
248
|
findingHash,
|
|
249
|
+
category,
|
|
186
250
|
createdAt,
|
|
187
251
|
);
|
|
188
252
|
|
|
253
|
+
// Auto-tally: cluster the finding for the learning pipeline
|
|
254
|
+
upsertFindingCluster(db, {
|
|
255
|
+
canonical_hash: hashDescription(canonicalizeFindingText(record.description)),
|
|
256
|
+
category,
|
|
257
|
+
pattern_description: canonicalizeFindingText(record.description),
|
|
258
|
+
finding_hash: findingHash,
|
|
259
|
+
run_id: record.run_id,
|
|
260
|
+
});
|
|
261
|
+
|
|
189
262
|
return id;
|
|
190
263
|
}
|
|
191
264
|
|
|
@@ -273,6 +346,179 @@ export function getUsageSummary(db) {
|
|
|
273
346
|
return row;
|
|
274
347
|
}
|
|
275
348
|
|
|
349
|
+
// ---------------------------------------------------------------------------
|
|
350
|
+
// Finding Clusters (Learning Pipeline)
|
|
351
|
+
// ---------------------------------------------------------------------------
|
|
352
|
+
|
|
353
|
+
/**
 * Insert or update a finding cluster keyed by its canonical hash.
 *
 * If a cluster already exists for record.canonical_hash, the finding hash and
 * run id are merged into its JSON arrays (deduplicated), occurrence_count is
 * bumped, and last_seen is refreshed. Otherwise a fresh cluster row is created
 * with counts of 1.
 *
 * @param {object} db - open database handle (prepare/get/run API)
 * @param {object} record - { canonical_hash, category, pattern_description,
 *   finding_hash, run_id }
 * @returns {string} the cluster id (existing or newly created)
 */
export function upsertFindingCluster(db, record) {
  const cluster = db.prepare(`
    SELECT * FROM finding_clusters WHERE canonical_hash = ?
  `).get(record.canonical_hash);

  if (!cluster) {
    // First sighting of this pattern: start a new tally cluster.
    const clusterId = crypto.randomUUID();
    db.prepare(`
    INSERT INTO finding_clusters (id, canonical_hash, category, pattern_description, finding_hashes, run_ids, occurrence_count, distinct_runs)
    VALUES (?, ?, ?, ?, ?, ?, 1, 1)
  `).run(
      clusterId,
      record.canonical_hash,
      record.category || 'uncategorized',
      record.pattern_description,
      JSON.stringify([record.finding_hash]),
      JSON.stringify(record.run_id ? [record.run_id] : []),
    );
    return clusterId;
  }

  // Merge the new sighting into the stored arrays, deduplicated.
  const knownHashes = JSON.parse(cluster.finding_hashes);
  if (!knownHashes.includes(record.finding_hash)) {
    knownHashes.push(record.finding_hash);
  }
  // Track distinct runs from the DB row, not the incoming record.
  const runs = new Set(JSON.parse(cluster.run_ids || '[]'));
  if (record.run_id) {
    runs.add(record.run_id);
  }

  db.prepare(`
    UPDATE finding_clusters
    SET finding_hashes = ?,
        run_ids = ?,
        occurrence_count = occurrence_count + 1,
        distinct_runs = ?,
        last_seen = datetime('now'),
        category = COALESCE(NULLIF(?, ''), category)
    WHERE id = ?
  `).run(
    JSON.stringify(knownHashes),
    JSON.stringify([...runs]),
    runs.size,
    record.category || '',
    cluster.id,
  );

  return cluster.id;
}
|
|
402
|
+
|
|
403
|
+
/**
 * List finding clusters in a given lifecycle status, most frequent first.
 */
export function getClustersByStatus(db, status) {
  const stmt = db.prepare(`
    SELECT * FROM finding_clusters
    WHERE status = ?
    ORDER BY occurrence_count DESC
  `);
  return stmt.all(status);
}
|
|
410
|
+
|
|
411
|
+
/**
 * Tally clusters that have recurred enough to become candidates.
 *
 * @param {number} [minOccurrences=3] - minimum total sightings
 * @param {number} [minRuns=2] - minimum distinct runs the pattern appeared in
 */
export function getClustersReadyForPromotion(db, minOccurrences = 3, minRuns = 2) {
  const stmt = db.prepare(`
    SELECT * FROM finding_clusters
    WHERE status = 'tally'
      AND occurrence_count >= ?
      AND distinct_runs >= ?
    ORDER BY occurrence_count DESC
  `);
  return stmt.all(minOccurrences, minRuns);
}
|
|
420
|
+
|
|
421
|
+
/**
 * Move a cluster from 'tally' to 'candidate' and stamp the promotion time.
 */
export function promoteClusterToCandidate(db, clusterId) {
  const stmt = db.prepare(`
    UPDATE finding_clusters
    SET status = 'candidate',
        promoted_at = datetime('now')
    WHERE id = ?
  `);
  stmt.run(clusterId);
}
|
|
429
|
+
|
|
430
|
+
// ---------------------------------------------------------------------------
|
|
431
|
+
// Antipattern Candidates (Learning Pipeline)
|
|
432
|
+
// ---------------------------------------------------------------------------
|
|
433
|
+
|
|
434
|
+
/**
 * Insert a proposed antipattern candidate derived from a finding cluster.
 *
 * @param {object} db - open database handle (prepare/run API)
 * @param {object} record - cluster_id, title, description, detection_signal,
 *   severity, plus optional scope_roles, scope_stacks, evidence_runs,
 *   evidence_count
 * @param {number} [ttlDays=90] - days until an unreviewed proposal expires
 *   (previously a hard-coded 90-day TTL; parameterized, default unchanged)
 * @returns {string} the new candidate id
 */
export function insertAntipatternCandidate(db, record, ttlDays = 90) {
  const id = crypto.randomUUID();
  // Unreviewed proposals decay instead of accumulating forever.
  const expiresAt = new Date(Date.now() + ttlDays * 24 * 60 * 60 * 1000).toISOString();

  db.prepare(`
    INSERT INTO antipattern_candidates (id, cluster_id, title, description, detection_signal, severity, scope_roles, scope_stacks, evidence_runs, evidence_count, expires_at)
    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
  `).run(
    id,
    record.cluster_id,
    record.title,
    record.description,
    record.detection_signal,
    record.severity,
    record.scope_roles || 'reviewer',
    record.scope_stacks || 'all',
    // Guard against non-array input; stored as a JSON array string.
    JSON.stringify(Array.isArray(record.evidence_runs) ? record.evidence_runs : []),
    record.evidence_count || 0,
    expiresAt,
  );

  return id;
}
|
|
457
|
+
|
|
458
|
+
/**
 * List antipattern candidates in a given review status, newest first.
 */
export function getAntipatternCandidatesByStatus(db, status) {
  const stmt = db.prepare(`
    SELECT * FROM antipattern_candidates
    WHERE status = ?
    ORDER BY proposed_at DESC
  `);
  return stmt.all(status);
}
|
|
465
|
+
|
|
466
|
+
/**
 * Mark a candidate as accepted and record the review timestamp.
 */
export function acceptAntipatternCandidate(db, candidateId) {
  const reviewedAt = new Date().toISOString();
  db.prepare(`
    UPDATE antipattern_candidates
    SET status = 'accepted',
        reviewed_at = ?
    WHERE id = ?
  `).run(reviewedAt, candidateId);
}
|
|
475
|
+
|
|
476
|
+
/**
 * Mark a candidate as rejected and return its source cluster to 'tally'.
 */
export function rejectAntipatternCandidate(db, candidateId) {
  const reviewedAt = new Date().toISOString();
  // Look up the linked cluster before touching the candidate row.
  const row = db.prepare(`SELECT cluster_id FROM antipattern_candidates WHERE id = ?`).get(candidateId);

  db.prepare(`
    UPDATE antipattern_candidates
    SET status = 'rejected',
        reviewed_at = ?
    WHERE id = ?
  `).run(reviewedAt, candidateId);

  // A rejection is not permanent: the cluster goes back to 'tally' so the
  // pattern can be re-proposed if it keeps recurring.
  if (row) {
    db.prepare(`UPDATE finding_clusters SET status = 'tally' WHERE id = ?`).run(row.cluster_id);
  }
}
|
|
491
|
+
|
|
492
|
+
/**
 * Expire all 'proposed' candidates past their TTL and return their source
 * clusters to 'tally'.
 *
 * @returns {object} the run() result of the expiry UPDATE
 */
export function expireStaleAntipatternCandidates(db) {
  const cutoff = new Date().toISOString();

  // Capture the affected clusters before flipping the candidates to expired.
  const staleRows = db.prepare(`
    SELECT cluster_id FROM antipattern_candidates
    WHERE status = 'proposed' AND expires_at < ?
  `).all(cutoff);

  const updateResult = db.prepare(`
    UPDATE antipattern_candidates
    SET status = 'expired'
    WHERE status = 'proposed'
      AND expires_at < ?
  `).run(cutoff);

  // Return each affected cluster to 'tally' so a recurring pattern can be
  // proposed again later. Statement is prepared once and reused per row.
  const resetCluster = db.prepare(`UPDATE finding_clusters SET status = 'tally' WHERE id = ?`);
  for (const row of staleRows) {
    resetCluster.run(row.cluster_id);
  }

  return updateResult;
}
|
|
514
|
+
|
|
515
|
+
/**
 * Number of accepted antipattern candidates (active learnings).
 */
export function getActiveLearningsCount(db) {
  const row = db.prepare(`
    SELECT COUNT(*) AS count FROM antipattern_candidates
    WHERE status = 'accepted'
  `).get();
  return row.count;
}
|
|
521
|
+
|
|
276
522
|
// ---------------------------------------------------------------------------
|
|
277
523
|
// Stats (for CLI)
|
|
278
524
|
// ---------------------------------------------------------------------------
|
|
@@ -283,5 +529,8 @@ export function getStateCounts(db) {
|
|
|
283
529
|
finding_count: db.prepare('SELECT COUNT(*) AS count FROM findings').get().count,
|
|
284
530
|
audit_count: db.prepare('SELECT COUNT(*) AS count FROM audit_history').get().count,
|
|
285
531
|
usage_count: db.prepare('SELECT COUNT(*) AS count FROM usage_aggregate').get().count,
|
|
532
|
+
cluster_count: db.prepare('SELECT COUNT(*) AS count FROM finding_clusters').get().count,
|
|
533
|
+
candidate_count: db.prepare('SELECT COUNT(*) AS count FROM antipattern_candidates WHERE status = ?').get('proposed').count,
|
|
534
|
+
active_antipattern_count: db.prepare('SELECT COUNT(*) AS count FROM antipattern_candidates WHERE status = ?').get('accepted').count,
|
|
286
535
|
};
|
|
287
536
|
}
|
|
@@ -0,0 +1,262 @@
|
|
|
1
|
+
import crypto from 'node:crypto';
|
|
2
|
+
import fs from 'node:fs';
|
|
3
|
+
import path from 'node:path';
|
|
4
|
+
|
|
5
|
+
export const PHASE_ORDER = ['init', 'clarify', 'execute', 'verify', 'review', 'complete'];
|
|
6
|
+
|
|
7
|
+
const STATE_FILE = 'pipeline-state.json';
|
|
8
|
+
|
|
9
|
+
// ---------------------------------------------------------------------------
|
|
10
|
+
// Atomic file write — temp + rename to prevent corruption
|
|
11
|
+
// ---------------------------------------------------------------------------
|
|
12
|
+
|
|
13
|
+
/**
 * Write JSON to filePath atomically: serialize to a pid-suffixed temp file
 * next to the target, then rename over it so readers never observe a
 * partially written file.
 */
function atomicWriteJson(filePath, data) {
  fs.mkdirSync(path.dirname(filePath), { recursive: true });
  const stagingPath = `${filePath}.${process.pid}.tmp`;
  const serialized = JSON.stringify(data, null, 2) + '\n';
  fs.writeFileSync(stagingPath, serialized, 'utf8');
  fs.renameSync(stagingPath, filePath);
}
|
|
20
|
+
|
|
21
|
+
// Absolute path of the pipeline state file inside a given state root.
function statePath(rootDir) {
  return path.join(rootDir, STATE_FILE);
}
|
|
24
|
+
|
|
25
|
+
// ---------------------------------------------------------------------------
|
|
26
|
+
// Read / Create
|
|
27
|
+
// ---------------------------------------------------------------------------
|
|
28
|
+
|
|
29
|
+
/**
 * Read the current pipeline state. Returns null if no state file exists.
 *
 * @param {string} stateRoot - directory containing the state file
 * @returns {object|null} parsed state, or null when missing/unreadable
 */
export function readPipelineState(stateRoot) {
  const fp = statePath(stateRoot);
  if (!fs.existsSync(fp)) return null;
  try {
    return JSON.parse(fs.readFileSync(fp, 'utf8'));
  } catch {
    // Corrupt or unreadable state is treated the same as missing state.
    return null;
  }
}
|
|
41
|
+
|
|
42
|
+
/**
 * Create a fresh pipeline state for a new run.
 *
 * The run starts in 'init' and may only advance to 'clarify'. The state is
 * persisted atomically before being returned.
 *
 * @param {string} runId - identifier for the new run
 * @param {string} stateRoot - directory where the state file is written
 * @returns {object} the newly created state
 */
export function createPipelineState(runId, stateRoot) {
  const state = {
    run_id: runId,
    current_phase: 'init',
    phase_history: [],
    allowed_transitions: ['clarify'],
    stop_hook_active: false,
    artifacts: {},
    guardrail_results: {},
    // Fresh session id per run; used to correlate hook invocations.
    session_id: crypto.randomUUID(),
    updated_at: new Date().toISOString(),
  };
  atomicWriteJson(statePath(stateRoot), state);
  return state;
}
|
|
60
|
+
|
|
61
|
+
// ---------------------------------------------------------------------------
|
|
62
|
+
// Transitions
|
|
63
|
+
// ---------------------------------------------------------------------------
|
|
64
|
+
|
|
65
|
+
/**
 * Check whether a transition from currentPhase to nextPhase is valid.
 * A transition is legal only when nextPhase is the immediate successor of
 * currentPhase in PHASE_ORDER — forward, one step, no skipping.
 */
export function isTransitionAllowed(currentPhase, nextPhase) {
  const from = PHASE_ORDER.indexOf(currentPhase);
  const to = PHASE_ORDER.indexOf(nextPhase);
  return from !== -1 && to !== -1 && to === from + 1;
}
|
|
75
|
+
|
|
76
|
+
/**
 * Transition to the next phase. Validates the transition is legal.
 * Records the outgoing phase in phase_history, recomputes the (single)
 * allowed forward transition, and persists the state atomically.
 * Throws on invalid transition or missing state.
 */
export function transitionPhase(stateRoot, nextPhase) {
  const state = readPipelineState(stateRoot);
  if (!state) {
    throw new Error('No pipeline state found. Call createPipelineState first.');
  }

  if (!isTransitionAllowed(state.current_phase, nextPhase)) {
    throw new Error(
      `Invalid transition: ${state.current_phase} → ${nextPhase}. ` +
      `Allowed: ${state.current_phase} → ${PHASE_ORDER[PHASE_ORDER.indexOf(state.current_phase) + 1] ?? 'none'}`,
    );
  }

  const timestamp = new Date().toISOString();

  // Close out the phase we are leaving.
  state.phase_history.push({
    phase: state.current_phase,
    // Fall back to updated_at for states written before phase_entered_at existed.
    entered_at: state.phase_entered_at ?? state.updated_at,
    exited_at: timestamp,
    status: 'completed',
  });

  // Enter the new phase.
  state.current_phase = nextPhase;
  state.phase_entered_at = timestamp;

  // Only the immediate successor (if any) is reachable from here.
  const successor = PHASE_ORDER[PHASE_ORDER.indexOf(nextPhase) + 1];
  state.allowed_transitions = successor ? [successor] : [];

  state.updated_at = timestamp;
  atomicWriteJson(statePath(stateRoot), state);
  return state;
}
|
|
117
|
+
|
|
118
|
+
// ---------------------------------------------------------------------------
|
|
119
|
+
// Phase completion (artifact recording)
|
|
120
|
+
// ---------------------------------------------------------------------------
|
|
121
|
+
|
|
122
|
+
/**
 * Mark the current phase as having produced artifacts.
 * artifacts: { name: { path: string } } — each entry is stored with a content
 * digest so later phases can detect silent edits. Also records a passing
 * guardrail result for the phase and persists the state atomically.
 */
export function completePhase(stateRoot, phase, artifacts = {}) {
  const state = readPipelineState(stateRoot);
  if (!state) {
    throw new Error('No pipeline state found.');
  }

  const timestamp = new Date().toISOString();

  for (const [artifactName, meta] of Object.entries(artifacts)) {
    state.artifacts[artifactName] = {
      path: meta.path,
      // Digest is null when no path was supplied (or the file is absent).
      digest: meta.path ? computeArtifactDigest(meta.path) : null,
      created_at: timestamp,
    };
  }

  state.guardrail_results[phase] = { passed: true, checked_at: timestamp };
  state.updated_at = timestamp;
  atomicWriteJson(statePath(stateRoot), state);
  return state;
}
|
|
148
|
+
|
|
149
|
+
// ---------------------------------------------------------------------------
|
|
150
|
+
// Stop hook flag
|
|
151
|
+
// ---------------------------------------------------------------------------
|
|
152
|
+
|
|
153
|
+
/**
 * Set or clear the stop_hook_active flag to prevent infinite loops.
 * The value is coerced to a boolean and the state is persisted atomically.
 */
export function setStopHookActive(stateRoot, active) {
  const state = readPipelineState(stateRoot);
  if (!state) {
    throw new Error('No pipeline state found.');
  }
  state.stop_hook_active = Boolean(active);
  state.updated_at = new Date().toISOString();
  atomicWriteJson(statePath(stateRoot), state);
  return state;
}
|
|
166
|
+
|
|
167
|
+
// ---------------------------------------------------------------------------
|
|
168
|
+
// Artifact dependency graph
|
|
169
|
+
// ---------------------------------------------------------------------------
|
|
170
|
+
|
|
171
|
+
/**
 * Canonical artifact dependency graph for the pipeline.
 * Each artifact lists the artifacts it requires as inputs.
 */
export const ARTIFACT_DEPENDENCY_GRAPH = {
  // Root artifact: depends on nothing; changing it affects everything below.
  'clarification.md': { requires: [] },
  'spec-hardened.md': { requires: ['clarification.md'] },
  'design.md': { requires: ['spec-hardened.md'] },
  // Leaf artifact: nothing downstream consumes it.
  'execution-plan.md': { requires: ['design.md'] },
};
|
|
181
|
+
|
|
182
|
+
/**
 * Store artifact dependencies in pipeline state and persist atomically.
 */
export function setArtifactDependencies(stateRoot, depGraph) {
  const state = readPipelineState(stateRoot);
  if (!state) {
    throw new Error('No pipeline state found.');
  }
  state.artifact_dependencies = depGraph;
  state.updated_at = new Date().toISOString();
  atomicWriteJson(statePath(stateRoot), state);
  return state;
}
|
|
193
|
+
|
|
194
|
+
/**
 * Compute all artifacts downstream of a changed artifact.
 * Walks the dependency graph to find everything that transitively requires
 * the changed artifact. The visited set guards against cycles.
 *
 * @param {string} changedArtifact — the artifact that was modified
 * @param {object} depGraph — the dependency graph
 * @returns {string[]} downstream artifact names
 */
export function computeDownstreamArtifacts(changedArtifact, depGraph) {
  const affected = [];
  const seen = new Set();

  const visit = (source) => {
    for (const [artifact, spec] of Object.entries(depGraph)) {
      if (seen.has(artifact) || !spec.requires.includes(source)) {
        continue;
      }
      seen.add(artifact);
      affected.push(artifact);
      visit(artifact);
    }
  };

  visit(changedArtifact);
  return affected;
}
|
|
221
|
+
|
|
222
|
+
/**
 * Classify the mutation level of a changed artifact.
 *
 * - L0 (cosmetic): unknown artifact, no graph impact
 * - L1 (local): leaf artifact with no downstream dependents
 * - L2 (structural): mid-graph artifact with some downstream dependents
 * - L3 (fundamental): root artifact — everything downstream is affected
 *
 * @param {string} changedArtifact
 * @param {object} depGraph
 * @returns {'L0'|'L1'|'L2'|'L3'}
 */
export function classifyMutation(changedArtifact, depGraph) {
  if (!(changedArtifact in depGraph)) return 'L0';

  const entry = depGraph[changedArtifact];
  const dependents = computeDownstreamArtifacts(changedArtifact, depGraph);

  // Leaf: nothing downstream is affected.
  if (dependents.length === 0) return 'L1';
  // Root artifact (no requirements) with dependents: maximal blast radius.
  if (entry.requires.length === 0) return 'L3';
  // Otherwise a mid-graph artifact with some dependents.
  return 'L2';
}
|
|
249
|
+
|
|
250
|
+
// ---------------------------------------------------------------------------
|
|
251
|
+
// Artifact digest
|
|
252
|
+
// ---------------------------------------------------------------------------
|
|
253
|
+
|
|
254
|
+
/**
 * Compute sha256 digest of a file, prefixed "sha256:".
 * Returns null if the file doesn't exist.
 */
export function computeArtifactDigest(filePath) {
  if (!fs.existsSync(filePath)) {
    return null;
  }
  const digest = crypto
    .createHash('sha256')
    .update(fs.readFileSync(filePath))
    .digest('hex');
  return `sha256:${digest}`;
}
|
|
@@ -54,7 +54,12 @@ function success(payload, options = {}) {
|
|
|
54
54
|
};
|
|
55
55
|
}
|
|
56
56
|
|
|
57
|
-
|
|
57
|
+
const parentPhase = payload.parent_phase ?? payload.phase;
|
|
58
|
+
const workflow = payload.workflow;
|
|
59
|
+
const phaseLabel = workflow
|
|
60
|
+
? `Phase: ${parentPhase} > Workflow: ${workflow}`
|
|
61
|
+
: `Phase: ${parentPhase}`;
|
|
62
|
+
let output = `${payload.run_id} ${phaseLabel} ${payload.status}\n`;
|
|
58
63
|
|
|
59
64
|
if (payload.savings_summary) {
|
|
60
65
|
output += `${payload.savings_summary}\n`;
|