work-kit-cli 0.2.8 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +24 -13
- package/cli/src/commands/bootstrap.test.ts +40 -0
- package/cli/src/commands/bootstrap.ts +77 -13
- package/cli/src/commands/cancel.ts +1 -16
- package/cli/src/commands/complete.ts +92 -98
- package/cli/src/commands/completions.ts +2 -2
- package/cli/src/commands/doctor.ts +1 -1
- package/cli/src/commands/extract.ts +217 -0
- package/cli/src/commands/init.test.ts +50 -0
- package/cli/src/commands/init.ts +70 -35
- package/cli/src/commands/learn.test.ts +217 -0
- package/cli/src/commands/learn.ts +104 -0
- package/cli/src/commands/loopback.ts +8 -11
- package/cli/src/commands/next.ts +93 -60
- package/cli/src/commands/observe.ts +16 -21
- package/cli/src/commands/pause-resume.test.ts +142 -0
- package/cli/src/commands/pause.ts +34 -0
- package/cli/src/commands/report.ts +217 -0
- package/cli/src/commands/resume.ts +126 -0
- package/cli/src/commands/setup.ts +280 -0
- package/cli/src/commands/status.ts +8 -6
- package/cli/src/commands/uninstall.ts +8 -3
- package/cli/src/commands/workflow.ts +43 -33
- package/cli/src/config/agent-map.ts +9 -9
- package/cli/src/config/constants.ts +54 -0
- package/cli/src/config/loopback-routes.ts +13 -13
- package/cli/src/config/model-routing.test.ts +190 -0
- package/cli/src/config/model-routing.ts +208 -0
- package/cli/src/config/project-config.test.ts +127 -0
- package/cli/src/config/project-config.ts +106 -0
- package/cli/src/config/{phases.ts → workflow.ts} +40 -23
- package/cli/src/context/prompt-builder.ts +10 -9
- package/cli/src/index.ts +130 -9
- package/cli/src/observer/data.ts +196 -65
- package/cli/src/observer/renderer.ts +127 -107
- package/cli/src/observer/watcher.ts +28 -16
- package/cli/src/state/helpers.test.ts +28 -28
- package/cli/src/state/helpers.ts +37 -25
- package/cli/src/state/schema.ts +135 -45
- package/cli/src/state/store.ts +127 -7
- package/cli/src/state/validators.test.ts +13 -13
- package/cli/src/state/validators.ts +3 -4
- package/cli/src/utils/colors.ts +2 -0
- package/cli/src/utils/fs.ts +13 -0
- package/cli/src/utils/json.ts +20 -0
- package/cli/src/utils/knowledge.ts +471 -0
- package/cli/src/utils/time.ts +27 -0
- package/cli/src/{engine → workflow}/loopbacks.test.ts +2 -2
- package/cli/src/workflow/loopbacks.ts +42 -0
- package/cli/src/workflow/parallel.ts +64 -0
- package/cli/src/workflow/transitions.test.ts +129 -0
- package/cli/src/{engine → workflow}/transitions.ts +18 -22
- package/package.json +2 -2
- package/skills/auto-kit/SKILL.md +44 -27
- package/skills/cancel-kit/SKILL.md +4 -4
- package/skills/full-kit/SKILL.md +45 -28
- package/skills/pause-kit/SKILL.md +25 -0
- package/skills/resume-kit/SKILL.md +64 -0
- package/skills/wk-bootstrap/SKILL.md +11 -5
- package/skills/wk-build/SKILL.md +12 -11
- package/skills/wk-build/{stages → steps}/commit.md +1 -1
- package/skills/wk-build/{stages → steps}/core.md +3 -3
- package/skills/wk-build/{stages → steps}/integration.md +2 -2
- package/skills/wk-build/{stages → steps}/migration.md +1 -1
- package/skills/wk-build/{stages → steps}/red.md +1 -1
- package/skills/wk-build/{stages → steps}/refactor.md +1 -1
- package/skills/wk-build/{stages → steps}/setup.md +1 -1
- package/skills/wk-build/{stages → steps}/ui.md +1 -1
- package/skills/wk-deploy/SKILL.md +7 -6
- package/skills/wk-deploy/{stages → steps}/merge.md +1 -1
- package/skills/wk-deploy/{stages → steps}/monitor.md +1 -1
- package/skills/wk-deploy/{stages → steps}/remediate.md +1 -1
- package/skills/wk-plan/SKILL.md +15 -14
- package/skills/wk-plan/{stages → steps}/architecture.md +1 -1
- package/skills/wk-plan/{stages → steps}/audit.md +2 -2
- package/skills/wk-plan/{stages → steps}/blueprint.md +2 -2
- package/skills/wk-plan/{stages → steps}/clarify.md +1 -1
- package/skills/wk-plan/{stages → steps}/investigate.md +1 -1
- package/skills/wk-plan/{stages → steps}/scope.md +1 -1
- package/skills/wk-plan/{stages → steps}/sketch.md +1 -1
- package/skills/wk-plan/{stages → steps}/ux-flow.md +1 -1
- package/skills/wk-review/SKILL.md +11 -10
- package/skills/wk-review/{stages → steps}/compliance.md +1 -1
- package/skills/wk-review/{stages → steps}/handoff.md +2 -2
- package/skills/wk-review/{stages → steps}/performance.md +1 -1
- package/skills/wk-review/{stages → steps}/security.md +1 -1
- package/skills/wk-review/{stages → steps}/self-review.md +1 -1
- package/skills/wk-test/SKILL.md +9 -8
- package/skills/wk-test/steps/e2e.md +56 -0
- package/skills/wk-test/{stages → steps}/validate.md +1 -1
- package/skills/wk-test/{stages → steps}/verify.md +1 -1
- package/skills/wk-wrap-up/SKILL.md +19 -5
- package/skills/wk-wrap-up/steps/knowledge.md +76 -0
- package/skills/wk-wrap-up/steps/summary.md +86 -0
- package/cli/src/engine/loopbacks.ts +0 -32
- package/cli/src/engine/parallel.ts +0 -60
- package/cli/src/engine/transitions.test.ts +0 -129
- package/skills/wk-test/stages/e2e.md +0 -53
- /package/cli/src/{engine/phases.ts → workflow/gates.ts} +0 -0
|
@@ -0,0 +1,471 @@
|
|
|
1
|
+
import * as fs from "node:fs";
|
|
2
|
+
import * as path from "node:path";
|
|
3
|
+
import * as crypto from "node:crypto";
|
|
4
|
+
import { KNOWLEDGE_DIR, KNOWLEDGE_LOCK } from "../config/constants.js";
|
|
5
|
+
import { atomicWriteFile } from "./fs.js";
|
|
6
|
+
|
|
7
|
+
// Re-exported so existing call sites that import from here keep working.
|
|
8
|
+
export { KNOWLEDGE_DIR, KNOWLEDGE_LOCK };
|
|
9
|
+
|
|
10
|
+
// ── Constants ───────────────────────────────────────────────────────
|
|
11
|
+
|
|
12
|
+
export const AUTO_BLOCK_START = "<!-- work-kit:auto:start -->";
|
|
13
|
+
export const AUTO_BLOCK_END = "<!-- work-kit:auto:end -->";
|
|
14
|
+
export const MANUAL_HEADER = "## Manual";
|
|
15
|
+
|
|
16
|
+
export const KNOWLEDGE_TYPES = ["lesson", "convention", "risk", "workflow"] as const;
|
|
17
|
+
export type KnowledgeType = (typeof KNOWLEDGE_TYPES)[number];
|
|
18
|
+
|
|
19
|
+
export function isKnowledgeType(value: string): value is KnowledgeType {
|
|
20
|
+
return (KNOWLEDGE_TYPES as readonly string[]).includes(value);
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
// Canonical target file for each knowledge type. The keys double as the
// accepted values for `--type` (see KNOWLEDGE_TYPES above).
const TYPE_TO_FILE: Record<KnowledgeType, string> = {
  lesson: "lessons.md",
  convention: "conventions.md",
  risk: "risks.md",
  workflow: "workflow.md",
};

// H1 title written into each freshly-scaffolded knowledge file.
const FILE_TO_TITLE: Record<string, string> = {
  "lessons.md": "Lessons",
  "conventions.md": "Conventions",
  "risks.md": "Risks",
  "workflow.md": "Workflow Feedback",
};

// One-sentence description placed under the title of each scaffolded file.
const FILE_TO_BLURB: Record<string, string> = {
  "lessons.md":
    "Project-specific learnings discovered while doing work in this codebase.",
  "conventions.md":
    "Codified rules this project follows. Once a convention is here, future sessions should respect it.",
  "risks.md":
    "Known fragile or dangerous areas. Touch these with care.",
  "workflow.md":
    "Feedback about the work-kit workflow itself as observed in this project — skill quality, step skips, loopbacks, failure modes. Mined manually to improve work-kit upstream.",
};
|
|
47
|
+
|
|
48
|
+
// ── Path Resolvers ──────────────────────────────────────────────────
|
|
49
|
+
|
|
50
|
+
export function knowledgeDir(mainRepoRoot: string): string {
|
|
51
|
+
return path.join(mainRepoRoot, KNOWLEDGE_DIR);
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
export function knowledgePath(mainRepoRoot: string, file: string): string {
|
|
55
|
+
return path.join(knowledgeDir(mainRepoRoot), file);
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
export function fileForType(type: KnowledgeType): string {
|
|
59
|
+
return TYPE_TO_FILE[type];
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
// ── Lock ────────────────────────────────────────────────────────────
|
|
63
|
+
|
|
64
|
+
// How long to keep retrying lock acquisition, and how long each retry waits.
const LOCK_TIMEOUT_MS = 5000;
const LOCK_POLL_MS = 50;

// Reused across sleep calls — Atomics.wait needs an Int32Array view but the
// contents don't matter, so we allocate it once per process.
const SLEEP_BUF = new Int32Array(new SharedArrayBuffer(4));

// Synchronous sleep. Atomics.wait blocks until the timeout elapses: the
// buffer value is always 0 and nothing ever notifies, so it never wakes
// early. (Works on Node's main thread; browsers would throw — not a concern
// for this CLI.)
function sleepSync(ms: number): void {
  Atomics.wait(SLEEP_BUF, 0, 0, ms);
}
|
|
73
|
+
|
|
74
|
+
/**
|
|
75
|
+
* Polling file-lock around `<knowledge>/.lock`. Uses fs.openSync(... 'wx')
|
|
76
|
+
* for atomic create-or-fail. Held only during the read-modify-write of a
|
|
77
|
+
* single .md file. Two parallel worktrees calling `learn` simultaneously
|
|
78
|
+
* are serialized — both succeed.
|
|
79
|
+
*/
|
|
80
|
+
export function withKnowledgeLock<T>(mainRepoRoot: string, fn: () => T): T {
|
|
81
|
+
const dir = knowledgeDir(mainRepoRoot);
|
|
82
|
+
if (!fs.existsSync(dir)) {
|
|
83
|
+
fs.mkdirSync(dir, { recursive: true });
|
|
84
|
+
}
|
|
85
|
+
const lockPath = path.join(dir, KNOWLEDGE_LOCK);
|
|
86
|
+
const start = Date.now();
|
|
87
|
+
|
|
88
|
+
while (true) {
|
|
89
|
+
try {
|
|
90
|
+
const fd = fs.openSync(lockPath, "wx");
|
|
91
|
+
try {
|
|
92
|
+
return fn();
|
|
93
|
+
} finally {
|
|
94
|
+
try {
|
|
95
|
+
fs.closeSync(fd);
|
|
96
|
+
} catch {
|
|
97
|
+
// ignore
|
|
98
|
+
}
|
|
99
|
+
try {
|
|
100
|
+
fs.unlinkSync(lockPath);
|
|
101
|
+
} catch {
|
|
102
|
+
// ignore
|
|
103
|
+
}
|
|
104
|
+
}
|
|
105
|
+
} catch (err: any) {
|
|
106
|
+
if (err?.code !== "EEXIST") throw err;
|
|
107
|
+
if (Date.now() - start > LOCK_TIMEOUT_MS) {
|
|
108
|
+
throw new Error(
|
|
109
|
+
`Could not acquire knowledge lock at ${lockPath} within ${LOCK_TIMEOUT_MS}ms. Another work-kit process may be stuck — remove the .lock file if no work-kit process is running.`
|
|
110
|
+
);
|
|
111
|
+
}
|
|
112
|
+
sleepSync(LOCK_POLL_MS);
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
// ── Redaction ───────────────────────────────────────────────────────
|
|
118
|
+
|
|
119
|
+
const SECRET_PATTERNS: { name: string; re: RegExp }[] = [
|
|
120
|
+
{ name: "openai-style", re: /sk-[A-Za-z0-9]{20,}/g },
|
|
121
|
+
{ name: "anthropic", re: /sk-ant-[A-Za-z0-9_-]{20,}/g },
|
|
122
|
+
{ name: "github-pat", re: /github_pat_[A-Za-z0-9_]{82}/g },
|
|
123
|
+
{ name: "github-token", re: /ghp_[A-Za-z0-9]{36}/g },
|
|
124
|
+
{ name: "github-oauth", re: /gho_[A-Za-z0-9]{36}/g },
|
|
125
|
+
{ name: "aws-access-key", re: /AKIA[0-9A-Z]{16}/g },
|
|
126
|
+
// Generic 40-char hex token (matches API keys, hashes, etc.)
|
|
127
|
+
{ name: "hex-40", re: /\b[a-fA-F0-9]{40}\b/g },
|
|
128
|
+
];
|
|
129
|
+
|
|
130
|
+
export interface RedactionResult {
|
|
131
|
+
text: string;
|
|
132
|
+
redacted: boolean;
|
|
133
|
+
matches: string[];
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
export function redact(input: string): RedactionResult {
|
|
137
|
+
let text = input;
|
|
138
|
+
const matches: string[] = [];
|
|
139
|
+
for (const { name, re } of SECRET_PATTERNS) {
|
|
140
|
+
text = text.replace(re, () => {
|
|
141
|
+
matches.push(name);
|
|
142
|
+
return "[REDACTED]";
|
|
143
|
+
});
|
|
144
|
+
}
|
|
145
|
+
return { text, redacted: matches.length > 0, matches };
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
// ── Stub File Scaffolding ───────────────────────────────────────────
|
|
149
|
+
|
|
150
|
+
function stubContent(file: string): string {
|
|
151
|
+
const title = FILE_TO_TITLE[file] ?? file;
|
|
152
|
+
const blurb = FILE_TO_BLURB[file] ?? "";
|
|
153
|
+
return [
|
|
154
|
+
`# ${title}`,
|
|
155
|
+
"",
|
|
156
|
+
blurb,
|
|
157
|
+
"",
|
|
158
|
+
AUTO_BLOCK_START,
|
|
159
|
+
"## Auto-captured",
|
|
160
|
+
"",
|
|
161
|
+
"<!-- Tooling appends new entries inside this block. Do not edit by hand. -->",
|
|
162
|
+
"",
|
|
163
|
+
AUTO_BLOCK_END,
|
|
164
|
+
"",
|
|
165
|
+
MANUAL_HEADER,
|
|
166
|
+
"",
|
|
167
|
+
"<!-- Curated by humans. Tooling never edits below this line. -->",
|
|
168
|
+
"",
|
|
169
|
+
].join("\n");
|
|
170
|
+
}
|
|
171
|
+
|
|
172
|
+
// Dropped into `.work-kit-knowledge/README.md` the first time the directory
// is scaffolded (see ensureKnowledgeDir); never rewritten once present.
const README_CONTENT = `# .work-kit-knowledge

This directory holds project knowledge that work-kit captures and reads
across sessions. It is **committed to your repo** so the whole team
benefits.

## Files

- **lessons.md** — things you learned about this codebase (project-specific).
- **conventions.md** — codified rules this project follows.
- **risks.md** — fragile or dangerous areas to handle with care.
- **workflow.md** — feedback about the work-kit workflow itself as observed
  in this project. Mined manually across projects to improve work-kit.

Each file has two sections:

- **Auto-captured** — appended by work-kit during \`wrap-up/knowledge\` and
  by \`work-kit learn\`. Inside \`<!-- work-kit:auto:start -->\` markers.
  **Do not edit by hand.**
- **Manual** — for humans only. Tooling never touches it. Add curated rules
  here.

## Privacy warning

Files in this directory are committed to your repo. **Don't write secrets
here.** Work-kit redacts known secret shapes (API keys, tokens) at write
time, but the regex sweep is best-effort. Treat these files like any other
source you commit.

## How is this populated?

- During a session, agents append typed bullets to \`## Observations\` in
  \`.work-kit/state.md\`.
- At \`wrap-up/knowledge\`, the kit parses Observations + Decisions +
  Deviations + tracker.json loopbacks and routes them to the four files.
- Agents may also call \`work-kit learn --type X --text "..."\` mid-session.

## Reading

\`work-kit bootstrap\` injects \`lessons.md\`, \`conventions.md\`, and
\`risks.md\` into every new session's opening context. \`workflow.md\` is
**not** injected — it's a write-only artifact for human review.
`;
|
|
215
|
+
|
|
216
|
+
// Roots whose knowledge dir we've already verified this process. Lets
|
|
217
|
+
// repeated calls (one per `learn`/`extract` invocation) skip the 6 stat
|
|
218
|
+
// calls after the first hit per process.
|
|
219
|
+
const ensuredRoots = new Set<string>();
|
|
220
|
+
|
|
221
|
+
export function ensureKnowledgeDir(mainRepoRoot: string): { created: string[] } {
|
|
222
|
+
if (ensuredRoots.has(mainRepoRoot)) {
|
|
223
|
+
return { created: [] };
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
const dir = knowledgeDir(mainRepoRoot);
|
|
227
|
+
const created: string[] = [];
|
|
228
|
+
|
|
229
|
+
if (!fs.existsSync(dir)) {
|
|
230
|
+
fs.mkdirSync(dir, { recursive: true });
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
const readmePath = path.join(dir, "README.md");
|
|
234
|
+
if (!fs.existsSync(readmePath)) {
|
|
235
|
+
fs.writeFileSync(readmePath, README_CONTENT, "utf-8");
|
|
236
|
+
created.push("README.md");
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
for (const file of Object.values(TYPE_TO_FILE)) {
|
|
240
|
+
const p = path.join(dir, file);
|
|
241
|
+
if (!fs.existsSync(p)) {
|
|
242
|
+
fs.writeFileSync(p, stubContent(file), "utf-8");
|
|
243
|
+
created.push(file);
|
|
244
|
+
}
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
ensuredRoots.add(mainRepoRoot);
|
|
248
|
+
return { created };
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
// ── Append / Read ───────────────────────────────────────────────────
|
|
252
|
+
|
|
253
|
+
export interface KnowledgeEntry {
|
|
254
|
+
/** ISO timestamp */
|
|
255
|
+
ts: string;
|
|
256
|
+
sessionSlug?: string;
|
|
257
|
+
phase?: string;
|
|
258
|
+
step?: string;
|
|
259
|
+
skillPath?: string;
|
|
260
|
+
gitSha?: string;
|
|
261
|
+
/** "auto-state-md" | "auto-tracker" | "explicit-cli" */
|
|
262
|
+
source: string;
|
|
263
|
+
/** Free-form text. Will be redacted at write time. */
|
|
264
|
+
text: string;
|
|
265
|
+
/** Optional path glob for future filtering. Stored, not yet used. */
|
|
266
|
+
scope?: string;
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
/** Format an entry as a single markdown bullet inside the auto block. */
|
|
270
|
+
function formatEntry(entry: KnowledgeEntry): string {
|
|
271
|
+
const date = entry.ts.slice(0, 10);
|
|
272
|
+
const ctx: string[] = [];
|
|
273
|
+
if (entry.sessionSlug) ctx.push(`\`${entry.sessionSlug}\``);
|
|
274
|
+
if (entry.phase && entry.step) ctx.push(`(${entry.phase}/${entry.step})`);
|
|
275
|
+
else if (entry.phase) ctx.push(`(${entry.phase})`);
|
|
276
|
+
const ctxStr = ctx.length > 0 ? ` ${ctx.join(" ")}` : "";
|
|
277
|
+
const scopeStr = entry.scope ? ` _scope: \`${entry.scope}\`_` : "";
|
|
278
|
+
return `- **${date}**${ctxStr}: ${entry.text}${scopeStr}`;
|
|
279
|
+
}
|
|
280
|
+
|
|
281
|
+
/** Stable hash of an entry's identifying content (for idempotent dedup). */
|
|
282
|
+
function entryHash(entry: KnowledgeEntry): string {
|
|
283
|
+
const key = JSON.stringify({
|
|
284
|
+
text: entry.text,
|
|
285
|
+
phase: entry.phase ?? null,
|
|
286
|
+
step: entry.step ?? null,
|
|
287
|
+
source: entry.source,
|
|
288
|
+
});
|
|
289
|
+
return crypto.createHash("sha1").update(key).digest("hex").slice(0, 12);
|
|
290
|
+
}
|
|
291
|
+
|
|
292
|
+
/**
|
|
293
|
+
* Read a knowledge file. Returns null if it doesn't exist. If markers are
|
|
294
|
+
* missing or corrupted, the raw content is returned and append rebuilds them.
|
|
295
|
+
*/
|
|
296
|
+
function readKnowledgeFileRaw(mainRepoRoot: string, file: string): string | null {
|
|
297
|
+
const p = knowledgePath(mainRepoRoot, file);
|
|
298
|
+
try {
|
|
299
|
+
return fs.readFileSync(p, "utf-8");
|
|
300
|
+
} catch (err: any) {
|
|
301
|
+
if (err?.code === "ENOENT") return null;
|
|
302
|
+
throw err;
|
|
303
|
+
}
|
|
304
|
+
}
|
|
305
|
+
|
|
306
|
+
/**
|
|
307
|
+
* Read a knowledge file capped at `capLines` lines for bootstrap injection.
|
|
308
|
+
* Strategy: include the entire `## Manual` section + the most recent N
|
|
309
|
+
* entries from the auto-captured block until the cap is hit. If still over,
|
|
310
|
+
* append a "(N more entries)" tail.
|
|
311
|
+
*/
|
|
312
|
+
export function readKnowledgeFile(
|
|
313
|
+
mainRepoRoot: string,
|
|
314
|
+
file: string,
|
|
315
|
+
capLines: number = 200
|
|
316
|
+
): string | null {
|
|
317
|
+
const raw = readKnowledgeFileRaw(mainRepoRoot, file);
|
|
318
|
+
if (raw === null) return null;
|
|
319
|
+
|
|
320
|
+
const lines = raw.split("\n");
|
|
321
|
+
if (lines.length <= capLines) return raw;
|
|
322
|
+
|
|
323
|
+
// Find the auto block and manual section markers.
|
|
324
|
+
const autoStart = lines.findIndex((l) => l.includes(AUTO_BLOCK_START));
|
|
325
|
+
const autoEnd = lines.findIndex((l) => l.includes(AUTO_BLOCK_END));
|
|
326
|
+
const manualIdx = lines.findIndex((l) => l.trim() === MANUAL_HEADER);
|
|
327
|
+
|
|
328
|
+
if (autoStart === -1 || autoEnd === -1) {
|
|
329
|
+
// Markers missing — just truncate from the top.
|
|
330
|
+
return lines.slice(0, capLines).concat([`... (${lines.length - capLines} more lines)`]).join("\n");
|
|
331
|
+
}
|
|
332
|
+
|
|
333
|
+
const headerLines = lines.slice(0, autoStart + 1);
|
|
334
|
+
const autoBodyLines = lines.slice(autoStart + 1, autoEnd);
|
|
335
|
+
const autoCloseLine = lines[autoEnd];
|
|
336
|
+
const manualLines = manualIdx !== -1 ? lines.slice(manualIdx) : [];
|
|
337
|
+
|
|
338
|
+
// Reserve budget for header + manual + auto markers + safety
|
|
339
|
+
const reserved = headerLines.length + manualLines.length + 2;
|
|
340
|
+
let autoBudget = Math.max(5, capLines - reserved);
|
|
341
|
+
|
|
342
|
+
let truncatedAuto = autoBodyLines;
|
|
343
|
+
let omitted = 0;
|
|
344
|
+
if (autoBodyLines.length > autoBudget) {
|
|
345
|
+
truncatedAuto = autoBodyLines.slice(autoBodyLines.length - autoBudget);
|
|
346
|
+
omitted = autoBodyLines.length - autoBudget;
|
|
347
|
+
}
|
|
348
|
+
|
|
349
|
+
const out = [
|
|
350
|
+
...headerLines,
|
|
351
|
+
...(omitted > 0 ? [`<!-- ... ${omitted} older auto entries truncated for context budget ... -->`] : []),
|
|
352
|
+
...truncatedAuto,
|
|
353
|
+
autoCloseLine,
|
|
354
|
+
...manualLines,
|
|
355
|
+
];
|
|
356
|
+
return out.join("\n");
|
|
357
|
+
}
|
|
358
|
+
|
|
359
|
+
/**
|
|
360
|
+
* Read a knowledge file's current content (or a fresh stub) and ensure the
|
|
361
|
+
* auto-block markers are present. If markers were missing, existing content
|
|
362
|
+
* is rebased under the new stub.
|
|
363
|
+
*/
|
|
364
|
+
function loadOrStub(mainRepoRoot: string, file: string): string {
|
|
365
|
+
let content = readKnowledgeFileRaw(mainRepoRoot, file) ?? stubContent(file);
|
|
366
|
+
|
|
367
|
+
if (!content.includes(AUTO_BLOCK_START) || !content.includes(AUTO_BLOCK_END)) {
|
|
368
|
+
const existing = content.trim();
|
|
369
|
+
content = stubContent(file);
|
|
370
|
+
if (existing.length > 0 && !existing.startsWith(`# ${FILE_TO_TITLE[file]}`)) {
|
|
371
|
+
content = content.trimEnd() + "\n" + existing + "\n";
|
|
372
|
+
}
|
|
373
|
+
}
|
|
374
|
+
return content;
|
|
375
|
+
}
|
|
376
|
+
|
|
377
|
+
/**
|
|
378
|
+
* Insert a new entry into `content` just before the auto-block close marker.
|
|
379
|
+
* Returns the new content, or null if the entry's hash is already present
|
|
380
|
+
* (idempotent skip).
|
|
381
|
+
*/
|
|
382
|
+
function insertEntry(content: string, entry: KnowledgeEntry): string | null {
|
|
383
|
+
const hashMarker = `<!-- hash:${entryHash(entry)} -->`;
|
|
384
|
+
const startIdx = content.indexOf(AUTO_BLOCK_START);
|
|
385
|
+
const endIdx = content.indexOf(AUTO_BLOCK_END);
|
|
386
|
+
|
|
387
|
+
if (startIdx !== -1 && endIdx !== -1) {
|
|
388
|
+
if (content.indexOf(hashMarker, startIdx) > -1 && content.indexOf(hashMarker, startIdx) < endIdx) {
|
|
389
|
+
return null;
|
|
390
|
+
}
|
|
391
|
+
}
|
|
392
|
+
|
|
393
|
+
const formatted = formatEntry(entry) + ` ${hashMarker}`;
|
|
394
|
+
const before = content.slice(0, endIdx);
|
|
395
|
+
const after = content.slice(endIdx);
|
|
396
|
+
// Ensure newline before the close marker so bullets don't fuse onto its line
|
|
397
|
+
const sep = before.endsWith("\n") ? "" : "\n";
|
|
398
|
+
return before + sep + formatted + "\n" + after;
|
|
399
|
+
}
|
|
400
|
+
|
|
401
|
+
/**
|
|
402
|
+
* Append an entry to a knowledge file's auto-captured block. Read-modify-write
|
|
403
|
+
* inside the lock. Idempotent: if an identical entry (by hash) already exists
|
|
404
|
+
* in the auto block, the write is skipped.
|
|
405
|
+
*
|
|
406
|
+
* Returns true if a new entry was appended, false if it was a duplicate.
|
|
407
|
+
*
|
|
408
|
+
* For multiple entries to the same file, prefer `appendAutoEntries` to do
|
|
409
|
+
* a single read-modify-write per file.
|
|
410
|
+
*/
|
|
411
|
+
export function appendAutoEntry(
|
|
412
|
+
mainRepoRoot: string,
|
|
413
|
+
file: string,
|
|
414
|
+
entry: KnowledgeEntry
|
|
415
|
+
): boolean {
|
|
416
|
+
return withKnowledgeLock(mainRepoRoot, () => {
|
|
417
|
+
const content = loadOrStub(mainRepoRoot, file);
|
|
418
|
+
const next = insertEntry(content, entry);
|
|
419
|
+
if (next === null) return false;
|
|
420
|
+
atomicWriteFile(knowledgePath(mainRepoRoot, file), next);
|
|
421
|
+
return true;
|
|
422
|
+
});
|
|
423
|
+
}
|
|
424
|
+
|
|
425
|
+
export interface AppendBatchResult {
|
|
426
|
+
written: number;
|
|
427
|
+
duplicates: number;
|
|
428
|
+
/** Per-file counts so callers can map back to whatever taxonomy they care about. */
|
|
429
|
+
perFile: Map<string, { written: number; duplicates: number }>;
|
|
430
|
+
}
|
|
431
|
+
|
|
432
|
+
/**
|
|
433
|
+
* Batched version of `appendAutoEntry`. Groups entries by file and does one
|
|
434
|
+
* read-modify-write per file under a single lock acquisition.
|
|
435
|
+
*/
|
|
436
|
+
export function appendAutoEntries(
|
|
437
|
+
mainRepoRoot: string,
|
|
438
|
+
entriesByFile: Map<string, KnowledgeEntry[]>
|
|
439
|
+
): AppendBatchResult {
|
|
440
|
+
return withKnowledgeLock(mainRepoRoot, () => {
|
|
441
|
+
const perFile = new Map<string, { written: number; duplicates: number }>();
|
|
442
|
+
let written = 0;
|
|
443
|
+
let duplicates = 0;
|
|
444
|
+
|
|
445
|
+
for (const [file, entries] of entriesByFile) {
|
|
446
|
+
let content = loadOrStub(mainRepoRoot, file);
|
|
447
|
+
let fileWritten = 0;
|
|
448
|
+
let fileDuplicates = 0;
|
|
449
|
+
|
|
450
|
+
for (const entry of entries) {
|
|
451
|
+
const next = insertEntry(content, entry);
|
|
452
|
+
if (next === null) {
|
|
453
|
+
fileDuplicates++;
|
|
454
|
+
} else {
|
|
455
|
+
content = next;
|
|
456
|
+
fileWritten++;
|
|
457
|
+
}
|
|
458
|
+
}
|
|
459
|
+
|
|
460
|
+
if (fileWritten > 0) {
|
|
461
|
+
atomicWriteFile(knowledgePath(mainRepoRoot, file), content);
|
|
462
|
+
}
|
|
463
|
+
|
|
464
|
+
perFile.set(file, { written: fileWritten, duplicates: fileDuplicates });
|
|
465
|
+
written += fileWritten;
|
|
466
|
+
duplicates += fileDuplicates;
|
|
467
|
+
}
|
|
468
|
+
|
|
469
|
+
return { written, duplicates, perFile };
|
|
470
|
+
});
|
|
471
|
+
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
/** Difference in ms between two ISO timestamps; 0 on missing/invalid input. */
|
|
2
|
+
export function durationMs(start?: string, end?: string): number {
|
|
3
|
+
if (!start || !end) return 0;
|
|
4
|
+
const a = new Date(start).getTime();
|
|
5
|
+
const b = new Date(end).getTime();
|
|
6
|
+
if (isNaN(a) || isNaN(b)) return 0;
|
|
7
|
+
return Math.max(0, b - a);
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
/** Human-readable duration: "12s", "5m", "2h30m", or "—" for non-positive. */
|
|
11
|
+
export function formatDurationMs(ms: number): string {
|
|
12
|
+
if (ms <= 0) return "—";
|
|
13
|
+
const sec = Math.round(ms / 1000);
|
|
14
|
+
if (sec < 60) return `${sec}s`;
|
|
15
|
+
const min = Math.round(sec / 60);
|
|
16
|
+
if (min < 60) return `${min}m`;
|
|
17
|
+
const hr = Math.floor(min / 60);
|
|
18
|
+
const rem = min % 60;
|
|
19
|
+
return rem > 0 ? `${hr}h${rem}m` : `${hr}h`;
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
/** Elapsed since `start` formatted with `formatDurationMs`. Returns "" on bad input. */
|
|
23
|
+
export function formatDurationSince(start: string): string {
|
|
24
|
+
const startMs = new Date(start).getTime();
|
|
25
|
+
if (isNaN(startMs)) return "";
|
|
26
|
+
return formatDurationMs(Date.now() - startMs);
|
|
27
|
+
}
|
|
@@ -6,7 +6,7 @@ describe("checkLoopback", () => {
|
|
|
6
6
|
it("plan/audit with 'revise' loops back to plan/blueprint", () => {
|
|
7
7
|
const result = checkLoopback("plan", "audit", "revise");
|
|
8
8
|
assert.notEqual(result, null);
|
|
9
|
-
assert.deepStrictEqual(result!.to, { phase: "plan",
|
|
9
|
+
assert.deepStrictEqual(result!.to, { phase: "plan", step: "blueprint" });
|
|
10
10
|
assert.ok(result!.reason.length > 0);
|
|
11
11
|
});
|
|
12
12
|
|
|
@@ -18,7 +18,7 @@ describe("checkLoopback", () => {
|
|
|
18
18
|
it("review/handoff with 'changes_requested' loops back to build/core", () => {
|
|
19
19
|
const result = checkLoopback("review", "handoff", "changes_requested");
|
|
20
20
|
assert.notEqual(result, null);
|
|
21
|
-
assert.deepStrictEqual(result!.to, { phase: "build",
|
|
21
|
+
assert.deepStrictEqual(result!.to, { phase: "build", step: "core" });
|
|
22
22
|
});
|
|
23
23
|
|
|
24
24
|
it("build/core with 'done' returns null", () => {
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import { PhaseName, Location, LoopbackRecord, StepOutcome } from "../state/schema.js";
|
|
2
|
+
import { LOOPBACK_ROUTES } from "../config/loopback-routes.js";
|
|
3
|
+
|
|
4
|
+
interface LoopbackResult {
|
|
5
|
+
to: Location;
|
|
6
|
+
reason: string;
|
|
7
|
+
}
|
|
8
|
+
|
|
9
|
+
/**
|
|
10
|
+
* Count how many times a specific loopback route has been taken.
|
|
11
|
+
*/
|
|
12
|
+
export function countLoopbacksForRoute(loopbacks: LoopbackRecord[], from: Location, to: Location): number {
|
|
13
|
+
return loopbacks.filter(
|
|
14
|
+
(lb) => lb.from.phase === from.phase && lb.from.step === from.step
|
|
15
|
+
&& lb.to.phase === to.phase && lb.to.step === to.step
|
|
16
|
+
).length;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
/**
|
|
20
|
+
* Check if completing a step with a given outcome should trigger a loop-back.
|
|
21
|
+
*/
|
|
22
|
+
export function checkLoopback(
|
|
23
|
+
phase: PhaseName,
|
|
24
|
+
step: string,
|
|
25
|
+
outcome?: StepOutcome
|
|
26
|
+
): LoopbackResult | null {
|
|
27
|
+
if (!outcome) return null;
|
|
28
|
+
|
|
29
|
+
const route = LOOPBACK_ROUTES.find(
|
|
30
|
+
(r) =>
|
|
31
|
+
r.from.phase === phase &&
|
|
32
|
+
r.from.step === step &&
|
|
33
|
+
r.triggerOutcome === outcome
|
|
34
|
+
);
|
|
35
|
+
|
|
36
|
+
if (!route) return null;
|
|
37
|
+
|
|
38
|
+
return {
|
|
39
|
+
to: route.to,
|
|
40
|
+
reason: route.reason,
|
|
41
|
+
};
|
|
42
|
+
}
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
import type { PhaseName, WorkKitState } from "../state/schema.js";
|
|
2
|
+
import { loadProjectConfig } from "../config/project-config.js";
|
|
3
|
+
|
|
4
|
+
/**
 * Defines which steps run in parallel and which runs sequentially after.
 */
export interface ParallelGroup {
  parallel: string[]; // steps that run concurrently
  thenSequential?: string; // step that runs after all parallel complete
}

/**
 * Default parallel groups per phase. Most projects should not need to override
 * these — the defaults reflect the canonical work-kit pipeline.
 */
export const DEFAULT_PARALLEL_GROUPS: Record<string, ParallelGroup> = {
  // test phase: verify and e2e run side by side; validate consumes both.
  test: {
    parallel: ["verify", "e2e"],
    thenSequential: "validate",
  },
  // review phase: the four review lenses are independent of one another;
  // handoff runs once all of them finish.
  review: {
    parallel: ["self-review", "security", "performance", "compliance"],
    thenSequential: "handoff",
  },
};
|
|
26
|
+
|
|
27
|
+
/**
|
|
28
|
+
* Resolve parallel groups for a project, merging defaults with optional
|
|
29
|
+
* project config overrides at `<mainRepoRoot>/.work-kit-config.json`.
|
|
30
|
+
*/
|
|
31
|
+
export function resolveParallelGroups(mainRepoRoot?: string): Record<string, ParallelGroup> {
|
|
32
|
+
if (!mainRepoRoot) return DEFAULT_PARALLEL_GROUPS;
|
|
33
|
+
const config = loadProjectConfig(mainRepoRoot);
|
|
34
|
+
if (!config.parallel || Object.keys(config.parallel).length === 0) {
|
|
35
|
+
return DEFAULT_PARALLEL_GROUPS;
|
|
36
|
+
}
|
|
37
|
+
return { ...DEFAULT_PARALLEL_GROUPS, ...config.parallel };
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
/**
|
|
41
|
+
* Check if a step triggers a parallel group.
|
|
42
|
+
* Triggers on the first non-skipped, non-completed parallel member.
|
|
43
|
+
*/
|
|
44
|
+
export function getParallelGroup(phase: PhaseName, step: string, state?: WorkKitState): ParallelGroup | null {
|
|
45
|
+
const groups = resolveParallelGroups(state?.metadata?.mainRepoRoot);
|
|
46
|
+
const group = groups[phase];
|
|
47
|
+
if (!group) return null;
|
|
48
|
+
|
|
49
|
+
if (!group.parallel.includes(step)) return null;
|
|
50
|
+
|
|
51
|
+
if (state) {
|
|
52
|
+
const phaseState = state.phases[phase];
|
|
53
|
+
const firstActive = group.parallel.find((s) => {
|
|
54
|
+
const sState = phaseState?.steps[s];
|
|
55
|
+
return sState && sState.status !== "skipped" && sState.status !== "completed";
|
|
56
|
+
});
|
|
57
|
+
if (firstActive !== step) return null;
|
|
58
|
+
} else {
|
|
59
|
+
if (group.parallel[0] !== step) return null;
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
return group;
|
|
63
|
+
}
|
|
64
|
+
|