@workflow-cannon/workspace-kit 0.7.0 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/README.md +5 -4
  2. package/dist/cli/run-command.d.ts +11 -0
  3. package/dist/cli/run-command.js +138 -0
  4. package/dist/cli.js +18 -135
  5. package/dist/contracts/index.d.ts +1 -1
  6. package/dist/contracts/module-contract.d.ts +13 -0
  7. package/dist/core/config-cli.js +4 -4
  8. package/dist/core/config-metadata.js +199 -5
  9. package/dist/core/index.d.ts +6 -0
  10. package/dist/core/index.js +6 -0
  11. package/dist/core/instruction-template-mapper.d.ts +9 -0
  12. package/dist/core/instruction-template-mapper.js +35 -0
  13. package/dist/core/lineage-contract.d.ts +1 -1
  14. package/dist/core/lineage-contract.js +1 -1
  15. package/dist/core/policy.d.ts +13 -2
  16. package/dist/core/policy.js +42 -25
  17. package/dist/core/response-template-contract.d.ts +15 -0
  18. package/dist/core/response-template-contract.js +10 -0
  19. package/dist/core/response-template-registry.d.ts +4 -0
  20. package/dist/core/response-template-registry.js +44 -0
  21. package/dist/core/response-template-shaping.d.ts +6 -0
  22. package/dist/core/response-template-shaping.js +128 -0
  23. package/dist/core/session-policy.d.ts +18 -0
  24. package/dist/core/session-policy.js +57 -0
  25. package/dist/core/transcript-completion-hook.d.ts +7 -0
  26. package/dist/core/transcript-completion-hook.js +128 -0
  27. package/dist/core/workspace-kit-config.d.ts +2 -1
  28. package/dist/core/workspace-kit-config.js +19 -23
  29. package/dist/modules/approvals/index.js +2 -2
  30. package/dist/modules/documentation/runtime.js +413 -20
  31. package/dist/modules/improvement/generate-recommendations-runtime.d.ts +7 -0
  32. package/dist/modules/improvement/generate-recommendations-runtime.js +37 -4
  33. package/dist/modules/improvement/improvement-state.d.ts +10 -1
  34. package/dist/modules/improvement/improvement-state.js +36 -7
  35. package/dist/modules/improvement/index.js +70 -23
  36. package/dist/modules/improvement/ingest.js +2 -1
  37. package/dist/modules/improvement/transcript-redaction.d.ts +4 -0
  38. package/dist/modules/improvement/transcript-redaction.js +10 -0
  39. package/dist/modules/improvement/transcript-sync-runtime.d.ts +42 -1
  40. package/dist/modules/improvement/transcript-sync-runtime.js +215 -9
  41. package/dist/modules/index.d.ts +1 -1
  42. package/dist/modules/index.js +1 -1
  43. package/dist/modules/task-engine/index.d.ts +0 -2
  44. package/dist/modules/task-engine/index.js +4 -78
  45. package/package.json +6 -2
  46. package/src/modules/documentation/README.md +39 -0
  47. package/src/modules/documentation/RULES.md +70 -0
  48. package/src/modules/documentation/config.md +14 -0
  49. package/src/modules/documentation/index.ts +120 -0
  50. package/src/modules/documentation/instructions/document-project.md +44 -0
  51. package/src/modules/documentation/instructions/documentation-maintainer.md +81 -0
  52. package/src/modules/documentation/instructions/generate-document.md +44 -0
  53. package/src/modules/documentation/runtime.ts +870 -0
  54. package/src/modules/documentation/schemas/documentation-schema.md +54 -0
  55. package/src/modules/documentation/state.md +8 -0
  56. package/src/modules/documentation/templates/AGENTS.md +84 -0
  57. package/src/modules/documentation/templates/ARCHITECTURE.md +71 -0
  58. package/src/modules/documentation/templates/PRINCIPLES.md +122 -0
  59. package/src/modules/documentation/templates/RELEASING.md +96 -0
  60. package/src/modules/documentation/templates/ROADMAP.md +131 -0
  61. package/src/modules/documentation/templates/SECURITY.md +53 -0
  62. package/src/modules/documentation/templates/SUPPORT.md +40 -0
  63. package/src/modules/documentation/templates/TERMS.md +61 -0
  64. package/src/modules/documentation/templates/runbooks/consumer-cadence.md +55 -0
  65. package/src/modules/documentation/templates/runbooks/parity-validation-flow.md +68 -0
  66. package/src/modules/documentation/templates/runbooks/release-channels.md +30 -0
  67. package/src/modules/documentation/templates/workbooks/phase2-config-policy-workbook.md +42 -0
  68. package/src/modules/documentation/templates/workbooks/task-engine-workbook.md +42 -0
  69. package/src/modules/documentation/templates/workbooks/transcript-automation-baseline.md +68 -0
  70. package/src/modules/documentation/types.ts +51 -0
  71. package/dist/modules/task-engine/generator.d.ts +0 -3
  72. package/dist/modules/task-engine/generator.js +0 -118
  73. package/dist/modules/task-engine/importer.d.ts +0 -8
  74. package/dist/modules/task-engine/importer.js +0 -163
@@ -1,7 +1,8 @@
1
1
  import { mkdir, readFile, writeFile } from "node:fs/promises";
2
2
  import { existsSync } from "node:fs";
3
- import { resolve, sep } from "node:path";
3
+ import { dirname, resolve, sep } from "node:path";
4
4
  import { readdir } from "node:fs/promises";
5
+ import { fileURLToPath } from "node:url";
5
6
  function isPathWithinRoot(path, root) {
6
7
  return path === root || path.startsWith(`${root}${sep}`);
7
8
  }
@@ -12,8 +13,30 @@ function parseDefaultValue(fileContent, key, fallback) {
12
13
  return match?.[1] ?? fallback;
13
14
  }
14
15
  async function loadRuntimeConfig(workspacePath) {
15
- const configPath = resolve(workspacePath, "src/modules/documentation/config.md");
16
- const configContent = await readFile(configPath, "utf8");
16
+ const runtimeSourceRoot = resolve(dirname(fileURLToPath(import.meta.url)), "..", "..", "..");
17
+ const sourceRoots = [workspacePath, runtimeSourceRoot];
18
+ let sourceRoot = workspacePath;
19
+ let configContent;
20
+ for (const candidateRoot of sourceRoots) {
21
+ const candidate = resolve(candidateRoot, "src/modules/documentation/config.md");
22
+ if (!existsSync(candidate)) {
23
+ continue;
24
+ }
25
+ configContent = await readFile(candidate, "utf8");
26
+ sourceRoot = candidateRoot;
27
+ break;
28
+ }
29
+ if (!configContent) {
30
+ return {
31
+ aiRoot: "/.ai",
32
+ humanRoot: "docs/maintainers",
33
+ templatesRoot: "src/modules/documentation/templates",
34
+ instructionsRoot: "src/modules/documentation/instructions",
35
+ schemasRoot: "src/modules/documentation/schemas",
36
+ maxValidationAttempts: 3,
37
+ sourceRoot
38
+ };
39
+ }
17
40
  const aiRoot = parseDefaultValue(configContent, "sources.aiRoot", "/.ai");
18
41
  const humanRoot = parseDefaultValue(configContent, "sources.humanRoot", "docs/maintainers");
19
42
  const templatesRoot = parseDefaultValue(configContent, "sources.templatesRoot", "src/modules/documentation/templates");
@@ -27,17 +50,340 @@ async function loadRuntimeConfig(workspacePath) {
27
50
  templatesRoot,
28
51
  instructionsRoot,
29
52
  schemasRoot,
30
- maxValidationAttempts: Number.isFinite(maxValidationAttempts) ? maxValidationAttempts : 3
53
+ maxValidationAttempts: Number.isFinite(maxValidationAttempts) ? maxValidationAttempts : 3,
54
+ sourceRoot
31
55
  };
32
56
  }
33
- function validateAiSchema(aiOutput) {
57
+ function parseAiRecordLine(line) {
58
+ const trimmed = line.trim();
59
+ if (!trimmed || trimmed.startsWith("#"))
60
+ return null;
61
+ const parts = trimmed.split("|");
62
+ // Record format is `type|token|token...`. Ignore non-record markdown lines.
63
+ if (parts.length < 2)
64
+ return null;
65
+ const type = parts[0] ?? "";
66
+ if (!type)
67
+ return null;
68
+ const positional = [];
69
+ const kv = {};
70
+ for (const token of parts.slice(1)) {
71
+ if (!token)
72
+ continue;
73
+ const idx = token.indexOf("=");
74
+ if (idx >= 0) {
75
+ const k = token.slice(0, idx).trim();
76
+ const v = token.slice(idx + 1).trim();
77
+ if (!k)
78
+ continue;
79
+ kv[k] = v;
80
+ }
81
+ else {
82
+ positional.push(token);
83
+ }
84
+ }
85
+ return { type, positional, kv, raw: line };
86
+ }
87
+ function isAllowedMetaDoc(doc) {
88
+ return (doc === "rules" ||
89
+ doc === "runbook" ||
90
+ doc === "workbook" ||
91
+ doc === "generator" ||
92
+ doc === "map" ||
93
+ doc === "workflows" ||
94
+ doc === "commands" ||
95
+ doc === "decisions" ||
96
+ doc === "glossary" ||
97
+ doc === "observed" ||
98
+ doc === "planned" ||
99
+ doc === "checks" ||
100
+ doc === "manifest");
101
+ }
102
+ function validateAiSchema(aiOutput, ctx) {
34
103
  const issues = [];
35
- const lines = aiOutput.split("\n").filter((line) => line.trim().length > 0);
36
- if (!lines[0]?.startsWith("meta|v=")) {
104
+ const lines = aiOutput.split("\n").map((l) => l.trim()).filter((l) => l.length > 0);
105
+ if (lines.length === 0) {
106
+ return [
107
+ {
108
+ check: "schema",
109
+ message: "AI output is empty",
110
+ resolved: false,
111
+ }
112
+ ];
113
+ }
114
+ const metaLine = lines[0];
115
+ const meta = parseAiRecordLine(metaLine);
116
+ if (!meta || meta.type !== "meta") {
117
+ return [
118
+ {
119
+ check: "schema",
120
+ message: "AI output must start with a meta record",
121
+ resolved: false,
122
+ }
123
+ ];
124
+ }
125
+ const v = meta.kv["v"];
126
+ const doc = meta.kv["doc"];
127
+ const truth = meta.kv["truth"];
128
+ const st = meta.kv["st"];
129
+ if (v !== "1") {
130
+ issues.push({
131
+ check: "schema",
132
+ message: "AI meta schemaVersion must be v=1",
133
+ resolved: false,
134
+ });
135
+ }
136
+ if (!doc || !isAllowedMetaDoc(doc)) {
137
+ issues.push({
138
+ check: "schema",
139
+ message: `Unsupported meta.doc '${doc ?? ""}'`,
140
+ resolved: false,
141
+ });
142
+ }
143
+ if (!truth || truth.length === 0) {
144
+ issues.push({
145
+ check: "schema",
146
+ message: "AI meta.truth is required",
147
+ resolved: false,
148
+ });
149
+ }
150
+ if (!st || st.length === 0) {
151
+ issues.push({
152
+ check: "schema",
153
+ message: "AI meta.st is required",
154
+ resolved: false,
155
+ });
156
+ }
157
+ if (ctx.expectedDoc && doc && ctx.expectedDoc !== doc) {
158
+ issues.push({
159
+ check: "schema",
160
+ message: `meta.doc '${doc}' does not match expected doc family for '${ctx.expectedDoc}'`,
161
+ resolved: !ctx.strict,
162
+ });
163
+ }
164
+ const requireActiveRecords = st === "active";
165
+ const allowedTypes = new Set([
166
+ // Global ai record families used across .ai/*.
167
+ "project",
168
+ "stack",
169
+ "prio",
170
+ "ref",
171
+ "rule",
172
+ "check",
173
+ "path",
174
+ "role",
175
+ "has",
176
+ "xhas",
177
+ "deps",
178
+ "xdeps",
179
+ "module",
180
+ "wf",
181
+ "cmd",
182
+ "decision",
183
+ "term",
184
+ "observed",
185
+ "planned",
186
+ "map",
187
+ // Runbooks
188
+ "runbook",
189
+ "intent",
190
+ "chain",
191
+ "artifact",
192
+ "state",
193
+ "transition",
194
+ "promotion",
195
+ "rollback",
196
+ // Workbooks
197
+ "workbook",
198
+ "scope",
199
+ "command",
200
+ "config",
201
+ "cadence",
202
+ "guardrail",
203
+ ]);
204
+ const presentByType = {};
205
+ const missingRequired = [];
206
+ for (const line of lines.slice(1)) {
207
+ const rec = parseAiRecordLine(line);
208
+ if (!rec)
209
+ continue;
210
+ presentByType[rec.type] = true;
211
+ if (!allowedTypes.has(rec.type)) {
212
+ issues.push({
213
+ check: "schema",
214
+ message: `Unknown AI record type '${rec.type}'`,
215
+ resolved: !ctx.strict,
216
+ });
217
+ continue;
218
+ }
219
+ // Minimal record-level validation for current runbook/workbook families.
220
+ if (rec.type === "ref") {
221
+ const p = rec.kv["path"];
222
+ const n = rec.kv["name"];
223
+ if (!p || !n) {
224
+ issues.push({
225
+ check: "schema",
226
+ message: "ref records require both 'name' and 'path'",
227
+ resolved: !ctx.strict,
228
+ });
229
+ }
230
+ else {
231
+ const abs = resolve(ctx.workspacePath, p);
232
+ const ok = existsSync(abs);
233
+ if (!ok) {
234
+ issues.push({
235
+ check: "schema",
236
+ message: `ref.path does not exist: '${p}'`,
237
+ resolved: !ctx.strict,
238
+ });
239
+ }
240
+ }
241
+ continue;
242
+ }
243
+ if (rec.type === "rule") {
244
+ const rid = rec.positional[0];
245
+ const lvl = rec.positional[1] ?? rec.kv["lvl"];
246
+ const directive = (() => {
247
+ // rule lines can be either:
248
+ // rule|RID|lvl|scope|directive|...
249
+ // or the scope can be omitted:
250
+ // rule|RID|lvl|directive|...
251
+ const nonKey = rec.positional.slice(2);
252
+ return nonKey[nonKey.length - 1];
253
+ })();
254
+ if (!rid || !/^R\d{3,}$/.test(rid)) {
255
+ issues.push({
256
+ check: "schema",
257
+ message: "rule records require RID formatted like R### or R####",
258
+ resolved: !ctx.strict,
259
+ });
260
+ }
261
+ if (!lvl || !["must", "must_not", "should", "may"].includes(lvl)) {
262
+ issues.push({
263
+ check: "schema",
264
+ message: `rule lvl is invalid: '${lvl ?? ""}'`,
265
+ resolved: !ctx.strict,
266
+ });
267
+ }
268
+ if (!directive || directive.length < 2) {
269
+ issues.push({
270
+ check: "schema",
271
+ message: "rule directive cannot be empty",
272
+ resolved: !ctx.strict,
273
+ });
274
+ }
275
+ continue;
276
+ }
277
+ if (rec.type === "runbook") {
278
+ if (!rec.kv["name"] || !rec.kv["scope"]) {
279
+ issues.push({
280
+ check: "schema",
281
+ message: "runbook records require at least name and scope",
282
+ resolved: !ctx.strict,
283
+ });
284
+ }
285
+ continue;
286
+ }
287
+ if (rec.type === "workbook") {
288
+ if (!rec.kv["name"]) {
289
+ issues.push({
290
+ check: "schema",
291
+ message: "workbook records require 'name'",
292
+ resolved: !ctx.strict,
293
+ });
294
+ }
295
+ continue;
296
+ }
297
+ if (rec.type === "chain") {
298
+ const step = rec.kv["step"];
299
+ const command = rec.kv["command"];
300
+ const expect = rec.kv["expect_exit"];
301
+ if (!step || !command || expect === undefined) {
302
+ issues.push({
303
+ check: "schema",
304
+ message: "chain records require step, command, and expect_exit",
305
+ resolved: !ctx.strict,
306
+ });
307
+ }
308
+ continue;
309
+ }
310
+ if (rec.type === "transition") {
311
+ if (!rec.kv["from"] || !rec.kv["to"] || !rec.kv["requires"]) {
312
+ issues.push({
313
+ check: "schema",
314
+ message: "transition records require from, to, requires",
315
+ resolved: !ctx.strict,
316
+ });
317
+ }
318
+ continue;
319
+ }
320
+ if (rec.type === "state") {
321
+ if (!rec.kv["name"]) {
322
+ issues.push({
323
+ check: "schema",
324
+ message: "state records require name",
325
+ resolved: !ctx.strict,
326
+ });
327
+ }
328
+ continue;
329
+ }
330
+ if (rec.type === "artifact") {
331
+ if (!rec.kv["path"] || !rec.kv["schema"]) {
332
+ issues.push({
333
+ check: "schema",
334
+ message: "artifact records require path and schema",
335
+ resolved: !ctx.strict,
336
+ });
337
+ }
338
+ continue;
339
+ }
340
+ if (rec.type === "command") {
341
+ if (!rec.kv["name"]) {
342
+ issues.push({
343
+ check: "schema",
344
+ message: "command records require name",
345
+ resolved: !ctx.strict,
346
+ });
347
+ }
348
+ continue;
349
+ }
350
+ if (rec.type === "config") {
351
+ if (!rec.kv["key"]) {
352
+ issues.push({
353
+ check: "schema",
354
+ message: "config records require key",
355
+ resolved: !ctx.strict,
356
+ });
357
+ }
358
+ continue;
359
+ }
360
+ }
361
+ // Per-doc required record sets.
362
+ if (requireActiveRecords) {
363
+ if (doc === "runbook") {
364
+ if (!presentByType["runbook"])
365
+ missingRequired.push("runbook| record");
366
+ if (!presentByType["rule"] && !presentByType["chain"])
367
+ missingRequired.push("at least one rule| or chain| record");
368
+ }
369
+ if (doc === "workbook") {
370
+ if (!presentByType["workbook"])
371
+ missingRequired.push("workbook| record");
372
+ if (!presentByType["command"])
373
+ missingRequired.push("at least one command| record");
374
+ if (!presentByType["config"])
375
+ missingRequired.push("at least one config| record");
376
+ }
377
+ if (doc === "rules") {
378
+ if (!presentByType["rule"] && !presentByType["check"])
379
+ missingRequired.push("at least one rule| or check| record");
380
+ }
381
+ }
382
+ if (missingRequired.length > 0) {
37
383
  issues.push({
38
384
  check: "schema",
39
- message: "AI output must start with a meta record",
40
- resolved: false
385
+ message: `Missing required AI records for doc family '${doc}': ${missingRequired.join(", ")}`,
386
+ resolved: !ctx.strict,
41
387
  });
42
388
  }
43
389
  return issues;
@@ -109,6 +455,8 @@ export async function generateDocument(args, ctx) {
109
455
  };
110
456
  }
111
457
  const options = args.options ?? {};
458
+ const canOverwriteAi = options.overwriteAi ?? options.overwrite ?? true;
459
+ const canOverwriteHuman = options.overwriteHuman ?? options.overwrite ?? true;
112
460
  const config = await loadRuntimeConfig(ctx.workspacePath);
113
461
  const filesRead = [];
114
462
  const filesWritten = [];
@@ -117,7 +465,7 @@ export async function generateDocument(args, ctx) {
117
465
  const conflicts = [];
118
466
  const aiRoot = resolve(ctx.workspacePath, config.aiRoot.replace(/^\//, ""));
119
467
  const humanRoot = resolve(ctx.workspacePath, config.humanRoot.replace(/^\//, ""));
120
- const templatePath = resolve(ctx.workspacePath, config.templatesRoot, documentType);
468
+ const templatePath = resolve(config.sourceRoot, config.templatesRoot, documentType);
121
469
  const aiOutputPath = resolve(aiRoot, documentType);
122
470
  const humanOutputPath = resolve(humanRoot, documentType);
123
471
  if (!isPathWithinRoot(aiOutputPath, aiRoot) || !isPathWithinRoot(humanOutputPath, humanRoot)) {
@@ -169,25 +517,54 @@ export async function generateDocument(args, ctx) {
169
517
  };
170
518
  }
171
519
  }
172
- const schemaPath = resolve(ctx.workspacePath, config.schemasRoot, "documentation-schema.md");
520
+ const schemaPath = resolve(config.sourceRoot, config.schemasRoot, "documentation-schema.md");
173
521
  if (existsSync(schemaPath)) {
174
522
  filesRead.push(schemaPath);
175
523
  await readFile(schemaPath, "utf8");
176
524
  }
177
- let aiOutput = `meta|v=1|doc=rules|truth=canonical|st=draft\nproject|name=workflow-cannon|type=generated_doc|scope=${documentType}`;
525
+ function resolveExpectedDocFamily(docType) {
526
+ if (docType.includes("runbooks/") || docType.startsWith("runbooks/"))
527
+ return "runbook";
528
+ if (docType.includes("workbooks/") || docType.startsWith("workbooks/"))
529
+ return "workbook";
530
+ return "rules";
531
+ }
532
+ const expectedDoc = resolveExpectedDocFamily(documentType);
533
+ // Default AI output for draft generation. When AI files already exist and overwriteAi is false,
534
+ // we validate and preserve the existing AI surface content instead of using this stub.
535
+ let aiOutput = `meta|v=1|doc=${expectedDoc}|truth=canonical|st=draft\nproject|name=workflow-cannon|type=generated_doc|scope=${documentType}`;
178
536
  let attemptsUsed = 0;
179
537
  const maxAttempts = options.maxValidationAttempts ?? config.maxValidationAttempts;
538
+ const strict = options.strict !== false;
539
+ if (existsSync(aiOutputPath) && !canOverwriteAi) {
540
+ // Preserve existing AI docs: validate them instead of validating the stub.
541
+ // This avoids schema regressions from breaking doc regeneration when AI docs are already curated.
542
+ aiOutput = await readFile(aiOutputPath, "utf8");
543
+ }
180
544
  while (attemptsUsed < maxAttempts) {
181
545
  attemptsUsed += 1;
182
- const schemaIssues = validateAiSchema(aiOutput);
546
+ const schemaIssues = validateAiSchema(aiOutput, {
547
+ strict,
548
+ workspacePath: ctx.workspacePath,
549
+ expectedDoc,
550
+ });
183
551
  if (schemaIssues.length === 0) {
184
552
  break;
185
553
  }
554
+ const hasUnresolved = schemaIssues.some((i) => !i.resolved);
186
555
  validationIssues.push(...schemaIssues);
556
+ if (!hasUnresolved) {
557
+ // In advisory mode, schema warnings should not block generation.
558
+ break;
559
+ }
187
560
  aiOutput = autoResolveAiSchema(aiOutput);
188
561
  }
189
- const aiFinalIssues = validateAiSchema(aiOutput);
190
- if (aiFinalIssues.length > 0) {
562
+ const aiFinalIssues = validateAiSchema(aiOutput, {
563
+ strict,
564
+ workspacePath: ctx.workspacePath,
565
+ expectedDoc,
566
+ });
567
+ if (aiFinalIssues.some((i) => !i.resolved)) {
191
568
  validationIssues.push(...aiFinalIssues);
192
569
  return {
193
570
  ok: false,
@@ -249,8 +626,6 @@ export async function generateDocument(args, ctx) {
249
626
  };
250
627
  }
251
628
  if (!options.dryRun) {
252
- const canOverwriteAi = options.overwriteAi ?? options.overwrite ?? true;
253
- const canOverwriteHuman = options.overwriteHuman ?? options.overwrite ?? true;
254
629
  const aiExists = existsSync(aiOutputPath);
255
630
  const humanExists = existsSync(humanOutputPath);
256
631
  if ((!canOverwriteAi && aiExists) && (!canOverwriteHuman && humanExists)) {
@@ -277,6 +652,8 @@ export async function generateDocument(args, ctx) {
277
652
  }
278
653
  await mkdir(aiRoot, { recursive: true });
279
654
  await mkdir(humanRoot, { recursive: true });
655
+ await mkdir(dirname(aiOutputPath), { recursive: true });
656
+ await mkdir(dirname(humanOutputPath), { recursive: true });
280
657
  if (canOverwriteAi || !aiExists) {
281
658
  await writeFile(aiOutputPath, `${aiOutput}\n`, "utf8");
282
659
  filesWritten.push(aiOutputPath);
@@ -310,11 +687,27 @@ export async function generateDocument(args, ctx) {
310
687
  }
311
688
  export async function generateAllDocuments(args, ctx) {
312
689
  const config = await loadRuntimeConfig(ctx.workspacePath);
313
- const templatesDir = resolve(ctx.workspacePath, config.templatesRoot);
690
+ const templatesDir = resolve(config.sourceRoot, config.templatesRoot);
691
+ async function listTemplateFiles(dir, baseDir) {
692
+ const entries = await readdir(dir, { withFileTypes: true });
693
+ const files = [];
694
+ for (const entry of entries) {
695
+ const absPath = resolve(dir, entry.name);
696
+ if (entry.isDirectory()) {
697
+ files.push(...(await listTemplateFiles(absPath, baseDir)));
698
+ continue;
699
+ }
700
+ if (!entry.isFile() || !entry.name.endsWith(".md")) {
701
+ continue;
702
+ }
703
+ const relPath = absPath.slice(baseDir.length + 1).split("\\").join("/");
704
+ files.push(relPath);
705
+ }
706
+ return files;
707
+ }
314
708
  let templateFiles = [];
315
709
  try {
316
- const entries = await readdir(templatesDir);
317
- templateFiles = entries.filter((f) => f.endsWith(".md")).sort();
710
+ templateFiles = (await listTemplateFiles(templatesDir, templatesDir)).sort();
318
711
  }
319
712
  catch {
320
713
  return {
@@ -1,4 +1,5 @@
1
1
  import type { ModuleLifecycleContext } from "../../contracts/module-contract.js";
2
+ export declare function getMaxRecommendationCandidatesPerRun(ctx: ModuleLifecycleContext): number;
2
3
  export type GenerateRecommendationsArgs = {
3
4
  /** Directory relative to workspace containing agent `*.jsonl` transcripts (default: agent-transcripts). */
4
5
  transcriptsRoot?: string;
@@ -7,7 +8,13 @@ export type GenerateRecommendationsArgs = {
7
8
  toTag?: string;
8
9
  };
9
10
  export declare function runGenerateRecommendations(ctx: ModuleLifecycleContext, args: GenerateRecommendationsArgs): Promise<{
11
+ runId: string;
10
12
  created: string[];
11
13
  skipped: number;
12
14
  candidates: number;
15
+ dedupe: {
16
+ skippedDuplicateEvidenceKey: number;
17
+ skippedExistingTaskId: number;
18
+ cappedRemaining: number;
19
+ };
13
20
  }>;
@@ -1,3 +1,4 @@
1
+ import { randomUUID } from "node:crypto";
1
2
  import { TaskStore } from "../task-engine/store.js";
2
3
  import { appendLineageEvent } from "../../core/lineage-store.js";
3
4
  import { loadImprovementState, saveImprovementState } from "./improvement-state.js";
@@ -32,7 +33,21 @@ function resolveTranscriptArchivePath(ctx, args) {
32
33
  const archivePath = typeof transcripts.archivePath === "string" ? transcripts.archivePath.trim() : "";
33
34
  return archivePath || "agent-transcripts";
34
35
  }
36
+ export function getMaxRecommendationCandidatesPerRun(ctx) {
37
+ const improvement = ctx.effectiveConfig?.improvement && typeof ctx.effectiveConfig.improvement === "object"
38
+ ? ctx.effectiveConfig.improvement
39
+ : {};
40
+ const cadence = improvement.cadence && typeof improvement.cadence === "object"
41
+ ? improvement.cadence
42
+ : {};
43
+ const raw = cadence.maxRecommendationCandidatesPerRun;
44
+ if (typeof raw === "number" && Number.isFinite(raw)) {
45
+ return Math.max(1, Math.floor(raw));
46
+ }
47
+ return 500;
48
+ }
35
49
  export async function runGenerateRecommendations(ctx, args) {
50
+ const runId = randomUUID();
36
51
  const store = new TaskStore(ctx.workspacePath, taskStoreRelativePath(ctx));
37
52
  await store.load();
38
53
  const state = await loadImprovementState(ctx.workspacePath);
@@ -51,16 +66,23 @@ export async function runGenerateRecommendations(ctx, args) {
51
66
  }
52
67
  const allTasks = store.getAllTasks();
53
68
  const created = [];
54
- let skipped = 0;
69
+ let skippedDuplicateEvidenceKey = 0;
70
+ let skippedExistingTaskId = 0;
71
+ let cappedRemaining = 0;
55
72
  const now = new Date().toISOString();
73
+ const maxCreates = getMaxRecommendationCandidatesPerRun(ctx);
56
74
  for (const c of candidates) {
57
75
  if (hasEvidenceKey(allTasks, c.evidenceKey)) {
58
- skipped += 1;
76
+ skippedDuplicateEvidenceKey += 1;
59
77
  continue;
60
78
  }
61
79
  const id = taskIdForEvidenceKey(c.evidenceKey);
62
80
  if (store.getTask(id)) {
63
- skipped += 1;
81
+ skippedExistingTaskId += 1;
82
+ continue;
83
+ }
84
+ if (created.length >= maxCreates) {
85
+ cappedRemaining += 1;
64
86
  continue;
65
87
  }
66
88
  const task = {
@@ -99,5 +121,16 @@ export async function runGenerateRecommendations(ctx, args) {
99
121
  }
100
122
  await store.save();
101
123
  await saveImprovementState(ctx.workspacePath, state);
102
- return { created, skipped, candidates: candidates.length };
124
+ const skipped = skippedDuplicateEvidenceKey + skippedExistingTaskId;
125
+ return {
126
+ runId,
127
+ created,
128
+ skipped,
129
+ candidates: candidates.length,
130
+ dedupe: {
131
+ skippedDuplicateEvidenceKey,
132
+ skippedExistingTaskId,
133
+ cappedRemaining
134
+ }
135
+ };
103
136
  }
@@ -1,4 +1,11 @@
1
- export declare const IMPROVEMENT_STATE_SCHEMA_VERSION: 1;
1
+ export declare const IMPROVEMENT_STATE_SCHEMA_VERSION: 2;
2
+ export type TranscriptRetryEntry = {
3
+ relativePath: string;
4
+ attempts: number;
5
+ lastErrorCode: string;
6
+ lastErrorMessage: string;
7
+ nextRetryAt: string;
8
+ };
2
9
  export type ImprovementStateDocument = {
3
10
  schemaVersion: typeof IMPROVEMENT_STATE_SCHEMA_VERSION;
4
11
  policyTraceLineCursor: number;
@@ -7,6 +14,8 @@ export type ImprovementStateDocument = {
7
14
  transcriptLineCursors: Record<string, number>;
8
15
  lastSyncRunAt: string | null;
9
16
  lastIngestRunAt: string | null;
17
+ /** Bounded queue of transcript files that failed to copy; retried on subsequent syncs. */
18
+ transcriptRetryQueue: TranscriptRetryEntry[];
10
19
  };
11
20
  export declare function emptyImprovementState(): ImprovementStateDocument;
12
21
  export declare function loadImprovementState(workspacePath: string): Promise<ImprovementStateDocument>;