@workflow-cannon/workspace-kit 0.6.0 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/README.md +3 -3
  2. package/dist/cli.js +31 -21
  3. package/dist/contracts/index.d.ts +1 -1
  4. package/dist/contracts/module-contract.d.ts +13 -0
  5. package/dist/core/config-metadata.js +303 -1
  6. package/dist/core/index.d.ts +6 -0
  7. package/dist/core/index.js +6 -0
  8. package/dist/core/instruction-template-mapper.d.ts +9 -0
  9. package/dist/core/instruction-template-mapper.js +35 -0
  10. package/dist/core/lineage-contract.d.ts +1 -1
  11. package/dist/core/lineage-contract.js +1 -1
  12. package/dist/core/policy.d.ts +4 -1
  13. package/dist/core/policy.js +5 -4
  14. package/dist/core/response-template-contract.d.ts +15 -0
  15. package/dist/core/response-template-contract.js +10 -0
  16. package/dist/core/response-template-registry.d.ts +4 -0
  17. package/dist/core/response-template-registry.js +44 -0
  18. package/dist/core/response-template-shaping.d.ts +6 -0
  19. package/dist/core/response-template-shaping.js +128 -0
  20. package/dist/core/session-policy.d.ts +18 -0
  21. package/dist/core/session-policy.js +57 -0
  22. package/dist/core/transcript-completion-hook.d.ts +7 -0
  23. package/dist/core/transcript-completion-hook.js +90 -0
  24. package/dist/core/workspace-kit-config.js +42 -2
  25. package/dist/modules/documentation/runtime.js +383 -14
  26. package/dist/modules/improvement/generate-recommendations-runtime.d.ts +7 -0
  27. package/dist/modules/improvement/generate-recommendations-runtime.js +51 -7
  28. package/dist/modules/improvement/improvement-state.d.ts +12 -1
  29. package/dist/modules/improvement/improvement-state.js +38 -7
  30. package/dist/modules/improvement/index.js +124 -2
  31. package/dist/modules/improvement/ingest.js +2 -1
  32. package/dist/modules/improvement/transcript-redaction.d.ts +4 -0
  33. package/dist/modules/improvement/transcript-redaction.js +10 -0
  34. package/dist/modules/improvement/transcript-sync-runtime.d.ts +60 -0
  35. package/dist/modules/improvement/transcript-sync-runtime.js +320 -0
  36. package/dist/modules/index.d.ts +1 -1
  37. package/dist/modules/index.js +1 -1
  38. package/dist/modules/task-engine/index.d.ts +0 -2
  39. package/dist/modules/task-engine/index.js +4 -70
  40. package/package.json +6 -2
  41. package/dist/modules/task-engine/generator.d.ts +0 -2
  42. package/dist/modules/task-engine/generator.js +0 -101
  43. package/dist/modules/task-engine/importer.d.ts +0 -8
  44. package/dist/modules/task-engine/importer.js +0 -157
@@ -1,6 +1,6 @@
1
1
  import { mkdir, readFile, writeFile } from "node:fs/promises";
2
2
  import { existsSync } from "node:fs";
3
- import { resolve, sep } from "node:path";
3
+ import { dirname, resolve, sep } from "node:path";
4
4
  import { readdir } from "node:fs/promises";
5
5
  function isPathWithinRoot(path, root) {
6
6
  return path === root || path.startsWith(`${root}${sep}`);
@@ -30,14 +30,336 @@ async function loadRuntimeConfig(workspacePath) {
30
30
  maxValidationAttempts: Number.isFinite(maxValidationAttempts) ? maxValidationAttempts : 3
31
31
  };
32
32
  }
33
- function validateAiSchema(aiOutput) {
33
+ function parseAiRecordLine(line) {
34
+ const trimmed = line.trim();
35
+ if (!trimmed || trimmed.startsWith("#"))
36
+ return null;
37
+ const parts = trimmed.split("|");
38
+ // Record format is `type|token|token...`. Ignore non-record markdown lines.
39
+ if (parts.length < 2)
40
+ return null;
41
+ const type = parts[0] ?? "";
42
+ if (!type)
43
+ return null;
44
+ const positional = [];
45
+ const kv = {};
46
+ for (const token of parts.slice(1)) {
47
+ if (!token)
48
+ continue;
49
+ const idx = token.indexOf("=");
50
+ if (idx >= 0) {
51
+ const k = token.slice(0, idx).trim();
52
+ const v = token.slice(idx + 1).trim();
53
+ if (!k)
54
+ continue;
55
+ kv[k] = v;
56
+ }
57
+ else {
58
+ positional.push(token);
59
+ }
60
+ }
61
+ return { type, positional, kv, raw: line };
62
+ }
63
+ function isAllowedMetaDoc(doc) {
64
+ return (doc === "rules" ||
65
+ doc === "runbook" ||
66
+ doc === "workbook" ||
67
+ doc === "generator" ||
68
+ doc === "map" ||
69
+ doc === "workflows" ||
70
+ doc === "commands" ||
71
+ doc === "decisions" ||
72
+ doc === "glossary" ||
73
+ doc === "observed" ||
74
+ doc === "planned" ||
75
+ doc === "checks" ||
76
+ doc === "manifest");
77
+ }
78
+ function validateAiSchema(aiOutput, ctx) {
34
79
  const issues = [];
35
- const lines = aiOutput.split("\n").filter((line) => line.trim().length > 0);
36
- if (!lines[0]?.startsWith("meta|v=")) {
80
+ const lines = aiOutput.split("\n").map((l) => l.trim()).filter((l) => l.length > 0);
81
+ if (lines.length === 0) {
82
+ return [
83
+ {
84
+ check: "schema",
85
+ message: "AI output is empty",
86
+ resolved: false,
87
+ }
88
+ ];
89
+ }
90
+ const metaLine = lines[0];
91
+ const meta = parseAiRecordLine(metaLine);
92
+ if (!meta || meta.type !== "meta") {
93
+ return [
94
+ {
95
+ check: "schema",
96
+ message: "AI output must start with a meta record",
97
+ resolved: false,
98
+ }
99
+ ];
100
+ }
101
+ const v = meta.kv["v"];
102
+ const doc = meta.kv["doc"];
103
+ const truth = meta.kv["truth"];
104
+ const st = meta.kv["st"];
105
+ if (v !== "1") {
37
106
  issues.push({
38
107
  check: "schema",
39
- message: "AI output must start with a meta record",
40
- resolved: false
108
+ message: "AI meta schemaVersion must be v=1",
109
+ resolved: false,
110
+ });
111
+ }
112
+ if (!doc || !isAllowedMetaDoc(doc)) {
113
+ issues.push({
114
+ check: "schema",
115
+ message: `Unsupported meta.doc '${doc ?? ""}'`,
116
+ resolved: false,
117
+ });
118
+ }
119
+ if (!truth || truth.length === 0) {
120
+ issues.push({
121
+ check: "schema",
122
+ message: "AI meta.truth is required",
123
+ resolved: false,
124
+ });
125
+ }
126
+ if (!st || st.length === 0) {
127
+ issues.push({
128
+ check: "schema",
129
+ message: "AI meta.st is required",
130
+ resolved: false,
131
+ });
132
+ }
133
+ if (ctx.expectedDoc && doc && ctx.expectedDoc !== doc) {
134
+ issues.push({
135
+ check: "schema",
136
+ message: `meta.doc '${doc}' does not match expected doc family for '${ctx.expectedDoc}'`,
137
+ resolved: !ctx.strict,
138
+ });
139
+ }
140
+ const requireActiveRecords = st === "active";
141
+ const allowedTypes = new Set([
142
+ // Global ai record families used across .ai/*.
143
+ "project",
144
+ "stack",
145
+ "prio",
146
+ "ref",
147
+ "rule",
148
+ "check",
149
+ "path",
150
+ "role",
151
+ "has",
152
+ "xhas",
153
+ "deps",
154
+ "xdeps",
155
+ "module",
156
+ "wf",
157
+ "cmd",
158
+ "decision",
159
+ "term",
160
+ "observed",
161
+ "planned",
162
+ "map",
163
+ // Runbooks
164
+ "runbook",
165
+ "intent",
166
+ "chain",
167
+ "artifact",
168
+ "state",
169
+ "transition",
170
+ "promotion",
171
+ "rollback",
172
+ // Workbooks
173
+ "workbook",
174
+ "scope",
175
+ "command",
176
+ "config",
177
+ "cadence",
178
+ "guardrail",
179
+ ]);
180
+ const presentByType = {};
181
+ const missingRequired = [];
182
+ for (const line of lines.slice(1)) {
183
+ const rec = parseAiRecordLine(line);
184
+ if (!rec)
185
+ continue;
186
+ presentByType[rec.type] = true;
187
+ if (!allowedTypes.has(rec.type)) {
188
+ issues.push({
189
+ check: "schema",
190
+ message: `Unknown AI record type '${rec.type}'`,
191
+ resolved: !ctx.strict,
192
+ });
193
+ continue;
194
+ }
195
+ // Minimal record-level validation for current runbook/workbook families.
196
+ if (rec.type === "ref") {
197
+ const p = rec.kv["path"];
198
+ const n = rec.kv["name"];
199
+ if (!p || !n) {
200
+ issues.push({
201
+ check: "schema",
202
+ message: "ref records require both 'name' and 'path'",
203
+ resolved: !ctx.strict,
204
+ });
205
+ }
206
+ else {
207
+ const abs = resolve(ctx.workspacePath, p);
208
+ const ok = existsSync(abs);
209
+ if (!ok) {
210
+ issues.push({
211
+ check: "schema",
212
+ message: `ref.path does not exist: '${p}'`,
213
+ resolved: !ctx.strict,
214
+ });
215
+ }
216
+ }
217
+ continue;
218
+ }
219
+ if (rec.type === "rule") {
220
+ const rid = rec.positional[0];
221
+ const lvl = rec.positional[1] ?? rec.kv["lvl"];
222
+ const directive = (() => {
223
+ // rule lines can be either:
224
+ // rule|RID|lvl|scope|directive|...
225
+ // or the scope can be omitted:
226
+ // rule|RID|lvl|directive|...
227
+ const nonKey = rec.positional.slice(2);
228
+ return nonKey[nonKey.length - 1];
229
+ })();
230
+ if (!rid || !/^R\d{3,}$/.test(rid)) {
231
+ issues.push({
232
+ check: "schema",
233
+ message: "rule records require RID formatted like R### or R####",
234
+ resolved: !ctx.strict,
235
+ });
236
+ }
237
+ if (!lvl || !["must", "must_not", "should", "may"].includes(lvl)) {
238
+ issues.push({
239
+ check: "schema",
240
+ message: `rule lvl is invalid: '${lvl ?? ""}'`,
241
+ resolved: !ctx.strict,
242
+ });
243
+ }
244
+ if (!directive || directive.length < 2) {
245
+ issues.push({
246
+ check: "schema",
247
+ message: "rule directive cannot be empty",
248
+ resolved: !ctx.strict,
249
+ });
250
+ }
251
+ continue;
252
+ }
253
+ if (rec.type === "runbook") {
254
+ if (!rec.kv["name"] || !rec.kv["scope"]) {
255
+ issues.push({
256
+ check: "schema",
257
+ message: "runbook records require at least name and scope",
258
+ resolved: !ctx.strict,
259
+ });
260
+ }
261
+ continue;
262
+ }
263
+ if (rec.type === "workbook") {
264
+ if (!rec.kv["name"]) {
265
+ issues.push({
266
+ check: "schema",
267
+ message: "workbook records require 'name'",
268
+ resolved: !ctx.strict,
269
+ });
270
+ }
271
+ continue;
272
+ }
273
+ if (rec.type === "chain") {
274
+ const step = rec.kv["step"];
275
+ const command = rec.kv["command"];
276
+ const expect = rec.kv["expect_exit"];
277
+ if (!step || !command || expect === undefined) {
278
+ issues.push({
279
+ check: "schema",
280
+ message: "chain records require step, command, and expect_exit",
281
+ resolved: !ctx.strict,
282
+ });
283
+ }
284
+ continue;
285
+ }
286
+ if (rec.type === "transition") {
287
+ if (!rec.kv["from"] || !rec.kv["to"] || !rec.kv["requires"]) {
288
+ issues.push({
289
+ check: "schema",
290
+ message: "transition records require from, to, requires",
291
+ resolved: !ctx.strict,
292
+ });
293
+ }
294
+ continue;
295
+ }
296
+ if (rec.type === "state") {
297
+ if (!rec.kv["name"]) {
298
+ issues.push({
299
+ check: "schema",
300
+ message: "state records require name",
301
+ resolved: !ctx.strict,
302
+ });
303
+ }
304
+ continue;
305
+ }
306
+ if (rec.type === "artifact") {
307
+ if (!rec.kv["path"] || !rec.kv["schema"]) {
308
+ issues.push({
309
+ check: "schema",
310
+ message: "artifact records require path and schema",
311
+ resolved: !ctx.strict,
312
+ });
313
+ }
314
+ continue;
315
+ }
316
+ if (rec.type === "command") {
317
+ if (!rec.kv["name"]) {
318
+ issues.push({
319
+ check: "schema",
320
+ message: "command records require name",
321
+ resolved: !ctx.strict,
322
+ });
323
+ }
324
+ continue;
325
+ }
326
+ if (rec.type === "config") {
327
+ if (!rec.kv["key"]) {
328
+ issues.push({
329
+ check: "schema",
330
+ message: "config records require key",
331
+ resolved: !ctx.strict,
332
+ });
333
+ }
334
+ continue;
335
+ }
336
+ }
337
+ // Per-doc required record sets.
338
+ if (requireActiveRecords) {
339
+ if (doc === "runbook") {
340
+ if (!presentByType["runbook"])
341
+ missingRequired.push("runbook| record");
342
+ if (!presentByType["rule"] && !presentByType["chain"])
343
+ missingRequired.push("at least one rule| or chain| record");
344
+ }
345
+ if (doc === "workbook") {
346
+ if (!presentByType["workbook"])
347
+ missingRequired.push("workbook| record");
348
+ if (!presentByType["command"])
349
+ missingRequired.push("at least one command| record");
350
+ if (!presentByType["config"])
351
+ missingRequired.push("at least one config| record");
352
+ }
353
+ if (doc === "rules") {
354
+ if (!presentByType["rule"] && !presentByType["check"])
355
+ missingRequired.push("at least one rule| or check| record");
356
+ }
357
+ }
358
+ if (missingRequired.length > 0) {
359
+ issues.push({
360
+ check: "schema",
361
+ message: `Missing required AI records for doc family '${doc}': ${missingRequired.join(", ")}`,
362
+ resolved: !ctx.strict,
41
363
  });
42
364
  }
43
365
  return issues;
@@ -109,6 +431,8 @@ export async function generateDocument(args, ctx) {
109
431
  };
110
432
  }
111
433
  const options = args.options ?? {};
434
+ const canOverwriteAi = options.overwriteAi ?? options.overwrite ?? true;
435
+ const canOverwriteHuman = options.overwriteHuman ?? options.overwrite ?? true;
112
436
  const config = await loadRuntimeConfig(ctx.workspacePath);
113
437
  const filesRead = [];
114
438
  const filesWritten = [];
@@ -174,20 +498,49 @@ export async function generateDocument(args, ctx) {
174
498
  filesRead.push(schemaPath);
175
499
  await readFile(schemaPath, "utf8");
176
500
  }
177
- let aiOutput = `meta|v=1|doc=rules|truth=canonical|st=draft\nproject|name=workflow-cannon|type=generated_doc|scope=${documentType}`;
501
+ function resolveExpectedDocFamily(docType) {
502
+ if (docType.includes("runbooks/") || docType.startsWith("runbooks/"))
503
+ return "runbook";
504
+ if (docType.includes("workbooks/") || docType.startsWith("workbooks/"))
505
+ return "workbook";
506
+ return "rules";
507
+ }
508
+ const expectedDoc = resolveExpectedDocFamily(documentType);
509
+ // Default AI output for draft generation. When AI files already exist and overwriteAi is false,
510
+ // we validate and preserve the existing AI surface content instead of using this stub.
511
+ let aiOutput = `meta|v=1|doc=${expectedDoc}|truth=canonical|st=draft\nproject|name=workflow-cannon|type=generated_doc|scope=${documentType}`;
178
512
  let attemptsUsed = 0;
179
513
  const maxAttempts = options.maxValidationAttempts ?? config.maxValidationAttempts;
514
+ const strict = options.strict !== false;
515
+ if (existsSync(aiOutputPath) && !canOverwriteAi) {
516
+ // Preserve existing AI docs: validate them instead of validating the stub.
517
+ // This avoids schema regressions from breaking doc regeneration when AI docs are already curated.
518
+ aiOutput = await readFile(aiOutputPath, "utf8");
519
+ }
180
520
  while (attemptsUsed < maxAttempts) {
181
521
  attemptsUsed += 1;
182
- const schemaIssues = validateAiSchema(aiOutput);
522
+ const schemaIssues = validateAiSchema(aiOutput, {
523
+ strict,
524
+ workspacePath: ctx.workspacePath,
525
+ expectedDoc,
526
+ });
183
527
  if (schemaIssues.length === 0) {
184
528
  break;
185
529
  }
530
+ const hasUnresolved = schemaIssues.some((i) => !i.resolved);
186
531
  validationIssues.push(...schemaIssues);
532
+ if (!hasUnresolved) {
533
+ // In advisory mode, schema warnings should not block generation.
534
+ break;
535
+ }
187
536
  aiOutput = autoResolveAiSchema(aiOutput);
188
537
  }
189
- const aiFinalIssues = validateAiSchema(aiOutput);
190
- if (aiFinalIssues.length > 0) {
538
+ const aiFinalIssues = validateAiSchema(aiOutput, {
539
+ strict,
540
+ workspacePath: ctx.workspacePath,
541
+ expectedDoc,
542
+ });
543
+ if (aiFinalIssues.some((i) => !i.resolved)) {
191
544
  validationIssues.push(...aiFinalIssues);
192
545
  return {
193
546
  ok: false,
@@ -249,8 +602,6 @@ export async function generateDocument(args, ctx) {
249
602
  };
250
603
  }
251
604
  if (!options.dryRun) {
252
- const canOverwriteAi = options.overwriteAi ?? options.overwrite ?? true;
253
- const canOverwriteHuman = options.overwriteHuman ?? options.overwrite ?? true;
254
605
  const aiExists = existsSync(aiOutputPath);
255
606
  const humanExists = existsSync(humanOutputPath);
256
607
  if ((!canOverwriteAi && aiExists) && (!canOverwriteHuman && humanExists)) {
@@ -277,6 +628,8 @@ export async function generateDocument(args, ctx) {
277
628
  }
278
629
  await mkdir(aiRoot, { recursive: true });
279
630
  await mkdir(humanRoot, { recursive: true });
631
+ await mkdir(dirname(aiOutputPath), { recursive: true });
632
+ await mkdir(dirname(humanOutputPath), { recursive: true });
280
633
  if (canOverwriteAi || !aiExists) {
281
634
  await writeFile(aiOutputPath, `${aiOutput}\n`, "utf8");
282
635
  filesWritten.push(aiOutputPath);
@@ -311,10 +664,26 @@ export async function generateDocument(args, ctx) {
311
664
  export async function generateAllDocuments(args, ctx) {
312
665
  const config = await loadRuntimeConfig(ctx.workspacePath);
313
666
  const templatesDir = resolve(ctx.workspacePath, config.templatesRoot);
667
+ async function listTemplateFiles(dir, baseDir) {
668
+ const entries = await readdir(dir, { withFileTypes: true });
669
+ const files = [];
670
+ for (const entry of entries) {
671
+ const absPath = resolve(dir, entry.name);
672
+ if (entry.isDirectory()) {
673
+ files.push(...(await listTemplateFiles(absPath, baseDir)));
674
+ continue;
675
+ }
676
+ if (!entry.isFile() || !entry.name.endsWith(".md")) {
677
+ continue;
678
+ }
679
+ const relPath = absPath.slice(baseDir.length + 1).split("\\").join("/");
680
+ files.push(relPath);
681
+ }
682
+ return files;
683
+ }
314
684
  let templateFiles = [];
315
685
  try {
316
- const entries = await readdir(templatesDir);
317
- templateFiles = entries.filter((f) => f.endsWith(".md")).sort();
686
+ templateFiles = (await listTemplateFiles(templatesDir, templatesDir)).sort();
318
687
  }
319
688
  catch {
320
689
  return {
@@ -1,4 +1,5 @@
1
1
  import type { ModuleLifecycleContext } from "../../contracts/module-contract.js";
2
+ export declare function getMaxRecommendationCandidatesPerRun(ctx: ModuleLifecycleContext): number;
2
3
  export type GenerateRecommendationsArgs = {
3
4
  /** Directory relative to workspace containing agent `*.jsonl` transcripts (default: agent-transcripts). */
4
5
  transcriptsRoot?: string;
@@ -7,7 +8,13 @@ export type GenerateRecommendationsArgs = {
7
8
  toTag?: string;
8
9
  };
9
10
  export declare function runGenerateRecommendations(ctx: ModuleLifecycleContext, args: GenerateRecommendationsArgs): Promise<{
11
+ runId: string;
10
12
  created: string[];
11
13
  skipped: number;
12
14
  candidates: number;
15
+ dedupe: {
16
+ skippedDuplicateEvidenceKey: number;
17
+ skippedExistingTaskId: number;
18
+ cappedRemaining: number;
19
+ };
13
20
  }>;
@@ -1,3 +1,4 @@
1
+ import { randomUUID } from "node:crypto";
1
2
  import { TaskStore } from "../task-engine/store.js";
2
3
  import { appendLineageEvent } from "../../core/lineage-store.js";
3
4
  import { loadImprovementState, saveImprovementState } from "./improvement-state.js";
@@ -19,13 +20,38 @@ function hasEvidenceKey(tasks, key) {
19
20
  return m.evidenceKey === key;
20
21
  });
21
22
  }
23
+ function resolveTranscriptArchivePath(ctx, args) {
24
+ if (typeof args.transcriptsRoot === "string" && args.transcriptsRoot.trim().length > 0) {
25
+ return args.transcriptsRoot.trim();
26
+ }
27
+ const improvement = ctx.effectiveConfig?.improvement && typeof ctx.effectiveConfig.improvement === "object"
28
+ ? ctx.effectiveConfig.improvement
29
+ : {};
30
+ const transcripts = improvement.transcripts && typeof improvement.transcripts === "object"
31
+ ? improvement.transcripts
32
+ : {};
33
+ const archivePath = typeof transcripts.archivePath === "string" ? transcripts.archivePath.trim() : "";
34
+ return archivePath || "agent-transcripts";
35
+ }
36
+ export function getMaxRecommendationCandidatesPerRun(ctx) {
37
+ const improvement = ctx.effectiveConfig?.improvement && typeof ctx.effectiveConfig.improvement === "object"
38
+ ? ctx.effectiveConfig.improvement
39
+ : {};
40
+ const cadence = improvement.cadence && typeof improvement.cadence === "object"
41
+ ? improvement.cadence
42
+ : {};
43
+ const raw = cadence.maxRecommendationCandidatesPerRun;
44
+ if (typeof raw === "number" && Number.isFinite(raw)) {
45
+ return Math.max(1, Math.floor(raw));
46
+ }
47
+ return 500;
48
+ }
22
49
  export async function runGenerateRecommendations(ctx, args) {
50
+ const runId = randomUUID();
23
51
  const store = new TaskStore(ctx.workspacePath, taskStoreRelativePath(ctx));
24
52
  await store.load();
25
53
  const state = await loadImprovementState(ctx.workspacePath);
26
- const transcriptsRoot = typeof args.transcriptsRoot === "string" && args.transcriptsRoot.trim()
27
- ? args.transcriptsRoot.trim()
28
- : "agent-transcripts";
54
+ const transcriptsRoot = resolveTranscriptArchivePath(ctx, args);
29
55
  const fromTag = typeof args.fromTag === "string" ? args.fromTag.trim() : undefined;
30
56
  const toTag = typeof args.toTag === "string" ? args.toTag.trim() : undefined;
31
57
  const candidates = [];
@@ -40,16 +66,23 @@ export async function runGenerateRecommendations(ctx, args) {
40
66
  }
41
67
  const allTasks = store.getAllTasks();
42
68
  const created = [];
43
- let skipped = 0;
69
+ let skippedDuplicateEvidenceKey = 0;
70
+ let skippedExistingTaskId = 0;
71
+ let cappedRemaining = 0;
44
72
  const now = new Date().toISOString();
73
+ const maxCreates = getMaxRecommendationCandidatesPerRun(ctx);
45
74
  for (const c of candidates) {
46
75
  if (hasEvidenceKey(allTasks, c.evidenceKey)) {
47
- skipped += 1;
76
+ skippedDuplicateEvidenceKey += 1;
48
77
  continue;
49
78
  }
50
79
  const id = taskIdForEvidenceKey(c.evidenceKey);
51
80
  if (store.getTask(id)) {
52
- skipped += 1;
81
+ skippedExistingTaskId += 1;
82
+ continue;
83
+ }
84
+ if (created.length >= maxCreates) {
85
+ cappedRemaining += 1;
53
86
  continue;
54
87
  }
55
88
  const task = {
@@ -88,5 +121,16 @@ export async function runGenerateRecommendations(ctx, args) {
88
121
  }
89
122
  await store.save();
90
123
  await saveImprovementState(ctx.workspacePath, state);
91
- return { created, skipped, candidates: candidates.length };
124
+ const skipped = skippedDuplicateEvidenceKey + skippedExistingTaskId;
125
+ return {
126
+ runId,
127
+ created,
128
+ skipped,
129
+ candidates: candidates.length,
130
+ dedupe: {
131
+ skippedDuplicateEvidenceKey,
132
+ skippedExistingTaskId,
133
+ cappedRemaining
134
+ }
135
+ };
92
136
  }
@@ -1,10 +1,21 @@
1
- export declare const IMPROVEMENT_STATE_SCHEMA_VERSION: 1;
1
+ export declare const IMPROVEMENT_STATE_SCHEMA_VERSION: 2;
2
+ export type TranscriptRetryEntry = {
3
+ relativePath: string;
4
+ attempts: number;
5
+ lastErrorCode: string;
6
+ lastErrorMessage: string;
7
+ nextRetryAt: string;
8
+ };
2
9
  export type ImprovementStateDocument = {
3
10
  schemaVersion: typeof IMPROVEMENT_STATE_SCHEMA_VERSION;
4
11
  policyTraceLineCursor: number;
5
12
  mutationLineCursor: number;
6
13
  transitionLogLengthCursor: number;
7
14
  transcriptLineCursors: Record<string, number>;
15
+ lastSyncRunAt: string | null;
16
+ lastIngestRunAt: string | null;
17
+ /** Bounded queue of transcript files that failed to copy; retried on subsequent syncs. */
18
+ transcriptRetryQueue: TranscriptRetryEntry[];
8
19
  };
9
20
  export declare function emptyImprovementState(): ImprovementStateDocument;
10
21
  export declare function loadImprovementState(workspacePath: string): Promise<ImprovementStateDocument>;
@@ -1,6 +1,6 @@
1
1
  import fs from "node:fs/promises";
2
2
  import path from "node:path";
3
- export const IMPROVEMENT_STATE_SCHEMA_VERSION = 1;
3
+ export const IMPROVEMENT_STATE_SCHEMA_VERSION = 2;
4
4
  const DEFAULT_REL = ".workspace-kit/improvement/state.json";
5
5
  function statePath(workspacePath) {
6
6
  return path.join(workspacePath, DEFAULT_REL);
@@ -11,21 +11,48 @@ export function emptyImprovementState() {
11
11
  policyTraceLineCursor: 0,
12
12
  mutationLineCursor: 0,
13
13
  transitionLogLengthCursor: 0,
14
- transcriptLineCursors: {}
14
+ transcriptLineCursors: {},
15
+ lastSyncRunAt: null,
16
+ lastIngestRunAt: null,
17
+ transcriptRetryQueue: []
18
+ };
19
+ }
20
+ function migrateFromV1(raw) {
21
+ const base = emptyImprovementState();
22
+ return {
23
+ ...base,
24
+ policyTraceLineCursor: typeof raw.policyTraceLineCursor === "number" ? raw.policyTraceLineCursor : 0,
25
+ mutationLineCursor: typeof raw.mutationLineCursor === "number" ? raw.mutationLineCursor : 0,
26
+ transitionLogLengthCursor: typeof raw.transitionLogLengthCursor === "number" ? raw.transitionLogLengthCursor : 0,
27
+ transcriptLineCursors: raw.transcriptLineCursors && typeof raw.transcriptLineCursors === "object" && raw.transcriptLineCursors !== null
28
+ ? raw.transcriptLineCursors
29
+ : {},
30
+ lastSyncRunAt: typeof raw.lastSyncRunAt === "string" ? raw.lastSyncRunAt : null,
31
+ lastIngestRunAt: typeof raw.lastIngestRunAt === "string" ? raw.lastIngestRunAt : null
15
32
  };
16
33
  }
17
34
  export async function loadImprovementState(workspacePath) {
18
35
  const fp = statePath(workspacePath);
19
36
  try {
20
- const raw = await fs.readFile(fp, "utf8");
21
- const doc = JSON.parse(raw);
22
- if (doc.schemaVersion !== IMPROVEMENT_STATE_SCHEMA_VERSION) {
37
+ const rawText = await fs.readFile(fp, "utf8");
38
+ const raw = JSON.parse(rawText);
39
+ const ver = raw.schemaVersion;
40
+ if (ver === 1) {
41
+ return migrateFromV1(raw);
42
+ }
43
+ if (ver !== IMPROVEMENT_STATE_SCHEMA_VERSION) {
23
44
  return emptyImprovementState();
24
45
  }
46
+ const doc = raw;
25
47
  return {
26
48
  ...emptyImprovementState(),
27
49
  ...doc,
28
- transcriptLineCursors: doc.transcriptLineCursors ?? {}
50
+ transcriptLineCursors: doc.transcriptLineCursors ?? {},
51
+ transcriptRetryQueue: Array.isArray(doc.transcriptRetryQueue)
52
+ ? doc.transcriptRetryQueue.filter((e) => e !== null &&
53
+ typeof e === "object" &&
54
+ typeof e.relativePath === "string")
55
+ : []
29
56
  };
30
57
  }
31
58
  catch (e) {
@@ -38,5 +65,9 @@ export async function loadImprovementState(workspacePath) {
38
65
  export async function saveImprovementState(workspacePath, doc) {
39
66
  const fp = statePath(workspacePath);
40
67
  await fs.mkdir(path.dirname(fp), { recursive: true });
41
- await fs.writeFile(fp, `${JSON.stringify(doc, null, 2)}\n`, "utf8");
68
+ const out = {
69
+ ...doc,
70
+ schemaVersion: IMPROVEMENT_STATE_SCHEMA_VERSION
71
+ };
72
+ await fs.writeFile(fp, `${JSON.stringify(out, null, 2)}\n`, "utf8");
42
73
  }