@nathapp/nax 0.64.1 → 0.64.2-canary.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/dist/nax.js +1513 -1167
  2. package/package.json +1 -1
package/dist/nax.js CHANGED
@@ -5304,7 +5304,11 @@ class AgentManager {
5304
5304
  const sessionRole = handle.role ?? opts.sessionRole ?? "main";
5305
5305
  const start = Date.now();
5306
5306
  try {
5307
- const result = await this._sendPrompt(handle, prompt, opts);
5307
+ const rawResult = await this._sendPrompt(handle, prompt, opts);
5308
+ const result = {
5309
+ ...rawResult,
5310
+ protocolIds: rawResult.protocolIds ?? handle.protocolIds
5311
+ };
5308
5312
  const event = {
5309
5313
  kind: "session-turn",
5310
5314
  sessionName: handle.id,
@@ -21027,7 +21031,7 @@ function reshapeSelector(name, fn) {
21027
21031
  }
21028
21032
 
21029
21033
  // src/config/selectors.ts
21030
- var reviewConfigSelector, planConfigSelector, decomposeConfigSelector, rectifyConfigSelector, acceptanceConfigSelector, acceptanceFixConfigSelector, acceptanceGenConfigSelector, tddConfigSelector, debateConfigSelector, routingConfigSelector, verifyConfigSelector, rectificationGateConfigSelector, agentConfigSelector, agentManagerConfigSelector, interactionConfigSelector, precheckConfigSelector, qualityConfigSelector, testPatternConfigSelector, contextConfigSelector, contextToolRuntimeConfigSelector, promptLoaderConfigSelector, llmRoutingConfigSelector;
21034
+ var reviewConfigSelector, planConfigSelector, decomposeConfigSelector, rectifyConfigSelector, acceptanceConfigSelector, acceptanceFixConfigSelector, acceptanceGenConfigSelector, tddConfigSelector, debateConfigSelector, routingConfigSelector, verifyConfigSelector, rectificationGateConfigSelector, agentConfigSelector, agentManagerConfigSelector, interactionConfigSelector, precheckConfigSelector, qualityConfigSelector, autofixConfigSelector, testPatternConfigSelector, contextConfigSelector, contextToolRuntimeConfigSelector, promptLoaderConfigSelector, llmRoutingConfigSelector;
21031
21035
  var init_selectors = __esm(() => {
21032
21036
  reviewConfigSelector = pickSelector("review", "review", "debate", "models", "execution", "project", "quality", "agent");
21033
21037
  planConfigSelector = pickSelector("plan", "plan", "debate");
@@ -21049,6 +21053,7 @@ var init_selectors = __esm(() => {
21049
21053
  interactionConfigSelector = pickSelector("interaction", "interaction");
21050
21054
  precheckConfigSelector = pickSelector("precheck", "precheck", "quality", "execution", "prompts", "review", "project");
21051
21055
  qualityConfigSelector = pickSelector("quality", "quality", "execution");
21056
+ autofixConfigSelector = pickSelector("autofix", "quality", "execution");
21052
21057
  testPatternConfigSelector = pickSelector("test-pattern", "execution", "project", "quality");
21053
21058
  contextConfigSelector = pickSelector("context", "context");
21054
21059
  contextToolRuntimeConfigSelector = pickSelector("context-tool-runtime", "context", "execution", "project", "quality");
@@ -21664,6 +21669,13 @@ var init_path_security2 = () => {};
21664
21669
  // src/context/engine/providers/code-neighbor.ts
21665
21670
  import { createHash as createHash3 } from "crypto";
21666
21671
  import { join as join6, relative, resolve as resolve5 } from "path";
21672
+ function isExcludedPath(file3, ignoreMatchers) {
21673
+ for (const prefix of EXCLUDED_DIR_PREFIXES) {
21674
+ if (file3.startsWith(prefix) || file3.includes(`/${prefix}`))
21675
+ return true;
21676
+ }
21677
+ return ignoreMatchers.some((m) => m.test(file3));
21678
+ }
21667
21679
  function contentHash8(content) {
21668
21680
  return createHash3("sha256").update(content).digest("hex").slice(0, 8);
21669
21681
  }
@@ -21768,7 +21780,7 @@ function stripExt(s) {
21768
21780
  function isTestFile(filePath, regex) {
21769
21781
  return regex.some((re) => re.test(filePath));
21770
21782
  }
21771
- async function collectNeighbors(filePath, workdir, extraGlobWorkdirs, siblingTestContext) {
21783
+ async function collectNeighbors(filePath, workdir, extraGlobWorkdirs, siblingTestContext, ignoreMatchers) {
21772
21784
  const neighbors = new Set;
21773
21785
  if (await _codeNeighborDeps.fileExists(join6(workdir, filePath))) {
21774
21786
  const content = await _codeNeighborDeps.readFile(join6(workdir, filePath));
@@ -21781,7 +21793,7 @@ async function collectNeighbors(filePath, workdir, extraGlobWorkdirs, siblingTes
21781
21793
  const fileBaseName = (filePath.split("/").pop() ?? filePath).replace(/\.[^.]+$/, "");
21782
21794
  const fileNoExt = filePath.replace(/\.[^.]+$/, "");
21783
21795
  const scanForReverseDeps = async (scanWorkdir) => {
21784
- const srcFiles = _codeNeighborDeps.glob(SOURCE_GLOB, scanWorkdir);
21796
+ const srcFiles = _codeNeighborDeps.glob(SOURCE_GLOB, scanWorkdir, ignoreMatchers);
21785
21797
  for (const srcFile of srcFiles) {
21786
21798
  if (neighbors.size >= MAX_NEIGHBORS_PER_FILE)
21787
21799
  break;
@@ -21864,9 +21876,10 @@ class CodeNeighborProvider {
21864
21876
  globs: request.resolvedTestPatterns.globs,
21865
21877
  regex: request.resolvedTestPatterns.regex
21866
21878
  } : undefined;
21879
+ const ignoreMatchers = request.naxIgnoreIndex?.getMatchers(workdir);
21867
21880
  const sections = [];
21868
21881
  for (const file3 of filesToProcess) {
21869
- const neighbors = await collectNeighbors(file3, workdir, extraGlobWorkdirs, siblingTestContext);
21882
+ const neighbors = await collectNeighbors(file3, workdir, extraGlobWorkdirs, siblingTestContext, ignoreMatchers);
21870
21883
  if (neighbors.length > 0) {
21871
21884
  sections.push(`### ${file3}
21872
21885
  ${neighbors.map((n) => `- ${n}`).join(`
@@ -21899,22 +21912,34 @@ ${sections.join(`
21899
21912
  return { chunks: [chunk], pullTools: [] };
21900
21913
  }
21901
21914
  }
21902
- var MAX_FILES = 10, MAX_NEIGHBORS_PER_FILE = 8, MAX_GLOB_FILES = 200, MAX_CHUNK_TOKENS = 500, SOURCE_GLOB = "src/**/*.{ts,tsx,js,jsx,py,go,rs,java,rb,php,cs,cpp,c,h}", _codeNeighborDeps, FROM_PATTERN, REQUIRE_PATTERN, IMPORT_SIDE_EFFECT_PATTERN;
21915
+ var MAX_FILES = 10, MAX_NEIGHBORS_PER_FILE = 8, MAX_GLOB_FILES = 200, MAX_CHUNK_TOKENS = 500, SOURCE_GLOB = "**/*.{ts,tsx,js,jsx,py,go,rs,java,rb,php,cs,cpp,c,h}", EXCLUDED_DIR_PREFIXES, _codeNeighborDeps, FROM_PATTERN, REQUIRE_PATTERN, IMPORT_SIDE_EFFECT_PATTERN;
21903
21916
  var init_code_neighbor = __esm(() => {
21904
21917
  init_logger2();
21905
21918
  init_workspace();
21906
21919
  init_path_security2();
21920
+ EXCLUDED_DIR_PREFIXES = [
21921
+ "node_modules/",
21922
+ ".git/",
21923
+ ".nax/",
21924
+ "vendor/",
21925
+ "dist/",
21926
+ "build/",
21927
+ "out/",
21928
+ ".cache/"
21929
+ ];
21907
21930
  _codeNeighborDeps = {
21908
21931
  fileExists: (path) => Bun.file(path).exists(),
21909
21932
  readFile: (path) => Bun.file(path).text(),
21910
21933
  discoverWorkspacePackages: (repoRoot) => discoverWorkspacePackages(repoRoot),
21911
21934
  getLogger,
21912
- glob: (pattern, cwd) => {
21935
+ glob: (pattern, cwd, ignoreMatchers = []) => {
21913
21936
  const g = new Bun.Glob(pattern);
21914
21937
  const results = [];
21915
21938
  let count = 0;
21916
21939
  let truncated = false;
21917
21940
  for (const file3 of g.scanSync({ cwd, absolute: false })) {
21941
+ if (isExcludedPath(file3, ignoreMatchers))
21942
+ continue;
21918
21943
  if (count >= MAX_GLOB_FILES) {
21919
21944
  truncated = true;
21920
21945
  break;
@@ -24663,7 +24688,7 @@ async function gitLsFiles(workdir) {
24663
24688
  }
24664
24689
  }
24665
24690
  function isExcluded(path) {
24666
- return EXCLUDED_DIR_PREFIXES.some((prefix) => path.startsWith(prefix) || path.includes(`/${prefix}`));
24691
+ return EXCLUDED_DIR_PREFIXES2.some((prefix) => path.startsWith(prefix) || path.includes(`/${prefix}`));
24667
24692
  }
24668
24693
  async function detectFromFileScan(workdir) {
24669
24694
  const files = await gitLsFiles(workdir);
@@ -24701,9 +24726,9 @@ async function detectFromFileScan(workdir) {
24701
24726
  patterns
24702
24727
  };
24703
24728
  }
24704
- var EXCLUDED_DIR_PREFIXES, MIN_COUNT_THRESHOLD = 5, MIN_FRACTION_THRESHOLD = 0.1, CANDIDATE_SUFFIXES, SUFFIX_TO_GLOB, _fileScanDeps;
24729
+ var EXCLUDED_DIR_PREFIXES2, MIN_COUNT_THRESHOLD = 5, MIN_FRACTION_THRESHOLD = 0.1, CANDIDATE_SUFFIXES, SUFFIX_TO_GLOB, _fileScanDeps;
24705
24730
  var init_file_scan = __esm(() => {
24706
- EXCLUDED_DIR_PREFIXES = ["node_modules/", "dist/", "build/", ".nax/", "coverage/", ".git/"];
24731
+ EXCLUDED_DIR_PREFIXES2 = ["node_modules/", "dist/", "build/", ".nax/", "coverage/", ".git/"];
24707
24732
  CANDIDATE_SUFFIXES = [
24708
24733
  ".test.ts",
24709
24734
  ".test.tsx",
@@ -27118,12 +27143,13 @@ function formatPriorFailures(failures) {
27118
27143
  **Review Findings (fix these issues):**`);
27119
27144
  for (const finding of failure.reviewFindings) {
27120
27145
  const source = finding.source ? ` (${finding.source})` : "";
27146
+ const loc = finding.file ? `${finding.file}:${finding.line ?? 0}` : "global";
27121
27147
  parts.push(`
27122
- - **[${finding.severity}]** \`${finding.file}:${finding.line}\`${source}`);
27123
- parts.push(` **Rule:** ${finding.ruleId}`);
27148
+ - **[${finding.severity}]** \`${loc}\`${source}`);
27149
+ parts.push(` **Rule:** ${finding.rule ?? finding.category}`);
27124
27150
  parts.push(` **Issue:** ${finding.message}`);
27125
- if (finding.url) {
27126
- parts.push(` **Docs:** ${finding.url}`);
27151
+ if (typeof finding.meta?.url === "string") {
27152
+ parts.push(` **Docs:** ${finding.meta.url}`);
27127
27153
  }
27128
27154
  }
27129
27155
  }
@@ -27685,7 +27711,7 @@ isolation scope: Implement source code in src/ to make tests pass. Do not modify
27685
27711
  if (role === "verifier") {
27686
27712
  return `${header}
27687
27713
 
27688
- isolation scope: Read-only inspection. Review all test results, implementation code, and acceptance criteria compliance. You MAY write a verdict file (.nax-verifier-verdict.json) and apply legitimate fixes if needed.${footer}`;
27714
+ isolation scope: Read-only TDD integrity inspection. Review story-scoped test results and test-file modifications. Do NOT apply source or test fixes. You MAY write only the verdict file (.nax-verifier-verdict.json).${footer}`;
27689
27715
  }
27690
27716
  if (role === "single-session") {
27691
27717
  return `${header}
@@ -27783,17 +27809,18 @@ Instructions:
27783
27809
  if (role === "verifier") {
27784
27810
  return `# Role: Verifier
27785
27811
 
27786
- Your task: Review and verify the implementation against acceptance criteria.
27812
+ Your task: verify the TDD handoff integrity for this story.
27787
27813
 
27788
- Context: You are the final session in a multi-session workflow. A test-writer created tests, and an implementer wrote the code. The orchestrator has already run the full test suite and confirmed it passes before handing off to you.
27814
+ Context: You are the final session in a multi-session workflow. A test-writer created tests, and an implementer wrote the code. The orchestrator has already attempted the full-suite gate before handing off to you; it may have passed, failed, or exhausted rectification.
27789
27815
 
27790
27816
  Instructions:
27791
27817
  - Run ONLY the story's scoped test files \u2014 do NOT run the full test suite (the orchestrator already handled that)
27792
- - Check that implementation meets all acceptance criteria from the story
27793
- - Inspect code quality, error handling, and edge cases
27818
+ - Confirm the story-scoped tests pass
27819
+ - Check whether the implementer modified test files after the test-writer phase
27794
27820
  - Verify any test modifications (if any) are legitimate fixes, not shortcuts
27821
+ - Do NOT perform semantic acceptance review; semantic/adversarial review stages own acceptance criteria and broad code-quality findings
27795
27822
  - Write a detailed verdict with reasoning
27796
- - Goal: verify story-scoped tests pass, provide comprehensive code review and quality assurance`;
27823
+ - Goal: verify story-scoped tests pass and test integrity was preserved`;
27797
27824
  }
27798
27825
  if (role === "single-session") {
27799
27826
  return `# Role: Single-Session
@@ -27911,16 +27938,13 @@ After completing your verification, you **MUST** write a verdict file at the **p
27911
27938
  **File:** \`.nax-verifier-verdict.json\`
27912
27939
 
27913
27940
  Set \`approved: true\` when ALL of these conditions are met:
27914
- - All story-scoped tests pass (the orchestrator already confirmed the full suite passes \u2014 you only need to verify the story's own tests)
27915
- - Implementation is clean and follows conventions
27916
- - All acceptance criteria met
27941
+ - All story-scoped tests pass (the orchestrator already attempted the full-suite gate \u2014 you only need to verify the story's own tests)
27917
27942
  - Any test modifications by implementer are legitimate fixes
27918
27943
 
27919
27944
  Set \`approved: false\` when ANY of these conditions are true:
27920
27945
  - Tests are failing and you cannot fix them
27921
27946
  - The implementer loosened test assertions to mask bugs
27922
- - Critical acceptance criteria are not met
27923
- - Code quality is poor (security issues, severe bugs, etc.)
27947
+ - The implementer made illegitimate test changes
27924
27948
 
27925
27949
  **JSON schema** (fill in all fields with real values):
27926
27950
 
@@ -27931,10 +27955,11 @@ Set \`approved: false\` when ANY of these conditions are true:
27931
27955
  **Field notes:**
27932
27956
  - \`quality.rating\` must be one of: \`"good"\`, \`"acceptable"\`, \`"poor"\`
27933
27957
  - \`testModifications.files\` \u2014 list any test files the implementer changed
27934
- - \`fixes\` \u2014 list any fixes you applied yourself during this verification session
27958
+ - \`acceptanceCriteria\` and \`quality\` are advisory in this TDD verifier verdict; do not use them to reject semantic correctness
27959
+ - \`fixes\` \u2014 keep this empty; the verifier must not apply code or test fixes
27935
27960
  - \`reasoning\` \u2014 brief summary of your overall assessment
27936
27961
 
27937
- When done, commit any fixes with message: "fix: verify and adjust ${story.title}"`;
27962
+ When done, do not commit code changes. Only write the verdict file.`;
27938
27963
  }
27939
27964
 
27940
27965
  // src/prompts/sections/conventions.ts
@@ -28397,7 +28422,7 @@ ${c.output}`).join(`
28397
28422
  `);
28398
28423
  }
28399
28424
  buildReReviewPrompt(updatedDiff, previousFindings) {
28400
- const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.ruleId}: ${f.message}`).join(`
28425
+ const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.rule ?? "semantic"}: ${f.message}`).join(`
28401
28426
  `) : "(none)";
28402
28427
  return [
28403
28428
  "This is a follow-up re-review. Please review the updated diff below.",
@@ -28442,7 +28467,7 @@ ${c.output}`).join(`
28442
28467
  }
28443
28468
  buildReResolverPrompt(proposals, critiques, diffContext, previousFindings, resolverContext) {
28444
28469
  const framing = this.buildResolverFraming(resolverContext);
28445
- const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.ruleId}: ${f.message}`).join(`
28470
+ const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.rule ?? "semantic"}: ${f.message}`).join(`
28446
28471
  `) : "(none)";
28447
28472
  const proposalsSection = this.buildLabeledProposalsSection(proposals);
28448
28473
  const critiquesSection = this.buildLabeledCritiquesSection(critiques);
@@ -28655,6 +28680,58 @@ function tryParseLLMJson(text) {
28655
28680
  }
28656
28681
  }
28657
28682
 
28683
+ // src/prompts/builders/prior-iterations-builder.ts
28684
+ function buildPriorIterationsBlock(iterations) {
28685
+ if (iterations.length === 0)
28686
+ return "";
28687
+ const rows = iterations.map((iter) => {
28688
+ const strategies = iter.fixesApplied.map((fa) => fa.strategyName).join(", ") || "-";
28689
+ const files = iter.fixesApplied.flatMap((fa) => fa.targetFiles).join(", ") || "-";
28690
+ const outcome = iter.outcome;
28691
+ const findingSummary = formatFindingSummary(iter.findingsBefore, iter.findingsAfter);
28692
+ return `| ${iter.iterationNum} | ${strategies} | ${files} | ${outcome} | ${findingSummary} |`;
28693
+ });
28694
+ const header = "| # | Strategies run | Files touched | Outcome | Findings before \u2192 after |";
28695
+ const separator = "|---|----------------|---------------|---------|--------------------------|";
28696
+ const table = [header, separator, ...rows].join(`
28697
+ `);
28698
+ const hasUnchanged = iterations.some((i) => i.outcome === "unchanged");
28699
+ const unchangedNote = hasUnchanged ? `
28700
+ When outcome is "unchanged", the prior hypothesis is FALSIFIED \u2014 the change did not affect what was tested. Choose a different category before producing a new verdict. Do NOT repeat fixes listed above.` : "";
28701
+ return `## Prior Iterations \u2014 verdict required before new analysis
28702
+
28703
+ ${table}${unchangedNote}
28704
+
28705
+ `;
28706
+ }
28707
+ function formatFindingSummary(before, after) {
28708
+ const beforeStr = before.length === 0 ? "0" : formatFindingCount(before);
28709
+ const afterStr = after.length === 0 ? "0" : formatFindingCount(after);
28710
+ return `${beforeStr} \u2192 ${afterStr}`;
28711
+ }
28712
+ function formatFindingCount(findings) {
28713
+ const count = findings.length;
28714
+ const topCategory = mostFrequentCategory(findings);
28715
+ return topCategory !== null ? `${count} [${topCategory}]` : `${count}`;
28716
+ }
28717
+ function mostFrequentCategory(findings) {
28718
+ if (findings.length === 0)
28719
+ return null;
28720
+ const freq = new Map;
28721
+ for (const f of findings) {
28722
+ freq.set(f.category, (freq.get(f.category) ?? 0) + 1);
28723
+ }
28724
+ let top = null;
28725
+ let topCount = 0;
28726
+ for (const [cat, cnt] of freq) {
28727
+ if (cnt > topCount) {
28728
+ topCount = cnt;
28729
+ top = cat;
28730
+ }
28731
+ }
28732
+ return top;
28733
+ }
28734
+
28658
28735
  // src/prompts/builders/review-builder.ts
28659
28736
  class ReviewPromptBuilder {
28660
28737
  buildSemanticReviewPrompt(story, semanticConfig, options) {
@@ -28665,7 +28742,7 @@ class ReviewPromptBuilder {
28665
28742
  ${semanticConfig.rules.map((r, i) => `${i + 1}. ${r}`).join(`
28666
28743
  `)}
28667
28744
  ` : "";
28668
- const attemptContextBlock = buildAttemptContextBlock(options.priorFailures);
28745
+ const priorIterationsBlock = buildPriorIterationsBlock(options.priorSemanticIterations ?? []);
28669
28746
  let diffSection;
28670
28747
  if (options.mode === "ref") {
28671
28748
  diffSection = buildRefDiffSection(options.storyGitRef ?? "", options.stat ?? "", options.excludePatterns ?? []);
@@ -28681,7 +28758,7 @@ ${story.description}
28681
28758
 
28682
28759
  ### Acceptance Criteria
28683
28760
  ${acList}
28684
- ${customRulesBlock}${attemptContextBlock}${diffSection}
28761
+ ${customRulesBlock}${priorIterationsBlock}${diffSection}
28685
28762
  ${SEMANTIC_INSTRUCTIONS}
28686
28763
  ${SEMANTIC_OUTPUT_SCHEMA}`;
28687
28764
  return wrapJsonPrompt(core2);
@@ -28705,17 +28782,6 @@ Output ONLY a complete, valid JSON object. It must start with { and end with }.
28705
28782
  Schema: {"passed": boolean, "findings": [{"severity": string, "category": string, "file": string, "line": number, "issue": string, "suggestion": string}]}`;
28706
28783
  }
28707
28784
  }
28708
- function buildAttemptContextBlock(priorFailures) {
28709
- if (!priorFailures || priorFailures.length === 0)
28710
- return "";
28711
- const attemptNumber = priorFailures.length + 1;
28712
- const stages = priorFailures.map((f) => f.stage).join(", ");
28713
- return `## Attempt Context
28714
- This is escalation attempt ${attemptNumber}. Prior attempts failed at stages: ${stages}.
28715
- The diff shows the NET result of all changes since story start \u2014 verify against the current codebase state.
28716
-
28717
- `;
28718
- }
28719
28785
  function buildEmbeddedDiffSection(diff) {
28720
28786
  return `## Git Diff (production code only \u2014 test files excluded)
28721
28787
 
@@ -28769,7 +28835,7 @@ Do NOT flag: style issues, naming conventions, import ordering, file length, or
28769
28835
  "passed": boolean,
28770
28836
  "findings": [
28771
28837
  {
28772
- "severity": "error" | "warn" | "info" | "unverifiable",
28838
+ "severity": "error" | "warning" | "info" | "unverifiable",
28773
28839
  "file": "path/to/file",
28774
28840
  "line": 42,
28775
28841
  "issue": "description of the issue",
@@ -28853,25 +28919,6 @@ ${testInventory.addedTestFiles.map((f) => ` - ${f}`).join(`
28853
28919
  \`\`\`diff
28854
28920
  ${diff}\`\`\`
28855
28921
 
28856
- `;
28857
- }
28858
- function buildPriorFindingsBlock(round, findings) {
28859
- const rows = findings.map((f) => {
28860
- const location = f.line !== undefined ? `${f.file}:${f.line}` : f.file;
28861
- const category = f.category ?? "\u2014";
28862
- return `| ${f.severity} | ${category} | ${location} | ${f.issue} |`;
28863
- }).join(`
28864
- `);
28865
- return `## Prior Adversarial Findings \u2014 Round ${round}
28866
-
28867
- The following issues were flagged in the previous adversarial review round.
28868
- **Verdict on each of these first \u2014 determine whether each has been fixed, partially addressed, or is still present.**
28869
- Then continue scanning for new issues.
28870
-
28871
- | Severity | Category | Location | Issue |
28872
- |:---------|:---------|:---------|:------|
28873
- ${rows}
28874
-
28875
28922
  `;
28876
28923
  }
28877
28924
 
@@ -28882,14 +28929,13 @@ class AdversarialReviewPromptBuilder {
28882
28929
  diff,
28883
28930
  storyGitRef,
28884
28931
  stat,
28885
- priorFailures,
28886
28932
  testInventory,
28887
28933
  excludePatterns,
28888
28934
  testGlobs,
28889
28935
  refExcludePatterns,
28890
- priorAdversarialFindings
28936
+ priorAdversarialIterations
28891
28937
  } = options;
28892
- const priorFindingsBlock = priorAdversarialFindings && priorAdversarialFindings.findings.length > 0 ? buildPriorFindingsBlock(priorAdversarialFindings.round, priorAdversarialFindings.findings) : "";
28938
+ const priorFindingsBlock = buildPriorIterationsBlock(priorAdversarialIterations ?? []);
28893
28939
  const storyBlock = `## Story Under Review
28894
28940
 
28895
28941
  **ID:** ${story.id}
@@ -28907,7 +28953,6 @@ ${config2.rules.map((r) => `- ${r}`).join(`
28907
28953
  `)}
28908
28954
 
28909
28955
  ` : "";
28910
- const attemptBlock = buildAttemptContextBlock(priorFailures);
28911
28956
  let diffBlock;
28912
28957
  if (mode === "ref" && storyGitRef) {
28913
28958
  diffBlock = buildAdversarialRefDiffSection(storyGitRef, stat, excludePatterns ?? [], testGlobs ?? [], refExcludePatterns ?? []);
@@ -28936,7 +28981,6 @@ ${config2.rules.map((r) => `- ${r}`).join(`
28936
28981
  `
28937
28982
 
28938
28983
  `,
28939
- attemptBlock,
28940
28984
  diffBlock
28941
28985
  ].join("");
28942
28986
  }
@@ -28998,7 +29042,7 @@ Respond with ONLY a JSON object \u2014 no preamble, no explanation outside the J
28998
29042
  "passed": true | false,
28999
29043
  "findings": [
29000
29044
  {
29001
- "severity": "error" | "warn" | "info" | "unverifiable",
29045
+ "severity": "error" | "warning" | "info" | "unverifiable",
29002
29046
  "category": "input" | "error-path" | "abandonment" | "test-gap" | "convention" | "assumption",
29003
29047
  "file": "relative/path/to/file.ts",
29004
29048
  "line": 42,
@@ -29011,15 +29055,13 @@ Respond with ONLY a JSON object \u2014 no preamble, no explanation outside the J
29011
29055
 
29012
29056
  Severity guide:
29013
29057
  - \`"error"\`: confident this will cause real failure or regression
29014
- - \`"warn"\`: fragile or incomplete but may ship without immediate breakage
29058
+ - \`"warning"\`: fragile or incomplete but may ship without immediate breakage
29015
29059
  - \`"info"\`: noteworthy but not actionable as a blocker
29016
29060
  - \`"unverifiable"\`: suspect problem but couldn't confirm from available artifacts
29017
29061
 
29018
- \`passed\` must be \`false\` if any finding has severity \`"error"\` or \`"warn"\`.
29062
+ \`passed\` must be \`false\` if any finding has severity \`"error"\` or \`"warning"\`.
29019
29063
  \`passed\` may be \`true\` with findings if all findings are \`"info"\` or \`"unverifiable"\`.`;
29020
- var init_adversarial_review_builder = __esm(() => {
29021
- init_review_builder();
29022
- });
29064
+ var init_adversarial_review_builder = () => {};
29023
29065
 
29024
29066
  // src/prompts/builders/acceptance-builder.ts
29025
29067
  class AcceptancePromptBuilder {
@@ -29036,9 +29078,6 @@ ${f.content}
29036
29078
  \`\`\``).join(`
29037
29079
 
29038
29080
  `)}` : "";
29039
- const prevFailureSection = p.previousFailure && p.previousFailure.length > 0 ? `
29040
-
29041
- Previous test failed because: ${p.previousFailure}` : "";
29042
29081
  return `You are a senior test engineer. Your task is to generate a complete acceptance test file for the "${p.featureName}" feature.
29043
29082
 
29044
29083
  ${STEP1}
@@ -29052,7 +29091,7 @@ ${STEP3_HEADER}
29052
29091
  ${STEP3_SHARED_RULES}
29053
29092
  - **File output (REQUIRED)**: Write the acceptance test file DIRECTLY to the path shown below. Do NOT output the test code in your response. After writing the file, reply with a brief confirmation.
29054
29093
  - **Path anchor (CRITICAL)**: Write the test file to this exact path: \`${p.targetTestFilePath}\`. Import from package sources using relative paths like \`../../../src/...\` (3 levels up from \`.nax/features/<name>/\` to the package root).
29055
- - **Process cwd**: When spawning child processes to invoke a CLI or binary, set the working directory to the **package root** (\`join(import.meta.dir, "../../..")\`) as your default \u2014 unless your Step 2 exploration reveals the CLI uses a different working directory convention (e.g. reads config from \`~/.config/\`, or resolves paths relative to a flag value). Always check how the CLI resolves file paths before assuming.${implSection}${prevFailureSection}`;
29094
+ - **Process cwd**: When spawning child processes to invoke a CLI or binary, set the working directory to the **package root** (\`join(import.meta.dir, "../../..")\`) as your default \u2014 unless your Step 2 exploration reveals the CLI uses a different working directory convention (e.g. reads config from \`~/.config/\`, or resolves paths relative to a flag value). Always check how the CLI resolves file paths before assuming.${implSection}`;
29056
29095
  }
29057
29096
  buildGeneratorFromSpecPrompt(p) {
29058
29097
  return `You are a senior test engineer. Your task is to generate a complete acceptance test file for the "${p.featureName}" feature.
@@ -29070,6 +29109,21 @@ ${STEP3_SHARED_RULES}
29070
29109
  - **Path anchor (CRITICAL)**: This test file will be saved at \`<repo-root>/.nax/features/${p.featureName}/${p.resolvedTestPath}\` and will ALWAYS run from the repo root. The repo root is exactly 3 \`../\` levels above \`__dirname\`: \`join(__dirname, '..', '..', '..')\`. For monorepo projects, navigate into packages from root (e.g. \`join(root, 'apps/api/src')\`).`;
29071
29110
  }
29072
29111
  buildDiagnosisPromptTemplate(p) {
29112
+ const responseSchema = `{
29113
+ "verdict": "source_bug" | "test_bug" | "both",
29114
+ "reasoning": "Your analysis explaining why this is a source_bug, test_bug, or both",
29115
+ "confidence": 0.0-1.0,
29116
+ "findings": [
29117
+ {
29118
+ "fixTarget": "source" | "test",
29119
+ "category": "stdout-capture" | "ac-mismatch" | "framework-misuse" | "missing-impl" | "import-path" | "hook-failure" | "test-runner-error" | "stub-test" | "other",
29120
+ "file": "optional/path/relative/to/workdir.ts",
29121
+ "line": 0,
29122
+ "message": "Concrete description of the issue",
29123
+ "suggestion": "Optional concrete fix suggestion"
29124
+ }
29125
+ ]
29126
+ }`;
29073
29127
  return `You are a debugging expert. An acceptance test has failed.
29074
29128
 
29075
29129
  TASK: Diagnose whether the failure is due to a bug in the SOURCE CODE or a bug in the TEST CODE.
@@ -29084,15 +29138,9 @@ ${p.testFileContent}
29084
29138
 
29085
29139
  SOURCE FILES (auto-detected from imports, up to ${p.maxFileLines} lines each):
29086
29140
  ${p.sourceFilesSection}
29087
- ${p.verdictSection}${p.previousFailureSection}
29141
+ ${p.verdictSection}
29088
29142
  Respond with ONLY a JSON object in this exact format (no markdown, no extra text):
29089
- {
29090
- "verdict": "source_bug" | "test_bug" | "both",
29091
- "reasoning": "Your analysis explaining why this is a source_bug, test_bug, or both",
29092
- "confidence": 0.0-1.0,
29093
- "testIssues": ["Issue in test code if any"],
29094
- "sourceIssues": ["Issue in source code if any"]
29095
- }`;
29143
+ ${responseSchema}`;
29096
29144
  }
29097
29145
  buildSourceFixPrompt(p) {
29098
29146
  let prompt = `ACCEPTANCE TEST FAILURE:
@@ -29104,6 +29152,8 @@ ${p.testOutput}
29104
29152
  ${p.diagnosisReasoning}
29105
29153
 
29106
29154
  `;
29155
+ if (p.priorIterationsBlock)
29156
+ prompt += p.priorIterationsBlock;
29107
29157
  prompt += `ACCEPTANCE TEST FILE: ${p.acceptanceTestPath}
29108
29158
 
29109
29159
  `;
@@ -29167,17 +29217,12 @@ ${f.content}
29167
29217
  SEMANTIC VERDICTS:
29168
29218
  ${p.semanticVerdicts.map((v) => `- ${v.storyId}: ${v.passed ? "likely test bug (semantic review confirmed AC implementation)" : "unconfirmed"}`).join(`
29169
29219
  `)}
29170
- ` : "";
29171
- const previousFailureSection = p.previousFailure && p.previousFailure.length > 0 ? `
29172
- PREVIOUS FIX ATTEMPTS:
29173
- ${p.previousFailure}
29174
29220
  ` : "";
29175
29221
  return this.buildDiagnosisPromptTemplate({
29176
29222
  truncatedOutput,
29177
29223
  testFileContent: p.testFileContent,
29178
29224
  sourceFilesSection,
29179
29225
  verdictSection,
29180
- previousFailureSection,
29181
29226
  maxFileLines: MAX_FILE_LINES
29182
29227
  });
29183
29228
  }
@@ -29277,12 +29322,8 @@ ${p.testOutput}
29277
29322
  ${p.diagnosisReasoning}
29278
29323
 
29279
29324
  `;
29280
- if (p.previousFailure && p.previousFailure.length > 0) {
29281
- prompt += `PREVIOUS FAILED ATTEMPTS:
29282
- ${p.previousFailure}
29283
-
29284
- `;
29285
- }
29325
+ if (p.priorIterationsBlock)
29326
+ prompt += p.priorIterationsBlock;
29286
29327
  prompt += `ACCEPTANCE TEST FILE: ${p.acceptanceTestPath}
29287
29328
 
29288
29329
  `;
@@ -30318,7 +30359,9 @@ function turnResultToAgentResult(r) {
30318
30359
  durationMs: 0,
30319
30360
  estimatedCostUsd: r.estimatedCostUsd ?? 0,
30320
30361
  exactCostUsd: r.exactCostUsd,
30321
- tokenUsage: r.tokenUsage
30362
+ tokenUsage: r.tokenUsage,
30363
+ protocolIds: r.protocolIds,
30364
+ internalRoundTrips: r.internalRoundTrips
30322
30365
  };
30323
30366
  }
30324
30367
  function buildHopCallback(ctx, sessionId, _initialOptions) {
@@ -31876,8 +31919,7 @@ var init_acceptance_generate = __esm(() => {
31876
31919
  criteriaList: input.criteriaList,
31877
31920
  frameworkOverrideLine: input.frameworkOverrideLine,
31878
31921
  targetTestFilePath: input.targetTestFilePath,
31879
- implementationContext: input.implementationContext,
31880
- previousFailure: input.previousFailure
31922
+ implementationContext: input.implementationContext
31881
31923
  });
31882
31924
  return {
31883
31925
  role: { id: "role", content: "", overridable: false },
@@ -31990,8 +32032,7 @@ var init_acceptance_diagnose = __esm(() => {
31990
32032
  testOutput: input.testOutput,
31991
32033
  testFileContent: input.testFileContent,
31992
32034
  sourceFiles: input.sourceFiles,
31993
- semanticVerdicts: input.semanticVerdicts,
31994
- previousFailure: input.previousFailure
32035
+ semanticVerdicts: input.semanticVerdicts
31995
32036
  });
31996
32037
  return {
31997
32038
  role: { id: "role", content: "", overridable: false },
@@ -32001,13 +32042,26 @@ var init_acceptance_diagnose = __esm(() => {
32001
32042
  parse(output, _input, _ctx) {
32002
32043
  const raw = tryParseLLMJson(output);
32003
32044
  if (raw && typeof raw.verdict === "string" && typeof raw.reasoning === "string" && typeof raw.confidence === "number") {
32004
- return {
32045
+ const base = {
32005
32046
  verdict: raw.verdict,
32006
32047
  reasoning: raw.reasoning,
32007
- confidence: raw.confidence,
32008
- testIssues: Array.isArray(raw.testIssues) ? raw.testIssues : undefined,
32009
- sourceIssues: Array.isArray(raw.sourceIssues) ? raw.sourceIssues : undefined
32048
+ confidence: raw.confidence
32010
32049
  };
32050
+ if (Array.isArray(raw.findings) && raw.findings.length > 0) {
32051
+ const findings = raw.findings.filter((f) => typeof f.message === "string" && typeof f.category === "string").map((f) => ({
32052
+ source: "acceptance-diagnose",
32053
+ severity: typeof f.severity === "string" ? f.severity : "error",
32054
+ category: String(f.category),
32055
+ message: String(f.message),
32056
+ fixTarget: f.fixTarget ?? undefined,
32057
+ file: typeof f.file === "string" ? f.file : undefined,
32058
+ line: typeof f.line === "number" ? f.line : undefined,
32059
+ suggestion: typeof f.suggestion === "string" ? f.suggestion : undefined
32060
+ }));
32061
+ if (findings.length > 0)
32062
+ return { ...base, findings };
32063
+ }
32064
+ return base;
32011
32065
  }
32012
32066
  return FALLBACK;
32013
32067
  }
@@ -32030,6 +32084,7 @@ var init_acceptance_fix = __esm(() => {
32030
32084
  const prompt = new AcceptancePromptBuilder().buildSourceFixPrompt({
32031
32085
  testOutput: input.testOutput,
32032
32086
  diagnosisReasoning: input.diagnosisReasoning,
32087
+ priorIterationsBlock: input.priorIterationsBlock,
32033
32088
  acceptanceTestPath: input.acceptanceTestPath,
32034
32089
  testFileContent: input.testFileContent
32035
32090
  });
@@ -32053,10 +32108,10 @@ var init_acceptance_fix = __esm(() => {
32053
32108
  const prompt = new AcceptancePromptBuilder().buildTestFixPrompt({
32054
32109
  testOutput: input.testOutput,
32055
32110
  diagnosisReasoning: input.diagnosisReasoning,
32111
+ priorIterationsBlock: input.priorIterationsBlock,
32056
32112
  failedACs: input.failedACs,
32057
32113
  acceptanceTestPath: input.acceptanceTestPath,
32058
- testFileContent: input.testFileContent ?? "",
32059
- previousFailure: input.previousFailure
32114
+ testFileContent: input.testFileContent ?? ""
32060
32115
  });
32061
32116
  return {
32062
32117
  role: { id: "role", content: "", overridable: false },
@@ -32069,32 +32124,120 @@ var init_acceptance_fix = __esm(() => {
32069
32124
  };
32070
32125
  });
32071
32126
 
32072
- // src/review/truncation.ts
32073
- function looksLikeTruncatedJson(raw) {
32074
- return raw.trimEnd().length >= MAX_AGENT_OUTPUT_CHARS - 100;
32075
- }
32076
- var init_truncation = __esm(() => {
32077
- init_adapter();
32127
+ // src/review/severity.ts
32128
// Severity ranking shared by the review helpers: higher rank = more severe.
var SEVERITY_RANK;
var init_severity = __esm(() => {
  // "unverifiable" deliberately shares rank 0 with "info" so that findings
  // downgraded for missing evidence never count as blocking; "low" and
  // "warning" are treated as equally severe.
  SEVERITY_RANK = {
    info: 0,
    unverifiable: 0,
    low: 1,
    warning: 1,
    error: 2,
    critical: 3
  };
});
32079
32139
 
32080
- // src/operations/types.ts
32081
- function parseLlmReviewShape(raw) {
32082
- if (typeof raw !== "object" || raw === null)
32140
+ // src/review/semantic-helpers.ts
32141
/** Accepts only `{ passed: boolean, findings: array }`; anything else → null. */
function validateLLMShape(parsed) {
  if (parsed === null || typeof parsed !== "object")
    return null;
  const { passed, findings } = parsed;
  if (typeof passed !== "boolean")
    return null;
  if (!Array.isArray(findings))
    return null;
  return { passed, findings };
}
/** Parses raw LLM output and validates its shape; null on any failure. */
function parseLLMResponse(raw) {
  try {
    const candidate = tryParseLLMJson(raw);
    return validateLLMShape(candidate);
  } catch {
    return null;
  }
}
/** Renders findings as one human-readable "[severity] file:line — issue / Suggestion" entry each. */
function formatFindings(findings) {
  const rendered = findings.map((f) => `[${f.severity}] ${f.file}:${f.line} \u2014 ${f.issue}
  Suggestion: ${f.suggestion}`);
  return rendered.join(`
`);
}
/** Maps the legacy "warn" alias, passes known severities through, and treats anything unrecognized as "info". */
function normalizeSeverity(sev) {
  switch (sev) {
    case "warn":
      return "warning";
    case "critical":
    case "error":
    case "warning":
    case "info":
    case "low":
    case "unverifiable":
      return sev;
    default:
      return "info";
  }
}
/** True when `sev` ranks at or above `threshold`; unknown severities rank 0, unknown thresholds rank as "error". */
function isBlockingSeverity(sev, threshold = "error") {
  const rank = SEVERITY_RANK[sev] ?? 0;
  const bar = SEVERITY_RANK[threshold] ?? 2;
  return rank >= bar;
}
32173
/** In ref-mode reviews, downgrades blocking findings that lack verified evidence; other diff modes pass through untouched. */
function sanitizeRefModeFindings(findings, diffMode) {
  if (diffMode !== "ref")
    return findings;
  return findings.map((finding) => {
    if (needsDowngradeForMissingEvidence(finding))
      return downgradeToUnverifiable(finding);
    return finding;
  });
}
/** A blocking (error+) finding needs a downgrade when it admits an unverified source or cites no verified evidence. */
function needsDowngradeForMissingEvidence(finding) {
  const rank = SEVERITY_RANK[finding.severity] ?? 0;
  if (rank < SEVERITY_RANK.error)
    return false;
  if (mentionsUnverifiedSource(finding))
    return true;
  return !hasVerifiedEvidence(finding);
}
/** Case-insensitive scan of issue + suggestion text for "cannot verify"-style phrases. */
function mentionsUnverifiedSource(finding) {
  const haystack = `${finding.issue} ${finding.suggestion}`.toLowerCase();
  return UNVERIFIED_FINDING_PATTERNS.some((needle) => haystack.includes(needle));
}
/** Evidence counts only when both a file and an observed value are non-blank. */
function hasVerifiedEvidence(finding) {
  const evidence = finding.verifiedBy;
  if (!evidence?.file?.trim())
    return false;
  return !!evidence.observed?.trim();
}
/** Copy-on-write severity downgrade; the original finding object is left untouched. */
function downgradeToUnverifiable(finding) {
  return Object.assign({}, finding, { severity: "unverifiable" });
}
/** Adapts a raw LLM review finding to the shared Finding shape (always a source-side fix). */
function llmFindingToFinding(f) {
  const result = {
    source: "semantic-review",
    severity: normalizeSeverity(f.severity),
    category: "",
    file: f.file,
    line: f.line,
    message: f.issue,
    suggestion: f.suggestion ?? undefined,
    fixTarget: "source",
    meta: f.verifiedBy ? { verifiedBy: f.verifiedBy } : undefined
  };
  return result;
}
function toReviewFindings(findings) {
  return findings.map((f) => llmFindingToFinding(f));
}
32213
// Phrases that indicate the reviewer admitted it could not verify a claim;
// matched case-insensitively against a finding's issue + suggestion text
// (see mentionsUnverifiedSource above).
var UNVERIFIED_FINDING_PATTERNS;
var init_semantic_helpers = __esm(() => {
  init_severity();
  UNVERIFIED_FINDING_PATTERNS = [
    "cannot verify",
    "can't verify",
    "from diff alone",
    "missing from diff",
    "not found in diff",
    "not present in diff",
    "does not appear in diff"
  ];
});
32226
+
32227
// src/review/truncation.ts
// Heuristic: output whose trimmed length sits within 100 chars of the
// adapter's hard output cap was almost certainly cut off mid-JSON.
function looksLikeTruncatedJson(raw) {
  return raw.trimEnd().length >= MAX_AGENT_OUTPUT_CHARS - 100;
}
var init_truncation = __esm(() => {
  init_adapter();
});
32091
32234
 
32092
32235
  // src/operations/semantic-review.ts
32093
32236
  var FAIL_OPEN, semanticReviewHopBody = async (initialPrompt, ctx) => {
32094
32237
  const first = await ctx.send(initialPrompt);
32095
32238
  const isTruncated = looksLikeTruncatedJson(first.output);
32096
32239
  const parsed = tryParseLLMJson(first.output);
32097
- if (!isTruncated && parsed && parseLlmReviewShape(parsed))
32240
+ if (!isTruncated && parsed && validateLLMShape(parsed))
32098
32241
  return first;
32099
32242
  const retryPrompt = isTruncated ? ReviewPromptBuilder.jsonRetryCondensed({ blockingThreshold: ctx.input.blockingThreshold }) : ReviewPromptBuilder.jsonRetry();
32100
32243
  if (isTruncated) {
@@ -32114,6 +32257,7 @@ var init_semantic_review = __esm(() => {
32114
32257
  init_config();
32115
32258
  init_logger2();
32116
32259
  init_prompts();
32260
+ init_semantic_helpers();
32117
32261
  init_truncation();
32118
32262
  FAIL_OPEN = { passed: true, findings: [], failOpen: true };
32119
32263
  semanticReviewOp = {
@@ -32131,7 +32275,7 @@ var init_semantic_review = __esm(() => {
32131
32275
  diff: input.diff,
32132
32276
  storyGitRef: input.storyGitRef,
32133
32277
  stat: input.stat,
32134
- priorFailures: input.priorFailures,
32278
+ priorSemanticIterations: input.priorSemanticIterations,
32135
32279
  excludePatterns: input.excludePatterns
32136
32280
  });
32137
32281
  const content = input.featureCtxBlock ? `${input.featureCtxBlock}${base}` : base;
@@ -32142,9 +32286,9 @@ var init_semantic_review = __esm(() => {
32142
32286
  },
32143
32287
  parse(output, _input, _ctx) {
32144
32288
  const raw = tryParseLLMJson(output);
32145
- const parsed = parseLlmReviewShape(raw);
32289
+ const parsed = validateLLMShape(raw);
32146
32290
  if (parsed)
32147
- return parsed;
32291
+ return { passed: parsed.passed, findings: parsed.findings };
32148
32292
  if (/"passed"\s*:\s*false/.test(output))
32149
32293
  return { passed: false, findings: [], looksLikeFail: true };
32150
32294
  return FAIL_OPEN;
@@ -32152,12 +32296,54 @@ var init_semantic_review = __esm(() => {
32152
32296
  };
32153
32297
  });
32154
32298
 
32299
+ // src/review/adversarial-helpers.ts
32300
/** Structural gate for adversarial review output: require `{ passed: boolean, findings: array }`. */
function validateAdversarialShape(parsed) {
  if (parsed === null || typeof parsed !== "object")
    return null;
  const candidate = parsed;
  const passedOk = typeof candidate.passed === "boolean";
  const findingsOk = Array.isArray(candidate.findings);
  if (!passedOk || !findingsOk)
    return null;
  return { passed: candidate.passed, findings: candidate.findings };
}
/** One "[severity][category] file:line — issue / Suggestion" entry per finding. */
function formatFindings2(findings) {
  const parts = [];
  for (const f of findings) {
    parts.push(`[${f.severity}][${f.category}] ${f.file}:${f.line} \u2014 ${f.issue}
  Suggestion: ${f.suggestion}`);
  }
  return parts.join(`
`);
}
/** Maps the legacy "warn" alias, passes known severities through, defaults to "info". */
function normalizeSeverity2(sev) {
  if (sev === "warn")
    return "warning";
  const known = ["critical", "error", "warning", "info", "low", "unverifiable"];
  return known.includes(sev) ? sev : "info";
}
/** True when `sev` ranks at or above `threshold`; unknown thresholds rank as "error". */
function isBlockingSeverity2(sev, threshold = "error") {
  const rank = SEVERITY_RANK[sev] ?? 0;
  return rank >= (SEVERITY_RANK[threshold] ?? 2);
}
/** Adapts raw adversarial findings to the shared Finding shape; test-gap findings are routed at the test file. */
function toAdversarialReviewFindings(findings) {
  return findings.map((f) => {
    const fixTarget = f.category === "test-gap" ? "test" : undefined;
    return {
      source: "adversarial-review",
      severity: normalizeSeverity2(f.severity),
      category: f.category,
      file: f.file,
      line: f.line,
      message: f.issue,
      suggestion: f.suggestion,
      fixTarget
    };
  });
}
32337
// Ensures the shared severity table is populated before these helpers run.
var init_adversarial_helpers = __esm(() => {
  init_severity();
});
32340
+
32155
32341
  // src/operations/adversarial-review.ts
32156
32342
  var FAIL_OPEN2, adversarialReviewHopBody = async (initialPrompt, ctx) => {
32157
32343
  const first = await ctx.send(initialPrompt);
32158
32344
  const isTruncated = looksLikeTruncatedJson(first.output);
32159
32345
  const parsed = tryParseLLMJson(first.output);
32160
- if (!isTruncated && parsed && parseLlmReviewShape(parsed))
32346
+ if (!isTruncated && parsed && validateAdversarialShape(parsed))
32161
32347
  return first;
32162
32348
  const retryPrompt = isTruncated ? ReviewPromptBuilder.jsonRetryCondensed({ blockingThreshold: ctx.input.blockingThreshold }) : ReviewPromptBuilder.jsonRetry();
32163
32349
  if (isTruncated) {
@@ -32177,6 +32363,7 @@ var init_adversarial_review = __esm(() => {
32177
32363
  init_config();
32178
32364
  init_logger2();
32179
32365
  init_prompts();
32366
+ init_adversarial_helpers();
32180
32367
  init_truncation();
32181
32368
  FAIL_OPEN2 = { passed: true, findings: [], failOpen: true };
32182
32369
  adversarialReviewOp = {
@@ -32194,12 +32381,11 @@ var init_adversarial_review = __esm(() => {
32194
32381
  diff: input.diff,
32195
32382
  storyGitRef: input.storyGitRef,
32196
32383
  stat: input.stat,
32197
- priorFailures: input.priorFailures,
32198
32384
  testInventory: input.testInventory,
32199
32385
  excludePatterns: input.excludePatterns,
32200
32386
  testGlobs: input.testGlobs,
32201
32387
  refExcludePatterns: input.refExcludePatterns,
32202
- priorAdversarialFindings: input.priorAdversarialFindings
32388
+ priorAdversarialIterations: input.priorAdversarialIterations
32203
32389
  });
32204
32390
  const content = input.featureCtxBlock ? `${input.featureCtxBlock}${base}` : base;
32205
32391
  return {
@@ -32209,9 +32395,9 @@ var init_adversarial_review = __esm(() => {
32209
32395
  },
32210
32396
  parse(output, _input, _ctx) {
32211
32397
  const raw = tryParseLLMJson(output);
32212
- const parsed = parseLlmReviewShape(raw);
32398
+ const parsed = validateAdversarialShape(raw);
32213
32399
  if (parsed)
32214
- return parsed;
32400
+ return { passed: parsed.passed, findings: parsed.findings };
32215
32401
  if (/"passed"\s*:\s*false/.test(output))
32216
32402
  return { passed: false, findings: [], looksLikeFail: true };
32217
32403
  return FAIL_OPEN2;
@@ -32225,6 +32411,55 @@ var init_rectify = __esm(() => {
32225
32411
  init_prompts();
32226
32412
  });
32227
32413
 
32414
+ // src/operations/autofix-implementer.ts
32415
// Rectification op that asks the implementer agent to fix failed review
// checks in source code. Runs in a fresh implementer session each time.
var implementerRectifyOp;
var init_autofix_implementer = __esm(() => {
  init_config();
  init_prompts();
  implementerRectifyOp = {
    kind: "run",
    name: "autofix-implementer",
    stage: "rectification",
    session: { role: "implementer", lifetime: "fresh" },
    config: autofixConfigSelector,
    build(input, _ctx) {
      // The whole instruction lives in the task slot; the role slot is empty
      // and neither slot may be overridden downstream.
      const prompt = RectifierPromptBuilder.reviewRectification(input.failedChecks, input.story);
      return {
        role: { id: "role", content: "", overridable: false },
        task: { id: "task", content: prompt, overridable: false }
      };
    },
    parse(output, _input, _ctx) {
      // The agent reports an unfixable check with a line starting
      // "UNRESOLVED: ..."; capture the reason but still mark the run applied.
      const match = output.match(/^UNRESOLVED:\s*(.+)$/ms);
      return { applied: true, ...match ? { unresolvedReason: match[1]?.trim() } : {} };
    }
  };
});
32438
+
32439
+ // src/operations/autofix-test-writer.ts
32440
// Rectification op that asks the test-writer agent to repair failed checks on
// the test side. Unlike the implementer variant, its output is not scanned
// for an UNRESOLVED marker — the run is always reported as applied.
var testWriterRectifyOp;
var init_autofix_test_writer = __esm(() => {
  init_config();
  init_prompts();
  testWriterRectifyOp = {
    kind: "run",
    name: "autofix-test-writer",
    stage: "rectification",
    session: { role: "test-writer", lifetime: "fresh" },
    config: autofixConfigSelector,
    build(input, _ctx) {
      // Entire instruction in the task slot; role slot intentionally empty.
      const prompt = RectifierPromptBuilder.testWriterRectification(input.failedChecks, input.story);
      return {
        role: { id: "role", content: "", overridable: false },
        task: { id: "task", content: prompt, overridable: false }
      };
    },
    parse(_output, _input, _ctx) {
      return { applied: true };
    }
  };
});
32462
+
32228
32463
  // src/operations/debate-propose.ts
32229
32464
  var debateProposeOp;
32230
32465
  var init_debate_propose = __esm(() => {
@@ -32645,7 +32880,8 @@ async function runTddSessionOp(op, options, beforeRef, contextBundle, sessionBin
32645
32880
  break;
32646
32881
  }
32647
32882
  const interactionBridge = includeContext ? buildInteractionBridge(interactionChain, { featureName, storyId: story.id, stage: "execution" }) : undefined;
32648
- return runTddSession(role, agent, agentManager, story, config2, workdir, tier, beforeRef, includeContext ? contextMarkdown : undefined, lite, skipIsolation, constitution, featureName, interactionBridge, projectDir, includeContext ? featureContextMarkdown : undefined, contextBundle, sessionBinding, abortSignal);
32883
+ const verifierLimitedContext = role === "verifier";
32884
+ return runTddSession(role, agent, agentManager, story, config2, workdir, tier, beforeRef, includeContext ? contextMarkdown : undefined, lite, skipIsolation, verifierLimitedContext ? undefined : constitution, featureName, interactionBridge, projectDir, includeContext ? featureContextMarkdown : undefined, verifierLimitedContext ? undefined : contextBundle, sessionBinding, abortSignal);
32649
32885
  }
32650
32886
  var writeTddTestOp, implementTddOp, verifyTddOp;
32651
32887
  var init_session_op = __esm(() => {
@@ -32764,6 +33000,8 @@ var init_operations = __esm(() => {
32764
33000
  init_semantic_review();
32765
33001
  init_adversarial_review();
32766
33002
  init_rectify();
33003
+ init_autofix_implementer();
33004
+ init_autofix_test_writer();
32767
33005
  init_debate_propose();
32768
33006
  init_debate_rebut();
32769
33007
  init_write_test();
@@ -36608,9 +36846,9 @@ async function runTrackedSession(state, id, runner, request) {
36608
36846
  ...request,
36609
36847
  runOptions: {
36610
36848
  ...request.runOptions,
36611
- onSessionEstablished: (protocolIds, sessionName2) => {
36849
+ onSessionEstablished: (protocolIds2, sessionName2) => {
36612
36850
  try {
36613
- state.bindHandle(id, sessionName2, protocolIds);
36851
+ state.bindHandle(id, sessionName2, protocolIds2);
36614
36852
  } catch (err) {
36615
36853
  getLogger().warn("session", "bindHandle via onSessionEstablished failed", {
36616
36854
  storyId: state.sessions.get(id)?.storyId,
@@ -36618,7 +36856,7 @@ async function runTrackedSession(state, id, runner, request) {
36618
36856
  error: err instanceof Error ? err.message : String(err)
36619
36857
  });
36620
36858
  }
36621
- callerCallback?.(protocolIds, sessionName2);
36859
+ callerCallback?.(protocolIds2, sessionName2);
36622
36860
  }
36623
36861
  }
36624
36862
  };
@@ -36693,6 +36931,8 @@ async function runTrackedSession(state, id, runner, request) {
36693
36931
  if (current?.state === "RUNNING") {
36694
36932
  state.transition(id, result.success ? "COMPLETED" : "FAILED");
36695
36933
  }
36934
+ const protocolIds = result.protocolIds ?? current?.protocolIds;
36935
+ const turn = Math.max(result.internalRoundTrips ?? 1, 1);
36696
36936
  const sessionName = state.nameFor({
36697
36937
  workdir: pre.workdir,
36698
36938
  featureName: pre.featureName,
@@ -36711,10 +36951,10 @@ async function runTrackedSession(state, id, runner, request) {
36711
36951
  featureName: pre.featureName,
36712
36952
  workdir: pre.workdir,
36713
36953
  resolvedPermissions,
36714
- turn: result.internalRoundTrips ?? 0,
36954
+ turn,
36715
36955
  protocolIds: {
36716
- sessionId: result.protocolIds?.sessionId ?? null,
36717
- recordId: result.protocolIds?.recordId ?? null
36956
+ sessionId: protocolIds?.sessionId ?? null,
36957
+ recordId: protocolIds?.recordId ?? null
36718
36958
  },
36719
36959
  origin: "runTrackedSession",
36720
36960
  tokenUsage: result.tokenUsage,
@@ -37104,11 +37344,12 @@ class SessionManager {
37104
37344
  }
37105
37345
  this._busySessions.add(handle.id);
37106
37346
  try {
37107
- return await adapter.sendTurn(handle, prompt, {
37347
+ const result = await adapter.sendTurn(handle, prompt, {
37108
37348
  interactionHandler: opts?.interactionHandler ?? NO_OP_INTERACTION_HANDLER,
37109
37349
  signal: opts?.signal,
37110
37350
  maxTurns: opts?.maxTurns
37111
37351
  });
37352
+ return { ...result, protocolIds: result.protocolIds ?? handle.protocolIds };
37112
37353
  } catch (err) {
37113
37354
  if (opts?.signal?.aborted || err instanceof Error && err.name === "AbortError") {
37114
37355
  this._cancelledSessions.add(handle.id);
@@ -37416,7 +37657,9 @@ function createSessionRunHop(sessionManager) {
37416
37657
  durationMs: Date.now() - startMs,
37417
37658
  estimatedCostUsd: turnResult.estimatedCostUsd ?? 0,
37418
37659
  exactCostUsd: turnResult.exactCostUsd,
37419
- tokenUsage: turnResult.tokenUsage
37660
+ tokenUsage: turnResult.tokenUsage,
37661
+ protocolIds: handle.protocolIds,
37662
+ internalRoundTrips: turnResult.internalRoundTrips
37420
37663
  }
37421
37664
  };
37422
37665
  } catch (err) {
@@ -39538,6 +39781,393 @@ var init_runner2 = __esm(() => {
39538
39781
  init_logger2();
39539
39782
  });
39540
39783
 
39784
+ // src/findings/types.ts
39785
/** Stable identity key for dedupe/diffing: optional fields collapse to null so the serialized key is deterministic. */
function findingKey(f) {
  const identity = [f.source, f.file ?? null, f.line ?? null, f.rule ?? null, f.message];
  return JSON.stringify(identity);
}
39788
// Severity ordering for findings: higher = more severe. Frozen so it cannot
// be mutated at runtime. NOTE(review): values intentionally differ from
// SEVERITY_RANK in src/review/severity.ts (e.g. "info" is 2 here but 0
// there) — presumably this one orders findings rather than gating blocking
// decisions; confirm before unifying the two tables.
var SEVERITY_ORDER;
var init_types7 = __esm(() => {
  SEVERITY_ORDER = Object.freeze({
    critical: 5,
    error: 4,
    warning: 3,
    info: 2,
    low: 1,
    unverifiable: 0
  });
});
39799
+
39800
+ // src/findings/path-utils.ts
39801
+ import { relative as relative9, resolve as resolve14 } from "path";
39802
/**
 * Re-expresses a diagnostic path relative to the story workdir.
 * `rawPath` may be absolute, or relative to `cwd`.
 */
function rebaseToWorkdir(rawPath, cwd, workdir) {
  // resolve() returns an already-absolute rawPath unchanged, so a single
  // resolve-then-relative covers both the absolute and the cwd-relative
  // branch. Unlike the previous `rawPath.startsWith("/")` test it also
  // recognizes Windows-style absolute paths, and it normalizes "." / ".."
  // segments consistently in both cases.
  return relative9(workdir, resolve14(cwd, rawPath));
}
var init_path_utils = () => {};
39809
+
39810
+ // src/findings/adapters/lint.ts
39811
/** Adapts a raw lint diagnostic into the shared Finding shape; severity defaults to "warning". */
function lintDiagnosticToFinding(d, workdir, tool) {
  const severity = d.severity ?? "warning";
  // NOTE(review): workdir is passed as both cwd and workdir, so relative
  // diagnostic paths are assumed to already be workdir-relative — confirm.
  const file = rebaseToWorkdir(d.file, workdir, workdir);
  return {
    source: "lint",
    tool,
    severity,
    category: "lint",
    rule: d.ruleId,
    file,
    line: d.line,
    column: d.column,
    message: d.message
  };
}
var init_lint = __esm(() => {
  init_path_utils();
});
39827
+
39828
+ // src/findings/adapters/plugin.ts
39829
/** Adapts a plugin-reported finding; plugin file paths are taken verbatim (workdir unused). */
function pluginToFinding(rf, _workdir) {
  const tool = rf.source ?? "plugin";
  const category = rf.category ?? "general";
  const meta = rf.url ? { url: rf.url } : undefined;
  return {
    source: "plugin",
    tool,
    severity: rf.severity,
    category,
    rule: rf.ruleId,
    file: rf.file,
    line: rf.line,
    column: rf.column,
    endLine: rf.endLine,
    endColumn: rf.endColumn,
    message: rf.message,
    meta
  };
}
39845
+
39846
+ // src/findings/adapters/semantic-review.ts
39847
/** Adapts a semantic-review finding to the shared Finding shape; these always target source-side fixes. */
function reviewFindingToFinding(f) {
  const { severity, ruleId, file, line, column, endLine, endColumn, message } = f;
  return {
    source: "semantic-review",
    severity,
    category: f.category ?? "",
    rule: ruleId,
    file,
    line,
    column,
    endLine,
    endColumn,
    message,
    fixTarget: "source"
  };
}
39862
+
39863
+ // src/findings/adapters/test-runner.ts
39864
/** Pulls up to 5 lines around the first case-insensitive mention of acId from raw test output; falls back to "<acId> failed". */
function extractExcerpt(output, acId) {
  const needle = acId.toLowerCase();
  const outputLines = output.split(`
`);
  const start = outputLines.findIndex((line) => line.toLowerCase().includes(needle));
  if (start === -1)
    return `${acId} failed`;
  const end = Math.min(outputLines.length, start + 5);
  const excerpt = outputLines.slice(start, end).join(`
`).trim();
  return excerpt || `${acId} failed`;
}
/** A single failed acceptance criterion: the source, not the test, is the fix target. */
function acFailureToFinding(acId, output) {
  return {
    source: "test-runner",
    severity: "error",
    category: "assertion-failure",
    rule: acId,
    message: extractExcerpt(output, acId),
    fixTarget: "source"
  };
}
/** Sentinel failures (hook timeout / runner crash) point at the test side instead. */
function acSentinelToFinding(sentinel, _output) {
  if (sentinel !== "AC-HOOK") {
    return {
      source: "test-runner",
      severity: "critical",
      category: "test-runner-error",
      message: "Test runner crashed before test bodies ran",
      fixTarget: "test"
    };
  }
  return {
    source: "test-runner",
    severity: "error",
    category: "hook-failure",
    message: "beforeAll/afterAll hook timed out",
    fixTarget: "test"
  };
}
39902
+
39903
+ // src/findings/adapters/typecheck.ts
39904
/** Adapts a tsc diagnostic into the shared Finding shape; rule becomes the "TS<code>" form when a code is present. */
function tscDiagnosticToFinding(d, workdir) {
  const rule = d.code ? `TS${d.code}` : undefined;
  return {
    source: "typecheck",
    tool: "tsc",
    severity: "error",
    category: "type-error",
    rule,
    file: rebaseToWorkdir(d.file, workdir, workdir),
    line: d.line,
    column: d.column,
    message: d.message
  };
}
var init_typecheck = __esm(() => {
  init_path_utils();
});
39920
+
39921
+ // src/findings/adapters/index.ts
39922
// Aggregated initializer for the finding adapters that need setup (lint and
// typecheck both depend on path-utils; the other adapters are plain functions).
var init_adapters = __esm(() => {
  init_lint();
  init_typecheck();
});
39926
+
39927
+ // src/findings/cycle.ts
39928
/**
 * Classifies how one source's findings changed across a fix iteration, by
 * comparing identity keys (findingKey) before vs after:
 *   "resolved"  — nothing left after the fix (including the both-empty case)
 *   "regressed" — at least one new finding appeared (even if others resolved)
 *   "unchanged" — exactly the same finding set as before
 *   "partial"   — some findings resolved, none new, some remain
 */
function classifySingleSource(before, after) {
  const beforeKeys = new Set(before.map(findingKey));
  const afterKeys = new Set(after.map(findingKey));
  // An empty after-set is resolved regardless of the before-set, so the
  // previous separate both-empty check was redundant.
  if (afterKeys.size === 0)
    return "resolved";
  // Any new finding is a regression whether or not old ones were also fixed
  // (the original returned "regressed" for both hasNew branches).
  const hasNew = [...afterKeys].some((k) => !beforeKeys.has(k));
  if (hasNew)
    return "regressed";
  const hasResolved = [...beforeKeys].some((k) => !afterKeys.has(k));
  return hasResolved ? "partial" : "unchanged";
}
39945
// Classifies the overall fix-iteration outcome across all finding sources.
// Findings appearing in a source that previously had none short-circuit to
// "regressed-different-source"; otherwise the per-source classifications are
// combined: all-resolved wins, then any-regressed, then all-unchanged, and
// anything else is "partial".
function classifyOutcome(before, after) {
  if (before.length === 0 && after.length === 0)
    return "resolved";
  // Findings appeared where there were none at all before.
  if (before.length === 0)
    return "regressed";
  const beforeSources = new Set(before.map((f) => f.source));
  const afterSources = new Set(after.map((f) => f.source));
  const newSources = [...afterSources].filter((s) => !beforeSources.has(s));
  if (newSources.length > 0)
    return "regressed-different-source";
  // Classify each pre-existing source independently, then combine.
  const sources = [...beforeSources];
  const perSource = sources.map((source) => classifySingleSource(before.filter((f) => f.source === source), after.filter((f) => f.source === source)));
  if (perSource.every((o) => o === "resolved"))
    return "resolved";
  if (perSource.some((o) => o === "regressed"))
    return "regressed";
  if (perSource.every((o) => o === "unchanged"))
    return "unchanged";
  return "partial";
}
39965
/** Picks the strategies that can act this iteration: those matching at least one finding, or — with no findings — those matching the verdict. */
function selectActiveStrategies(strategies, findings, verdict) {
  if (findings.length > 0) {
    const matchesSomeFinding = (s) => findings.some((f) => s.appliesTo(f));
    return strategies.filter(matchesSomeFinding);
  }
  if (verdict === undefined)
    return [];
  return strategies.filter((s) => s.appliesToVerdict?.(verdict) ?? false);
}
39974
/** An exclusive strategy (no coRun, or coRun === "exclusive") runs alone — first in priority order wins; otherwise all sequential co-runners run. */
function selectExecutionGroup(active) {
  for (const strategy of active) {
    if (!strategy.coRun || strategy.coRun === "exclusive")
      return [strategy];
  }
  return active.filter((s) => s.coRun === "co-run-sequential");
}
39980
/** Number of fixes a single strategy has applied across all past iterations. */
function countStrategyAttempts(iterations, strategyName) {
  let total = 0;
  for (const iter of iterations) {
    for (const fix of iter.fixesApplied) {
      if (fix.strategyName === strategyName)
        total += 1;
    }
  }
  return total;
}
/** Total number of fixes applied across all past iterations, any strategy. */
function countTotalAttempts(iterations) {
  let total = 0;
  for (const iter of iterations)
    total += iter.fixesApplied.length;
  return total;
}
39986
// Drives one findings→fix→validate loop until the findings are resolved or a
// guard fires. Possible exitReason values: "resolved", "no-strategy",
// "max-attempts-per-strategy", "max-attempts-total", "bail-when",
// "validator-error". `_deps` lets callers/tests inject callOp and the clock;
// anything not supplied falls back to the module-level _cycleDeps.
async function runFixCycle(cycle, ctx, cycleName, _deps = {}) {
  const logger = getSafeLogger();
  const doCallOp = _deps.callOp ?? _cycleDeps.callOp;
  const now = _deps.now ?? _cycleDeps.now;
  const storyId = ctx.storyId;
  const packageDir = ctx.packageDir;
  let totalCostUsd = 0;
  for (;;) {
    // Nothing to fix and no verdict to act on — already clean.
    if (cycle.findings.length === 0 && cycle.verdict === undefined) {
      return { iterations: cycle.iterations, finalFindings: [], exitReason: "resolved", costUsd: totalCostUsd };
    }
    const active = selectActiveStrategies(cycle.strategies, cycle.findings, cycle.verdict);
    // Guard 1: findings remain but no strategy claims any of them.
    if (active.length === 0) {
      logger?.info("findings.cycle", "cycle exited \u2014 no matching strategy", {
        storyId,
        packageDir,
        cycleName,
        reason: "no-strategy",
        findingsCount: cycle.findings.length
      });
      return {
        iterations: cycle.iterations,
        finalFindings: cycle.findings,
        exitReason: "no-strategy",
        costUsd: totalCostUsd
      };
    }
    // Guard 2: per-strategy attempt cap.
    for (const strategy of active) {
      const attempts = countStrategyAttempts(cycle.iterations, strategy.name);
      if (attempts >= strategy.maxAttempts) {
        logger?.info("findings.cycle", "cycle exited \u2014 strategy attempt cap reached", {
          storyId,
          packageDir,
          cycleName,
          reason: "max-attempts-per-strategy",
          exhaustedStrategy: strategy.name,
          attempts,
          maxAttempts: strategy.maxAttempts
        });
        return {
          iterations: cycle.iterations,
          finalFindings: cycle.findings,
          exitReason: "max-attempts-per-strategy",
          exhaustedStrategy: strategy.name,
          costUsd: totalCostUsd
        };
      }
    }
    // Guard 3: global attempt cap across all strategies.
    const totalAttempts = countTotalAttempts(cycle.iterations);
    if (totalAttempts >= cycle.config.maxAttemptsTotal) {
      logger?.info("findings.cycle", "cycle exited \u2014 total attempt cap reached", {
        storyId,
        packageDir,
        cycleName,
        reason: "max-attempts-total",
        totalAttempts,
        maxAttemptsTotal: cycle.config.maxAttemptsTotal
      });
      return {
        iterations: cycle.iterations,
        finalFindings: cycle.findings,
        exitReason: "max-attempts-total",
        costUsd: totalCostUsd
      };
    }
    // Guard 4: a strategy's own bail predicate (e.g. oscillation detection).
    for (const strategy of active) {
      const bailReason = strategy.bailWhen?.(cycle.iterations) ?? null;
      if (bailReason !== null) {
        logger?.info("findings.cycle", "cycle exited \u2014 bail predicate fired", {
          storyId,
          packageDir,
          cycleName,
          reason: "bail-when",
          strategyName: strategy.name,
          bailDetail: bailReason
        });
        return {
          iterations: cycle.iterations,
          finalFindings: cycle.findings,
          exitReason: "bail-when",
          bailDetail: bailReason,
          costUsd: totalCostUsd
        };
      }
    }
    // Fix phase: run the execution group sequentially; each strategy only
    // sees the findings it applies to.
    const group = selectExecutionGroup(active);
    const startedAt = now();
    const findingsBefore = [...cycle.findings];
    const fixesApplied = [];
    for (const strategy of group) {
      const relevantFindings = findingsBefore.filter((f) => strategy.appliesTo(f));
      const input = strategy.buildInput(relevantFindings, cycle.iterations, ctx);
      const output = await doCallOp(ctx, strategy.fixOp, input);
      const extracted = strategy.extractApplied?.(output, input) ?? {};
      fixesApplied.push({
        strategyName: strategy.name,
        op: strategy.fixOp.name,
        targetFiles: extracted.targetFiles ?? [],
        summary: extracted.summary ?? "",
        costUsd: extracted.costUsd
      });
    }
    // Validation phase: re-run the validator, retrying transient errors up to
    // config.validatorRetries before giving up with "validator-error".
    let findingsAfter;
    let validatorAttempt = 0;
    for (;;) {
      try {
        findingsAfter = await cycle.validate(ctx);
        break;
      } catch (err) {
        if (validatorAttempt >= cycle.config.validatorRetries) {
          logger?.error("findings.cycle", "cycle exited \u2014 validator error", {
            storyId,
            packageDir,
            cycleName,
            reason: "validator-error",
            error: errorMessage(err)
          });
          return {
            iterations: cycle.iterations,
            finalFindings: cycle.findings,
            exitReason: "validator-error",
            costUsd: totalCostUsd
          };
        }
        logger?.warn("findings.cycle", "validator retry", {
          storyId,
          packageDir,
          cycleName,
          attempt: validatorAttempt + 1,
          error: errorMessage(err)
        });
        validatorAttempt++;
      }
    }
    // Record the iteration and roll the cycle state forward.
    const outcome = classifyOutcome(findingsBefore, findingsAfter);
    const finishedAt = now();
    const iterationNum = cycle.iterations.length + 1;
    const iteration = {
      iterationNum,
      findingsBefore,
      fixesApplied,
      findingsAfter,
      outcome,
      startedAt,
      finishedAt
    };
    cycle.iterations.push(iteration);
    cycle.findings = findingsAfter;
    const iterationCostUsd = fixesApplied.reduce((sum, fa) => sum + (fa.costUsd ?? 0), 0);
    totalCostUsd += iterationCostUsd;
    logger?.info("findings.cycle", "iteration completed", {
      storyId,
      packageDir,
      cycleName,
      iterationNum,
      strategiesRan: fixesApplied.map((fa) => fa.strategyName),
      outcome,
      findingsBefore: findingsBefore.length,
      findingsAfter: findingsAfter.length,
      ...iterationCostUsd > 0 ? { costUsd: iterationCostUsd } : {}
    });
    if (outcome === "resolved") {
      return { iterations: cycle.iterations, finalFindings: [], exitReason: "resolved", costUsd: totalCostUsd };
    }
  }
}
40152
// Default collaborators for runFixCycle; callers/tests can override them via
// its `_deps` parameter without touching this module-level object.
var _cycleDeps;
var init_cycle = __esm(() => {
  init_logger2();
  init_call();
  init_types7();
  _cycleDeps = {
    callOp,
    // ISO-8601 timestamps, so iteration records serialize cleanly.
    now: () => new Date().toISOString()
  };
});
40162
+
40163
+ // src/findings/index.ts
40164
// Barrel initializer for the findings subsystem: runs each sub-module's
// initializer exactly once (each init_* is __esm-memoized).
var init_findings = __esm(() => {
  init_types7();
  init_adapters();
  init_path_utils();
  init_cycle();
});
40170
+
39541
40171
  // src/utils/log-test-output.ts
39542
40172
  function logTestOutput(logger, stage, output, opts = {}) {
39543
40173
  if (!logger || !output)
@@ -39719,6 +40349,7 @@ var _acceptanceStageDeps, parseTestFailures2, acceptanceStage;
39719
40349
  var init_acceptance2 = __esm(() => {
39720
40350
  init_generator();
39721
40351
  init_test_path();
40352
+ init_findings();
39722
40353
  init_logger2();
39723
40354
  init_prd();
39724
40355
  init_ac_parser();
@@ -39754,6 +40385,7 @@ var init_acceptance2 = __esm(() => {
39754
40385
  }
39755
40386
  ];
39756
40387
  const allFailedACs = [];
40388
+ const allFindings = [];
39757
40389
  const allOutputParts = [];
39758
40390
  let anyError = false;
39759
40391
  let errorExitCode = 0;
@@ -39806,11 +40438,13 @@ ${stderr}`;
39806
40438
  anyError = true;
39807
40439
  errorExitCode = exitCode;
39808
40440
  allFailedACs.push("AC-ERROR");
40441
+ allFindings.push(acSentinelToFinding("AC-ERROR", output));
39809
40442
  continue;
39810
40443
  }
39811
40444
  for (const acId of actualFailures) {
39812
40445
  if (!allFailedACs.includes(acId)) {
39813
40446
  allFailedACs.push(acId);
40447
+ allFindings.push(acId === "AC-HOOK" ? acSentinelToFinding("AC-HOOK", output) : acFailureToFinding(acId, output));
39814
40448
  }
39815
40449
  }
39816
40450
  if (actualFailures.length > 0) {
@@ -39861,6 +40495,7 @@ ${stderr}`;
39861
40495
  }
39862
40496
  ctx.acceptanceFailures = {
39863
40497
  failedACs: allFailedACs,
40498
+ findings: allFindings,
39864
40499
  testOutput: combinedOutput
39865
40500
  };
39866
40501
  if (anyError) {
@@ -40092,8 +40727,7 @@ ${stderr}` };
40092
40727
  criteriaList,
40093
40728
  frameworkOverrideLine,
40094
40729
  targetTestFilePath: testPath,
40095
- ..."implementationContext" in ctx && ctx.implementationContext ? { implementationContext: ctx.implementationContext } : {},
40096
- ..."previousFailure" in ctx && ctx.previousFailure ? { previousFailure: ctx.previousFailure } : {}
40730
+ ..."implementationContext" in ctx && ctx.implementationContext ? { implementationContext: ctx.implementationContext } : {}
40097
40731
  }, groupStoryId);
40098
40732
  const testCode = genResult.testCode;
40099
40733
  if (testCode) {
@@ -40361,10 +40995,10 @@ async function executeWithTimeout(command, timeoutSeconds, env2, options) {
40361
40995
  const timeoutMs = timeoutSeconds * 1000;
40362
40996
  let timedOut = false;
40363
40997
  const timer = { id: undefined };
40364
- const timeoutPromise = new Promise((resolve14) => {
40998
+ const timeoutPromise = new Promise((resolve15) => {
40365
40999
  timer.id = setTimeout(() => {
40366
41000
  timedOut = true;
40367
- resolve14();
41001
+ resolve15();
40368
41002
  }, timeoutMs);
40369
41003
  });
40370
41004
  const processPromise = proc.exited;
@@ -40378,8 +41012,8 @@ async function executeWithTimeout(command, timeoutSeconds, env2, options) {
40378
41012
  proc.exited.then(() => {
40379
41013
  exitedDuringGrace = true;
40380
41014
  }),
40381
- new Promise((resolve14) => {
40382
- setTimeout(resolve14, gracePeriodMs);
41015
+ new Promise((resolve15) => {
41016
+ setTimeout(resolve15, gracePeriodMs);
40383
41017
  })
40384
41018
  ]);
40385
41019
  if (!exitedDuringGrace) {
@@ -40557,7 +41191,7 @@ var init_runners = __esm(() => {
40557
41191
  });
40558
41192
 
40559
41193
  // src/verification/smart-runner.ts
40560
- import { join as join33, relative as relative9 } from "path";
41194
+ import { join as join33, relative as relative10 } from "path";
40561
41195
  function extractPatternSuffix(pattern) {
40562
41196
  const lastStar = pattern.lastIndexOf("*");
40563
41197
  if (lastStar === -1)
@@ -40665,7 +41299,7 @@ async function getChangedNonTestFiles(workdir, baseRef, packagePrefix, testFileR
40665
41299
  let effectivePrefix = packagePrefix;
40666
41300
  if (packagePrefix && repoRoot) {
40667
41301
  const gitRoot = await _gitUtilDeps.getGitRoot(workdir);
40668
- const extraPrefix2 = gitRoot && gitRoot !== repoRoot ? relative9(gitRoot, repoRoot) : "";
41302
+ const extraPrefix2 = gitRoot && gitRoot !== repoRoot ? relative10(gitRoot, repoRoot) : "";
40669
41303
  effectivePrefix = extraPrefix2 ? `${extraPrefix2}/${packagePrefix}` : packagePrefix;
40670
41304
  }
40671
41305
  const scopedRaw = effectivePrefix ? lines.filter((f) => f.startsWith(`${effectivePrefix}/`)) : lines;
@@ -40692,7 +41326,7 @@ async function getChangedTestFiles(workdir, repoRoot, baseRef, packagePrefix, te
40692
41326
  const packageDir = packagePrefix ? join33(repoRoot, packagePrefix) : undefined;
40693
41327
  const ignoreMatchers = naxIgnoreIndex?.getMatchers(packageDir) ?? await resolveNaxIgnorePatterns(repoRoot, packageDir);
40694
41328
  const gitRoot = await _gitUtilDeps.getGitRoot(workdir);
40695
- const extraPrefix = gitRoot && gitRoot !== repoRoot ? relative9(gitRoot, repoRoot) : "";
41329
+ const extraPrefix = gitRoot && gitRoot !== repoRoot ? relative10(gitRoot, repoRoot) : "";
40696
41330
  const effectivePrefix = packagePrefix ? extraPrefix ? `${extraPrefix}/${packagePrefix}` : packagePrefix : undefined;
40697
41331
  const scopedRaw = effectivePrefix ? lines.filter((f) => f.startsWith(`${effectivePrefix}/`)) : lines;
40698
41332
  const scoped = filterNaxInternalPaths(scopedRaw, ignoreMatchers);
@@ -40977,6 +41611,158 @@ var init_event_bus = __esm(() => {
40977
41611
  pipelineEventBus = new PipelineEventBus;
40978
41612
  });
40979
41613
 
41614
+ // src/pipeline/stages/autofix-cycle.ts
41615
+ import { join as join35 } from "path";
41616
+ function fixCallCtx(ctx) {
41617
+ const packageView = ctx.packageView ?? ctx.runtime.packages.repo();
41618
+ return {
41619
+ runtime: ctx.runtime,
41620
+ packageView,
41621
+ packageDir: ctx.workdir,
41622
+ storyId: ctx.story.id,
41623
+ featureName: ctx.prd.feature,
41624
+ agentName: ctx.agentManager.getDefault(),
41625
+ story: ctx.story
41626
+ };
41627
+ }
41628
+ function collectFailedChecks(ctx) {
41629
+ return (ctx.reviewResult?.checks ?? []).filter((c) => !c.success);
41630
+ }
41631
+ function collectCurrentFindings(ctx) {
41632
+ const checks3 = collectFailedChecks(ctx);
41633
+ if (checks3.length === 0)
41634
+ return [];
41635
+ return checks3.flatMap((c) => {
41636
+ if (c.findings?.length)
41637
+ return c.findings;
41638
+ return [
41639
+ {
41640
+ source: c.check === "adversarial" ? "adversarial-review" : c.check === "semantic" ? "semantic-review" : "lint",
41641
+ severity: "error",
41642
+ category: c.check,
41643
+ message: (c.output ?? c.check).slice(0, 200),
41644
+ fixTarget: "source"
41645
+ }
41646
+ ];
41647
+ });
41648
+ }
41649
+ function collectTestTargetedChecks(ctx) {
41650
+ return collectFailedChecks(ctx).filter((c) => c.findings?.some((f) => f.fixTarget === "test"));
41651
+ }
41652
+ function buildAutofixStrategies(ctx, maxAttempts) {
41653
+ const implementer = {
41654
+ name: "autofix-implementer",
41655
+ appliesTo: (f) => (f.fixTarget ?? "source") === "source",
41656
+ fixOp: implementerRectifyOp,
41657
+ maxAttempts,
41658
+ coRun: "co-run-sequential",
41659
+ buildInput: (_findings, _prior, _cycleCtx) => ({
41660
+ failedChecks: collectFailedChecks(ctx),
41661
+ story: ctx.story
41662
+ }),
41663
+ extractApplied: (output) => ({
41664
+ summary: output.unresolvedReason ?? ""
41665
+ })
41666
+ };
41667
+ const testWriter = {
41668
+ name: "autofix-test-writer",
41669
+ appliesTo: (f) => f.fixTarget === "test",
41670
+ fixOp: testWriterRectifyOp,
41671
+ maxAttempts: 1,
41672
+ coRun: "co-run-sequential",
41673
+ buildInput: (_findings, _prior, _cycleCtx) => ({
41674
+ failedChecks: collectTestTargetedChecks(ctx),
41675
+ story: ctx.story
41676
+ })
41677
+ };
41678
+ return [implementer, testWriter];
41679
+ }
41680
+ function findUnresolvedReason(result) {
41681
+ for (const iter of result.iterations) {
41682
+ for (const fa of iter.fixesApplied) {
41683
+ if (fa.strategyName === "autofix-implementer" && fa.summary) {
41684
+ return fa.summary;
41685
+ }
41686
+ }
41687
+ }
41688
+ return;
41689
+ }
41690
+ async function writeShadowReport(ctx, result, initialFindingsCount) {
41691
+ const logger = getLogger();
41692
+ const shadowDir = join35(ctx.workdir, ".nax", "cycle-shadow", ctx.story.id);
41693
+ const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
41694
+ const report = {
41695
+ storyId: ctx.story.id,
41696
+ timestamp,
41697
+ initialFindingsCount,
41698
+ exitReason: result.exitReason,
41699
+ iterations: result.iterations.length,
41700
+ finalFindingsCount: result.finalFindings.length,
41701
+ ...result.exhaustedStrategy ? { exhaustedStrategy: result.exhaustedStrategy } : {}
41702
+ };
41703
+ try {
41704
+ const file3 = join35(shadowDir, `${timestamp}.json`);
41705
+ await Bun.write(file3, JSON.stringify(report, null, 2));
41706
+ } catch (err) {
41707
+ logger.debug("autofix-cycle", "Shadow report write failed (non-fatal)", {
41708
+ storyId: ctx.story.id,
41709
+ error: String(err)
41710
+ });
41711
+ }
41712
+ }
41713
+ async function runAgentRectificationV2(ctx, _lintFixCmd, _formatFixCmd, _effectiveWorkdir) {
41714
+ const logger = getLogger();
41715
+ const storyId = ctx.story.id;
41716
+ const cycleCtx = fixCallCtx(ctx);
41717
+ const initialFindings = collectCurrentFindings(ctx);
41718
+ const maxAttempts = ctx.config.quality.autofix?.maxAttempts ?? 3;
41719
+ const maxTotalAttempts = ctx.config.quality.autofix?.maxTotalAttempts ?? 12;
41720
+ logger.info("autofix-cycle", "Starting V2 fix cycle", {
41721
+ storyId,
41722
+ initialFindingsCount: initialFindings.length,
41723
+ maxAttempts,
41724
+ maxTotalAttempts
41725
+ });
41726
+ const cycle = {
41727
+ findings: initialFindings,
41728
+ iterations: [...ctx.autofixPriorIterations ?? []],
41729
+ strategies: buildAutofixStrategies(ctx, maxAttempts),
41730
+ config: {
41731
+ maxAttemptsTotal: maxTotalAttempts,
41732
+ validatorRetries: 1
41733
+ },
41734
+ async validate(_cycleCtx) {
41735
+ await _autofixDeps.recheckReview(ctx);
41736
+ return collectCurrentFindings(ctx);
41737
+ }
41738
+ };
41739
+ const result = await runFixCycle(cycle, cycleCtx, "autofix-v2");
41740
+ ctx.autofixPriorIterations = result.iterations;
41741
+ await writeShadowReport(ctx, result, initialFindings.length);
41742
+ const unresolvedReason = findUnresolvedReason(result);
41743
+ const succeeded = result.exitReason === "resolved" || result.finalFindings.length === 0;
41744
+ logger.info("autofix-cycle", "V2 fix cycle complete", {
41745
+ storyId,
41746
+ exitReason: result.exitReason,
41747
+ iterations: result.iterations.length,
41748
+ finalFindingsCount: result.finalFindings.length,
41749
+ succeeded,
41750
+ ...unresolvedReason ? { unresolvedReason } : {}
41751
+ });
41752
+ return { succeeded, cost: 0, ...unresolvedReason ? { unresolvedReason } : {} };
41753
+ }
41754
+ var init_autofix_cycle = __esm(() => {
41755
+ init_findings();
41756
+ init_logger2();
41757
+ init_operations();
41758
+ init_autofix();
41759
+ });
41760
+
41761
+ // src/pipeline/stages/autofix-agent.ts
41762
+ var init_autofix_agent = __esm(() => {
41763
+ init_autofix_cycle();
41764
+ });
41765
+
40980
41766
  // src/review/lint-parsing/strategies/biome-json.ts
40981
41767
  function asRecord(value) {
40982
41768
  return typeof value === "object" && value !== null ? value : null;
@@ -41223,12 +42009,24 @@ function strategiesFor(format) {
41223
42009
  return [];
41224
42010
  return [eslintJsonStrategy, biomeJsonStrategy, textBlockStrategy];
41225
42011
  }
41226
- function parseLintOutput(output, format = "auto") {
42012
+ function toolForFormat(format) {
42013
+ if (format === "biome-json")
42014
+ return "biome";
42015
+ if (format === "eslint-json")
42016
+ return "eslint";
42017
+ return "text";
42018
+ }
42019
+ function parseLintOutput(output, format = "auto", opts) {
41227
42020
  if (!output.trim())
41228
42021
  return null;
41229
42022
  for (const strategy of strategiesFor(format)) {
41230
42023
  const parsed = strategy.parse(output);
41231
42024
  if (parsed && parsed.diagnostics.length > 0) {
42025
+ if (opts) {
42026
+ const tool = toolForFormat(parsed.format);
42027
+ const findings = parsed.diagnostics.map((d) => lintDiagnosticToFinding(d, opts.workdir, tool));
42028
+ return { ...parsed, findings };
42029
+ }
41232
42030
  return parsed;
41233
42031
  }
41234
42032
  }
@@ -41242,6 +42040,7 @@ function formatDiagnosticsOutput(diagnostics) {
41242
42040
  `).trim() || null;
41243
42041
  }
41244
42042
  var init_parse3 = __esm(() => {
42043
+ init_findings();
41245
42044
  init_biome_json();
41246
42045
  init_eslint_json();
41247
42046
  init_text_block();
@@ -41438,12 +42237,16 @@ function strategiesFor2(format) {
41438
42237
  return [];
41439
42238
  return [tscStrategy, typecheckTextBlockStrategy];
41440
42239
  }
41441
- function parseTypecheckOutput(output, format = "auto") {
42240
+ function parseTypecheckOutput(output, format = "auto", opts) {
41442
42241
  if (!output.trim())
41443
42242
  return null;
41444
42243
  for (const strategy of strategiesFor2(format)) {
41445
42244
  const parsed = strategy.parse(output);
41446
42245
  if (parsed && parsed.diagnostics.length > 0) {
42246
+ if (opts) {
42247
+ const findings = parsed.diagnostics.map((d) => tscDiagnosticToFinding(d, opts.workdir));
42248
+ return { ...parsed, findings };
42249
+ }
41447
42250
  return parsed;
41448
42251
  }
41449
42252
  }
@@ -41457,6 +42260,7 @@ function formatTypecheckDiagnosticsOutput(diagnostics) {
41457
42260
  `).trim() || null;
41458
42261
  }
41459
42262
  var init_parse4 = __esm(() => {
42263
+ init_findings();
41460
42264
  init_text_block2();
41461
42265
  init_tsc();
41462
42266
  });
@@ -41493,49 +42297,84 @@ function splitByStructuredFindings(check2, testFilePatterns) {
41493
42297
  };
41494
42298
  return { testFindings: toCheck(testFs), sourceFindings: toCheck(sourceFs) };
41495
42299
  }
41496
- function splitByOutputParsing(check2, testFilePatterns, format = "auto") {
41497
- const parsed = parseLintOutput(check2.output, format);
42300
+ function deriveFixTarget(file3, testFilePatterns) {
42301
+ return file3 && isTestFile2(file3, testFilePatterns) ? "test" : "source";
42302
+ }
42303
+ function splitFindingsByFixTarget(findings, diagnostics, testFilePatterns) {
42304
+ const testDiagnostics = [];
42305
+ const sourceDiagnostics = [];
42306
+ for (let i = 0;i < findings.length; i++) {
42307
+ const diagnostic = diagnostics[i];
42308
+ if (!diagnostic) {
42309
+ throw new NaxError(`findings and diagnostics arrays are not co-produced: length mismatch at index ${i}`, "INVARIANT_VIOLATION", {
42310
+ stage: "autofix-scope-split",
42311
+ index: i,
42312
+ findingsCount: findings.length,
42313
+ diagnosticsCount: diagnostics.length
42314
+ });
42315
+ }
42316
+ const f = findings[i];
42317
+ const target = f.fixTarget ?? deriveFixTarget(f.file, testFilePatterns);
42318
+ (target === "test" ? testDiagnostics : sourceDiagnostics).push(diagnostic);
42319
+ }
42320
+ return { testDiagnostics, sourceDiagnostics };
42321
+ }
42322
+ function splitByOutputParsing(check2, testFilePatterns, format = "auto", opts) {
42323
+ const parsed = parseLintOutput(check2.output, format, opts);
41498
42324
  if (!parsed) {
41499
42325
  if (check2.output.trim()) {
41500
42326
  return { testFindings: null, sourceFindings: check2 };
41501
42327
  }
41502
42328
  return { testFindings: null, sourceFindings: null };
41503
42329
  }
41504
- const testDiagnostics = parsed.diagnostics.filter((d) => isTestFile2(d.file, testFilePatterns));
41505
- const sourceDiagnostics = parsed.diagnostics.filter((d) => !isTestFile2(d.file, testFilePatterns));
42330
+ let testDiagnostics;
42331
+ let sourceDiagnostics;
42332
+ if (parsed.findings) {
42333
+ ({ testDiagnostics, sourceDiagnostics } = splitFindingsByFixTarget(parsed.findings, parsed.diagnostics, testFilePatterns));
42334
+ } else {
42335
+ testDiagnostics = parsed.diagnostics.filter((d) => isTestFile2(d.file, testFilePatterns));
42336
+ sourceDiagnostics = parsed.diagnostics.filter((d) => !isTestFile2(d.file, testFilePatterns));
42337
+ }
41506
42338
  return {
41507
42339
  testFindings: buildScopedLintCheck(check2, testDiagnostics),
41508
42340
  sourceFindings: buildScopedLintCheck(check2, sourceDiagnostics)
41509
42341
  };
41510
42342
  }
41511
- function splitByTypecheckOutputParsing(check2, testFilePatterns, format = "auto") {
41512
- const parsed = parseTypecheckOutput(check2.output, format);
42343
+ function splitByTypecheckOutputParsing(check2, testFilePatterns, format = "auto", opts) {
42344
+ const parsed = parseTypecheckOutput(check2.output, format, opts);
41513
42345
  if (!parsed) {
41514
42346
  if (check2.output.trim()) {
41515
42347
  return { testFindings: null, sourceFindings: check2 };
41516
42348
  }
41517
42349
  return { testFindings: null, sourceFindings: null };
41518
42350
  }
41519
- const testDiagnostics = parsed.diagnostics.filter((d) => isTestFile2(d.file, testFilePatterns));
41520
- const sourceDiagnostics = parsed.diagnostics.filter((d) => !isTestFile2(d.file, testFilePatterns));
42351
+ let testDiagnostics;
42352
+ let sourceDiagnostics;
42353
+ if (parsed.findings) {
42354
+ ({ testDiagnostics, sourceDiagnostics } = splitFindingsByFixTarget(parsed.findings, parsed.diagnostics, testFilePatterns));
42355
+ } else {
42356
+ testDiagnostics = parsed.diagnostics.filter((d) => isTestFile2(d.file, testFilePatterns));
42357
+ sourceDiagnostics = parsed.diagnostics.filter((d) => !isTestFile2(d.file, testFilePatterns));
42358
+ }
41521
42359
  return {
41522
42360
  testFindings: buildScopedTypecheckCheck(check2, testDiagnostics),
41523
42361
  sourceFindings: buildScopedTypecheckCheck(check2, sourceDiagnostics)
41524
42362
  };
41525
42363
  }
41526
- function splitFindingsByScope(check2, testFilePatterns, lintOutputFormat = "auto", typecheckOutputFormat = "auto") {
42364
+ function splitFindingsByScope(check2, testFilePatterns, lintOutputFormat = "auto", typecheckOutputFormat = "auto", opts) {
41527
42365
  if (check2.check === "adversarial") {
41528
42366
  return splitByStructuredFindings(check2, testFilePatterns);
41529
42367
  }
41530
42368
  if (check2.check === "lint") {
41531
- return splitByOutputParsing(check2, testFilePatterns, lintOutputFormat);
42369
+ return splitByOutputParsing(check2, testFilePatterns, lintOutputFormat, opts);
41532
42370
  }
41533
42371
  if (check2.check === "typecheck") {
41534
- return splitByTypecheckOutputParsing(check2, testFilePatterns, typecheckOutputFormat);
42372
+ return splitByTypecheckOutputParsing(check2, testFilePatterns, typecheckOutputFormat, opts);
41535
42373
  }
41536
42374
  return { testFindings: null, sourceFindings: null };
41537
42375
  }
41538
42376
  var init_autofix_scope_split = __esm(() => {
42377
+ init_errors();
41539
42378
  init_lint_parsing();
41540
42379
  init_typecheck_parsing();
41541
42380
  init_test_runners();
@@ -41591,7 +42430,7 @@ async function runTestWriterRectification(ctx, testWriterChecks, story, agentMan
41591
42430
  return 0;
41592
42431
  }
41593
42432
  }
41594
- var init_autofix_test_writer = __esm(() => {
42433
+ var init_autofix_test_writer2 = __esm(() => {
41595
42434
  init_config();
41596
42435
  init_errors();
41597
42436
  init_logger2();
@@ -41600,25 +42439,28 @@ var init_autofix_test_writer = __esm(() => {
41600
42439
  });
41601
42440
 
41602
42441
  // src/review/dialogue.ts
42442
+ function findingId(f) {
42443
+ return f.rule ?? `${f.file ?? ""}:${f.line ?? 0}:${f.message.slice(0, 40)}`;
42444
+ }
41603
42445
  function extractDeltaSummary(rawOutput, previousFindings, newFindings) {
41604
42446
  const parsed = tryParseLLMJson(rawOutput);
41605
42447
  if (parsed && typeof parsed.deltaSummary === "string" && parsed.deltaSummary.length > 0) {
41606
42448
  return parsed.deltaSummary;
41607
42449
  }
41608
- const newIds = new Set(newFindings.map((f) => f.ruleId));
41609
- const prevIds = new Set(previousFindings.map((f) => f.ruleId));
41610
- const resolved = previousFindings.filter((f) => !newIds.has(f.ruleId));
41611
- const stillPresent = newFindings.filter((f) => prevIds.has(f.ruleId));
41612
- const added = newFindings.filter((f) => !prevIds.has(f.ruleId));
42450
+ const newIds = new Set(newFindings.map(findingId));
42451
+ const prevIds = new Set(previousFindings.map(findingId));
42452
+ const resolved = previousFindings.filter((f) => !newIds.has(findingId(f)));
42453
+ const stillPresent = newFindings.filter((f) => prevIds.has(findingId(f)));
42454
+ const added = newFindings.filter((f) => !prevIds.has(findingId(f)));
41613
42455
  const parts = [];
41614
42456
  if (resolved.length > 0) {
41615
- parts.push(`Resolved: ${resolved.map((f) => f.ruleId).join(", ")}.`);
42457
+ parts.push(`Resolved: ${resolved.map(findingId).join(", ")}.`);
41616
42458
  }
41617
42459
  if (stillPresent.length > 0) {
41618
- parts.push(`Still present: ${stillPresent.map((f) => f.ruleId).join(", ")}.`);
42460
+ parts.push(`Still present: ${stillPresent.map(findingId).join(", ")}.`);
41619
42461
  }
41620
42462
  if (added.length > 0) {
41621
- parts.push(`New findings: ${added.map((f) => f.ruleId).join(", ")}.`);
42463
+ parts.push(`New findings: ${added.map(findingId).join(", ")}.`);
41622
42464
  }
41623
42465
  if (parts.length === 0) {
41624
42466
  return previousFindings.length > 0 ? "All previous findings resolved." : "No changes from previous review.";
@@ -41641,24 +42483,21 @@ function compactHistory(history) {
41641
42483
  history.push(lastReviewer);
41642
42484
  return summary;
41643
42485
  }
41644
- function mapLLMFindingToReviewFinding(f) {
42486
+ function mapLLMFindingToFinding(f) {
41645
42487
  const rawSeverity = typeof f.severity === "string" ? f.severity : "info";
41646
42488
  let severity = "info";
41647
42489
  if (rawSeverity === "warn" || rawSeverity === "warning")
41648
42490
  severity = "warning";
41649
- else if (rawSeverity === "critical" || rawSeverity === "error" || rawSeverity === "low")
42491
+ else if (rawSeverity === "critical" || rawSeverity === "error" || rawSeverity === "low" || rawSeverity === "unverifiable")
41650
42492
  severity = rawSeverity;
41651
- else if (rawSeverity === "unverifiable")
41652
- severity = "info";
41653
- else if (rawSeverity === "info")
41654
- severity = "info";
41655
42493
  return {
41656
- ruleId: typeof f.ruleId === "string" && f.ruleId ? f.ruleId : "semantic",
42494
+ source: "semantic-review",
41657
42495
  severity,
42496
+ category: "",
42497
+ rule: typeof f.ruleId === "string" && f.ruleId ? f.ruleId : undefined,
41658
42498
  file: typeof f.file === "string" ? f.file : "",
41659
42499
  line: typeof f.line === "number" ? f.line : 0,
41660
- message: typeof f.message === "string" && f.message ? f.message : typeof f.issue === "string" ? f.issue : "",
41661
- source: typeof f.source === "string" ? f.source : "semantic-review"
42500
+ message: typeof f.message === "string" && f.message ? f.message : typeof f.issue === "string" ? f.issue : ""
41662
42501
  };
41663
42502
  }
41664
42503
  function parseReviewResponse(output) {
@@ -41679,7 +42518,7 @@ function parseReviewResponse(output) {
41679
42518
  }
41680
42519
  const success2 = Boolean(parsed.passed);
41681
42520
  const rawFindings = Array.isArray(parsed.findings) ? parsed.findings : [];
41682
- const findings = rawFindings.map((f) => mapLLMFindingToReviewFinding(f));
42521
+ const findings = rawFindings.map((f) => mapLLMFindingToFinding(f));
41683
42522
  const reasoningObj = parsed.findingReasoning && typeof parsed.findingReasoning === "object" ? parsed.findingReasoning : {};
41684
42523
  const findingReasoning = new Map(Object.entries(reasoningObj));
41685
42524
  return { checkResult: { success: success2, findings }, findingReasoning };
@@ -41898,52 +42737,6 @@ var init_dialogue = __esm(() => {
41898
42737
  init_prompts();
41899
42738
  });
41900
42739
 
41901
- // src/review/severity.ts
41902
- var SEVERITY_RANK;
41903
- var init_severity = __esm(() => {
41904
- SEVERITY_RANK = {
41905
- info: 0,
41906
- unverifiable: 0,
41907
- low: 1,
41908
- warning: 1,
41909
- error: 2,
41910
- critical: 3
41911
- };
41912
- });
41913
-
41914
- // src/review/adversarial-helpers.ts
41915
- function formatFindings(findings) {
41916
- return findings.map((f) => `[${f.severity}][${f.category}] ${f.file}:${f.line} \u2014 ${f.issue}
41917
- Suggestion: ${f.suggestion}`).join(`
41918
- `);
41919
- }
41920
- function normalizeSeverity(sev) {
41921
- if (sev === "warn")
41922
- return "warning";
41923
- if (sev === "unverifiable")
41924
- return "info";
41925
- if (sev === "critical" || sev === "error" || sev === "warning" || sev === "info" || sev === "low")
41926
- return sev;
41927
- return "info";
41928
- }
41929
- function isBlockingSeverity(sev, threshold = "error") {
41930
- return (SEVERITY_RANK[sev] ?? 0) >= (SEVERITY_RANK[threshold] ?? 2);
41931
- }
41932
- function toAdversarialReviewFindings(findings) {
41933
- return findings.map((f) => ({
41934
- ruleId: "adversarial",
41935
- severity: normalizeSeverity(f.severity),
41936
- file: f.file,
41937
- line: f.line,
41938
- message: f.issue,
41939
- source: "adversarial-review",
41940
- category: f.category
41941
- }));
41942
- }
41943
- var init_adversarial_helpers = __esm(() => {
41944
- init_severity();
41945
- });
41946
-
41947
42740
  // src/review/diff-utils.ts
41948
42741
  var {spawn: spawn3 } = globalThis.Bun;
41949
42742
  async function resolveNaxIgnorePathspecExcludes(workdir, options) {
@@ -42090,7 +42883,7 @@ var init_diff_utils = __esm(() => {
42090
42883
  });
42091
42884
 
42092
42885
  // src/review/adversarial.ts
42093
- import { relative as relative10, sep as sep3 } from "path";
42886
+ import { relative as relative11, sep as sep3 } from "path";
42094
42887
  function recordAdversarialAudit(opts) {
42095
42888
  opts.runtime?.reviewAuditor.recordDecision({
42096
42889
  reviewer: "adversarial",
@@ -42123,7 +42916,7 @@ async function runAdversarialReview(opts) {
42123
42916
  projectDir,
42124
42917
  naxIgnoreIndex,
42125
42918
  runtime,
42126
- priorAdversarialFindings
42919
+ priorAdversarialIterations
42127
42920
  } = opts;
42128
42921
  const startTime = Date.now();
42129
42922
  const logger = getSafeLogger();
@@ -42161,7 +42954,7 @@ async function runAdversarialReview(opts) {
42161
42954
  let testInventory;
42162
42955
  const effectiveConfig = naxConfig ?? reviewConfigSelector.select(DEFAULT_CONFIG);
42163
42956
  const packageDirRelative = projectDir && workdir !== projectDir ? (() => {
42164
- const rel = relative10(projectDir, workdir);
42957
+ const rel = relative11(projectDir, workdir);
42165
42958
  if (rel === ".." || rel.startsWith(`..${sep3}`))
42166
42959
  return;
42167
42960
  return rel && rel !== "." ? rel : undefined;
@@ -42248,7 +43041,7 @@ async function runAdversarialReview(opts) {
42248
43041
  excludePatterns: adversarialConfig.excludePatterns,
42249
43042
  testGlobs: resolvedTestPatterns.globs,
42250
43043
  featureCtxBlock,
42251
- priorAdversarialFindings,
43044
+ priorAdversarialIterations,
42252
43045
  blockingThreshold,
42253
43046
  refExcludePatterns: effectiveRefExcludePatterns
42254
43047
  });
@@ -42333,8 +43126,8 @@ async function runAdversarialReview(opts) {
42333
43126
  findings: opResult.findings
42334
43127
  };
42335
43128
  const threshold = blockingThreshold ?? "error";
42336
- const blockingFindings = parsed.findings.filter((f) => isBlockingSeverity(f.severity, threshold));
42337
- const advisoryFindings = parsed.findings.filter((f) => !isBlockingSeverity(f.severity, threshold));
43129
+ const blockingFindings = parsed.findings.filter((f) => isBlockingSeverity2(f.severity, threshold));
43130
+ const advisoryFindings = parsed.findings.filter((f) => !isBlockingSeverity2(f.severity, threshold));
42338
43131
  if (advisoryFindings.length > 0) {
42339
43132
  logger?.debug("review", `Adversarial review: ${advisoryFindings.length} advisory findings (below threshold '${threshold}')`, {
42340
43133
  storyId: story.id,
@@ -42382,7 +43175,7 @@ async function runAdversarialReview(opts) {
42382
43175
  exitCode: 1,
42383
43176
  output: `Adversarial review failed:
42384
43177
 
42385
- ${formatFindings(blockingFindings)}`,
43178
+ ${formatFindings2(blockingFindings)}`,
42386
43179
  durationMs: durationMs2,
42387
43180
  findings: toAdversarialReviewFindings(blockingFindings),
42388
43181
  advisoryFindings: advisoryFindings.length > 0 ? toAdversarialReviewFindings(advisoryFindings) : undefined,
@@ -42507,89 +43300,6 @@ var init_language_commands = __esm(() => {
42507
43300
  };
42508
43301
  });
42509
43302
 
42510
- // src/review/semantic-helpers.ts
42511
- function validateLLMShape(parsed) {
42512
- if (typeof parsed !== "object" || parsed === null)
42513
- return null;
42514
- const obj = parsed;
42515
- if (typeof obj.passed !== "boolean")
42516
- return null;
42517
- if (!Array.isArray(obj.findings))
42518
- return null;
42519
- return { passed: obj.passed, findings: obj.findings };
42520
- }
42521
- function parseLLMResponse(raw) {
42522
- try {
42523
- return validateLLMShape(tryParseLLMJson(raw));
42524
- } catch {
42525
- return null;
42526
- }
42527
- }
42528
- function formatFindings2(findings) {
42529
- return findings.map((f) => `[${f.severity}] ${f.file}:${f.line} \u2014 ${f.issue}
42530
- Suggestion: ${f.suggestion}`).join(`
42531
- `);
42532
- }
42533
- function normalizeSeverity2(sev) {
42534
- if (sev === "warn")
42535
- return "warning";
42536
- if (sev === "unverifiable")
42537
- return "info";
42538
- if (sev === "critical" || sev === "error" || sev === "warning" || sev === "info" || sev === "low")
42539
- return sev;
42540
- return "info";
42541
- }
42542
- function isBlockingSeverity2(sev, threshold = "error") {
42543
- return (SEVERITY_RANK[sev] ?? 0) >= (SEVERITY_RANK[threshold] ?? 2);
42544
- }
42545
- function sanitizeRefModeFindings(findings, diffMode) {
42546
- if (diffMode !== "ref")
42547
- return findings;
42548
- return findings.map((finding) => needsDowngradeForMissingEvidence(finding) ? downgradeToUnverifiable(finding) : finding);
42549
- }
42550
- function needsDowngradeForMissingEvidence(finding) {
42551
- if ((SEVERITY_RANK[finding.severity] ?? 0) < SEVERITY_RANK.error)
42552
- return false;
42553
- return mentionsUnverifiedSource(finding) || !hasVerifiedEvidence(finding);
42554
- }
42555
- function mentionsUnverifiedSource(finding) {
42556
- const text = `${finding.issue} ${finding.suggestion}`.toLowerCase();
42557
- return UNVERIFIED_FINDING_PATTERNS.some((pattern) => text.includes(pattern));
42558
- }
42559
- function hasVerifiedEvidence(finding) {
42560
- const evidence = finding.verifiedBy;
42561
- return !!evidence?.file?.trim() && !!evidence.observed?.trim();
42562
- }
42563
- function downgradeToUnverifiable(finding) {
42564
- return {
42565
- ...finding,
42566
- severity: "unverifiable"
42567
- };
42568
- }
42569
- function toReviewFindings(findings) {
42570
- return findings.map((f) => ({
42571
- ruleId: "semantic",
42572
- severity: normalizeSeverity2(f.severity),
42573
- file: f.file,
42574
- line: f.line,
42575
- message: f.issue,
42576
- source: "semantic-review"
42577
- }));
42578
- }
42579
- var UNVERIFIED_FINDING_PATTERNS;
42580
- var init_semantic_helpers = __esm(() => {
42581
- init_severity();
42582
- UNVERIFIED_FINDING_PATTERNS = [
42583
- "cannot verify",
42584
- "can't verify",
42585
- "from diff alone",
42586
- "missing from diff",
42587
- "not found in diff",
42588
- "not present in diff",
42589
- "does not appear in diff"
42590
- ];
42591
- });
42592
-
42593
43303
  // src/review/semantic-debate.ts
42594
43304
  function recordSemanticDebateAudit(opts) {
42595
43305
  opts.runtime.reviewAuditor.recordDecision({
@@ -42698,7 +43408,7 @@ async function runSemanticDebate(opts) {
42698
43408
  exitCode: 1,
42699
43409
  output: `Semantic review failed:
42700
43410
 
42701
- ${findings.map((f) => `${f.ruleId}: ${f.message}`).join(`
43411
+ ${findings.map((f) => `${f.rule ?? "semantic"}: ${f.message}`).join(`
42702
43412
  `)}`,
42703
43413
  durationMs: durationMs2,
42704
43414
  findings,
@@ -42751,8 +43461,8 @@ ${findings.map((f) => `${f.ruleId}: ${f.message}`).join(`
42751
43461
  }
42752
43462
  const debateFindings = sanitizeRefModeFindings(deduped, diffMode);
42753
43463
  const debateThreshold = blockingThreshold ?? "error";
42754
- const debateBlocking = debateFindings.filter((f) => isBlockingSeverity2(f.severity, debateThreshold));
42755
- const debateAdvisory = debateFindings.filter((f) => !isBlockingSeverity2(f.severity, debateThreshold));
43464
+ const debateBlocking = debateFindings.filter((f) => isBlockingSeverity(f.severity, debateThreshold));
43465
+ const debateAdvisory = debateFindings.filter((f) => !isBlockingSeverity(f.severity, debateThreshold));
42756
43466
  const durationMs = Date.now() - startTime;
42757
43467
  if (!resolverPassed) {
42758
43468
  if (debateBlocking.length > 0) {
@@ -42778,7 +43488,7 @@ ${findings.map((f) => `${f.ruleId}: ${f.message}`).join(`
42778
43488
  exitCode: 1,
42779
43489
  output: `Semantic review failed:
42780
43490
 
42781
- ${formatFindings2(debateBlocking)}`,
43491
+ ${formatFindings(debateBlocking)}`,
42782
43492
  durationMs,
42783
43493
  findings: toReviewFindings(debateBlocking),
42784
43494
  advisoryFindings: debateAdvisory.length > 0 ? toReviewFindings(debateAdvisory) : undefined,
@@ -42840,6 +43550,7 @@ var init_semantic_debate = __esm(() => {
42840
43550
  });
42841
43551
 
42842
43552
  // src/review/semantic-evidence.ts
43553
+ import { isAbsolute as isAbsolute10 } from "path";
42843
43554
  async function substantiateSemanticEvidence(findings, diffMode, workdir, storyId) {
42844
43555
  if (diffMode !== "ref")
42845
43556
  return findings;
@@ -42853,7 +43564,9 @@ async function substantiateFinding(finding, workdir, storyId) {
42853
43564
  return finding;
42854
43565
  const file3 = finding.verifiedBy?.file?.trim() || finding.file;
42855
43566
  const contents = await readSafeFile(workdir, file3);
42856
- if (contents !== null && normalizedIncludes(contents, observed))
43567
+ if (contents === null)
43568
+ return finding;
43569
+ if (normalizedIncludes(contents, observed))
42857
43570
  return finding;
42858
43571
  _evidenceDeps.getLogger()?.warn("review", "Downgraded unsubstantiated semantic error finding", {
42859
43572
  storyId,
@@ -42867,13 +43580,21 @@ async function substantiateFinding(finding, workdir, storyId) {
42867
43580
  }
42868
43581
  async function readSafeFile(workdir, file3) {
42869
43582
  const validated = validateModulePath(file3, [workdir]);
42870
- if (!validated.valid || !validated.absolutePath)
42871
- return null;
42872
- try {
42873
- return await Bun.file(validated.absolutePath).text();
42874
- } catch {
42875
- return null;
43583
+ if (validated.valid && validated.absolutePath) {
43584
+ try {
43585
+ return await Bun.file(validated.absolutePath).text();
43586
+ } catch {
43587
+ return null;
43588
+ }
43589
+ }
43590
+ if (isAbsolute10(file3)) {
43591
+ try {
43592
+ return await Bun.file(file3).text();
43593
+ } catch {
43594
+ return null;
43595
+ }
42876
43596
  }
43597
+ return null;
42877
43598
  }
42878
43599
  function normalizedIncludes(contents, observed) {
42879
43600
  const normalizedObserved = normalizeEvidenceText(observed);
@@ -42902,7 +43623,7 @@ var init_semantic_evidence = __esm(() => {
42902
43623
  });
42903
43624
 
42904
43625
  // src/review/semantic.ts
42905
- import { relative as relative11, sep as sep4 } from "path";
43626
+ import { relative as relative12, sep as sep4 } from "path";
42906
43627
  function recordSemanticAudit(opts) {
42907
43628
  opts.runtime?.reviewAuditor.recordDecision({
42908
43629
  reviewer: "semantic",
@@ -42929,7 +43650,7 @@ async function runSemanticReview(opts) {
42929
43650
  naxConfig,
42930
43651
  featureName,
42931
43652
  resolverSession,
42932
- priorFailures,
43653
+ priorSemanticIterations,
42933
43654
  blockingThreshold,
42934
43655
  featureContextMarkdown,
42935
43656
  contextBundle,
@@ -42966,7 +43687,7 @@ async function runSemanticReview(opts) {
42966
43687
  const packageDir = workdir !== repoRoot ? workdir : undefined;
42967
43688
  const stat = await collectDiffStat(workdir, effectiveRef, { naxIgnoreIndex, packageDir });
42968
43689
  const packageDirRelative = projectDir && workdir !== projectDir ? (() => {
42969
- const rel = relative11(projectDir, workdir);
43690
+ const rel = relative12(projectDir, workdir);
42970
43691
  if (rel === ".." || rel.startsWith(`..${sep4}`))
42971
43692
  return;
42972
43693
  return rel && rel !== "." ? rel : undefined;
@@ -43037,7 +43758,7 @@ async function runSemanticReview(opts) {
43037
43758
  diff,
43038
43759
  storyGitRef: effectiveRef,
43039
43760
  stat,
43040
- priorFailures,
43761
+ priorSemanticIterations,
43041
43762
  excludePatterns: semanticConfig.excludePatterns
43042
43763
  });
43043
43764
  const prompt = featureCtxBlock ? `${featureCtxBlock}${basePrompt}` : basePrompt;
@@ -43094,7 +43815,7 @@ async function runSemanticReview(opts) {
43094
43815
  diff,
43095
43816
  storyGitRef: effectiveRef,
43096
43817
  stat,
43097
- priorFailures,
43818
+ priorSemanticIterations,
43098
43819
  excludePatterns,
43099
43820
  featureCtxBlock,
43100
43821
  blockingThreshold
@@ -43179,8 +43900,8 @@ async function runSemanticReview(opts) {
43179
43900
  const sanitizedFindings = await substantiateSemanticEvidence(sanitizeRefModeFindings(parsed.findings, diffMode), diffMode, workdir, story.id);
43180
43901
  const sanitizedParsed = { ...parsed, findings: sanitizedFindings };
43181
43902
  const threshold = blockingThreshold ?? "error";
43182
- const blockingFindings = sanitizedParsed.findings.filter((f) => isBlockingSeverity2(f.severity, threshold));
43183
- const advisoryFindings = sanitizedParsed.findings.filter((f) => !isBlockingSeverity2(f.severity, threshold));
43903
+ const blockingFindings = sanitizedParsed.findings.filter((f) => isBlockingSeverity(f.severity, threshold));
43904
+ const advisoryFindings = sanitizedParsed.findings.filter((f) => !isBlockingSeverity(f.severity, threshold));
43184
43905
  if (advisoryFindings.length > 0) {
43185
43906
  logger?.debug("review", `Semantic review: ${advisoryFindings.length} advisory findings (below threshold '${threshold}')`, {
43186
43907
  storyId: story.id,
@@ -43205,7 +43926,7 @@ async function runSemanticReview(opts) {
43205
43926
  });
43206
43927
  const output = `Semantic review failed:
43207
43928
 
43208
- ${formatFindings2(blockingFindings)}`;
43929
+ ${formatFindings(blockingFindings)}`;
43209
43930
  recordSemanticAudit({
43210
43931
  runtime,
43211
43932
  workdir,
@@ -43409,13 +44130,14 @@ async function runReview(opts) {
43409
44130
  featureName,
43410
44131
  resolverSession,
43411
44132
  priorFailures,
44133
+ priorSemanticIterations,
43412
44134
  featureContextMarkdown,
43413
44135
  contextBundles,
43414
44136
  projectDir,
43415
44137
  env: env2,
43416
44138
  naxIgnoreIndex,
43417
44139
  runtime,
43418
- priorAdversarialFindings
44140
+ priorAdversarialIterations
43419
44141
  } = opts;
43420
44142
  const startTime = Date.now();
43421
44143
  const logger = getSafeLogger();
@@ -43489,7 +44211,7 @@ Stage and commit these files before running review.`
43489
44211
  naxConfig,
43490
44212
  featureName,
43491
44213
  resolverSession,
43492
- priorFailures,
44214
+ priorSemanticIterations,
43493
44215
  blockingThreshold: config2.blockingThreshold,
43494
44216
  featureContextMarkdown,
43495
44217
  contextBundle: contextBundles?.semantic,
@@ -43537,7 +44259,7 @@ Stage and commit these files before running review.`
43537
44259
  projectDir,
43538
44260
  naxIgnoreIndex,
43539
44261
  runtime,
43540
- priorAdversarialFindings
44262
+ priorAdversarialIterations
43541
44263
  });
43542
44264
  checks3.push(result2);
43543
44265
  if (!result2.success && !firstFailure) {
@@ -43595,16 +44317,16 @@ var init_runner4 = __esm(() => {
43595
44317
 
43596
44318
  // src/review/verdict-writer.ts
43597
44319
  import { mkdir as mkdir6 } from "fs/promises";
43598
- import { join as join35 } from "path";
44320
+ import { join as join36 } from "path";
43599
44321
  async function writeReviewVerdict(entry) {
43600
44322
  const logger = getSafeLogger();
43601
44323
  try {
43602
- const projectDir = await _verdictWriterDeps.findNaxProjectRoot(entry.featureName ? join35(entry.featureName) : ".");
44324
+ const projectDir = await _verdictWriterDeps.findNaxProjectRoot(entry.featureName ? join36(entry.featureName) : ".");
43603
44325
  const baseDir = projectDir ?? ".";
43604
- const verdictDir = entry.featureName ? join35(baseDir, ".nax", "review-verdicts", entry.featureName) : join35(baseDir, ".nax", "review-verdicts", "_unknown");
44326
+ const verdictDir = entry.featureName ? join36(baseDir, ".nax", "review-verdicts", entry.featureName) : join36(baseDir, ".nax", "review-verdicts", "_unknown");
43605
44327
  await _verdictWriterDeps.mkdir(verdictDir, { recursive: true });
43606
44328
  const fileName = `${entry.storyId}.json`;
43607
- const filePath = join35(verdictDir, fileName);
44329
+ const filePath = join36(verdictDir, fileName);
43608
44330
  await _verdictWriterDeps.writeFile(filePath, JSON.stringify(entry, null, 2));
43609
44331
  logger?.debug("review", "Review verdict written", {
43610
44332
  storyId: entry.storyId,
@@ -43629,7 +44351,7 @@ var init_verdict_writer = __esm(() => {
43629
44351
  });
43630
44352
 
43631
44353
  // src/review/orchestrator.ts
43632
- import { join as join36 } from "path";
44354
+ import { join as join37 } from "path";
43633
44355
  var {spawn: spawn4 } = globalThis.Bun;
43634
44356
  async function getChangedFiles2(workdir, baseRef) {
43635
44357
  try {
@@ -43704,13 +44426,14 @@ class ReviewOrchestrator {
43704
44426
  featureName,
43705
44427
  resolverSession,
43706
44428
  priorFailures,
44429
+ priorSemanticIterations,
43707
44430
  featureContextMarkdown,
43708
44431
  contextBundles,
43709
44432
  projectDir,
43710
44433
  env: env2,
43711
44434
  naxIgnoreIndex,
43712
44435
  runtime,
43713
- priorAdversarialFindings
44436
+ priorAdversarialIterations
43714
44437
  } = opts;
43715
44438
  const logger = getSafeLogger();
43716
44439
  const hasSemantic = reviewConfig.checks.includes("semantic");
@@ -43755,12 +44478,14 @@ class ReviewOrchestrator {
43755
44478
  featureName,
43756
44479
  resolverSession,
43757
44480
  priorFailures,
44481
+ priorSemanticIterations,
43758
44482
  featureContextMarkdown,
43759
44483
  contextBundles,
43760
44484
  projectDir,
43761
44485
  env: env2,
43762
44486
  naxIgnoreIndex,
43763
- runtime
44487
+ runtime,
44488
+ priorAdversarialIterations
43764
44489
  });
43765
44490
  } else {
43766
44491
  const mechanicalCheckNames = ORDERED_MECHANICAL_REVIEW_CHECKS.filter((check2) => reviewConfig.checks.includes(check2));
@@ -43834,7 +44559,7 @@ class ReviewOrchestrator {
43834
44559
  naxConfig,
43835
44560
  featureName,
43836
44561
  resolverSession,
43837
- priorFailures,
44562
+ priorSemanticIterations,
43838
44563
  blockingThreshold: reviewConfig.blockingThreshold,
43839
44564
  featureContextMarkdown,
43840
44565
  contextBundle: contextBundles?.semantic,
@@ -43857,7 +44582,7 @@ class ReviewOrchestrator {
43857
44582
  projectDir,
43858
44583
  naxIgnoreIndex,
43859
44584
  runtime,
43860
- priorAdversarialFindings
44585
+ priorAdversarialIterations
43861
44586
  })
43862
44587
  ]);
43863
44588
  llmCheckResults = [semResult, advResult];
@@ -43877,13 +44602,14 @@ class ReviewOrchestrator {
43877
44602
  featureName,
43878
44603
  resolverSession,
43879
44604
  priorFailures,
44605
+ priorSemanticIterations,
43880
44606
  featureContextMarkdown,
43881
44607
  contextBundles,
43882
44608
  projectDir,
43883
44609
  env: env2,
43884
44610
  naxIgnoreIndex,
43885
44611
  runtime,
43886
- priorAdversarialFindings
44612
+ priorAdversarialIterations
43887
44613
  });
43888
44614
  llmCheckResults = llmResult.checks;
43889
44615
  }
@@ -43948,7 +44674,7 @@ class ReviewOrchestrator {
43948
44674
  const baseRef = storyGitRef ?? executionConfig?.storyGitRef;
43949
44675
  const changedFiles = await getChangedFiles2(workdir, baseRef);
43950
44676
  const repoRoot = projectDir ?? workdir;
43951
- const packageDir = scopePrefix ? join36(repoRoot, scopePrefix) : undefined;
44677
+ const packageDir = scopePrefix ? join37(repoRoot, scopePrefix) : undefined;
43952
44678
  const ignoreMatchers = naxIgnoreIndex?.getMatchers(packageDir) ?? await resolveNaxIgnorePatterns(repoRoot, packageDir);
43953
44679
  const visibleChangedFiles = filterNaxInternalPaths(changedFiles, ignoreMatchers);
43954
44680
  const scopedFiles = scopePrefix ? visibleChangedFiles.filter((f) => f === scopePrefix || f.startsWith(`${scopePrefix}/`)) : visibleChangedFiles;
@@ -43970,7 +44696,7 @@ class ReviewOrchestrator {
43970
44696
  passed: result.passed,
43971
44697
  output: result.output,
43972
44698
  exitCode: result.exitCode,
43973
- findings: result.findings
44699
+ findings: result.findings?.map((rf) => pluginToFinding(rf, workdir))
43974
44700
  });
43975
44701
  if (!result.passed) {
43976
44702
  builtIn.pluginReviewers = pluginResults;
@@ -44031,32 +44757,60 @@ class ReviewOrchestrator {
44031
44757
  featureName: ctx.prd.feature,
44032
44758
  resolverSession,
44033
44759
  priorFailures: ctx.story.priorFailures,
44760
+ priorSemanticIterations: ctx.priorSemanticIterations,
44034
44761
  featureContextMarkdown: ctx.featureContextMarkdown,
44035
44762
  contextBundles,
44036
44763
  projectDir: ctx.projectDir,
44037
44764
  env: ctx.worktreeDependencyContext?.env,
44038
44765
  naxIgnoreIndex: ctx.naxIgnoreIndex,
44039
44766
  runtime: ctx.runtime,
44040
- priorAdversarialFindings: ctx.priorAdversarialFindings
44767
+ priorAdversarialIterations: ctx.priorAdversarialIterations
44041
44768
  });
44042
44769
  const advCheck = result.builtIn.checks?.find((c) => c.check === "adversarial");
44043
44770
  if (advCheck) {
44044
- if (!advCheck.success && (advCheck.findings?.length ?? 0) > 0) {
44045
- ctx.priorAdversarialFindings = {
44046
- round: (ctx.priorAdversarialFindings?.round ?? 0) + 1,
44047
- findings: (advCheck.findings ?? []).map((f) => ({
44048
- severity: f.severity,
44049
- category: f.category,
44050
- file: f.file,
44051
- line: f.line,
44052
- issue: f.message
44053
- }))
44771
+ if (!advCheck.success && !advCheck.skipped) {
44772
+ const prior = ctx.priorAdversarialIterations ?? [];
44773
+ const findingsBefore = prior.length > 0 ? prior[prior.length - 1].findingsAfter ?? [] : [];
44774
+ const findingsAfter = advCheck.findings ?? [];
44775
+ const now = new Date().toISOString();
44776
+ const newIteration = {
44777
+ iterationNum: prior.length + 1,
44778
+ findingsBefore,
44779
+ fixesApplied: [],
44780
+ findingsAfter,
44781
+ outcome: classifyOutcome(findingsBefore, findingsAfter),
44782
+ startedAt: now,
44783
+ finishedAt: now
44054
44784
  };
44785
+ ctx.priorAdversarialIterations = [...prior, newIteration];
44055
44786
  } else if (advCheck.success && !advCheck.skipped) {
44056
- ctx.priorAdversarialFindings = undefined;
44787
+ ctx.priorAdversarialIterations = undefined;
44057
44788
  }
44058
44789
  } else if (retrySkipChecks?.has("adversarial")) {
44059
- ctx.priorAdversarialFindings = undefined;
44790
+ ctx.priorAdversarialIterations = undefined;
44791
+ }
44792
+ const semCheck = result.builtIn.checks?.find((c) => c.check === "semantic");
44793
+ if (semCheck) {
44794
+ if (!semCheck.success && !semCheck.skipped) {
44795
+ const prior = ctx.priorSemanticIterations ?? [];
44796
+ const findingsBefore = prior.length > 0 ? prior[prior.length - 1].findingsAfter ?? [] : [];
44797
+ const findingsAfter = semCheck.findings ?? [];
44798
+ const now = new Date().toISOString();
44799
+ const newIteration = {
44800
+ iterationNum: prior.length + 1,
44801
+ findingsBefore,
44802
+ fixesApplied: [],
44803
+ findingsAfter,
44804
+ outcome: classifyOutcome(findingsBefore, findingsAfter),
44805
+ startedAt: now,
44806
+ finishedAt: now
44807
+ };
44808
+ ctx.priorSemanticIterations = [...prior, newIteration];
44809
+ } else if (semCheck.success && !semCheck.skipped) {
44810
+ ctx.priorSemanticIterations = undefined;
44811
+ }
44812
+ } else if (retrySkipChecks?.has("semantic")) {
44813
+ ctx.priorSemanticIterations = undefined;
44060
44814
  }
44061
44815
  return result;
44062
44816
  }
@@ -44064,6 +44818,7 @@ class ReviewOrchestrator {
44064
44818
  var _orchestratorDeps2, reviewOrchestrator;
44065
44819
  var init_orchestrator2 = __esm(() => {
44066
44820
  init_engine();
44821
+ init_findings();
44067
44822
  init_logger2();
44068
44823
  init_path_filters();
44069
44824
  init_adversarial();
@@ -44201,11 +44956,9 @@ var init_review = __esm(() => {
44201
44956
  return { action: "continue", cost: reviewCost };
44202
44957
  }
44203
44958
  if (!result.success) {
44204
- const pluginFindings = result.builtIn.pluginReviewers?.flatMap((pr) => pr.findings ?? []) ?? [];
44205
44959
  const semanticFindings = (result.builtIn.checks ?? []).filter((c) => c.check === "semantic" && !c.success && c.findings?.length).flatMap((c) => c.findings ?? []);
44206
- const allFindings = [...pluginFindings, ...semanticFindings];
44207
- if (allFindings.length > 0) {
44208
- ctx.reviewFindings = allFindings;
44960
+ if (semanticFindings.length > 0) {
44961
+ ctx.reviewFindings = semanticFindings;
44209
44962
  }
44210
44963
  if (result.pluginFailed) {
44211
44964
  if (ctx.interaction && isTriggerEnabled("security-review", ctx.config)) {
@@ -44238,481 +44991,6 @@ var init_review = __esm(() => {
44238
44991
  createReviewerSession
44239
44992
  };
44240
44993
  });
44241
- // src/review/index.ts
44242
- var init_review2 = __esm(() => {
44243
- init_adversarial();
44244
- init_categorization();
44245
- init_diff_utils();
44246
- init_runner4();
44247
- });
44248
-
44249
- // src/verification/shared-rectification-loop.ts
44250
- function buildProgressivePromptPreamble(opts) {
44251
- const rethinkAt = Math.min(opts.rethinkAtAttempt ?? 2, opts.maxAttempts);
44252
- const urgencyAt = Math.min(opts.urgencyAtAttempt ?? 3, opts.maxAttempts);
44253
- const shouldRethink = opts.attempt >= rethinkAt;
44254
- const shouldUrgency = opts.attempt >= urgencyAt;
44255
- if (!shouldRethink && !shouldUrgency) {
44256
- return "";
44257
- }
44258
- if (shouldUrgency) {
44259
- opts.logger?.info(opts.stage, "Progressive prompt escalation: urgency + rethink injected", {
44260
- attempt: opts.attempt,
44261
- rethinkAtAttempt: rethinkAt,
44262
- urgencyAtAttempt: urgencyAt,
44263
- maxAttempts: opts.maxAttempts
44264
- });
44265
- } else {
44266
- opts.logger?.info(opts.stage, "Progressive prompt escalation: rethink injected", {
44267
- attempt: opts.attempt,
44268
- rethinkAtAttempt: rethinkAt,
44269
- maxAttempts: opts.maxAttempts
44270
- });
44271
- }
44272
- const urgencySection = shouldUrgency ? opts.urgencySection : "";
44273
- const rethinkSection = shouldRethink ? opts.rethinkSection : "";
44274
- return `${urgencySection}${rethinkSection}`;
44275
- }
44276
- async function runRetryLoop(input) {
44277
- let currentFailure = input.failure;
44278
- const previous = [...input.previousAttempts];
44279
- for (let attempt = 1;attempt <= input.maxAttempts; attempt++) {
44280
- const prompt = await Promise.resolve(input.buildPrompt(currentFailure, previous));
44281
- const result = await input.execute(prompt);
44282
- const outcome = await input.verify(result);
44283
- previous.push({ attempt, result });
44284
- if (outcome.passed) {
44285
- return { outcome: "fixed", result, attempts: attempt };
44286
- }
44287
- currentFailure = outcome.newFailure;
44288
- if (input.shouldAbort?.(currentFailure, attempt)) {
44289
- return { outcome: "aborted", attempts: attempt };
44290
- }
44291
- }
44292
- return { outcome: "exhausted", attempts: input.maxAttempts, finalFailure: currentFailure };
44293
- }
44294
-
44295
- // src/pipeline/stages/autofix-agent.ts
44296
- var exports_autofix_agent = {};
44297
- __export(exports_autofix_agent, {
44298
- runAgentRectification: () => runAgentRectification
44299
- });
44300
- function collectFailedChecks(ctx) {
44301
- return (ctx.reviewResult?.checks ?? []).filter((c) => !c.success);
44302
- }
44303
- function getCheckSignature(checks3) {
44304
- return [...new Set(checks3.map((check2) => check2.check))].sort().join("|");
44305
- }
44306
- function buildAutofixEscalationPreamble(attempt, maxAttempts, rethinkAtAttempt, urgencyAtAttempt) {
44307
- return buildProgressivePromptPreamble({
44308
- attempt,
44309
- maxAttempts,
44310
- rethinkAtAttempt,
44311
- urgencyAtAttempt,
44312
- stage: "autofix",
44313
- logger: getLogger(),
44314
- urgencySection: `## Final Autofix Attempt Before Escalation
44315
-
44316
- This is attempt ${attempt}. If the review still fails after this, autofix will escalate instead of retrying.
44317
- A different approach is required. Do not repeat the same fix.
44318
-
44319
- `,
44320
- rethinkSection: `## Previous Attempt Did Not Fix the Failures
44321
-
44322
- Your previous fix attempt (attempt ${attempt}) did not resolve the quality errors. Rethink your approach.
44323
-
44324
- - Do not repeat the same edit pattern.
44325
- - Re-read the failing diagnostics carefully.
44326
- - Try a fundamentally different fix strategy if the earlier one did not work.
44327
-
44328
- `
44329
- });
44330
- }
44331
- async function runAgentRectification(ctx, lintFixCmd, formatFixCmd, effectiveWorkdir) {
44332
- const logger = getLogger();
44333
- const maxPerCycle = ctx.config.quality.autofix?.maxAttempts ?? 2;
44334
- const maxTotal = ctx.config.quality.autofix?.maxTotalAttempts ?? 10;
44335
- const rethinkAtAttempt = ctx.config.quality.autofix?.rethinkAtAttempt ?? 2;
44336
- const urgencyAtAttempt = ctx.config.quality.autofix?.urgencyAtAttempt ?? 3;
44337
- const consumed = ctx.autofixAttempt ?? 0;
44338
- const failedChecks = collectFailedChecks(ctx);
44339
- if (failedChecks.length === 0) {
44340
- logger.debug("autofix", "No failed checks found \u2014 skipping agent rectification", { storyId: ctx.story.id });
44341
- return { succeeded: false, cost: 0 };
44342
- }
44343
- if (consumed >= maxTotal) {
44344
- logger.warn("autofix", "Global autofix budget exhausted \u2014 escalating", {
44345
- storyId: ctx.story.id,
44346
- totalAttempts: consumed,
44347
- maxTotalAttempts: maxTotal
44348
- });
44349
- return { succeeded: false, cost: 0 };
44350
- }
44351
- const remainingBudget = maxTotal - consumed;
44352
- const maxAttempts = Math.min(maxPerCycle, remainingBudget);
44353
- if (!ctx.agentManager) {
44354
- logger.error("autofix", "Agent manager unavailable \u2014 cannot run agent rectification", { storyId: ctx.story.id });
44355
- return { succeeded: false, cost: 0 };
44356
- }
44357
- if (!ctx.runtime) {
44358
- throw new NaxError("runtime required \u2014 legacy agentManager.run path removed (ADR-019 Wave 3, issue #762)", "DISPATCH_NO_RUNTIME", { stage: "rectification", storyId: ctx.story.id });
44359
- }
44360
- const { agentManager } = ctx;
44361
- const { runtime } = ctx;
44362
- let implementerChecks = failedChecks;
44363
- let testWriterChecks = [];
44364
- const stageTestFilePatterns = typeof ctx.rootConfig.execution?.smartTestRunner === "object" ? ctx.rootConfig.execution.smartTestRunner?.testFilePatterns : undefined;
44365
- const lintOutputFormat = ctx.config.quality.lintOutput?.format ?? "auto";
44366
- const typecheckOutputFormat = ctx.config.quality.typecheckOutput?.format ?? "auto";
44367
- for (const check2 of failedChecks) {
44368
- if (check2.check === "adversarial" || check2.check === "lint" || check2.check === "typecheck") {
44369
- const { testFindings, sourceFindings } = splitFindingsByScope(check2, stageTestFilePatterns, lintOutputFormat, typecheckOutputFormat);
44370
- if (testFindings || sourceFindings) {
44371
- if (testFindings)
44372
- testWriterChecks = [...testWriterChecks, testFindings];
44373
- if (sourceFindings) {
44374
- implementerChecks = implementerChecks.map((c) => c === check2 ? sourceFindings : c);
44375
- } else {
44376
- implementerChecks = implementerChecks.filter((c) => c !== check2);
44377
- }
44378
- }
44379
- }
44380
- }
44381
- let autofixCostAccum = 0;
44382
- if (testWriterChecks.length > 0) {
44383
- if (ctx.routing.testStrategy === "no-test") {
44384
- logger.warn("autofix", "Skipping test-writer rectification (no-test strategy)", {
44385
- storyId: ctx.story.id,
44386
- checks: testWriterChecks.map((c) => c.check)
44387
- });
44388
- } else {
44389
- logger.info("autofix", "Routing test-file findings to test-writer session", {
44390
- storyId: ctx.story.id,
44391
- checks: testWriterChecks.map((c) => c.check)
44392
- });
44393
- autofixCostAccum += await _autofixDeps.runTestWriterRectification(ctx, testWriterChecks, ctx.story, agentManager);
44394
- }
44395
- }
44396
- if (implementerChecks.length === 0) {
44397
- logger.info("autofix", "All findings routed to test-writer \u2014 skipping implementer loop", {
44398
- storyId: ctx.story.id
44399
- });
44400
- return { succeeded: false, cost: autofixCostAccum };
44401
- }
44402
- let unresolvedReason;
44403
- let autofixBeforeRef;
44404
- const implementerSession = formatSessionName({
44405
- workdir: ctx.workdir,
44406
- featureName: ctx.prd.feature,
44407
- storyId: ctx.story.id,
44408
- role: "implementer"
44409
- });
44410
- let sessionConfirmedOpen = consumed === 0;
44411
- logger.info("autofix", "Starting agent rectification for review failures", {
44412
- storyId: ctx.story.id,
44413
- failedChecks: implementerChecks.map((check2) => check2.check),
44414
- maxAttempts,
44415
- totalUsed: consumed,
44416
- maxTotalAttempts: maxTotal
44417
- });
44418
- const initialFailure = {
44419
- checks: implementerChecks,
44420
- checkSignature: getCheckSignature(implementerChecks)
44421
- };
44422
- let currentAttempt = 0;
44423
- let currentConsecutiveNoOps = 0;
44424
- let currentCheckSignatureChanged = false;
44425
- let failOpenAborted = false;
44426
- let heldHandle;
44427
- const outcome = await runRetryLoop({
44428
- stage: "rectification",
44429
- storyId: ctx.story.id,
44430
- packageDir: ctx.workdir,
44431
- maxAttempts,
44432
- failure: initialFailure,
44433
- previousAttempts: [],
44434
- shouldAbort: (_failure, _attempt) => failOpenAborted,
44435
- buildPrompt: (failure, previous) => {
44436
- currentAttempt = previous.length + 1;
44437
- const lastResult = previous[previous.length - 1]?.result;
44438
- const lastWasNoOp = lastResult?.noOp ?? false;
44439
- currentConsecutiveNoOps = lastResult?.consecutiveNoOps ?? 0;
44440
- currentCheckSignatureChanged = failure.checkSignature !== initialFailure.checkSignature;
44441
- logger.debug("autofix", `Building prompt for attempt ${consumed + currentAttempt}/${maxTotal}`, {
44442
- storyId: ctx.story.id,
44443
- lastWasNoOp,
44444
- consecutiveNoOps: currentConsecutiveNoOps
44445
- });
44446
- if (lastWasNoOp) {
44447
- return RectifierPromptBuilder.noOpReprompt(failure.checks, currentConsecutiveNoOps, MAX_CONSECUTIVE_NOOP_REPROMPTS);
44448
- }
44449
- if (currentAttempt === 1 && sessionConfirmedOpen) {
44450
- return RectifierPromptBuilder.firstAttemptDelta(failure.checks, maxAttempts);
44451
- }
44452
- const isSessionContinuation = currentAttempt > 1 && sessionConfirmedOpen;
44453
- if (isSessionContinuation) {
44454
- if (currentCheckSignatureChanged) {
44455
- const attemptsRemaining = Math.max(1, maxAttempts - currentAttempt + 1);
44456
- return RectifierPromptBuilder.firstAttemptDelta(failure.checks, attemptsRemaining);
44457
- }
44458
- return RectifierPromptBuilder.continuation(failure.checks, currentAttempt, Math.min(rethinkAtAttempt, maxAttempts), Math.min(urgencyAtAttempt, maxAttempts));
44459
- }
44460
- let prompt = RectifierPromptBuilder.reviewRectification(failure.checks, ctx.story);
44461
- const escalationPreamble = buildAutofixEscalationPreamble(currentAttempt, maxAttempts, rethinkAtAttempt, urgencyAtAttempt);
44462
- if (escalationPreamble) {
44463
- prompt = `${escalationPreamble}${prompt}`;
44464
- }
44465
- return prompt;
44466
- },
44467
- execute: async (prompt) => {
44468
- logger.info("autofix", `Agent rectification attempt ${consumed + currentAttempt}/${maxTotal}`, {
44469
- storyId: ctx.story.id
44470
- });
44471
- autofixBeforeRef = await _autofixDeps.captureGitRef(ctx.workdir);
44472
- ctx.autofixAttempt = consumed + currentAttempt;
44473
- const modelTier = ctx.story.routing?.modelTier ?? ctx.rootConfig.autoMode.escalation.tierOrder[0]?.tier ?? "balanced";
44474
- const defaultAgent = agentManager.getDefault();
44475
- const modelDef = resolveModelForAgent(ctx.rootConfig.models, ctx.routing.agent ?? defaultAgent, modelTier, defaultAgent);
44476
- let result;
44477
- try {
44478
- if (!heldHandle) {
44479
- heldHandle = await runtime.sessionManager.openSession(implementerSession, {
44480
- agentName: defaultAgent,
44481
- role: "implementer",
44482
- workdir: ctx.workdir,
44483
- pipelineStage: "rectification",
44484
- modelDef,
44485
- timeoutSeconds: ctx.config.execution.sessionTimeoutSeconds,
44486
- featureName: ctx.prd.feature,
44487
- storyId: ctx.story.id,
44488
- signal: runtime.signal
44489
- });
44490
- }
44491
- const turn = await agentManager.runAsSession(defaultAgent, heldHandle, prompt, {
44492
- storyId: ctx.story.id,
44493
- featureName: ctx.prd.feature,
44494
- workdir: ctx.workdir,
44495
- projectDir: ctx.projectDir,
44496
- pipelineStage: "rectification",
44497
- sessionRole: "implementer",
44498
- signal: runtime.signal,
44499
- maxTurns: ctx.config.agent?.maxInteractionTurns
44500
- });
44501
- result = {
44502
- success: true,
44503
- exitCode: 0,
44504
- output: turn.output,
44505
- rateLimited: false,
44506
- durationMs: 0,
44507
- estimatedCostUsd: turn.estimatedCostUsd,
44508
- ...turn.exactCostUsd !== undefined && { exactCostUsd: turn.exactCostUsd },
44509
- ...turn.tokenUsage && { tokenUsage: turn.tokenUsage },
44510
- ...heldHandle.protocolIds && { protocolIds: heldHandle.protocolIds }
44511
- };
44512
- sessionConfirmedOpen = true;
44513
- } catch (err) {
44514
- sessionConfirmedOpen = false;
44515
- if (heldHandle) {
44516
- const stale = heldHandle;
44517
- heldHandle = undefined;
44518
- await runtime.sessionManager.closeSession(stale).catch(() => {});
44519
- }
44520
- throw err;
44521
- }
44522
- autofixCostAccum += result.estimatedCostUsd ?? 0;
44523
- if (ctx.sessionManager && ctx.sessionId && result.protocolIds) {
44524
- try {
44525
- const desc = ctx.sessionManager.get(ctx.sessionId);
44526
- if (desc) {
44527
- ctx.sessionManager.bindHandle(ctx.sessionId, implementerSession, result.protocolIds);
44528
- }
44529
- } catch {}
44530
- }
44531
- if (result.output) {
44532
- const unresolvedMatch = UNRESOLVED_REGEX.exec(result.output);
44533
- if (unresolvedMatch) {
44534
- unresolvedReason = (unresolvedMatch[1] ?? "reviewer findings contradicted each other").trim();
44535
- logger.warn("autofix", "Implementer signalled reviewer contradiction \u2014 escalating", {
44536
- storyId: ctx.story.id,
44537
- unresolvedReason
44538
- });
44539
- throw new Error("AUTOFIX_UNRESOLVED");
44540
- }
44541
- }
44542
- if (ctx.reviewerSession && result.output) {
44543
- const maxClarifications = ctx.config.review?.dialogue?.maxClarificationsPerAttempt ?? 3;
44544
- let clarifyCount = 0;
44545
- const clarifyRegex = new RegExp(CLARIFY_REGEX.source, `${CLARIFY_REGEX.flags}g`);
44546
- let match;
44547
- while ((match = clarifyRegex.exec(result.output)) !== null) {
44548
- if (clarifyCount >= maxClarifications)
44549
- break;
44550
- const question = match[1]?.trim() ?? "";
44551
- if (!question)
44552
- continue;
44553
- try {
44554
- await ctx.reviewerSession.clarify(question);
44555
- clarifyCount++;
44556
- } catch (err) {
44557
- logger.debug("autofix", "reviewerSession.clarify() failed \u2014 proceeding without clarification", {
44558
- storyId: ctx.story.id
44559
- });
44560
- }
44561
- }
44562
- }
44563
- const refAfterAttempt = await _autofixDeps.captureGitRef(ctx.workdir);
44564
- const sourceFilesChanged = autofixBeforeRef === undefined || refAfterAttempt === undefined || autofixBeforeRef !== refAfterAttempt;
44565
- const noOp = !sourceFilesChanged;
44566
- const checkSignatureChanged = false;
44567
- const newConsecutiveNoOps = noOp ? currentConsecutiveNoOps + 1 : 0;
44568
- return {
44569
- agentSuccess: result.success,
44570
- cost: result.estimatedCostUsd ?? 0,
44571
- checkSignatureChanged,
44572
- noOp,
44573
- consecutiveNoOps: newConsecutiveNoOps
44574
- };
44575
- },
44576
- verify: async (result) => {
44577
- const failingChecks = (ctx.reviewResult?.checks ?? []).filter((c) => !c.success);
44578
- const hasMechanicalFailure = failingChecks.some((c) => !LLM_REVIEW_CHECKS.has(c.check));
44579
- const recheckWorthwhile = !result.noOp || hasMechanicalFailure;
44580
- const passed = recheckWorthwhile ? await _autofixDeps.recheckReview(ctx) : false;
44581
- if (passed) {
44582
- if (result.noOp) {
44583
- logger.info("autofix", `[OK] Checks pass without new commit on attempt ${consumed + currentAttempt} (transient or already resolved)`, { storyId: ctx.story.id });
44584
- } else {
44585
- logger.info("autofix", `[OK] Agent rectification succeeded on attempt ${consumed + currentAttempt}`, {
44586
- storyId: ctx.story.id
44587
- });
44588
- }
44589
- return { passed: true };
44590
- }
44591
- if (result.consecutiveNoOps > MAX_CONSECUTIVE_NOOP_REPROMPTS) {
44592
- logger.warn("autofix", "No source changes (no-op limit reached) \u2014 counting as consumed attempt", {
44593
- storyId: ctx.story.id,
44594
- attemptsRemaining: maxAttempts - currentAttempt
44595
- });
44596
- const passedChecks = (ctx.reviewResult?.checks ?? []).filter((c) => c.success && !c.skipped).map((c) => c.check);
44597
- if (passedChecks.length > 0) {
44598
- ctx.retrySkipChecks = new Set(passedChecks);
44599
- logger.debug("autofix", "No source changes \u2014 skipping already-passed checks on recheck", {
44600
- storyId: ctx.story.id,
44601
- skippedChecks: passedChecks
44602
- });
44603
- }
44604
- return {
44605
- passed: false,
44606
- newFailure: initialFailure
44607
- };
44608
- }
44609
- if (result.noOp) {
44610
- logger.info("autofix", "No source changes and checks still failing \u2014 re-prompting with stronger directive (counts as consumed attempt)", {
44611
- storyId: ctx.story.id,
44612
- noOpCount: `${result.consecutiveNoOps}/${MAX_CONSECUTIVE_NOOP_REPROMPTS}`,
44613
- attemptsRemaining: maxAttempts - currentAttempt
44614
- });
44615
- return {
44616
- passed: false,
44617
- newFailure: initialFailure
44618
- };
44619
- }
44620
- const updatedFailed = collectFailedChecks(ctx);
44621
- const hasNewLintFailure = updatedFailed.some((c) => c.check === "lint");
44622
- if (hasNewLintFailure && (lintFixCmd || formatFixCmd)) {
44623
- if (lintFixCmd) {
44624
- logger.debug("autofix", "Agent introduced lint errors \u2014 running lintFix before next attempt", {
44625
- storyId: ctx.story.id
44626
- });
44627
- pipelineEventBus.emit({ type: "autofix:started", storyId: ctx.story.id, command: lintFixCmd });
44628
- await _autofixDeps.runQualityCommand({
44629
- commandName: "lintFix",
44630
- command: lintFixCmd,
44631
- workdir: effectiveWorkdir,
44632
- storyId: ctx.story.id
44633
- });
44634
- }
44635
- if (formatFixCmd) {
44636
- pipelineEventBus.emit({ type: "autofix:started", storyId: ctx.story.id, command: formatFixCmd });
44637
- await _autofixDeps.runQualityCommand({
44638
- commandName: "formatFix",
44639
- command: formatFixCmd,
44640
- workdir: effectiveWorkdir,
44641
- storyId: ctx.story.id
44642
- });
44643
- }
44644
- const mechPassed = await _autofixDeps.recheckReview(ctx);
44645
- pipelineEventBus.emit({ type: "autofix:completed", storyId: ctx.story.id, fixed: mechPassed });
44646
- if (mechPassed) {
44647
- logger.info("autofix", `[OK] Mechanical fix resolved agent-introduced lint errors on attempt ${consumed + currentAttempt}`, {
44648
- storyId: ctx.story.id
44649
- });
44650
- return { passed: true };
44651
- }
44652
- }
44653
- if (updatedFailed.length > 0) {
44654
- const updatedCheckSignature = getCheckSignature(updatedFailed);
44655
- currentCheckSignatureChanged = updatedCheckSignature !== initialFailure.checkSignature;
44656
- logger.warn("autofix", `Agent rectification still failing after attempt ${consumed + currentAttempt}`, {
44657
- storyId: ctx.story.id,
44658
- attemptsRemaining: maxAttempts - currentAttempt,
44659
- globalBudgetRemaining: maxTotal - (consumed + currentAttempt)
44660
- });
44661
- return {
44662
- passed: false,
44663
- newFailure: {
44664
- checks: updatedFailed,
44665
- checkSignature: updatedCheckSignature
44666
- }
44667
- };
44668
- }
44669
- const isFailOpenOnly = (ctx.reviewResult?.checks ?? []).some((c) => c.failOpen);
44670
- if (isFailOpenOnly) {
44671
- failOpenAborted = true;
44672
- }
44673
- logger.warn("autofix", isFailOpenOnly ? "Adversarial timed out during recheck (fail-open) \u2014 aborting retry to avoid stale re-prompt" : "Agent rectification exhausted \u2014 no failed checks detected after recheck", {
44674
- storyId: ctx.story.id,
44675
- attemptsUsed: currentAttempt,
44676
- globalBudgetUsed: consumed + currentAttempt,
44677
- maxTotalAttempts: maxTotal
44678
- });
44679
- return {
44680
- passed: false,
44681
- newFailure: initialFailure
44682
- };
44683
- }
44684
- }).catch((error48) => {
44685
- if (error48 instanceof Error && error48.message === "AUTOFIX_AGENT_NOT_FOUND") {
44686
- return { outcome: "exhausted", attempts: 0, finalFailure: initialFailure };
44687
- }
44688
- if (error48 instanceof Error && error48.message === "AUTOFIX_UNRESOLVED") {
44689
- return { outcome: "exhausted", attempts: 0, finalFailure: initialFailure };
44690
- }
44691
- throw error48;
44692
- }).finally(async () => {
44693
- if (heldHandle) {
44694
- const stale = heldHandle;
44695
- heldHandle = undefined;
44696
- await runtime.sessionManager.closeSession(stale).catch(() => {});
44697
- }
44698
- });
44699
- const succeeded = outcome.outcome === "fixed";
44700
- return { succeeded, cost: autofixCostAccum, unresolvedReason };
44701
- }
44702
- var CLARIFY_REGEX, UNRESOLVED_REGEX, MAX_CONSECUTIVE_NOOP_REPROMPTS = 1;
44703
- var init_autofix_agent = __esm(() => {
44704
- init_config();
44705
- init_errors();
44706
- init_logger2();
44707
- init_prompts();
44708
- init_review2();
44709
- init_naming();
44710
- init_event_bus();
44711
- init_autofix();
44712
- init_autofix_scope_split();
44713
- CLARIFY_REGEX = /^CLARIFY:\s*(.+)$/ms;
44714
- UNRESOLVED_REGEX = /^UNRESOLVED:\s*(.+)$/ms;
44715
- });
44716
44994
 
44717
44995
  // src/pipeline/stages/autofix.ts
44718
44996
  async function recheckReview(ctx) {
@@ -44729,10 +45007,10 @@ var autofixStage, _autofixDeps;
44729
45007
  var init_autofix = __esm(() => {
44730
45008
  init_logger2();
44731
45009
  init_quality();
44732
- init_git();
44733
45010
  init_event_bus();
45011
+ init_autofix_agent();
44734
45012
  init_autofix_scope_split();
44735
- init_autofix_test_writer();
45013
+ init_autofix_test_writer2();
44736
45014
  autofixStage = {
44737
45015
  name: "autofix",
44738
45016
  enabled(ctx) {
@@ -44823,7 +45101,7 @@ var init_autofix = __esm(() => {
44823
45101
  if (ctx.routing.testStrategy === "no-test") {
44824
45102
  const failedChecks = (reviewResult.checks ?? []).filter((c) => !c.success);
44825
45103
  if (failedChecks.length > 0 && failedChecks.every((c) => {
44826
- const { testFindings, sourceFindings } = splitFindingsByScope(c, testFilePatterns, lintOutputFormat, typecheckOutputFormat);
45104
+ const { testFindings, sourceFindings } = splitFindingsByScope(c, testFilePatterns, lintOutputFormat, typecheckOutputFormat, { workdir: ctx.workdir });
44827
45105
  return testFindings !== null && sourceFindings === null;
44828
45106
  })) {
44829
45107
  const skippedFindingCount = failedChecks.flatMap((c) => c.findings ?? []).length;
@@ -44896,8 +45174,7 @@ var init_autofix = __esm(() => {
44896
45174
  _autofixDeps = {
44897
45175
  runQualityCommand,
44898
45176
  recheckReview,
44899
- captureGitRef,
44900
- runAgentRectification: (ctx, lintFixCmd, formatFixCmd, effectiveWorkdir) => Promise.resolve().then(() => (init_autofix_agent(), exports_autofix_agent)).then(({ runAgentRectification: runAgentRectification2 }) => runAgentRectification2(ctx, lintFixCmd, formatFixCmd, effectiveWorkdir)),
45177
+ runAgentRectification: runAgentRectificationV2,
44901
45178
  runTestWriterRectification: (ctx, testWriterChecks, story, agentManager) => runTestWriterRectification(ctx, testWriterChecks, story, agentManager)
44902
45179
  };
44903
45180
  });
@@ -44910,6 +45187,17 @@ async function persistSemanticVerdict(featureDir, storyId, verdict) {
44910
45187
  const filePath = path8.join(dir, `${storyId}.json`);
44911
45188
  await _semanticVerdictDeps.writeFile(filePath, JSON.stringify(verdict, null, 2));
44912
45189
  }
45190
+ function migrateSemanticVerdict(verdict) {
45191
+ if (!verdict.findings?.length)
45192
+ return verdict;
45193
+ const first = verdict.findings[0];
45194
+ if ("source" in first)
45195
+ return verdict;
45196
+ return {
45197
+ ...verdict,
45198
+ findings: verdict.findings.map((f) => reviewFindingToFinding(f))
45199
+ };
45200
+ }
44913
45201
  async function loadSemanticVerdicts(featureDir) {
44914
45202
  const dir = path8.join(featureDir, "semantic-verdicts");
44915
45203
  let files;
@@ -44927,7 +45215,8 @@ async function loadSemanticVerdicts(featureDir) {
44927
45215
  const filePath = path8.join(dir, file3);
44928
45216
  const content = await _semanticVerdictDeps.readFile(filePath);
44929
45217
  try {
44930
- results.push(JSON.parse(content));
45218
+ const parsed = JSON.parse(content);
45219
+ results.push(migrateSemanticVerdict(parsed));
44931
45220
  } catch {
44932
45221
  _semanticVerdictDeps.logDebug(`Skipping invalid JSON in semantic-verdicts/${file3}`);
44933
45222
  }
@@ -44936,6 +45225,7 @@ async function loadSemanticVerdicts(featureDir) {
44936
45225
  }
44937
45226
  var _semanticVerdictDeps;
44938
45227
  var init_semantic_verdict = __esm(() => {
45228
+ init_findings();
44939
45229
  init_logger2();
44940
45230
  _semanticVerdictDeps = {
44941
45231
  mkdirp: async (dir) => {
@@ -45088,10 +45378,10 @@ var init_effectiveness = __esm(() => {
45088
45378
 
45089
45379
  // src/execution/progress.ts
45090
45380
  import { appendFile as appendFile3, mkdir as mkdir7 } from "fs/promises";
45091
- import { join as join37 } from "path";
45381
+ import { join as join38 } from "path";
45092
45382
  async function appendProgress(featureDir, storyId, status, message) {
45093
45383
  await mkdir7(featureDir, { recursive: true });
45094
- const progressPath = join37(featureDir, "progress.txt");
45384
+ const progressPath = join38(featureDir, "progress.txt");
45095
45385
  const timestamp = new Date().toISOString();
45096
45386
  const entry = `[${timestamp}] ${storyId} \u2014 ${status.toUpperCase()} \u2014 ${message}
45097
45387
  `;
@@ -45223,7 +45513,7 @@ var init_completion = __esm(() => {
45223
45513
 
45224
45514
  // src/constitution/loader.ts
45225
45515
  import { existsSync as existsSync19 } from "fs";
45226
- import { join as join38 } from "path";
45516
+ import { join as join39 } from "path";
45227
45517
  function truncateToTokens(text, maxTokens) {
45228
45518
  const maxChars = maxTokens * 3;
45229
45519
  if (text.length <= maxChars) {
@@ -45245,7 +45535,7 @@ async function loadConstitution(projectDir, config2) {
45245
45535
  }
45246
45536
  let combinedContent = "";
45247
45537
  if (!config2.skipGlobal) {
45248
- const globalPath = join38(globalConfigDir(), config2.path);
45538
+ const globalPath = join39(globalConfigDir(), config2.path);
45249
45539
  if (existsSync19(globalPath)) {
45250
45540
  const validatedPath = validateFilePath(globalPath, globalConfigDir());
45251
45541
  const globalFile = Bun.file(validatedPath);
@@ -45255,7 +45545,7 @@ async function loadConstitution(projectDir, config2) {
45255
45545
  }
45256
45546
  }
45257
45547
  }
45258
- const projectPath = join38(projectDir, config2.path);
45548
+ const projectPath = join39(projectDir, config2.path);
45259
45549
  if (existsSync19(projectPath)) {
45260
45550
  const validatedPath = validateFilePath(projectPath, projectDir);
45261
45551
  const projectFile = Bun.file(validatedPath);
@@ -45813,14 +46103,14 @@ async function closeAllRunSessions(sessionManager, agentGetFn) {
45813
46103
 
45814
46104
  // src/context/greenfield.ts
45815
46105
  import { readdir as readdir2 } from "fs/promises";
45816
- import { join as join39 } from "path";
46106
+ import { join as join40 } from "path";
45817
46107
  async function scanForTestFiles(dir, testPatterns, isRootCall = true) {
45818
46108
  const results = [];
45819
46109
  const ignoreDirs = new Set(["node_modules", "dist", "build", ".next", ".git"]);
45820
46110
  try {
45821
46111
  const entries = await readdir2(dir, { withFileTypes: true });
45822
46112
  for (const entry of entries) {
45823
- const fullPath = join39(dir, entry.name);
46113
+ const fullPath = join40(dir, entry.name);
45824
46114
  if (entry.isDirectory()) {
45825
46115
  if (ignoreDirs.has(entry.name))
45826
46116
  continue;
@@ -45882,6 +46172,26 @@ function shouldRetryRectification(state, config2) {
45882
46172
  return true;
45883
46173
  }
45884
46174
 
46175
+ // src/verification/shared-rectification-loop.ts
46176
+ async function runRetryLoop(input) {
46177
+ let currentFailure = input.failure;
46178
+ const previous = [...input.previousAttempts];
46179
+ for (let attempt = 1;attempt <= input.maxAttempts; attempt++) {
46180
+ const prompt = await Promise.resolve(input.buildPrompt(currentFailure, previous));
46181
+ const result = await input.execute(prompt);
46182
+ const outcome = await input.verify(result);
46183
+ previous.push({ attempt, result });
46184
+ if (outcome.passed) {
46185
+ return { outcome: "fixed", result, attempts: attempt };
46186
+ }
46187
+ currentFailure = outcome.newFailure;
46188
+ if (input.shouldAbort?.(currentFailure, attempt)) {
46189
+ return { outcome: "aborted", attempts: attempt };
46190
+ }
46191
+ }
46192
+ return { outcome: "exhausted", attempts: input.maxAttempts, finalFailure: currentFailure };
46193
+ }
46194
+
45885
46195
  // src/verification/index.ts
45886
46196
  var init_verification = __esm(() => {
45887
46197
  init_executor();
@@ -46441,26 +46751,7 @@ function categorizeVerdict(verdict, testsPass) {
46441
46751
  reviewReason: `Tests failing: ${verdict.tests.failCount} failure(s). ${verdict.reasoning}`
46442
46752
  };
46443
46753
  }
46444
- if (!verdict.acceptanceCriteria.allMet) {
46445
- const unmet = verdict.acceptanceCriteria.criteria.filter((c) => !c.met).map((c) => c.criterion);
46446
- return {
46447
- success: false,
46448
- failureCategory: "verifier-rejected",
46449
- reviewReason: `Acceptance criteria not met: ${unmet.join("; ")}`
46450
- };
46451
- }
46452
- if (verdict.quality.rating === "poor") {
46453
- return {
46454
- success: false,
46455
- failureCategory: "verifier-rejected",
46456
- reviewReason: `Poor code quality: ${verdict.quality.issues.join("; ")}`
46457
- };
46458
- }
46459
- return {
46460
- success: false,
46461
- failureCategory: "verifier-rejected",
46462
- reviewReason: verdict.reasoning || "Verifier rejected without specific reason"
46463
- };
46754
+ return { success: true };
46464
46755
  }
46465
46756
  var init_verdict = __esm(() => {
46466
46757
  init_verdict_reader();
@@ -46483,7 +46774,8 @@ async function runThreeSessionTdd(options) {
46483
46774
  lite = false,
46484
46775
  _recursionDepth = 0,
46485
46776
  projectDir,
46486
- agentManager
46777
+ agentManager,
46778
+ runtime
46487
46779
  } = options;
46488
46780
  const logger = getLogger();
46489
46781
  const MAX_RECURSION_DEPTH = 2;
@@ -46626,7 +46918,7 @@ async function runThreeSessionTdd(options) {
46626
46918
  };
46627
46919
  }
46628
46920
  const implementerBinding = getTddSessionBinding?.("implementer");
46629
- const { cost: fullSuiteGateCost, fullSuiteGatePassed } = await runFullSuiteGate(story, config2, workdir, agentManager, implementerTier, lite, logger, featureName, projectDir, implementerBinding?.sessionManager, implementerBinding?.sessionId);
46921
+ const { cost: fullSuiteGateCost, fullSuiteGatePassed } = await runFullSuiteGate(story, config2, workdir, agentManager, implementerTier, lite, logger, featureName, projectDir, implementerBinding?.sessionManager, implementerBinding?.sessionId, runtime);
46630
46922
  const session3Ref = await captureGitRef(workdir) ?? "HEAD";
46631
46923
  const verifierBundle = await getTddContextBundle?.("verifier") ?? tddContextBundles?.verifier;
46632
46924
  const session3 = await runTddSessionOp(verifyTddOp, options, session3Ref, verifierBundle, getTddSessionBinding?.("verifier"));
@@ -46637,9 +46929,9 @@ async function runThreeSessionTdd(options) {
46637
46929
  let allSuccessful = sessions.every((s) => s.success);
46638
46930
  let finalFailureCategory;
46639
46931
  if (verdict !== null) {
46640
- const categorization2 = categorizeVerdict(verdict, verdict.tests.allPassing);
46641
- if (categorization2.success) {
46642
- logger.info("tdd", "[OK] Verifier verdict: approved", {
46932
+ const categorization = categorizeVerdict(verdict, verdict.tests.allPassing);
46933
+ if (categorization.success) {
46934
+ logger.info("tdd", "[OK] Verifier verdict: accepted", {
46643
46935
  storyId: story.id,
46644
46936
  verdictApproved: verdict.approved,
46645
46937
  testsAllPassing: verdict.tests.allPassing
@@ -46651,13 +46943,13 @@ async function runThreeSessionTdd(options) {
46651
46943
  logger.warn("tdd", "[WARN] Verifier verdict: rejected", {
46652
46944
  storyId: story.id,
46653
46945
  verdictApproved: verdict.approved,
46654
- failureCategory: categorization2.failureCategory,
46655
- reviewReason: categorization2.reviewReason
46946
+ failureCategory: categorization.failureCategory,
46947
+ reviewReason: categorization.reviewReason
46656
46948
  });
46657
46949
  allSuccessful = false;
46658
- finalFailureCategory = categorization2.failureCategory;
46950
+ finalFailureCategory = categorization.failureCategory;
46659
46951
  needsHumanReview = true;
46660
- reviewReason = categorization2.reviewReason;
46952
+ reviewReason = categorization.reviewReason;
46661
46953
  }
46662
46954
  } else {
46663
46955
  if (!allSuccessful) {
@@ -46882,7 +47174,8 @@ async function runThreeSessionTddFromCtx(ctx, opts) {
46882
47174
  interactionChain: ctx.interaction,
46883
47175
  projectDir: ctx.projectDir,
46884
47176
  abortSignal: ctx.abortSignal,
46885
- agentManager: ctx.agentManager
47177
+ agentManager: ctx.agentManager,
47178
+ runtime: ctx.runtime
46886
47179
  });
46887
47180
  }
46888
47181
  var init_orchestrator_ctx = __esm(() => {
@@ -48222,16 +48515,16 @@ class AcceptanceStrategy {
48222
48515
  }, timeoutMs);
48223
48516
  const exitCode = await Promise.race([
48224
48517
  proc.exited,
48225
- new Promise((resolve14) => setTimeout(() => resolve14(124), timeoutMs + 6000))
48518
+ new Promise((resolve15) => setTimeout(() => resolve15(124), timeoutMs + 6000))
48226
48519
  ]);
48227
48520
  clearTimeout(timeoutId);
48228
48521
  const stdout = await Promise.race([
48229
48522
  new Response(proc.stdout).text(),
48230
- new Promise((resolve14) => setTimeout(() => resolve14(""), 3000))
48523
+ new Promise((resolve15) => setTimeout(() => resolve15(""), 3000))
48231
48524
  ]);
48232
48525
  const stderr = await Promise.race([
48233
48526
  new Response(proc.stderr).text(),
48234
- new Promise((resolve14) => setTimeout(() => resolve14(""), 3000))
48527
+ new Promise((resolve15) => setTimeout(() => resolve15(""), 3000))
48235
48528
  ]);
48236
48529
  const durationMs = Date.now() - start;
48237
48530
  if (timedOut || exitCode === 124) {
@@ -48835,7 +49128,7 @@ __export(exports_init_context, {
48835
49128
  });
48836
49129
  import { existsSync as existsSync22 } from "fs";
48837
49130
  import { mkdir as mkdir8 } from "fs/promises";
48838
- import { basename as basename5, join as join43 } from "path";
49131
+ import { basename as basename5, join as join44 } from "path";
48839
49132
  async function findFiles(dir, maxFiles = 200) {
48840
49133
  try {
48841
49134
  const proc = Bun.spawnSync([
@@ -48863,7 +49156,7 @@ async function findFiles(dir, maxFiles = 200) {
48863
49156
  return [];
48864
49157
  }
48865
49158
  async function readPackageManifest(projectRoot) {
48866
- const packageJsonPath = join43(projectRoot, "package.json");
49159
+ const packageJsonPath = join44(projectRoot, "package.json");
48867
49160
  if (!existsSync22(packageJsonPath)) {
48868
49161
  return null;
48869
49162
  }
@@ -48881,7 +49174,7 @@ async function readPackageManifest(projectRoot) {
48881
49174
  }
48882
49175
  }
48883
49176
  async function readReadmeSnippet(projectRoot) {
48884
- const readmePath = join43(projectRoot, "README.md");
49177
+ const readmePath = join44(projectRoot, "README.md");
48885
49178
  if (!existsSync22(readmePath)) {
48886
49179
  return null;
48887
49180
  }
@@ -48899,7 +49192,7 @@ async function detectEntryPoints(projectRoot) {
48899
49192
  const candidates = ["src/index.ts", "src/main.ts", "main.go", "src/lib.rs"];
48900
49193
  const found = [];
48901
49194
  for (const candidate of candidates) {
48902
- const path13 = join43(projectRoot, candidate);
49195
+ const path13 = join44(projectRoot, candidate);
48903
49196
  if (existsSync22(path13)) {
48904
49197
  found.push(candidate);
48905
49198
  }
@@ -48910,7 +49203,7 @@ async function detectConfigFiles(projectRoot) {
48910
49203
  const candidates = ["tsconfig.json", "biome.json", "turbo.json", ".env.example"];
48911
49204
  const found = [];
48912
49205
  for (const candidate of candidates) {
48913
- const path13 = join43(projectRoot, candidate);
49206
+ const path13 = join44(projectRoot, candidate);
48914
49207
  if (existsSync22(path13)) {
48915
49208
  found.push(candidate);
48916
49209
  }
@@ -49071,8 +49364,8 @@ function generatePackageContextTemplate(packagePath) {
49071
49364
  }
49072
49365
  async function initPackage(repoRoot, packagePath, force = false) {
49073
49366
  const logger = getLogger();
49074
- const naxDir = join43(repoRoot, ".nax", "mono", packagePath);
49075
- const contextPath = join43(naxDir, "context.md");
49367
+ const naxDir = join44(repoRoot, ".nax", "mono", packagePath);
49368
+ const contextPath = join44(naxDir, "context.md");
49076
49369
  if (existsSync22(contextPath) && !force) {
49077
49370
  logger.info("init", "Package context.md already exists (use --force to overwrite)", { path: contextPath });
49078
49371
  return;
@@ -49086,8 +49379,8 @@ async function initPackage(repoRoot, packagePath, force = false) {
49086
49379
  }
49087
49380
  async function initContext(projectRoot, options = {}) {
49088
49381
  const logger = getLogger();
49089
- const naxDir = join43(projectRoot, ".nax");
49090
- const contextPath = join43(naxDir, "context.md");
49382
+ const naxDir = join44(projectRoot, ".nax");
49383
+ const contextPath = join44(naxDir, "context.md");
49091
49384
  if (existsSync22(contextPath) && !options.force) {
49092
49385
  logger.info("init", "context.md already exists, skipping (use --force to overwrite)", { path: contextPath });
49093
49386
  return;
@@ -49694,19 +49987,19 @@ var init_command_argv = __esm(() => {
49694
49987
  });
49695
49988
 
49696
49989
  // src/hooks/runner.ts
49697
- import { join as join60 } from "path";
49990
+ import { join as join61 } from "path";
49698
49991
  async function loadHooksConfig(projectDir, globalDir) {
49699
49992
  let globalHooks = { hooks: {} };
49700
49993
  let projectHooks = { hooks: {} };
49701
49994
  let skipGlobal = false;
49702
- const projectPath = join60(projectDir, "hooks.json");
49995
+ const projectPath = join61(projectDir, "hooks.json");
49703
49996
  const projectData = await loadJsonFile(projectPath, "hooks");
49704
49997
  if (projectData) {
49705
49998
  projectHooks = projectData;
49706
49999
  skipGlobal = projectData.skipGlobal ?? false;
49707
50000
  }
49708
50001
  if (!skipGlobal && globalDir) {
49709
- const globalPath = join60(globalDir, "hooks.json");
50002
+ const globalPath = join61(globalDir, "hooks.json");
49710
50003
  const globalData = await loadJsonFile(globalPath, "hooks");
49711
50004
  if (globalData) {
49712
50005
  globalHooks = globalData;
@@ -49861,7 +50154,7 @@ var package_default;
49861
50154
  var init_package = __esm(() => {
49862
50155
  package_default = {
49863
50156
  name: "@nathapp/nax",
49864
- version: "0.64.1",
50157
+ version: "0.64.2-canary.2",
49865
50158
  description: "AI Coding Agent Orchestrator \u2014 loops until done",
49866
50159
  type: "module",
49867
50160
  bin: {
@@ -49945,8 +50238,8 @@ var init_version = __esm(() => {
49945
50238
  NAX_VERSION = package_default.version;
49946
50239
  NAX_COMMIT = (() => {
49947
50240
  try {
49948
- if (/^[0-9a-f]{6,10}$/.test("8afbec51"))
49949
- return "8afbec51";
50241
+ if (/^[0-9a-f]{6,10}$/.test("6c7fd18c"))
50242
+ return "6c7fd18c";
49950
50243
  } catch {}
49951
50244
  try {
49952
50245
  const result = Bun.spawnSync(["git", "rev-parse", "--short", "HEAD"], {
@@ -50392,7 +50685,7 @@ async function loadAcceptanceTestContent2(featureDir, testPaths, configuredTestP
50392
50685
  function buildResult(success2, prd, totalCost, iterations, storiesCompleted, prdDirty, failedACs, retries) {
50393
50686
  return { success: success2, prd, totalCost, iterations, storiesCompleted, prdDirty, failedACs, retries };
50394
50687
  }
50395
- async function regenerateAcceptanceTest(testPath, acceptanceContext, previousFailure) {
50688
+ async function regenerateAcceptanceTest(testPath, acceptanceContext) {
50396
50689
  const logger = getSafeLogger();
50397
50690
  const bakPath = `${testPath}.bak`;
50398
50691
  const content = await Bun.file(testPath).text();
@@ -50440,8 +50733,7 @@ async function regenerateAcceptanceTest(testPath, acceptanceContext, previousFai
50440
50733
  }
50441
50734
  const contextForSetup = {
50442
50735
  ...acceptanceContext,
50443
- ...implementationContext ? { implementationContext } : {},
50444
- ...previousFailure ? { previousFailure } : {}
50736
+ ...implementationContext ? { implementationContext } : {}
50445
50737
  };
50446
50738
  await _regenerateDeps.acceptanceSetupExecute(contextForSetup);
50447
50739
  if (!await Bun.file(testPath).exists()) {
@@ -50474,7 +50766,7 @@ var init_acceptance_helpers = __esm(() => {
50474
50766
  });
50475
50767
 
50476
50768
  // src/execution/lifecycle/acceptance-fix.ts
50477
- function fixCallCtx(ctx) {
50769
+ function fixCallCtx2(ctx) {
50478
50770
  if (!ctx.runtime) {
50479
50771
  throw new NaxError("runtime required for acceptance fix callOp", "CALL_OP_NO_RUNTIME", { stage: "acceptance" });
50480
50772
  }
@@ -50489,7 +50781,7 @@ function fixCallCtx(ctx) {
50489
50781
  }
50490
50782
  async function resolveAcceptanceDiagnosis(opts) {
50491
50783
  const logger = getSafeLogger();
50492
- const { ctx, failures, totalACs, strategy, semanticVerdicts, diagnosisOpts, previousFailure } = opts;
50784
+ const { ctx, failures, totalACs, strategy, semanticVerdicts, diagnosisOpts } = opts;
50493
50785
  const storyId = diagnosisOpts.storyId;
50494
50786
  if (strategy === "implement-only") {
50495
50787
  logger?.info("acceptance.diagnosis", "Fast path: implement-only strategy \u2192 source_bug", { storyId });
@@ -50525,74 +50817,21 @@ async function resolveAcceptanceDiagnosis(opts) {
50525
50817
  };
50526
50818
  }
50527
50819
  const sourceFiles = await loadSourceFilesForDiagnosis(diagnosisOpts.testFileContent, diagnosisOpts.workdir);
50528
- return await _applyFixDeps.callOp(fixCallCtx(ctx), acceptanceDiagnoseOp, {
50820
+ return await _diagnosisDeps.callOp(fixCallCtx2(ctx), acceptanceDiagnoseOp, {
50529
50821
  testOutput: diagnosisOpts.testOutput,
50530
50822
  testFileContent: diagnosisOpts.testFileContent,
50531
50823
  sourceFiles,
50532
- semanticVerdicts,
50533
- previousFailure
50824
+ semanticVerdicts
50534
50825
  });
50535
50826
  }
50536
- async function applyFix(opts) {
50537
- const logger = getSafeLogger();
50538
- const { ctx, failures, diagnosis, previousFailure } = opts;
50539
- const storyId = ctx.prd.userStories[0]?.id ?? "unknown";
50540
- if (!ctx.runtime) {
50541
- logger?.error("acceptance.applyFix", "Runtime not found", { storyId });
50542
- return { cost: 0 };
50543
- }
50544
- const testPaths = ctx.acceptanceTestPaths;
50545
- let testFileContent = "";
50546
- let acceptanceTestPath = "";
50547
- if (testPaths && testPaths.length > 0) {
50548
- const pathStrings = testPaths.map((p) => typeof p === "string" ? p : p.testPath);
50549
- const moduleEntries = await loadAcceptanceTestContent(pathStrings);
50550
- if (moduleEntries.length > 0) {
50551
- testFileContent = moduleEntries[0].content;
50552
- acceptanceTestPath = moduleEntries[0].testPath;
50553
- }
50554
- } else if (ctx.featureDir) {
50555
- const fallbackPath = resolveAcceptanceFeatureTestPath(ctx.featureDir, ctx.config.acceptance.testPath, ctx.config.project?.language);
50556
- const moduleEntries = await loadAcceptanceTestContent(fallbackPath);
50557
- if (moduleEntries.length > 0) {
50558
- testFileContent = moduleEntries[0].content;
50559
- acceptanceTestPath = moduleEntries[0].testPath;
50560
- }
50561
- }
50562
- const callCtx = fixCallCtx(ctx);
50563
- if (diagnosis.verdict === "source_bug" || diagnosis.verdict === "both") {
50564
- logger?.info("acceptance.applyFix", "Applying source fix", { storyId, verdict: diagnosis.verdict });
50565
- await _applyFixDeps.callOp(callCtx, acceptanceFixSourceOp, {
50566
- testOutput: failures.testOutput,
50567
- diagnosisReasoning: diagnosis.reasoning,
50568
- acceptanceTestPath,
50569
- testFileContent
50570
- });
50571
- logger?.info("acceptance.source-fix", "Source fix completed", { storyId });
50572
- }
50573
- if (diagnosis.verdict === "test_bug" || diagnosis.verdict === "both") {
50574
- logger?.info("acceptance.applyFix", "Applying test fix", { storyId, verdict: diagnosis.verdict });
50575
- await _applyFixDeps.callOp(callCtx, acceptanceFixTestOp, {
50576
- testOutput: failures.testOutput,
50577
- diagnosisReasoning: diagnosis.reasoning,
50578
- failedACs: failures.failedACs,
50579
- acceptanceTestPath,
50580
- testFileContent,
50581
- previousFailure
50582
- });
50583
- logger?.info("acceptance.test-fix", "Test fix completed", { storyId });
50584
- }
50585
- return { cost: 0 };
50586
- }
50587
- var _applyFixDeps;
50827
+ var _diagnosisDeps;
50588
50828
  var init_acceptance_fix2 = __esm(() => {
50589
- init_test_path();
50590
50829
  init_errors();
50591
50830
  init_logger2();
50592
50831
  init_operations();
50593
50832
  init_call();
50594
50833
  init_acceptance_helpers();
50595
- _applyFixDeps = {
50834
+ _diagnosisDeps = {
50596
50835
  callOp
50597
50836
  };
50598
50837
  });
@@ -50601,6 +50840,7 @@ var init_acceptance_fix2 = __esm(() => {
50601
50840
  var exports_acceptance_loop = {};
50602
50841
  __export(exports_acceptance_loop, {
50603
50842
  runAcceptanceLoop: () => runAcceptanceLoop,
50843
+ runAcceptanceFixCycle: () => runAcceptanceFixCycle,
50604
50844
  regenerateAcceptanceTest: () => regenerateAcceptanceTest,
50605
50845
  loadSpecContent: () => loadSpecContent,
50606
50846
  loadAcceptanceTestContent: () => loadAcceptanceTestContent2,
@@ -50608,48 +50848,157 @@ __export(exports_acceptance_loop, {
50608
50848
  isStubTestFile: () => isStubTestFile,
50609
50849
  buildResult: () => buildResult,
50610
50850
  _regenerateDeps: () => _regenerateDeps,
50611
- _acceptanceLoopDeps: () => _acceptanceLoopDeps
50612
- });
50851
+ _acceptanceLoopDeps: () => _acceptanceLoopDeps,
50852
+ _acceptanceFixCycleDeps: () => _acceptanceFixCycleDeps
50853
+ });
50854
+ function convertFailuresToFindings(failedACs, testOutput) {
50855
+ return failedACs.map((ac) => {
50856
+ if (ac === "AC-HOOK" || ac === "AC-ERROR") {
50857
+ return acSentinelToFinding(ac, testOutput);
50858
+ }
50859
+ return acFailureToFinding(ac, testOutput);
50860
+ });
50861
+ }
50862
+ function findingsForDiagnosis(failedACs, testOutput, diagnosis) {
50863
+ if (diagnosis.findings && diagnosis.findings.length > 0)
50864
+ return diagnosis.findings;
50865
+ const findings = convertFailuresToFindings(failedACs, testOutput);
50866
+ const isTestRunnerSentinel = (f) => f.category === "hook-failure" || f.category === "test-runner-error";
50867
+ if (diagnosis.verdict === "source_bug") {
50868
+ return findings.map((f) => isTestRunnerSentinel(f) ? f : { ...f, fixTarget: "source" });
50869
+ }
50870
+ if (diagnosis.verdict === "test_bug")
50871
+ return findings.map((f) => ({ ...f, fixTarget: "test" }));
50872
+ return findings.flatMap((f) => isTestRunnerSentinel(f) ? [f] : [
50873
+ { ...f, fixTarget: "source" },
50874
+ { ...f, fixTarget: "test" }
50875
+ ]);
50876
+ }
50877
+ function buildFixCycleCtx(ctx, runtime, storyId) {
50878
+ return {
50879
+ runtime,
50880
+ packageView: runtime.packages.resolve(ctx.workdir),
50881
+ packageDir: ctx.workdir,
50882
+ storyId,
50883
+ featureName: ctx.feature,
50884
+ agentName: ctx.agentManager?.getDefault() ?? "claude"
50885
+ };
50886
+ }
50887
+ function buildAcceptanceContext(ctx, prd) {
50888
+ const firstStory = prd.userStories[0];
50889
+ return {
50890
+ config: ctx.config,
50891
+ rootConfig: ctx.config,
50892
+ prd,
50893
+ story: firstStory,
50894
+ stories: [firstStory],
50895
+ routing: {
50896
+ complexity: "simple",
50897
+ modelTier: "balanced",
50898
+ testStrategy: "test-after",
50899
+ reasoning: "Acceptance validation"
50900
+ },
50901
+ projectDir: ctx.workdir,
50902
+ workdir: ctx.workdir,
50903
+ naxIgnoreIndex: ctx.naxIgnoreIndex,
50904
+ featureDir: ctx.featureDir,
50905
+ hooks: ctx.hooks,
50906
+ plugins: ctx.pluginRegistry,
50907
+ agentGetFn: ctx.agentGetFn,
50908
+ agentManager: ctx.agentManager,
50909
+ sessionManager: ctx.sessionManager,
50910
+ acceptanceTestPaths: ctx.acceptanceTestPaths,
50911
+ runtime: ctx.runtime,
50912
+ abortSignal: ctx.abortSignal
50913
+ };
50914
+ }
50915
+ async function runAcceptanceTestsOnce(ctx, prd) {
50916
+ const acceptanceContext = buildAcceptanceContext(ctx, prd);
50917
+ const { acceptanceStage: acceptanceStage2 } = await Promise.resolve().then(() => (init_acceptance2(), exports_acceptance));
50918
+ const result = await acceptanceStage2.execute(acceptanceContext);
50919
+ if (result.action !== "fail")
50920
+ return { passed: true, failedACs: [], testOutput: "" };
50921
+ const failures = acceptanceContext.acceptanceFailures;
50922
+ if (!failures || failures.failedACs.length === 0)
50923
+ return { passed: true, failedACs: [], testOutput: "" };
50924
+ return { passed: false, failedACs: failures.failedACs, testOutput: failures.testOutput };
50925
+ }
50926
+ async function runAcceptanceFixCycle(ctx, prd, initialFailures, diagnosis, testFileContent, acceptanceTestPath) {
50927
+ const runtime = ctx.runtime;
50928
+ if (!runtime) {
50929
+ return { iterations: [], finalFindings: [], exitReason: "no-strategy" };
50930
+ }
50931
+ let currentTestOutput = initialFailures.testOutput;
50932
+ let currentFailedACs = initialFailures.failedACs;
50933
+ const storyId = prd.userStories[0]?.id ?? "unknown";
50934
+ const cycleCtx = buildFixCycleCtx(ctx, runtime, storyId);
50935
+ const cycle = {
50936
+ findings: findingsForDiagnosis(initialFailures.failedACs, initialFailures.testOutput, diagnosis),
50937
+ iterations: [],
50938
+ strategies: [
50939
+ {
50940
+ name: "acceptance-source-fix",
50941
+ appliesTo: (f) => f.fixTarget === "source",
50942
+ appliesToVerdict: (v) => v === "source_bug" || v === "both",
50943
+ fixOp: acceptanceFixSourceOp,
50944
+ buildInput: (_findings, priorIterations, _ctx) => ({
50945
+ testOutput: currentTestOutput,
50946
+ diagnosisReasoning: diagnosis.reasoning,
50947
+ priorIterationsBlock: buildPriorIterationsBlock(priorIterations),
50948
+ acceptanceTestPath,
50949
+ testFileContent
50950
+ }),
50951
+ maxAttempts: 3,
50952
+ coRun: "co-run-sequential"
50953
+ },
50954
+ {
50955
+ name: "acceptance-test-fix",
50956
+ appliesTo: (f) => f.fixTarget === "test",
50957
+ appliesToVerdict: (v) => v === "test_bug" || v === "both",
50958
+ fixOp: acceptanceFixTestOp,
50959
+ buildInput: (_findings, priorIterations, _ctx) => ({
50960
+ testOutput: currentTestOutput,
50961
+ diagnosisReasoning: diagnosis.reasoning,
50962
+ priorIterationsBlock: buildPriorIterationsBlock(priorIterations),
50963
+ failedACs: currentFailedACs,
50964
+ acceptanceTestPath,
50965
+ testFileContent
50966
+ }),
50967
+ maxAttempts: 3,
50968
+ coRun: "co-run-sequential"
50969
+ }
50970
+ ],
50971
+ validate: async (_ctx) => {
50972
+ const result = await runAcceptanceTestsOnce(ctx, prd);
50973
+ if (result.passed)
50974
+ return [];
50975
+ currentTestOutput = result.testOutput;
50976
+ currentFailedACs = result.failedACs;
50977
+ return findingsForDiagnosis(result.failedACs, result.testOutput, diagnosis);
50978
+ },
50979
+ config: {
50980
+ maxAttemptsTotal: ctx.config.acceptance.maxRetries,
50981
+ validatorRetries: 1
50982
+ },
50983
+ verdict: diagnosis.verdict
50984
+ };
50985
+ return _acceptanceFixCycleDeps.runFixCycle(cycle, cycleCtx, "acceptance");
50986
+ }
50613
50987
  async function runAcceptanceLoop(ctx) {
50614
50988
  const logger = getSafeLogger();
50615
50989
  const maxRetries = ctx.config.acceptance.maxRetries;
50616
50990
  let acceptanceRetries = 0;
50617
50991
  let stubRegenCount = 0;
50618
- let previousFailure = "";
50619
50992
  const prd = ctx.prd;
50620
50993
  let totalCost = ctx.totalCost;
50621
50994
  const iterations = ctx.iterations;
50622
50995
  const storiesCompleted = ctx.storiesCompleted;
50623
50996
  const prdDirty = false;
50624
50997
  logger?.info("acceptance", "All stories complete, running acceptance validation");
50998
+ const { acceptanceStage: acceptanceStage2 } = await Promise.resolve().then(() => (init_acceptance2(), exports_acceptance));
50625
50999
  while (acceptanceRetries < maxRetries) {
50626
51000
  const firstStory = prd.userStories[0];
50627
- const acceptanceContext = {
50628
- config: ctx.config,
50629
- rootConfig: ctx.config,
50630
- prd,
50631
- story: firstStory,
50632
- stories: [firstStory],
50633
- routing: {
50634
- complexity: "simple",
50635
- modelTier: "balanced",
50636
- testStrategy: "test-after",
50637
- reasoning: "Acceptance validation"
50638
- },
50639
- projectDir: ctx.workdir,
50640
- workdir: ctx.workdir,
50641
- naxIgnoreIndex: ctx.naxIgnoreIndex,
50642
- featureDir: ctx.featureDir,
50643
- hooks: ctx.hooks,
50644
- plugins: ctx.pluginRegistry,
50645
- agentGetFn: ctx.agentGetFn,
50646
- agentManager: ctx.agentManager,
50647
- sessionManager: ctx.sessionManager,
50648
- acceptanceTestPaths: ctx.acceptanceTestPaths,
50649
- runtime: ctx.runtime,
50650
- abortSignal: ctx.abortSignal
50651
- };
50652
- const { acceptanceStage: acceptanceStage2 } = await Promise.resolve().then(() => (init_acceptance2(), exports_acceptance));
51001
+ const acceptanceContext = buildAcceptanceContext(ctx, prd);
50653
51002
  const acceptanceResult = await acceptanceStage2.execute(acceptanceContext);
50654
51003
  if (acceptanceResult.action === "continue") {
50655
51004
  logger?.info("acceptance", "Acceptance validation passed!");
@@ -50711,6 +51060,7 @@ async function runAcceptanceLoop(ctx) {
50711
51060
  }
50712
51061
  const testEntries = ctx.acceptanceTestPaths ? await loadAcceptanceTestContent(ctx.acceptanceTestPaths.map((p) => p.testPath)) : [];
50713
51062
  const testFileContent = testEntries[0]?.content ?? "";
51063
+ const acceptanceTestPath = testEntries[0]?.testPath ?? ctx.acceptanceTestPaths?.[0]?.testPath ?? "";
50714
51064
  const strategy = ctx.config.acceptance.fix?.strategy ?? "diagnose-first";
50715
51065
  const diagnosis = await resolveAcceptanceDiagnosis({
50716
51066
  ctx,
@@ -50723,8 +51073,7 @@ async function runAcceptanceLoop(ctx) {
50723
51073
  testFileContent,
50724
51074
  workdir: ctx.workdir,
50725
51075
  storyId: firstStory?.id
50726
- },
50727
- previousFailure
51076
+ }
50728
51077
  });
50729
51078
  logger?.info("acceptance.diagnosis", "Diagnosis resolved", {
50730
51079
  storyId: firstStory?.id,
@@ -50732,28 +51081,22 @@ async function runAcceptanceLoop(ctx) {
50732
51081
  confidence: diagnosis.confidence,
50733
51082
  attempt: acceptanceRetries
50734
51083
  });
50735
- const fixResult = await applyFix({
50736
- ctx,
50737
- failures,
50738
- diagnosis,
50739
- previousFailure
50740
- });
50741
- totalCost += fixResult.cost;
50742
- previousFailure += `
50743
- ---
50744
- Attempt ${acceptanceRetries}/${maxRetries}: verdict=${diagnosis.verdict}, confidence=${diagnosis.confidence}
50745
- Reasoning: ${diagnosis.reasoning}
50746
- Failed ACs: ${failures.failedACs.join(", ")}
50747
- `;
51084
+ const cycleResult = await runAcceptanceFixCycle(ctx, prd, failures, diagnosis, testFileContent, acceptanceTestPath);
51085
+ totalCost += cycleResult.costUsd ?? 0;
51086
+ const success2 = cycleResult.exitReason === "resolved" || cycleResult.finalFindings.length === 0;
51087
+ return buildResult(success2, prd, totalCost, iterations, storiesCompleted, prdDirty, success2 ? undefined : cycleResult.finalFindings.map((f) => f.message), acceptanceRetries + cycleResult.iterations.length);
50748
51088
  }
50749
51089
  return buildResult(false, prd, totalCost, iterations, storiesCompleted, prdDirty);
50750
51090
  }
50751
- var _acceptanceLoopDeps, MAX_STUB_REGENS = 2;
51091
+ var _acceptanceLoopDeps, _acceptanceFixCycleDeps, MAX_STUB_REGENS = 2;
50752
51092
  var init_acceptance_loop = __esm(() => {
50753
51093
  init_semantic_verdict();
50754
51094
  init_test_path();
51095
+ init_findings();
50755
51096
  init_hooks();
50756
51097
  init_logger2();
51098
+ init_operations();
51099
+ init_prompts();
50757
51100
  init_helpers();
50758
51101
  init_acceptance_fix2();
50759
51102
  init_acceptance_helpers();
@@ -50761,19 +51104,22 @@ var init_acceptance_loop = __esm(() => {
50761
51104
  _acceptanceLoopDeps = {
50762
51105
  loadSemanticVerdicts
50763
51106
  };
51107
+ _acceptanceFixCycleDeps = {
51108
+ runFixCycle
51109
+ };
50764
51110
  });
50765
51111
 
50766
51112
  // src/session/scratch-purge.ts
50767
51113
  import { mkdir as mkdir10, rename, rm } from "fs/promises";
50768
- import { dirname as dirname9, join as join61 } from "path";
51114
+ import { dirname as dirname9, join as join62 } from "path";
50769
51115
  async function purgeStaleScratch(projectDir, featureName, retentionDays, archiveInsteadOfDelete = false) {
50770
- const sessionsDir = join61(projectDir, ".nax", "features", featureName, "sessions");
51116
+ const sessionsDir = join62(projectDir, ".nax", "features", featureName, "sessions");
50771
51117
  const sessionIds = await _scratchPurgeDeps.listSessionDirs(sessionsDir);
50772
51118
  const cutoffMs = _scratchPurgeDeps.now() - retentionDays * 86400000;
50773
51119
  let purged = 0;
50774
51120
  for (const sessionId of sessionIds) {
50775
- const sessionDir = join61(sessionsDir, sessionId);
50776
- const descriptorPath = join61(sessionDir, "descriptor.json");
51121
+ const sessionDir = join62(sessionsDir, sessionId);
51122
+ const descriptorPath = join62(sessionDir, "descriptor.json");
50777
51123
  if (!await _scratchPurgeDeps.fileExists(descriptorPath))
50778
51124
  continue;
50779
51125
  let lastActivityAt;
@@ -50789,7 +51135,7 @@ async function purgeStaleScratch(projectDir, featureName, retentionDays, archive
50789
51135
  if (new Date(lastActivityAt).getTime() >= cutoffMs)
50790
51136
  continue;
50791
51137
  if (archiveInsteadOfDelete) {
50792
- const archiveDest = join61(projectDir, ".nax", "features", featureName, "_archive", "sessions", sessionId);
51138
+ const archiveDest = join62(projectDir, ".nax", "features", featureName, "_archive", "sessions", sessionId);
50793
51139
  await _scratchPurgeDeps.move(sessionDir, archiveDest);
50794
51140
  } else {
50795
51141
  await _scratchPurgeDeps.remove(sessionDir);
@@ -51452,12 +51798,12 @@ var DEFAULT_MAX_BATCH_SIZE = 4;
51452
51798
  // src/pipeline/subscribers/events-writer.ts
51453
51799
  import { appendFile as appendFile4, mkdir as mkdir11 } from "fs/promises";
51454
51800
  import { homedir as homedir5 } from "os";
51455
- import { basename as basename9, join as join62 } from "path";
51801
+ import { basename as basename9, join as join63 } from "path";
51456
51802
  function wireEventsWriter(bus, feature, runId, workdir) {
51457
51803
  const logger = getSafeLogger();
51458
51804
  const project = basename9(workdir);
51459
- const eventsDir = join62(homedir5(), ".nax", "events", project);
51460
- const eventsFile = join62(eventsDir, "events.jsonl");
51805
+ const eventsDir = join63(homedir5(), ".nax", "events", project);
51806
+ const eventsFile = join63(eventsDir, "events.jsonl");
51461
51807
  let dirReady = false;
51462
51808
  const write = (line) => {
51463
51809
  return (async () => {
@@ -51638,12 +51984,12 @@ var init_interaction2 = __esm(() => {
51638
51984
  // src/pipeline/subscribers/registry.ts
51639
51985
  import { mkdir as mkdir12, writeFile } from "fs/promises";
51640
51986
  import { homedir as homedir6 } from "os";
51641
- import { basename as basename10, join as join63 } from "path";
51987
+ import { basename as basename10, join as join64 } from "path";
51642
51988
  function wireRegistry(bus, feature, runId, workdir) {
51643
51989
  const logger = getSafeLogger();
51644
51990
  const project = basename10(workdir);
51645
- const runDir = join63(homedir6(), ".nax", "runs", `${project}-${feature}-${runId}`);
51646
- const metaFile = join63(runDir, "meta.json");
51991
+ const runDir = join64(homedir6(), ".nax", "runs", `${project}-${feature}-${runId}`);
51992
+ const metaFile = join64(runDir, "meta.json");
51647
51993
  const unsub = bus.on("run:started", (_ev) => {
51648
51994
  return (async () => {
51649
51995
  try {
@@ -51653,8 +51999,8 @@ function wireRegistry(bus, feature, runId, workdir) {
51653
51999
  project,
51654
52000
  feature,
51655
52001
  workdir,
51656
- statusPath: join63(workdir, ".nax", "features", feature, "status.json"),
51657
- eventsDir: join63(workdir, ".nax", "features", feature, "runs"),
52002
+ statusPath: join64(workdir, ".nax", "features", feature, "status.json"),
52003
+ eventsDir: join64(workdir, ".nax", "features", feature, "runs"),
51658
52004
  registeredAt: new Date().toISOString()
51659
52005
  };
51660
52006
  await writeFile(metaFile, JSON.stringify(meta3, null, 2));
@@ -51888,7 +52234,7 @@ function buildPreviewRouting(story, config2) {
51888
52234
 
51889
52235
  // src/worktree/types.ts
51890
52236
  var WorktreeDependencyPreparationError;
51891
- var init_types7 = __esm(() => {
52237
+ var init_types8 = __esm(() => {
51892
52238
  WorktreeDependencyPreparationError = class WorktreeDependencyPreparationError extends Error {
51893
52239
  mode;
51894
52240
  failureCategory = "dependency-prep";
@@ -51902,7 +52248,7 @@ var init_types7 = __esm(() => {
51902
52248
 
51903
52249
  // src/worktree/dependencies.ts
51904
52250
  import { existsSync as existsSync30 } from "fs";
51905
- import { join as join64 } from "path";
52251
+ import { join as join65 } from "path";
51906
52252
  async function prepareWorktreeDependencies(options) {
51907
52253
  const mode = options.config.execution.worktreeDependencies.mode;
51908
52254
  const resolvedCwd = resolveDependencyCwd(options);
@@ -51916,7 +52262,7 @@ async function prepareWorktreeDependencies(options) {
51916
52262
  }
51917
52263
  }
51918
52264
  function resolveDependencyCwd(options) {
51919
- return options.storyWorkdir ? join64(options.worktreeRoot, options.storyWorkdir) : options.worktreeRoot;
52265
+ return options.storyWorkdir ? join65(options.worktreeRoot, options.storyWorkdir) : options.worktreeRoot;
51920
52266
  }
51921
52267
  function resolveInheritedDependencies(options, resolvedCwd) {
51922
52268
  if (hasDependencyManifests(options.worktreeRoot, resolvedCwd)) {
@@ -51926,7 +52272,7 @@ function resolveInheritedDependencies(options, resolvedCwd) {
51926
52272
  }
51927
52273
  function hasDependencyManifests(worktreeRoot, resolvedCwd) {
51928
52274
  const directories = resolvedCwd === worktreeRoot ? [worktreeRoot] : [worktreeRoot, resolvedCwd];
51929
- return directories.some((directory) => PHASE_ONE_INHERIT_UNSUPPORTED_FILES.some((filename) => _worktreeDependencyDeps.existsSync(join64(directory, filename))));
52275
+ return directories.some((directory) => PHASE_ONE_INHERIT_UNSUPPORTED_FILES.some((filename) => _worktreeDependencyDeps.existsSync(join65(directory, filename))));
51930
52276
  }
51931
52277
  async function provisionDependencies(config2, worktreeRoot, resolvedCwd) {
51932
52278
  const setupCommand = config2.execution.worktreeDependencies.setupCommand;
@@ -51958,7 +52304,7 @@ var PHASE_ONE_INHERIT_UNSUPPORTED_FILES, _worktreeDependencyDeps;
51958
52304
  var init_dependencies = __esm(() => {
51959
52305
  init_bun_deps();
51960
52306
  init_command_argv();
51961
- init_types7();
52307
+ init_types8();
51962
52308
  PHASE_ONE_INHERIT_UNSUPPORTED_FILES = [
51963
52309
  "package.json",
51964
52310
  "bun.lock",
@@ -51990,13 +52336,13 @@ __export(exports_manager, {
51990
52336
  });
51991
52337
  import { existsSync as existsSync31, symlinkSync } from "fs";
51992
52338
  import { mkdir as mkdir13 } from "fs/promises";
51993
- import { join as join65 } from "path";
52339
+ import { join as join66 } from "path";
51994
52340
 
51995
52341
  class WorktreeManager {
51996
52342
  async ensureGitExcludes(projectRoot) {
51997
52343
  const logger = getSafeLogger();
51998
- const infoDir = join65(projectRoot, ".git", "info");
51999
- const excludePath = join65(infoDir, "exclude");
52344
+ const infoDir = join66(projectRoot, ".git", "info");
52345
+ const excludePath = join66(infoDir, "exclude");
52000
52346
  try {
52001
52347
  await mkdir13(infoDir, { recursive: true });
52002
52348
  let existing = "";
@@ -52023,7 +52369,7 @@ ${missing.join(`
52023
52369
  }
52024
52370
  async create(projectRoot, storyId) {
52025
52371
  validateStoryId(storyId);
52026
- const worktreePath = join65(projectRoot, ".nax-wt", storyId);
52372
+ const worktreePath = join66(projectRoot, ".nax-wt", storyId);
52027
52373
  const branchName = `nax/${storyId}`;
52028
52374
  try {
52029
52375
  const pruneProc = _managerDeps.spawn(["git", "worktree", "prune"], {
@@ -52064,9 +52410,9 @@ ${missing.join(`
52064
52410
  }
52065
52411
  throw new Error(`Failed to create worktree: ${String(error48)}`);
52066
52412
  }
52067
- const envSource = join65(projectRoot, ".env");
52413
+ const envSource = join66(projectRoot, ".env");
52068
52414
  if (existsSync31(envSource)) {
52069
- const envTarget = join65(worktreePath, ".env");
52415
+ const envTarget = join66(worktreePath, ".env");
52070
52416
  try {
52071
52417
  symlinkSync(envSource, envTarget, "file");
52072
52418
  } catch (error48) {
@@ -52077,7 +52423,7 @@ ${missing.join(`
52077
52423
  }
52078
52424
  async remove(projectRoot, storyId) {
52079
52425
  validateStoryId(storyId);
52080
- const worktreePath = join65(projectRoot, ".nax-wt", storyId);
52426
+ const worktreePath = join66(projectRoot, ".nax-wt", storyId);
52081
52427
  const branchName = `nax/${storyId}`;
52082
52428
  try {
52083
52429
  const proc = _managerDeps.spawn(["git", "worktree", "remove", worktreePath, "--force"], {
@@ -52789,10 +53135,10 @@ var init_merge_conflict_rectify = __esm(() => {
52789
53135
  });
52790
53136
 
52791
53137
  // src/execution/pipeline-result-handler.ts
52792
- import { join as join66 } from "path";
53138
+ import { join as join67 } from "path";
52793
53139
  async function removeWorktreeDirectory(projectRoot, storyId) {
52794
53140
  const logger = getSafeLogger();
52795
- const worktreePath = join66(projectRoot, ".nax-wt", storyId);
53141
+ const worktreePath = join67(projectRoot, ".nax-wt", storyId);
52796
53142
  try {
52797
53143
  const proc = _resultHandlerDeps.spawn(["git", "worktree", "remove", worktreePath, "--force"], {
52798
53144
  cwd: projectRoot,
@@ -53003,7 +53349,7 @@ var init_pipeline_result_handler = __esm(() => {
53003
53349
 
53004
53350
  // src/execution/iteration-runner.ts
53005
53351
  import { existsSync as existsSync32 } from "fs";
53006
- import { join as join67 } from "path";
53352
+ import { join as join68 } from "path";
53007
53353
  async function runIteration(ctx, prd, selection, iterations, totalCost, allStoryMetrics) {
53008
53354
  const { story, storiesToExecute, routing, isBatchExecution } = selection;
53009
53355
  if (ctx.dryRun) {
@@ -53028,7 +53374,7 @@ async function runIteration(ctx, prd, selection, iterations, totalCost, allStory
53028
53374
  const storyStartTime = Date.now();
53029
53375
  let effectiveWorkdir = ctx.workdir;
53030
53376
  if (ctx.config.execution.storyIsolation === "worktree") {
53031
- const worktreePath = join67(ctx.workdir, ".nax-wt", story.id);
53377
+ const worktreePath = join68(ctx.workdir, ".nax-wt", story.id);
53032
53378
  const worktreeExists = _iterationRunnerDeps.existsSync(worktreePath);
53033
53379
  if (!worktreeExists) {
53034
53380
  await _iterationRunnerDeps.worktreeManager.ensureGitExcludes(ctx.workdir);
@@ -53048,7 +53394,7 @@ async function runIteration(ctx, prd, selection, iterations, totalCost, allStory
53048
53394
  }
53049
53395
  const accumulatedAttemptCost = (story.priorFailures || []).reduce((sum, f) => sum + (f.cost || 0), 0);
53050
53396
  const profileOverride = ctx.config.profile && ctx.config.profile !== "default" ? { profile: ctx.config.profile } : undefined;
53051
- const effectiveConfig = story.workdir ? await _iterationRunnerDeps.loadConfigForWorkdir(join67(ctx.workdir, ".nax", "config.json"), story.workdir, profileOverride) : ctx.config;
53397
+ const effectiveConfig = story.workdir ? await _iterationRunnerDeps.loadConfigForWorkdir(join68(ctx.workdir, ".nax", "config.json"), story.workdir, profileOverride) : ctx.config;
53052
53398
  let dependencyContext;
53053
53399
  if (ctx.config.execution.storyIsolation === "worktree") {
53054
53400
  try {
@@ -53075,7 +53421,7 @@ async function runIteration(ctx, prd, selection, iterations, totalCost, allStory
53075
53421
  };
53076
53422
  }
53077
53423
  }
53078
- const resolvedWorkdir = dependencyContext?.cwd ? dependencyContext.cwd : ctx.config.execution.storyIsolation === "worktree" ? story.workdir ? join67(effectiveWorkdir, story.workdir) : effectiveWorkdir : story.workdir ? join67(ctx.workdir, story.workdir) : ctx.workdir;
53424
+ const resolvedWorkdir = dependencyContext?.cwd ? dependencyContext.cwd : ctx.config.execution.storyIsolation === "worktree" ? story.workdir ? join68(effectiveWorkdir, story.workdir) : effectiveWorkdir : story.workdir ? join68(ctx.workdir, story.workdir) : ctx.workdir;
53079
53425
  const pipelineContext = {
53080
53426
  config: effectiveConfig,
53081
53427
  rootConfig: ctx.config,
@@ -53272,7 +53618,7 @@ __export(exports_parallel_worker, {
53272
53618
  executeParallelBatch: () => executeParallelBatch,
53273
53619
  _parallelWorkerDeps: () => _parallelWorkerDeps
53274
53620
  });
53275
- import { join as join68 } from "path";
53621
+ import { join as join69 } from "path";
53276
53622
  async function executeStoryInWorktree(story, worktreePath, dependencyContext, context, routing, eventEmitter) {
53277
53623
  const logger = getSafeLogger();
53278
53624
  try {
@@ -53292,7 +53638,7 @@ async function executeStoryInWorktree(story, worktreePath, dependencyContext, co
53292
53638
  story,
53293
53639
  stories: [story],
53294
53640
  projectDir: context.projectDir,
53295
- workdir: dependencyContext.cwd ?? (story.workdir ? join68(worktreePath, story.workdir) : worktreePath),
53641
+ workdir: dependencyContext.cwd ?? (story.workdir ? join69(worktreePath, story.workdir) : worktreePath),
53296
53642
  worktreeDependencyContext: dependencyContext,
53297
53643
  routing,
53298
53644
  storyGitRef: storyGitRef ?? undefined
@@ -54053,16 +54399,16 @@ var init_unified_executor = __esm(() => {
54053
54399
  });
54054
54400
 
54055
54401
  // src/project/detector.ts
54056
- import { join as join69 } from "path";
54402
+ import { join as join70 } from "path";
54057
54403
  async function detectLanguage(workdir, pkg) {
54058
54404
  const deps = _detectorDeps;
54059
- if (await deps.fileExists(join69(workdir, "go.mod")))
54405
+ if (await deps.fileExists(join70(workdir, "go.mod")))
54060
54406
  return "go";
54061
- if (await deps.fileExists(join69(workdir, "Cargo.toml")))
54407
+ if (await deps.fileExists(join70(workdir, "Cargo.toml")))
54062
54408
  return "rust";
54063
- if (await deps.fileExists(join69(workdir, "pyproject.toml")))
54409
+ if (await deps.fileExists(join70(workdir, "pyproject.toml")))
54064
54410
  return "python";
54065
- if (await deps.fileExists(join69(workdir, "requirements.txt")))
54411
+ if (await deps.fileExists(join70(workdir, "requirements.txt")))
54066
54412
  return "python";
54067
54413
  if (pkg != null) {
54068
54414
  const allDeps = {
@@ -54122,18 +54468,18 @@ async function detectLintTool(workdir, language) {
54122
54468
  if (language === "python")
54123
54469
  return "ruff";
54124
54470
  const deps = _detectorDeps;
54125
- if (await deps.fileExists(join69(workdir, "biome.json")))
54471
+ if (await deps.fileExists(join70(workdir, "biome.json")))
54126
54472
  return "biome";
54127
- if (await deps.fileExists(join69(workdir, ".eslintrc")))
54473
+ if (await deps.fileExists(join70(workdir, ".eslintrc")))
54128
54474
  return "eslint";
54129
- if (await deps.fileExists(join69(workdir, ".eslintrc.js")))
54475
+ if (await deps.fileExists(join70(workdir, ".eslintrc.js")))
54130
54476
  return "eslint";
54131
- if (await deps.fileExists(join69(workdir, ".eslintrc.json")))
54477
+ if (await deps.fileExists(join70(workdir, ".eslintrc.json")))
54132
54478
  return "eslint";
54133
54479
  return;
54134
54480
  }
54135
54481
  async function detectProjectProfile(workdir, existing) {
54136
- const pkg = await _detectorDeps.readJson(join69(workdir, "package.json"));
54482
+ const pkg = await _detectorDeps.readJson(join70(workdir, "package.json"));
54137
54483
  const language = existing.language !== undefined ? existing.language : await detectLanguage(workdir, pkg);
54138
54484
  const type = existing.type !== undefined ? existing.type : detectType(pkg);
54139
54485
  const testFramework = existing.testFramework !== undefined ? existing.testFramework : await detectTestFramework(workdir, language, pkg);
@@ -54170,7 +54516,7 @@ var init_project = __esm(() => {
54170
54516
 
54171
54517
  // src/execution/status-file.ts
54172
54518
  import { rename as rename2, unlink as unlink3 } from "fs/promises";
54173
- import { resolve as resolve15 } from "path";
54519
+ import { resolve as resolve16 } from "path";
54174
54520
  function countProgress(prd) {
54175
54521
  const stories = prd.userStories;
54176
54522
  const passed = stories.filter((s) => s.status === "passed").length;
@@ -54215,7 +54561,7 @@ function buildStatusSnapshot(state) {
54215
54561
  return snapshot;
54216
54562
  }
54217
54563
  async function writeStatusFile(filePath, status) {
54218
- const resolvedPath = resolve15(filePath);
54564
+ const resolvedPath = resolve16(filePath);
54219
54565
  if (filePath.includes("../") || filePath.includes("..\\")) {
54220
54566
  throw new Error("Invalid status file path: path traversal detected");
54221
54567
  }
@@ -54229,7 +54575,7 @@ async function writeStatusFile(filePath, status) {
54229
54575
  var init_status_file = () => {};
54230
54576
 
54231
54577
  // src/execution/status-writer.ts
54232
- import { join as join70 } from "path";
54578
+ import { join as join71 } from "path";
54233
54579
 
54234
54580
  class StatusWriter {
54235
54581
  statusFile;
@@ -54348,7 +54694,7 @@ class StatusWriter {
54348
54694
  if (!this._prd)
54349
54695
  return;
54350
54696
  const safeLogger = getSafeLogger();
54351
- const featureStatusPath = join70(featureDir, "status.json");
54697
+ const featureStatusPath = join71(featureDir, "status.json");
54352
54698
  const write = async () => {
54353
54699
  try {
54354
54700
  const base = this.getSnapshot(totalCost, iterations);
@@ -54560,7 +54906,7 @@ __export(exports_run_initialization, {
54560
54906
  initializeRun: () => initializeRun,
54561
54907
  _reconcileDeps: () => _reconcileDeps
54562
54908
  });
54563
- import { join as join71 } from "path";
54909
+ import { join as join72 } from "path";
54564
54910
  async function reconcileState(prd, prdPath, workdir, config2) {
54565
54911
  const logger = getSafeLogger();
54566
54912
  let reconciledCount = 0;
@@ -54577,7 +54923,7 @@ async function reconcileState(prd, prdPath, workdir, config2) {
54577
54923
  });
54578
54924
  continue;
54579
54925
  }
54580
- const effectiveWorkdir = story.workdir ? join71(workdir, story.workdir) : workdir;
54926
+ const effectiveWorkdir = story.workdir ? join72(workdir, story.workdir) : workdir;
54581
54927
  try {
54582
54928
  const reviewResult = await _reconcileDeps.runReview(config2.review, effectiveWorkdir, config2.execution);
54583
54929
  if (!reviewResult.success) {
@@ -55451,14 +55797,14 @@ See https://react.dev/link/invalid-hook-call for tips about how to debug and fix
55451
55797
  prevActScopeDepth !== actScopeDepth - 1 && console.error("You seem to have overlapping act() calls, this is not supported. Be sure to await previous act() calls before making a new one. ");
55452
55798
  actScopeDepth = prevActScopeDepth;
55453
55799
  }
55454
- function recursivelyFlushAsyncActWork(returnValue, resolve16, reject) {
55800
+ function recursivelyFlushAsyncActWork(returnValue, resolve17, reject) {
55455
55801
  var queue = ReactSharedInternals.actQueue;
55456
55802
  if (queue !== null)
55457
55803
  if (queue.length !== 0)
55458
55804
  try {
55459
55805
  flushActQueue(queue);
55460
55806
  enqueueTask(function() {
55461
- return recursivelyFlushAsyncActWork(returnValue, resolve16, reject);
55807
+ return recursivelyFlushAsyncActWork(returnValue, resolve17, reject);
55462
55808
  });
55463
55809
  return;
55464
55810
  } catch (error48) {
@@ -55466,7 +55812,7 @@ See https://react.dev/link/invalid-hook-call for tips about how to debug and fix
55466
55812
  }
55467
55813
  else
55468
55814
  ReactSharedInternals.actQueue = null;
55469
- 0 < ReactSharedInternals.thrownErrors.length ? (queue = aggregateErrors(ReactSharedInternals.thrownErrors), ReactSharedInternals.thrownErrors.length = 0, reject(queue)) : resolve16(returnValue);
55815
+ 0 < ReactSharedInternals.thrownErrors.length ? (queue = aggregateErrors(ReactSharedInternals.thrownErrors), ReactSharedInternals.thrownErrors.length = 0, reject(queue)) : resolve17(returnValue);
55470
55816
  }
55471
55817
  function flushActQueue(queue) {
55472
55818
  if (!isFlushing) {
@@ -55642,14 +55988,14 @@ See https://react.dev/link/invalid-hook-call for tips about how to debug and fix
55642
55988
  didAwaitActCall || didWarnNoAwaitAct || (didWarnNoAwaitAct = true, console.error("You called act(async () => ...) without await. This could lead to unexpected testing behaviour, interleaving multiple act calls and mixing their scopes. You should - await act(async () => ...);"));
55643
55989
  });
55644
55990
  return {
55645
- then: function(resolve16, reject) {
55991
+ then: function(resolve17, reject) {
55646
55992
  didAwaitActCall = true;
55647
55993
  thenable.then(function(returnValue) {
55648
55994
  popActScope(prevActQueue, prevActScopeDepth);
55649
55995
  if (prevActScopeDepth === 0) {
55650
55996
  try {
55651
55997
  flushActQueue(queue), enqueueTask(function() {
55652
- return recursivelyFlushAsyncActWork(returnValue, resolve16, reject);
55998
+ return recursivelyFlushAsyncActWork(returnValue, resolve17, reject);
55653
55999
  });
55654
56000
  } catch (error$0) {
55655
56001
  ReactSharedInternals.thrownErrors.push(error$0);
@@ -55660,7 +56006,7 @@ See https://react.dev/link/invalid-hook-call for tips about how to debug and fix
55660
56006
  reject(_thrownError);
55661
56007
  }
55662
56008
  } else
55663
- resolve16(returnValue);
56009
+ resolve17(returnValue);
55664
56010
  }, function(error48) {
55665
56011
  popActScope(prevActQueue, prevActScopeDepth);
55666
56012
  0 < ReactSharedInternals.thrownErrors.length ? (error48 = aggregateErrors(ReactSharedInternals.thrownErrors), ReactSharedInternals.thrownErrors.length = 0, reject(error48)) : reject(error48);
@@ -55676,11 +56022,11 @@ See https://react.dev/link/invalid-hook-call for tips about how to debug and fix
55676
56022
  if (0 < ReactSharedInternals.thrownErrors.length)
55677
56023
  throw callback = aggregateErrors(ReactSharedInternals.thrownErrors), ReactSharedInternals.thrownErrors.length = 0, callback;
55678
56024
  return {
55679
- then: function(resolve16, reject) {
56025
+ then: function(resolve17, reject) {
55680
56026
  didAwaitActCall = true;
55681
56027
  prevActScopeDepth === 0 ? (ReactSharedInternals.actQueue = queue, enqueueTask(function() {
55682
- return recursivelyFlushAsyncActWork(returnValue$jscomp$0, resolve16, reject);
55683
- })) : resolve16(returnValue$jscomp$0);
56028
+ return recursivelyFlushAsyncActWork(returnValue$jscomp$0, resolve17, reject);
56029
+ })) : resolve17(returnValue$jscomp$0);
55684
56030
  }
55685
56031
  };
55686
56032
  };
@@ -58522,8 +58868,8 @@ It can also happen if the client has a browser extension installed which messes
58522
58868
  currentEntangledActionThenable = {
58523
58869
  status: "pending",
58524
58870
  value: undefined,
58525
- then: function(resolve16) {
58526
- entangledListeners.push(resolve16);
58871
+ then: function(resolve17) {
58872
+ entangledListeners.push(resolve17);
58527
58873
  }
58528
58874
  };
58529
58875
  }
@@ -58547,8 +58893,8 @@ It can also happen if the client has a browser extension installed which messes
58547
58893
  status: "pending",
58548
58894
  value: null,
58549
58895
  reason: null,
58550
- then: function(resolve16) {
58551
- listeners.push(resolve16);
58896
+ then: function(resolve17) {
58897
+ listeners.push(resolve17);
58552
58898
  }
58553
58899
  };
58554
58900
  thenable.then(function() {
@@ -85940,7 +86286,7 @@ var require_jsx_dev_runtime = __commonJS((exports, module) => {
85940
86286
  init_source();
85941
86287
  import { existsSync as existsSync34, mkdirSync as mkdirSync7 } from "fs";
85942
86288
  import { homedir as homedir8 } from "os";
85943
- import { join as join73 } from "path";
86289
+ import { join as join74 } from "path";
85944
86290
 
85945
86291
  // node_modules/commander/esm.mjs
85946
86292
  var import__ = __toESM(require_commander(), 1);
@@ -86836,7 +87182,7 @@ async function runsShowCommand(options) {
86836
87182
  // src/cli/prompts-main.ts
86837
87183
  init_logger2();
86838
87184
  import { existsSync as existsSync20, mkdirSync as mkdirSync3 } from "fs";
86839
- import { join as join41 } from "path";
87185
+ import { join as join42 } from "path";
86840
87186
 
86841
87187
  // src/pipeline/index.ts
86842
87188
  init_runner2();
@@ -86912,7 +87258,7 @@ function buildFrontmatter(story, ctx, role) {
86912
87258
 
86913
87259
  // src/cli/prompts-tdd.ts
86914
87260
  init_prompts();
86915
- import { join as join40 } from "path";
87261
+ import { join as join41 } from "path";
86916
87262
  async function handleThreeSessionTddPrompts(story, ctx, outputDir, logger) {
86917
87263
  const [testWriterPrompt, implementerPrompt, verifierPrompt] = await Promise.all([
86918
87264
  TddPromptBuilder.for("test-writer", { isolation: "strict" }).withLoader(ctx.workdir, ctx.config).story(story).context(ctx.contextMarkdown).constitution(ctx.constitution?.content).testCommand(ctx.config.quality?.commands?.test).build(),
@@ -86931,7 +87277,7 @@ ${frontmatter}---
86931
87277
 
86932
87278
  ${session.prompt}`;
86933
87279
  if (outputDir) {
86934
- const promptFile = join40(outputDir, `${story.id}.${session.role}.md`);
87280
+ const promptFile = join41(outputDir, `${story.id}.${session.role}.md`);
86935
87281
  await Bun.write(promptFile, fullOutput);
86936
87282
  logger.info("cli", "Written TDD prompt file", {
86937
87283
  storyId: story.id,
@@ -86947,7 +87293,7 @@ ${"=".repeat(80)}`);
86947
87293
  }
86948
87294
  }
86949
87295
  if (outputDir && ctx.contextMarkdown) {
86950
- const contextFile = join40(outputDir, `${story.id}.context.md`);
87296
+ const contextFile = join41(outputDir, `${story.id}.context.md`);
86951
87297
  const frontmatter = buildFrontmatter(story, ctx);
86952
87298
  const contextOutput = `---
86953
87299
  ${frontmatter}---
@@ -86961,12 +87307,12 @@ ${ctx.contextMarkdown}`;
86961
87307
  async function promptsCommand(options) {
86962
87308
  const logger = getLogger();
86963
87309
  const { feature, workdir, config: config2, storyId, outputDir } = options;
86964
- const naxDir = join41(workdir, ".nax");
87310
+ const naxDir = join42(workdir, ".nax");
86965
87311
  if (!existsSync20(naxDir)) {
86966
87312
  throw new Error(`.nax directory not found. Run 'nax init' first in ${workdir}`);
86967
87313
  }
86968
- const featureDir = join41(naxDir, "features", feature);
86969
- const prdPath = join41(featureDir, "prd.json");
87314
+ const featureDir = join42(naxDir, "features", feature);
87315
+ const prdPath = join42(featureDir, "prd.json");
86970
87316
  if (!existsSync20(prdPath)) {
86971
87317
  throw new Error(`Feature "${feature}" not found or missing prd.json`);
86972
87318
  }
@@ -87033,10 +87379,10 @@ ${frontmatter}---
87033
87379
 
87034
87380
  ${ctx.prompt}`;
87035
87381
  if (outputDir) {
87036
- const promptFile = join41(outputDir, `${story.id}.prompt.md`);
87382
+ const promptFile = join42(outputDir, `${story.id}.prompt.md`);
87037
87383
  await Bun.write(promptFile, fullOutput);
87038
87384
  if (ctx.contextMarkdown) {
87039
- const contextFile = join41(outputDir, `${story.id}.context.md`);
87385
+ const contextFile = join42(outputDir, `${story.id}.context.md`);
87040
87386
  const contextOutput = `---
87041
87387
  ${frontmatter}---
87042
87388
 
@@ -87064,7 +87410,7 @@ ${"=".repeat(80)}`);
87064
87410
  // src/cli/prompts-init.ts
87065
87411
  init_role_task();
87066
87412
  import { existsSync as existsSync21, mkdirSync as mkdirSync4 } from "fs";
87067
- import { join as join42 } from "path";
87413
+ import { join as join43 } from "path";
87068
87414
  var TEMPLATE_ROLES = [
87069
87415
  { file: "test-writer.md", role: "test-writer" },
87070
87416
  { file: "implementer.md", role: "implementer", variant: "standard" },
@@ -87088,9 +87434,9 @@ var TEMPLATE_HEADER = `<!--
87088
87434
  `;
87089
87435
  async function promptsInitCommand(options) {
87090
87436
  const { workdir, force = false, autoWireConfig = true } = options;
87091
- const templatesDir = join42(workdir, ".nax", "templates");
87437
+ const templatesDir = join43(workdir, ".nax", "templates");
87092
87438
  mkdirSync4(templatesDir, { recursive: true });
87093
- const existingFiles = TEMPLATE_ROLES.map((t) => t.file).filter((f) => existsSync21(join42(templatesDir, f)));
87439
+ const existingFiles = TEMPLATE_ROLES.map((t) => t.file).filter((f) => existsSync21(join43(templatesDir, f)));
87094
87440
  if (existingFiles.length > 0 && !force) {
87095
87441
  console.warn(`[WARN] nax/templates/ already contains files: ${existingFiles.join(", ")}. No files overwritten.
87096
87442
  Pass --force to overwrite existing templates.`);
@@ -87098,7 +87444,7 @@ async function promptsInitCommand(options) {
87098
87444
  }
87099
87445
  const written = [];
87100
87446
  for (const template of TEMPLATE_ROLES) {
87101
- const filePath = join42(templatesDir, template.file);
87447
+ const filePath = join43(templatesDir, template.file);
87102
87448
  const roleBody = template.role === "implementer" ? buildRoleTaskSection(template.role, template.variant) : buildRoleTaskSection(template.role);
87103
87449
  const content = TEMPLATE_HEADER + roleBody;
87104
87450
  await Bun.write(filePath, content);
@@ -87114,7 +87460,7 @@ async function promptsInitCommand(options) {
87114
87460
  return written;
87115
87461
  }
87116
87462
  async function autoWirePromptsConfig(workdir) {
87117
- const configPath = join42(workdir, "nax.config.json");
87463
+ const configPath = join43(workdir, "nax.config.json");
87118
87464
  if (!existsSync21(configPath)) {
87119
87465
  const exampleConfig = JSON.stringify({
87120
87466
  prompts: {
@@ -87280,7 +87626,7 @@ init_config();
87280
87626
  init_logger2();
87281
87627
  init_prd();
87282
87628
  import { existsSync as existsSync23, readdirSync as readdirSync6 } from "fs";
87283
- import { join as join46 } from "path";
87629
+ import { join as join47 } from "path";
87284
87630
 
87285
87631
  // src/cli/diagnose-analysis.ts
87286
87632
  function detectFailurePattern(story, _prd, status) {
@@ -87479,7 +87825,7 @@ function isProcessAlive2(pid) {
87479
87825
  }
87480
87826
  }
87481
87827
  async function loadStatusFile2(workdir) {
87482
- const statusPath = join46(workdir, ".nax", "status.json");
87828
+ const statusPath = join47(workdir, ".nax", "status.json");
87483
87829
  if (!existsSync23(statusPath))
87484
87830
  return null;
87485
87831
  try {
@@ -87507,7 +87853,7 @@ async function countCommitsSince(workdir, since) {
87507
87853
  }
87508
87854
  }
87509
87855
  async function checkLock(workdir) {
87510
- const lockFile = Bun.file(join46(workdir, "nax.lock"));
87856
+ const lockFile = Bun.file(join47(workdir, "nax.lock"));
87511
87857
  if (!await lockFile.exists())
87512
87858
  return { lockPresent: false };
87513
87859
  try {
@@ -87525,8 +87871,8 @@ async function diagnoseCommand(options = {}) {
87525
87871
  const logger = getLogger();
87526
87872
  const workdir = options.workdir ?? process.cwd();
87527
87873
  const naxSubdir = findProjectDir(workdir);
87528
- let projectDir = naxSubdir ? join46(naxSubdir, "..") : null;
87529
- if (!projectDir && existsSync23(join46(workdir, ".nax"))) {
87874
+ let projectDir = naxSubdir ? join47(naxSubdir, "..") : null;
87875
+ if (!projectDir && existsSync23(join47(workdir, ".nax"))) {
87530
87876
  projectDir = workdir;
87531
87877
  }
87532
87878
  if (!projectDir)
@@ -87537,7 +87883,7 @@ async function diagnoseCommand(options = {}) {
87537
87883
  if (status2) {
87538
87884
  feature = status2.run.feature;
87539
87885
  } else {
87540
- const featuresDir = join46(projectDir, ".nax", "features");
87886
+ const featuresDir = join47(projectDir, ".nax", "features");
87541
87887
  if (!existsSync23(featuresDir))
87542
87888
  throw new Error("No features found in project");
87543
87889
  const features = readdirSync6(featuresDir, { withFileTypes: true }).filter((e) => e.isDirectory()).map((e) => e.name);
@@ -87547,8 +87893,8 @@ async function diagnoseCommand(options = {}) {
87547
87893
  logger.info("diagnose", "No feature specified, using first found", { feature });
87548
87894
  }
87549
87895
  }
87550
- const featureDir = join46(projectDir, ".nax", "features", feature);
87551
- const prdPath = join46(featureDir, "prd.json");
87896
+ const featureDir = join47(projectDir, ".nax", "features", feature);
87897
+ const prdPath = join47(featureDir, "prd.json");
87552
87898
  if (!existsSync23(prdPath))
87553
87899
  throw new Error(`Feature not found: ${feature}`);
87554
87900
  const prd = await loadPRD(prdPath);
@@ -87592,7 +87938,7 @@ init_source();
87592
87938
  init_loader();
87593
87939
  init_generator2();
87594
87940
  import { existsSync as existsSync24 } from "fs";
87595
- import { join as join47 } from "path";
87941
+ import { join as join48 } from "path";
87596
87942
  var VALID_AGENTS = ["claude", "codex", "opencode", "cursor", "windsurf", "aider", "gemini"];
87597
87943
  async function generateCommand(options) {
87598
87944
  const workdir = options.dir ?? process.cwd();
@@ -87635,7 +87981,7 @@ async function generateCommand(options) {
87635
87981
  return;
87636
87982
  }
87637
87983
  if (options.package) {
87638
- const packageDir = join47(workdir, options.package);
87984
+ const packageDir = join48(workdir, options.package);
87639
87985
  if (dryRun) {
87640
87986
  console.log(source_default.yellow("\u26A0 Dry run \u2014 no files will be written"));
87641
87987
  }
@@ -87655,8 +88001,8 @@ async function generateCommand(options) {
87655
88001
  process.exit(1);
87656
88002
  return;
87657
88003
  }
87658
- const contextPath = options.context ? join47(workdir, options.context) : join47(workdir, ".nax/context.md");
87659
- const outputDir = options.output ? join47(workdir, options.output) : workdir;
88004
+ const contextPath = options.context ? join48(workdir, options.context) : join48(workdir, ".nax/context.md");
88005
+ const outputDir = options.output ? join48(workdir, options.output) : workdir;
87660
88006
  const autoInject = !options.noAutoInject;
87661
88007
  if (!existsSync24(contextPath)) {
87662
88008
  console.error(source_default.red(`\u2717 Context file not found: ${contextPath}`));
@@ -87762,7 +88108,7 @@ async function generateCommand(options) {
87762
88108
  // src/cli/config-display.ts
87763
88109
  init_loader();
87764
88110
  import { existsSync as existsSync26 } from "fs";
87765
- import { join as join49 } from "path";
88111
+ import { join as join50 } from "path";
87766
88112
 
87767
88113
  // src/cli/config-descriptions.ts
87768
88114
  var FIELD_DESCRIPTIONS = {
@@ -88003,7 +88349,7 @@ function deepEqual(a, b) {
88003
88349
  init_defaults();
88004
88350
  init_loader();
88005
88351
  import { existsSync as existsSync25 } from "fs";
88006
- import { join as join48 } from "path";
88352
+ import { join as join49 } from "path";
88007
88353
  async function loadConfigFile(path15) {
88008
88354
  if (!existsSync25(path15))
88009
88355
  return null;
@@ -88025,7 +88371,7 @@ async function loadProjectConfig() {
88025
88371
  const projectDir = findProjectDir();
88026
88372
  if (!projectDir)
88027
88373
  return null;
88028
- const projectPath = join48(projectDir, "config.json");
88374
+ const projectPath = join49(projectDir, "config.json");
88029
88375
  return await loadConfigFile(projectPath);
88030
88376
  }
88031
88377
 
@@ -88085,7 +88431,7 @@ async function configCommand(config2, options = {}) {
88085
88431
  function determineConfigSources() {
88086
88432
  const globalPath = globalConfigPath();
88087
88433
  const projectDir = findProjectDir();
88088
- const projectPath = projectDir ? join49(projectDir, "config.json") : null;
88434
+ const projectPath = projectDir ? join50(projectDir, "config.json") : null;
88089
88435
  return {
88090
88436
  global: fileExists(globalPath) ? globalPath : null,
88091
88437
  project: projectPath && fileExists(projectPath) ? projectPath : null
@@ -88234,15 +88580,15 @@ init_paths();
88234
88580
  init_profile();
88235
88581
  import { mkdirSync as mkdirSync5 } from "fs";
88236
88582
  import { readdirSync as readdirSync7 } from "fs";
88237
- import { join as join50 } from "path";
88583
+ import { join as join51 } from "path";
88238
88584
  var _profileCLIDeps = {
88239
88585
  env: process.env
88240
88586
  };
88241
88587
  var SENSITIVE_KEY_PATTERN = /key|token|secret|password|credential/i;
88242
88588
  var VAR_PATTERN = /\$[A-Za-z_][A-Za-z0-9_]*/;
88243
88589
  async function profileListCommand(startDir) {
88244
- const globalProfilesDir = join50(globalConfigDir(), "profiles");
88245
- const projectProfilesDir = join50(projectConfigDir(startDir), "profiles");
88590
+ const globalProfilesDir = join51(globalConfigDir(), "profiles");
88591
+ const projectProfilesDir = join51(projectConfigDir(startDir), "profiles");
88246
88592
  const globalProfiles = scanProfileDir(globalProfilesDir);
88247
88593
  const projectProfiles = scanProfileDir(projectProfilesDir);
88248
88594
  const activeProfile = await resolveProfileName({}, _profileCLIDeps.env, startDir);
@@ -88301,7 +88647,7 @@ function maskProfileValues(obj) {
88301
88647
  return result;
88302
88648
  }
88303
88649
  async function profileUseCommand(profileName, startDir) {
88304
- const configPath = join50(projectConfigDir(startDir), "config.json");
88650
+ const configPath = join51(projectConfigDir(startDir), "config.json");
88305
88651
  const configFile = Bun.file(configPath);
88306
88652
  let existing = {};
88307
88653
  if (await configFile.exists()) {
@@ -88320,8 +88666,8 @@ async function profileCurrentCommand(startDir) {
88320
88666
  return resolveProfileName({}, _profileCLIDeps.env, startDir);
88321
88667
  }
88322
88668
  async function profileCreateCommand(profileName, startDir) {
88323
- const profilesDir = join50(projectConfigDir(startDir), "profiles");
88324
- const profilePath = join50(profilesDir, `${profileName}.json`);
88669
+ const profilesDir = join51(projectConfigDir(startDir), "profiles");
88670
+ const profilePath = join51(profilesDir, `${profileName}.json`);
88325
88671
  const profileFile = Bun.file(profilePath);
88326
88672
  if (await profileFile.exists()) {
88327
88673
  throw new Error(`Profile "${profileName}" already exists at ${profilePath}`);
@@ -88443,7 +88789,7 @@ async function contextInspectCommand(options) {
88443
88789
  init_canonical_loader();
88444
88790
  init_errors();
88445
88791
  import { mkdir as mkdir9 } from "fs/promises";
88446
- import { basename as basename8, join as join51 } from "path";
88792
+ import { basename as basename8, join as join52 } from "path";
88447
88793
  var _rulesCLIDeps = {
88448
88794
  readFile: async (path15) => Bun.file(path15).text(),
88449
88795
  writeFile: async (path15, content) => {
@@ -88452,7 +88798,7 @@ var _rulesCLIDeps = {
88452
88798
  fileExists: async (path15) => Bun.file(path15).exists(),
88453
88799
  globInDir: (dir) => {
88454
88800
  try {
88455
- return [...new Bun.Glob("*.md").scanSync({ cwd: dir })].sort().map((f) => join51(dir, f));
88801
+ return [...new Bun.Glob("*.md").scanSync({ cwd: dir })].sort().map((f) => join52(dir, f));
88456
88802
  } catch {
88457
88803
  return [];
88458
88804
  }
@@ -88501,7 +88847,7 @@ ${r.content}`).join(`
88501
88847
  `);
88502
88848
  const shimContent = `${header + body}
88503
88849
  `;
88504
- const shimPath = join51(workdir, shimFileName);
88850
+ const shimPath = join52(workdir, shimFileName);
88505
88851
  if (options.dryRun) {
88506
88852
  console.log(`[dry-run] Would write ${shimPath} (${shimContent.length} bytes)`);
88507
88853
  return;
@@ -88530,14 +88876,14 @@ function neutralizeContent(content) {
88530
88876
  }
88531
88877
  async function collectMigrationSources(workdir) {
88532
88878
  const sources = [];
88533
- const claudeMdPath = join51(workdir, "CLAUDE.md");
88879
+ const claudeMdPath = join52(workdir, "CLAUDE.md");
88534
88880
  if (await _rulesCLIDeps.fileExists(claudeMdPath)) {
88535
88881
  const content = await _rulesCLIDeps.readFile(claudeMdPath);
88536
88882
  if (content.trim()) {
88537
88883
  sources.push({ sourcePath: claudeMdPath, targetFileName: "project-conventions.md", content });
88538
88884
  }
88539
88885
  }
88540
- const rulesDir = join51(workdir, ".claude", "rules");
88886
+ const rulesDir = join52(workdir, ".claude", "rules");
88541
88887
  const ruleFiles = _rulesCLIDeps.globInDir(rulesDir);
88542
88888
  for (const filePath of ruleFiles) {
88543
88889
  try {
@@ -88557,7 +88903,7 @@ async function rulesMigrateCommand(options) {
88557
88903
  console.log("[WARN] No source files found (checked CLAUDE.md and .claude/rules/*.md). Nothing to migrate.");
88558
88904
  return;
88559
88905
  }
88560
- const targetDir = join51(workdir, CANONICAL_RULES_DIR);
88906
+ const targetDir = join52(workdir, CANONICAL_RULES_DIR);
88561
88907
  if (!options.dryRun) {
88562
88908
  try {
88563
88909
  await _rulesCLIDeps.mkdir(targetDir);
@@ -88568,7 +88914,7 @@ async function rulesMigrateCommand(options) {
88568
88914
  let written = 0;
88569
88915
  let skipped = 0;
88570
88916
  for (const { sourcePath, targetFileName, content } of sources) {
88571
- const targetPath = join51(targetDir, targetFileName);
88917
+ const targetPath = join52(targetDir, targetFileName);
88572
88918
  if (!force && !options.dryRun && await _rulesCLIDeps.fileExists(targetPath)) {
88573
88919
  console.log(`[skip] ${targetFileName} already exists (use --force to overwrite)`);
88574
88920
  skipped++;
@@ -88607,7 +88953,7 @@ function collectCanonicalRuleRoots(workdir) {
88607
88953
  const packageRel = normalized.slice(0, idx);
88608
88954
  if (!packageRel)
88609
88955
  continue;
88610
- roots.add(join51(workdir, packageRel));
88956
+ roots.add(join52(workdir, packageRel));
88611
88957
  }
88612
88958
  return [...roots].sort();
88613
88959
  }
@@ -88628,7 +88974,7 @@ init_config();
88628
88974
  init_logger2();
88629
88975
  init_detect2();
88630
88976
  init_workspace();
88631
- import { join as join52 } from "path";
88977
+ import { join as join53 } from "path";
88632
88978
  function resolveEffective(detected, configPatterns) {
88633
88979
  if (configPatterns !== undefined)
88634
88980
  return "config";
@@ -88713,7 +89059,7 @@ async function detectCommand(options) {
88713
89059
  const rootDetected = detectionMap[""] ?? { patterns: [], confidence: "empty", sources: [] };
88714
89060
  const pkgEntries = await Promise.all(packageDirs.map(async (dir) => {
88715
89061
  const det = detectionMap[dir] ?? { patterns: [], confidence: "empty", sources: [] };
88716
- const pkgConfigPath = join52(workdir, ".nax", "mono", dir, "config.json");
89062
+ const pkgConfigPath = join53(workdir, ".nax", "mono", dir, "config.json");
88717
89063
  const pkgRaw = await loadRawConfig(pkgConfigPath);
88718
89064
  const pkgPatterns = deepGet(pkgRaw, TEST_PATTERNS_KEY);
88719
89065
  const effective = Array.isArray(pkgPatterns) ? pkgPatterns : undefined;
@@ -88767,13 +89113,13 @@ async function detectCommand(options) {
88767
89113
  if (rootDetected.confidence === "empty") {
88768
89114
  console.log(source_default.yellow(" root: skipped (empty detection)"));
88769
89115
  } else {
88770
- const rootConfigPath = join52(workdir, ".nax", "config.json");
89116
+ const rootConfigPath = join53(workdir, ".nax", "config.json");
88771
89117
  try {
88772
89118
  const status = await applyToConfig(rootConfigPath, rootDetected.patterns, options.force ?? false);
88773
89119
  if (status === "skipped") {
88774
89120
  console.log(source_default.dim(" root: skipped (testFilePatterns already set; use --force to overwrite)"));
88775
89121
  } else {
88776
- console.log(source_default.green(` root: ${status} \u2192 ${join52(".nax", "config.json")}`));
89122
+ console.log(source_default.green(` root: ${status} \u2192 ${join53(".nax", "config.json")}`));
88777
89123
  }
88778
89124
  } catch (err) {
88779
89125
  console.error(source_default.red(` root: write failed \u2014 ${err.message}`));
@@ -88786,13 +89132,13 @@ async function detectCommand(options) {
88786
89132
  console.log(source_default.dim(` ${dir}: skipped (empty detection)`));
88787
89133
  continue;
88788
89134
  }
88789
- const pkgConfigPath = join52(workdir, ".nax", "mono", dir, "config.json");
89135
+ const pkgConfigPath = join53(workdir, ".nax", "mono", dir, "config.json");
88790
89136
  try {
88791
89137
  const status = await applyToConfig(pkgConfigPath, det.patterns, options.force ?? false);
88792
89138
  if (status === "skipped") {
88793
89139
  console.log(source_default.dim(` ${dir}: skipped (already set)`));
88794
89140
  } else {
88795
- console.log(source_default.green(` ${dir}: ${status} \u2192 ${join52(".nax", "mono", dir, "config.json")}`));
89141
+ console.log(source_default.green(` ${dir}: ${status} \u2192 ${join53(".nax", "mono", dir, "config.json")}`));
88796
89142
  }
88797
89143
  } catch (err) {
88798
89144
  console.error(source_default.red(` ${dir}: write failed \u2014 ${err.message}`));
@@ -88814,24 +89160,24 @@ async function diagnose(options) {
88814
89160
 
88815
89161
  // src/commands/logs.ts
88816
89162
  import { existsSync as existsSync28 } from "fs";
88817
- import { join as join56 } from "path";
89163
+ import { join as join57 } from "path";
88818
89164
 
88819
89165
  // src/commands/logs-formatter.ts
88820
89166
  init_source();
88821
89167
  init_formatter();
88822
89168
  import { readdirSync as readdirSync9 } from "fs";
88823
- import { join as join55 } from "path";
89169
+ import { join as join56 } from "path";
88824
89170
 
88825
89171
  // src/commands/logs-reader.ts
88826
89172
  import { existsSync as existsSync27, readdirSync as readdirSync8 } from "fs";
88827
89173
  import { readdir as readdir4 } from "fs/promises";
88828
- import { join as join54 } from "path";
89174
+ import { join as join55 } from "path";
88829
89175
 
88830
89176
  // src/utils/paths.ts
88831
89177
  import { homedir as homedir4 } from "os";
88832
- import { join as join53 } from "path";
89178
+ import { join as join54 } from "path";
88833
89179
  function getRunsDir() {
88834
- return process.env.NAX_RUNS_DIR ?? join53(homedir4(), ".nax", "runs");
89180
+ return process.env.NAX_RUNS_DIR ?? join54(homedir4(), ".nax", "runs");
88835
89181
  }
88836
89182
 
88837
89183
  // src/commands/logs-reader.ts
@@ -88848,7 +89194,7 @@ async function resolveRunFileFromRegistry(runId) {
88848
89194
  }
88849
89195
  let matched = null;
88850
89196
  for (const entry of entries) {
88851
- const metaPath = join54(runsDir, entry, "meta.json");
89197
+ const metaPath = join55(runsDir, entry, "meta.json");
88852
89198
  try {
88853
89199
  const meta3 = await Bun.file(metaPath).json();
88854
89200
  if (meta3.runId === runId || meta3.runId.startsWith(runId)) {
@@ -88870,14 +89216,14 @@ async function resolveRunFileFromRegistry(runId) {
88870
89216
  return null;
88871
89217
  }
88872
89218
  const specificFile = files.find((f) => f === `${matched.runId}.jsonl`);
88873
- return join54(matched.eventsDir, specificFile ?? files[0]);
89219
+ return join55(matched.eventsDir, specificFile ?? files[0]);
88874
89220
  }
88875
89221
  async function selectRunFile(runsDir) {
88876
89222
  const files = readdirSync8(runsDir).filter((f) => f.endsWith(".jsonl") && f !== "latest.jsonl").sort().reverse();
88877
89223
  if (files.length === 0) {
88878
89224
  return null;
88879
89225
  }
88880
- return join54(runsDir, files[0]);
89226
+ return join55(runsDir, files[0]);
88881
89227
  }
88882
89228
  async function extractRunSummary(filePath) {
88883
89229
  const file3 = Bun.file(filePath);
@@ -88962,7 +89308,7 @@ Runs:
88962
89308
  console.log(source_default.gray(" Timestamp Stories Duration Cost Status"));
88963
89309
  console.log(source_default.gray(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
88964
89310
  for (const file3 of files) {
88965
- const filePath = join55(runsDir, file3);
89311
+ const filePath = join56(runsDir, file3);
88966
89312
  const summary = await extractRunSummary(filePath);
88967
89313
  const timestamp = file3.replace(".jsonl", "");
88968
89314
  const stories = summary ? `${summary.passed}/${summary.total}` : "?/?";
@@ -89076,7 +89422,7 @@ async function logsCommand(options) {
89076
89422
  return;
89077
89423
  }
89078
89424
  const resolved = resolveProject({ dir: options.dir });
89079
- const naxDir = join56(resolved.projectDir, ".nax");
89425
+ const naxDir = join57(resolved.projectDir, ".nax");
89080
89426
  const configPath = resolved.configPath;
89081
89427
  const configFile = Bun.file(configPath);
89082
89428
  const config2 = await configFile.json();
@@ -89084,8 +89430,8 @@ async function logsCommand(options) {
89084
89430
  if (!featureName) {
89085
89431
  throw new Error("No feature specified in config.json");
89086
89432
  }
89087
- const featureDir = join56(naxDir, "features", featureName);
89088
- const runsDir = join56(featureDir, "runs");
89433
+ const featureDir = join57(naxDir, "features", featureName);
89434
+ const runsDir = join57(featureDir, "runs");
89089
89435
  if (!existsSync28(runsDir)) {
89090
89436
  throw new Error(`No runs directory found for feature: ${featureName}`);
89091
89437
  }
@@ -89110,7 +89456,7 @@ init_config();
89110
89456
  init_prd();
89111
89457
  init_precheck();
89112
89458
  import { existsSync as existsSync29 } from "fs";
89113
- import { join as join57 } from "path";
89459
+ import { join as join58 } from "path";
89114
89460
  async function precheckCommand(options) {
89115
89461
  const resolved = resolveProject({
89116
89462
  dir: options.dir,
@@ -89132,9 +89478,9 @@ async function precheckCommand(options) {
89132
89478
  process.exit(1);
89133
89479
  }
89134
89480
  }
89135
- const naxDir = join57(resolved.projectDir, ".nax");
89136
- const featureDir = join57(naxDir, "features", featureName);
89137
- const prdPath = join57(featureDir, "prd.json");
89481
+ const naxDir = join58(resolved.projectDir, ".nax");
89482
+ const featureDir = join58(naxDir, "features", featureName);
89483
+ const prdPath = join58(featureDir, "prd.json");
89138
89484
  if (!existsSync29(featureDir)) {
89139
89485
  console.error(source_default.red(`Feature not found: ${featureName}`));
89140
89486
  process.exit(1);
@@ -89156,7 +89502,7 @@ async function precheckCommand(options) {
89156
89502
  // src/commands/runs.ts
89157
89503
  init_source();
89158
89504
  import { readdir as readdir5 } from "fs/promises";
89159
- import { join as join58 } from "path";
89505
+ import { join as join59 } from "path";
89160
89506
  var DEFAULT_LIMIT = 20;
89161
89507
  var _runsCmdDeps = {
89162
89508
  getRunsDir
@@ -89211,7 +89557,7 @@ async function runsCommand(options = {}) {
89211
89557
  }
89212
89558
  const rows = [];
89213
89559
  for (const entry of entries) {
89214
- const metaPath = join58(runsDir, entry, "meta.json");
89560
+ const metaPath = join59(runsDir, entry, "meta.json");
89215
89561
  let meta3;
89216
89562
  try {
89217
89563
  meta3 = await Bun.file(metaPath).json();
@@ -89288,7 +89634,7 @@ async function runsCommand(options = {}) {
89288
89634
 
89289
89635
  // src/commands/unlock.ts
89290
89636
  init_source();
89291
- import { join as join59 } from "path";
89637
+ import { join as join60 } from "path";
89292
89638
  function isProcessAlive3(pid) {
89293
89639
  try {
89294
89640
  process.kill(pid, 0);
@@ -89303,7 +89649,7 @@ function formatLockAge(ageMs) {
89303
89649
  }
89304
89650
  async function unlockCommand(options) {
89305
89651
  const workdir = options.dir ?? process.cwd();
89306
- const lockPath = join59(workdir, "nax.lock");
89652
+ const lockPath = join60(workdir, "nax.lock");
89307
89653
  const lockFile = Bun.file(lockPath);
89308
89654
  const exists = await lockFile.exists();
89309
89655
  if (!exists) {
@@ -95285,8 +95631,8 @@ class Ink {
95285
95631
  }
95286
95632
  }
95287
95633
  async waitUntilExit() {
95288
- this.exitPromise ||= new Promise((resolve16, reject2) => {
95289
- this.resolveExitPromise = resolve16;
95634
+ this.exitPromise ||= new Promise((resolve17, reject2) => {
95635
+ this.resolveExitPromise = resolve17;
95290
95636
  this.rejectExitPromise = reject2;
95291
95637
  });
95292
95638
  if (!this.beforeExitHandler) {
@@ -97115,7 +97461,7 @@ async function promptForConfirmation(question) {
97115
97461
  if (!process.stdin.isTTY) {
97116
97462
  return true;
97117
97463
  }
97118
- return new Promise((resolve16) => {
97464
+ return new Promise((resolve17) => {
97119
97465
  process.stdout.write(source_default.bold(`${question} [Y/n] `));
97120
97466
  process.stdin.setRawMode(true);
97121
97467
  process.stdin.resume();
@@ -97128,9 +97474,9 @@ async function promptForConfirmation(question) {
97128
97474
  process.stdout.write(`
97129
97475
  `);
97130
97476
  if (answer === "n") {
97131
- resolve16(false);
97477
+ resolve17(false);
97132
97478
  } else {
97133
- resolve16(true);
97479
+ resolve17(true);
97134
97480
  }
97135
97481
  };
97136
97482
  process.stdin.on("data", handler);
@@ -97159,15 +97505,15 @@ Next: nax generate --package ${options.package}`));
97159
97505
  }
97160
97506
  return;
97161
97507
  }
97162
- const naxDir = join73(workdir, ".nax");
97508
+ const naxDir = join74(workdir, ".nax");
97163
97509
  if (existsSync34(naxDir) && !options.force) {
97164
97510
  console.log(source_default.yellow("nax already initialized. Use --force to overwrite."));
97165
97511
  return;
97166
97512
  }
97167
- mkdirSync7(join73(naxDir, "features"), { recursive: true });
97168
- mkdirSync7(join73(naxDir, "hooks"), { recursive: true });
97169
- await Bun.write(join73(naxDir, "config.json"), JSON.stringify(DEFAULT_CONFIG, null, 2));
97170
- await Bun.write(join73(naxDir, "hooks.json"), JSON.stringify({
97513
+ mkdirSync7(join74(naxDir, "features"), { recursive: true });
97514
+ mkdirSync7(join74(naxDir, "hooks"), { recursive: true });
97515
+ await Bun.write(join74(naxDir, "config.json"), JSON.stringify(DEFAULT_CONFIG, null, 2));
97516
+ await Bun.write(join74(naxDir, "hooks.json"), JSON.stringify({
97171
97517
  hooks: {
97172
97518
  "on-start": { command: 'echo "nax started: $NAX_FEATURE"', enabled: false },
97173
97519
  "on-complete": { command: 'echo "nax complete: $NAX_FEATURE"', enabled: false },
@@ -97175,12 +97521,12 @@ Next: nax generate --package ${options.package}`));
97175
97521
  "on-error": { command: 'echo "nax error: $NAX_REASON"', enabled: false }
97176
97522
  }
97177
97523
  }, null, 2));
97178
- await Bun.write(join73(naxDir, ".gitignore"), `# nax temp files
97524
+ await Bun.write(join74(naxDir, ".gitignore"), `# nax temp files
97179
97525
  *.tmp
97180
97526
  .paused.json
97181
97527
  .nax-verifier-verdict.json
97182
97528
  `);
97183
- await Bun.write(join73(naxDir, "context.md"), `# Project Context
97529
+ await Bun.write(join74(naxDir, "context.md"), `# Project Context
97184
97530
 
97185
97531
  This document defines coding standards, architectural decisions, and forbidden patterns for this project.
97186
97532
  Run \`nax generate\` to regenerate agent config files (CLAUDE.md, AGENTS.md, .cursorrules, etc.) from this file.
@@ -97310,8 +97656,8 @@ program2.command("run").description("Run the orchestration loop for a feature").
97310
97656
  console.error(source_default.red("nax not initialized. Run: nax init"));
97311
97657
  process.exit(1);
97312
97658
  }
97313
- const featureDir = join73(naxDir, "features", options.feature);
97314
- const prdPath = join73(featureDir, "prd.json");
97659
+ const featureDir = join74(naxDir, "features", options.feature);
97660
+ const prdPath = join74(featureDir, "prd.json");
97315
97661
  if (options.plan && options.from) {
97316
97662
  if (existsSync34(prdPath) && !options.force) {
97317
97663
  console.error(source_default.red(`Error: prd.json already exists for feature "${options.feature}".`));
@@ -97333,10 +97679,10 @@ program2.command("run").description("Run the orchestration loop for a feature").
97333
97679
  }
97334
97680
  }
97335
97681
  try {
97336
- const planLogDir = join73(featureDir, "plan");
97682
+ const planLogDir = join74(featureDir, "plan");
97337
97683
  mkdirSync7(planLogDir, { recursive: true });
97338
97684
  const planLogId = new Date().toISOString().replace(/:/g, "-").replace(/\..+/, "");
97339
- const planLogPath = join73(planLogDir, `${planLogId}.jsonl`);
97685
+ const planLogPath = join74(planLogDir, `${planLogId}.jsonl`);
97340
97686
  initLogger({ level: "info", filePath: planLogPath, useChalk: false, headless: true });
97341
97687
  console.log(source_default.dim(` [Plan log: ${planLogPath}]`));
97342
97688
  console.log(source_default.dim(" [Planning phase: generating PRD from spec]"));
@@ -97380,10 +97726,10 @@ program2.command("run").description("Run the orchestration loop for a feature").
97380
97726
  process.exit(1);
97381
97727
  }
97382
97728
  resetLogger();
97383
- const runsDir = join73(featureDir, "runs");
97729
+ const runsDir = join74(featureDir, "runs");
97384
97730
  mkdirSync7(runsDir, { recursive: true });
97385
97731
  const runId = new Date().toISOString().replace(/:/g, "-").replace(/\..+/, "");
97386
- const logFilePath = join73(runsDir, `${runId}.jsonl`);
97732
+ const logFilePath = join74(runsDir, `${runId}.jsonl`);
97387
97733
  const isTTY = process.stdout.isTTY ?? false;
97388
97734
  const headlessFlag = options.headless ?? false;
97389
97735
  const headlessEnv = process.env.NAX_HEADLESS === "1";
@@ -97400,7 +97746,7 @@ program2.command("run").description("Run the orchestration loop for a feature").
97400
97746
  config2.agent.default = options.agent;
97401
97747
  }
97402
97748
  config2.execution.maxIterations = Number.parseInt(options.maxIterations, 10);
97403
- const globalNaxDir = join73(homedir8(), ".nax");
97749
+ const globalNaxDir = join74(homedir8(), ".nax");
97404
97750
  const hooks = await loadHooksConfig(naxDir, globalNaxDir);
97405
97751
  const eventEmitter = new PipelineEventEmitter;
97406
97752
  let tuiInstance;
@@ -97423,7 +97769,7 @@ program2.command("run").description("Run the orchestration loop for a feature").
97423
97769
  } else {
97424
97770
  console.log(source_default.dim(" [Headless mode \u2014 pipe output]"));
97425
97771
  }
97426
- const statusFilePath = join73(workdir, ".nax", "status.json");
97772
+ const statusFilePath = join74(workdir, ".nax", "status.json");
97427
97773
  let parallel;
97428
97774
  if (options.parallel !== undefined) {
97429
97775
  parallel = Number.parseInt(options.parallel, 10);
@@ -97449,7 +97795,7 @@ program2.command("run").description("Run the orchestration loop for a feature").
97449
97795
  headless: useHeadless,
97450
97796
  skipPrecheck: options.skipPrecheck ?? false
97451
97797
  });
97452
- const latestSymlink = join73(runsDir, "latest.jsonl");
97798
+ const latestSymlink = join74(runsDir, "latest.jsonl");
97453
97799
  try {
97454
97800
  if (existsSync34(latestSymlink)) {
97455
97801
  Bun.spawnSync(["rm", latestSymlink]);
@@ -97487,9 +97833,9 @@ features.command("create <name>").description("Create a new feature").option("-d
97487
97833
  console.error(source_default.red("nax not initialized. Run: nax init"));
97488
97834
  process.exit(1);
97489
97835
  }
97490
- const featureDir = join73(naxDir, "features", name);
97836
+ const featureDir = join74(naxDir, "features", name);
97491
97837
  mkdirSync7(featureDir, { recursive: true });
97492
- await Bun.write(join73(featureDir, "spec.md"), `# Feature: ${name}
97838
+ await Bun.write(join74(featureDir, "spec.md"), `# Feature: ${name}
97493
97839
 
97494
97840
  ## Overview
97495
97841
 
@@ -97522,7 +97868,7 @@ features.command("create <name>").description("Create a new feature").option("-d
97522
97868
 
97523
97869
  <!-- What this feature explicitly does NOT cover. -->
97524
97870
  `);
97525
- await Bun.write(join73(featureDir, "progress.txt"), `# Progress: ${name}
97871
+ await Bun.write(join74(featureDir, "progress.txt"), `# Progress: ${name}
97526
97872
 
97527
97873
  Created: ${new Date().toISOString()}
97528
97874
 
@@ -97548,7 +97894,7 @@ features.command("list").description("List all features").option("-d, --dir <pat
97548
97894
  console.error(source_default.red("nax not initialized."));
97549
97895
  process.exit(1);
97550
97896
  }
97551
- const featuresDir = join73(naxDir, "features");
97897
+ const featuresDir = join74(naxDir, "features");
97552
97898
  if (!existsSync34(featuresDir)) {
97553
97899
  console.log(source_default.dim("No features yet."));
97554
97900
  return;
@@ -97563,7 +97909,7 @@ features.command("list").description("List all features").option("-d, --dir <pat
97563
97909
  Features:
97564
97910
  `));
97565
97911
  for (const name of entries) {
97566
- const prdPath = join73(featuresDir, name, "prd.json");
97912
+ const prdPath = join74(featuresDir, name, "prd.json");
97567
97913
  if (existsSync34(prdPath)) {
97568
97914
  const prd = await loadPRD(prdPath);
97569
97915
  const c = countStories(prd);
@@ -97598,10 +97944,10 @@ Use: nax plan -f <feature> --from <spec>`));
97598
97944
  cliOverrides.profile = options.profile;
97599
97945
  }
97600
97946
  const config2 = await loadConfig(workdir, cliOverrides);
97601
- const featureLogDir = join73(naxDir, "features", options.feature, "plan");
97947
+ const featureLogDir = join74(naxDir, "features", options.feature, "plan");
97602
97948
  mkdirSync7(featureLogDir, { recursive: true });
97603
97949
  const planLogId = new Date().toISOString().replace(/:/g, "-").replace(/\..+/, "");
97604
- const planLogPath = join73(featureLogDir, `${planLogId}.jsonl`);
97950
+ const planLogPath = join74(featureLogDir, `${planLogId}.jsonl`);
97605
97951
  initLogger({ level: "info", filePath: planLogPath, useChalk: false, headless: true });
97606
97952
  console.log(source_default.dim(` [Plan log: ${planLogPath}]`));
97607
97953
  try {