@nathapp/nax 0.64.1 → 0.64.2-canary.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/dist/nax.js +1487 -1154
  2. package/package.json +1 -1
package/dist/nax.js CHANGED
@@ -21027,7 +21027,7 @@ function reshapeSelector(name, fn) {
21027
21027
  }
21028
21028
 
21029
21029
  // src/config/selectors.ts
21030
- var reviewConfigSelector, planConfigSelector, decomposeConfigSelector, rectifyConfigSelector, acceptanceConfigSelector, acceptanceFixConfigSelector, acceptanceGenConfigSelector, tddConfigSelector, debateConfigSelector, routingConfigSelector, verifyConfigSelector, rectificationGateConfigSelector, agentConfigSelector, agentManagerConfigSelector, interactionConfigSelector, precheckConfigSelector, qualityConfigSelector, testPatternConfigSelector, contextConfigSelector, contextToolRuntimeConfigSelector, promptLoaderConfigSelector, llmRoutingConfigSelector;
21030
+ var reviewConfigSelector, planConfigSelector, decomposeConfigSelector, rectifyConfigSelector, acceptanceConfigSelector, acceptanceFixConfigSelector, acceptanceGenConfigSelector, tddConfigSelector, debateConfigSelector, routingConfigSelector, verifyConfigSelector, rectificationGateConfigSelector, agentConfigSelector, agentManagerConfigSelector, interactionConfigSelector, precheckConfigSelector, qualityConfigSelector, autofixConfigSelector, testPatternConfigSelector, contextConfigSelector, contextToolRuntimeConfigSelector, promptLoaderConfigSelector, llmRoutingConfigSelector;
21031
21031
  var init_selectors = __esm(() => {
21032
21032
  reviewConfigSelector = pickSelector("review", "review", "debate", "models", "execution", "project", "quality", "agent");
21033
21033
  planConfigSelector = pickSelector("plan", "plan", "debate");
@@ -21049,6 +21049,7 @@ var init_selectors = __esm(() => {
21049
21049
  interactionConfigSelector = pickSelector("interaction", "interaction");
21050
21050
  precheckConfigSelector = pickSelector("precheck", "precheck", "quality", "execution", "prompts", "review", "project");
21051
21051
  qualityConfigSelector = pickSelector("quality", "quality", "execution");
21052
+ autofixConfigSelector = pickSelector("autofix", "quality", "execution");
21052
21053
  testPatternConfigSelector = pickSelector("test-pattern", "execution", "project", "quality");
21053
21054
  contextConfigSelector = pickSelector("context", "context");
21054
21055
  contextToolRuntimeConfigSelector = pickSelector("context-tool-runtime", "context", "execution", "project", "quality");
@@ -21664,6 +21665,13 @@ var init_path_security2 = () => {};
21664
21665
  // src/context/engine/providers/code-neighbor.ts
21665
21666
  import { createHash as createHash3 } from "crypto";
21666
21667
  import { join as join6, relative, resolve as resolve5 } from "path";
21668
+ function isExcludedPath(file3, ignoreMatchers) {
21669
+ for (const prefix of EXCLUDED_DIR_PREFIXES) {
21670
+ if (file3.startsWith(prefix) || file3.includes(`/${prefix}`))
21671
+ return true;
21672
+ }
21673
+ return ignoreMatchers.some((m) => m.test(file3));
21674
+ }
21667
21675
  function contentHash8(content) {
21668
21676
  return createHash3("sha256").update(content).digest("hex").slice(0, 8);
21669
21677
  }
@@ -21768,7 +21776,7 @@ function stripExt(s) {
21768
21776
  function isTestFile(filePath, regex) {
21769
21777
  return regex.some((re) => re.test(filePath));
21770
21778
  }
21771
- async function collectNeighbors(filePath, workdir, extraGlobWorkdirs, siblingTestContext) {
21779
+ async function collectNeighbors(filePath, workdir, extraGlobWorkdirs, siblingTestContext, ignoreMatchers) {
21772
21780
  const neighbors = new Set;
21773
21781
  if (await _codeNeighborDeps.fileExists(join6(workdir, filePath))) {
21774
21782
  const content = await _codeNeighborDeps.readFile(join6(workdir, filePath));
@@ -21781,7 +21789,7 @@ async function collectNeighbors(filePath, workdir, extraGlobWorkdirs, siblingTes
21781
21789
  const fileBaseName = (filePath.split("/").pop() ?? filePath).replace(/\.[^.]+$/, "");
21782
21790
  const fileNoExt = filePath.replace(/\.[^.]+$/, "");
21783
21791
  const scanForReverseDeps = async (scanWorkdir) => {
21784
- const srcFiles = _codeNeighborDeps.glob(SOURCE_GLOB, scanWorkdir);
21792
+ const srcFiles = _codeNeighborDeps.glob(SOURCE_GLOB, scanWorkdir, ignoreMatchers);
21785
21793
  for (const srcFile of srcFiles) {
21786
21794
  if (neighbors.size >= MAX_NEIGHBORS_PER_FILE)
21787
21795
  break;
@@ -21864,9 +21872,10 @@ class CodeNeighborProvider {
21864
21872
  globs: request.resolvedTestPatterns.globs,
21865
21873
  regex: request.resolvedTestPatterns.regex
21866
21874
  } : undefined;
21875
+ const ignoreMatchers = request.naxIgnoreIndex?.getMatchers(workdir);
21867
21876
  const sections = [];
21868
21877
  for (const file3 of filesToProcess) {
21869
- const neighbors = await collectNeighbors(file3, workdir, extraGlobWorkdirs, siblingTestContext);
21878
+ const neighbors = await collectNeighbors(file3, workdir, extraGlobWorkdirs, siblingTestContext, ignoreMatchers);
21870
21879
  if (neighbors.length > 0) {
21871
21880
  sections.push(`### ${file3}
21872
21881
  ${neighbors.map((n) => `- ${n}`).join(`
@@ -21899,22 +21908,34 @@ ${sections.join(`
21899
21908
  return { chunks: [chunk], pullTools: [] };
21900
21909
  }
21901
21910
  }
21902
- var MAX_FILES = 10, MAX_NEIGHBORS_PER_FILE = 8, MAX_GLOB_FILES = 200, MAX_CHUNK_TOKENS = 500, SOURCE_GLOB = "src/**/*.{ts,tsx,js,jsx,py,go,rs,java,rb,php,cs,cpp,c,h}", _codeNeighborDeps, FROM_PATTERN, REQUIRE_PATTERN, IMPORT_SIDE_EFFECT_PATTERN;
21911
+ var MAX_FILES = 10, MAX_NEIGHBORS_PER_FILE = 8, MAX_GLOB_FILES = 200, MAX_CHUNK_TOKENS = 500, SOURCE_GLOB = "**/*.{ts,tsx,js,jsx,py,go,rs,java,rb,php,cs,cpp,c,h}", EXCLUDED_DIR_PREFIXES, _codeNeighborDeps, FROM_PATTERN, REQUIRE_PATTERN, IMPORT_SIDE_EFFECT_PATTERN;
21903
21912
  var init_code_neighbor = __esm(() => {
21904
21913
  init_logger2();
21905
21914
  init_workspace();
21906
21915
  init_path_security2();
21916
+ EXCLUDED_DIR_PREFIXES = [
21917
+ "node_modules/",
21918
+ ".git/",
21919
+ ".nax/",
21920
+ "vendor/",
21921
+ "dist/",
21922
+ "build/",
21923
+ "out/",
21924
+ ".cache/"
21925
+ ];
21907
21926
  _codeNeighborDeps = {
21908
21927
  fileExists: (path) => Bun.file(path).exists(),
21909
21928
  readFile: (path) => Bun.file(path).text(),
21910
21929
  discoverWorkspacePackages: (repoRoot) => discoverWorkspacePackages(repoRoot),
21911
21930
  getLogger,
21912
- glob: (pattern, cwd) => {
21931
+ glob: (pattern, cwd, ignoreMatchers = []) => {
21913
21932
  const g = new Bun.Glob(pattern);
21914
21933
  const results = [];
21915
21934
  let count = 0;
21916
21935
  let truncated = false;
21917
21936
  for (const file3 of g.scanSync({ cwd, absolute: false })) {
21937
+ if (isExcludedPath(file3, ignoreMatchers))
21938
+ continue;
21918
21939
  if (count >= MAX_GLOB_FILES) {
21919
21940
  truncated = true;
21920
21941
  break;
@@ -24663,7 +24684,7 @@ async function gitLsFiles(workdir) {
24663
24684
  }
24664
24685
  }
24665
24686
  function isExcluded(path) {
24666
- return EXCLUDED_DIR_PREFIXES.some((prefix) => path.startsWith(prefix) || path.includes(`/${prefix}`));
24687
+ return EXCLUDED_DIR_PREFIXES2.some((prefix) => path.startsWith(prefix) || path.includes(`/${prefix}`));
24667
24688
  }
24668
24689
  async function detectFromFileScan(workdir) {
24669
24690
  const files = await gitLsFiles(workdir);
@@ -24701,9 +24722,9 @@ async function detectFromFileScan(workdir) {
24701
24722
  patterns
24702
24723
  };
24703
24724
  }
24704
- var EXCLUDED_DIR_PREFIXES, MIN_COUNT_THRESHOLD = 5, MIN_FRACTION_THRESHOLD = 0.1, CANDIDATE_SUFFIXES, SUFFIX_TO_GLOB, _fileScanDeps;
24725
+ var EXCLUDED_DIR_PREFIXES2, MIN_COUNT_THRESHOLD = 5, MIN_FRACTION_THRESHOLD = 0.1, CANDIDATE_SUFFIXES, SUFFIX_TO_GLOB, _fileScanDeps;
24705
24726
  var init_file_scan = __esm(() => {
24706
- EXCLUDED_DIR_PREFIXES = ["node_modules/", "dist/", "build/", ".nax/", "coverage/", ".git/"];
24727
+ EXCLUDED_DIR_PREFIXES2 = ["node_modules/", "dist/", "build/", ".nax/", "coverage/", ".git/"];
24707
24728
  CANDIDATE_SUFFIXES = [
24708
24729
  ".test.ts",
24709
24730
  ".test.tsx",
@@ -27118,12 +27139,13 @@ function formatPriorFailures(failures) {
27118
27139
  **Review Findings (fix these issues):**`);
27119
27140
  for (const finding of failure.reviewFindings) {
27120
27141
  const source = finding.source ? ` (${finding.source})` : "";
27142
+ const loc = finding.file ? `${finding.file}:${finding.line ?? 0}` : "global";
27121
27143
  parts.push(`
27122
- - **[${finding.severity}]** \`${finding.file}:${finding.line}\`${source}`);
27123
- parts.push(` **Rule:** ${finding.ruleId}`);
27144
+ - **[${finding.severity}]** \`${loc}\`${source}`);
27145
+ parts.push(` **Rule:** ${finding.rule ?? finding.category}`);
27124
27146
  parts.push(` **Issue:** ${finding.message}`);
27125
- if (finding.url) {
27126
- parts.push(` **Docs:** ${finding.url}`);
27147
+ if (typeof finding.meta?.url === "string") {
27148
+ parts.push(` **Docs:** ${finding.meta.url}`);
27127
27149
  }
27128
27150
  }
27129
27151
  }
@@ -27685,7 +27707,7 @@ isolation scope: Implement source code in src/ to make tests pass. Do not modify
27685
27707
  if (role === "verifier") {
27686
27708
  return `${header}
27687
27709
 
27688
- isolation scope: Read-only inspection. Review all test results, implementation code, and acceptance criteria compliance. You MAY write a verdict file (.nax-verifier-verdict.json) and apply legitimate fixes if needed.${footer}`;
27710
+ isolation scope: Read-only TDD integrity inspection. Review story-scoped test results and test-file modifications. Do NOT apply source or test fixes. You MAY write only the verdict file (.nax-verifier-verdict.json).${footer}`;
27689
27711
  }
27690
27712
  if (role === "single-session") {
27691
27713
  return `${header}
@@ -27783,17 +27805,18 @@ Instructions:
27783
27805
  if (role === "verifier") {
27784
27806
  return `# Role: Verifier
27785
27807
 
27786
- Your task: Review and verify the implementation against acceptance criteria.
27808
+ Your task: verify the TDD handoff integrity for this story.
27787
27809
 
27788
- Context: You are the final session in a multi-session workflow. A test-writer created tests, and an implementer wrote the code. The orchestrator has already run the full test suite and confirmed it passes before handing off to you.
27810
+ Context: You are the final session in a multi-session workflow. A test-writer created tests, and an implementer wrote the code. The orchestrator has already attempted the full-suite gate before handing off to you; it may have passed, failed, or exhausted rectification.
27789
27811
 
27790
27812
  Instructions:
27791
27813
  - Run ONLY the story's scoped test files \u2014 do NOT run the full test suite (the orchestrator already handled that)
27792
- - Check that implementation meets all acceptance criteria from the story
27793
- - Inspect code quality, error handling, and edge cases
27814
+ - Confirm the story-scoped tests pass
27815
+ - Check whether the implementer modified test files after the test-writer phase
27794
27816
  - Verify any test modifications (if any) are legitimate fixes, not shortcuts
27817
+ - Do NOT perform semantic acceptance review; semantic/adversarial review stages own acceptance criteria and broad code-quality findings
27795
27818
  - Write a detailed verdict with reasoning
27796
- - Goal: verify story-scoped tests pass, provide comprehensive code review and quality assurance`;
27819
+ - Goal: verify story-scoped tests pass and test integrity was preserved`;
27797
27820
  }
27798
27821
  if (role === "single-session") {
27799
27822
  return `# Role: Single-Session
@@ -27911,16 +27934,13 @@ After completing your verification, you **MUST** write a verdict file at the **p
27911
27934
  **File:** \`.nax-verifier-verdict.json\`
27912
27935
 
27913
27936
  Set \`approved: true\` when ALL of these conditions are met:
27914
- - All story-scoped tests pass (the orchestrator already confirmed the full suite passes \u2014 you only need to verify the story's own tests)
27915
- - Implementation is clean and follows conventions
27916
- - All acceptance criteria met
27937
+ - All story-scoped tests pass (the orchestrator already attempted the full-suite gate \u2014 you only need to verify the story's own tests)
27917
27938
  - Any test modifications by implementer are legitimate fixes
27918
27939
 
27919
27940
  Set \`approved: false\` when ANY of these conditions are true:
27920
27941
  - Tests are failing and you cannot fix them
27921
27942
  - The implementer loosened test assertions to mask bugs
27922
- - Critical acceptance criteria are not met
27923
- - Code quality is poor (security issues, severe bugs, etc.)
27943
+ - The implementer made illegitimate test changes
27924
27944
 
27925
27945
  **JSON schema** (fill in all fields with real values):
27926
27946
 
@@ -27931,10 +27951,11 @@ Set \`approved: false\` when ANY of these conditions are true:
27931
27951
  **Field notes:**
27932
27952
  - \`quality.rating\` must be one of: \`"good"\`, \`"acceptable"\`, \`"poor"\`
27933
27953
  - \`testModifications.files\` \u2014 list any test files the implementer changed
27934
- - \`fixes\` \u2014 list any fixes you applied yourself during this verification session
27954
+ - \`acceptanceCriteria\` and \`quality\` are advisory in this TDD verifier verdict; do not use them to reject semantic correctness
27955
+ - \`fixes\` \u2014 keep this empty; the verifier must not apply code or test fixes
27935
27956
  - \`reasoning\` \u2014 brief summary of your overall assessment
27936
27957
 
27937
- When done, commit any fixes with message: "fix: verify and adjust ${story.title}"`;
27958
+ When done, do not commit code changes. Only write the verdict file.`;
27938
27959
  }
27939
27960
 
27940
27961
  // src/prompts/sections/conventions.ts
@@ -28397,7 +28418,7 @@ ${c.output}`).join(`
28397
28418
  `);
28398
28419
  }
28399
28420
  buildReReviewPrompt(updatedDiff, previousFindings) {
28400
- const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.ruleId}: ${f.message}`).join(`
28421
+ const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.rule ?? "semantic"}: ${f.message}`).join(`
28401
28422
  `) : "(none)";
28402
28423
  return [
28403
28424
  "This is a follow-up re-review. Please review the updated diff below.",
@@ -28442,7 +28463,7 @@ ${c.output}`).join(`
28442
28463
  }
28443
28464
  buildReResolverPrompt(proposals, critiques, diffContext, previousFindings, resolverContext) {
28444
28465
  const framing = this.buildResolverFraming(resolverContext);
28445
- const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.ruleId}: ${f.message}`).join(`
28466
+ const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.rule ?? "semantic"}: ${f.message}`).join(`
28446
28467
  `) : "(none)";
28447
28468
  const proposalsSection = this.buildLabeledProposalsSection(proposals);
28448
28469
  const critiquesSection = this.buildLabeledCritiquesSection(critiques);
@@ -28655,6 +28676,58 @@ function tryParseLLMJson(text) {
28655
28676
  }
28656
28677
  }
28657
28678
 
28679
+ // src/prompts/builders/prior-iterations-builder.ts
28680
+ function buildPriorIterationsBlock(iterations) {
28681
+ if (iterations.length === 0)
28682
+ return "";
28683
+ const rows = iterations.map((iter) => {
28684
+ const strategies = iter.fixesApplied.map((fa) => fa.strategyName).join(", ") || "-";
28685
+ const files = iter.fixesApplied.flatMap((fa) => fa.targetFiles).join(", ") || "-";
28686
+ const outcome = iter.outcome;
28687
+ const findingSummary = formatFindingSummary(iter.findingsBefore, iter.findingsAfter);
28688
+ return `| ${iter.iterationNum} | ${strategies} | ${files} | ${outcome} | ${findingSummary} |`;
28689
+ });
28690
+ const header = "| # | Strategies run | Files touched | Outcome | Findings before \u2192 after |";
28691
+ const separator = "|---|----------------|---------------|---------|--------------------------|";
28692
+ const table = [header, separator, ...rows].join(`
28693
+ `);
28694
+ const hasUnchanged = iterations.some((i) => i.outcome === "unchanged");
28695
+ const unchangedNote = hasUnchanged ? `
28696
+ When outcome is "unchanged", the prior hypothesis is FALSIFIED \u2014 the change did not affect what was tested. Choose a different category before producing a new verdict. Do NOT repeat fixes listed above.` : "";
28697
+ return `## Prior Iterations \u2014 verdict required before new analysis
28698
+
28699
+ ${table}${unchangedNote}
28700
+
28701
+ `;
28702
+ }
28703
+ function formatFindingSummary(before, after) {
28704
+ const beforeStr = before.length === 0 ? "0" : formatFindingCount(before);
28705
+ const afterStr = after.length === 0 ? "0" : formatFindingCount(after);
28706
+ return `${beforeStr} \u2192 ${afterStr}`;
28707
+ }
28708
+ function formatFindingCount(findings) {
28709
+ const count = findings.length;
28710
+ const topCategory = mostFrequentCategory(findings);
28711
+ return topCategory !== null ? `${count} [${topCategory}]` : `${count}`;
28712
+ }
28713
+ function mostFrequentCategory(findings) {
28714
+ if (findings.length === 0)
28715
+ return null;
28716
+ const freq = new Map;
28717
+ for (const f of findings) {
28718
+ freq.set(f.category, (freq.get(f.category) ?? 0) + 1);
28719
+ }
28720
+ let top = null;
28721
+ let topCount = 0;
28722
+ for (const [cat, cnt] of freq) {
28723
+ if (cnt > topCount) {
28724
+ topCount = cnt;
28725
+ top = cat;
28726
+ }
28727
+ }
28728
+ return top;
28729
+ }
28730
+
28658
28731
  // src/prompts/builders/review-builder.ts
28659
28732
  class ReviewPromptBuilder {
28660
28733
  buildSemanticReviewPrompt(story, semanticConfig, options) {
@@ -28665,7 +28738,7 @@ class ReviewPromptBuilder {
28665
28738
  ${semanticConfig.rules.map((r, i) => `${i + 1}. ${r}`).join(`
28666
28739
  `)}
28667
28740
  ` : "";
28668
- const attemptContextBlock = buildAttemptContextBlock(options.priorFailures);
28741
+ const priorIterationsBlock = buildPriorIterationsBlock(options.priorSemanticIterations ?? []);
28669
28742
  let diffSection;
28670
28743
  if (options.mode === "ref") {
28671
28744
  diffSection = buildRefDiffSection(options.storyGitRef ?? "", options.stat ?? "", options.excludePatterns ?? []);
@@ -28681,7 +28754,7 @@ ${story.description}
28681
28754
 
28682
28755
  ### Acceptance Criteria
28683
28756
  ${acList}
28684
- ${customRulesBlock}${attemptContextBlock}${diffSection}
28757
+ ${customRulesBlock}${priorIterationsBlock}${diffSection}
28685
28758
  ${SEMANTIC_INSTRUCTIONS}
28686
28759
  ${SEMANTIC_OUTPUT_SCHEMA}`;
28687
28760
  return wrapJsonPrompt(core2);
@@ -28705,17 +28778,6 @@ Output ONLY a complete, valid JSON object. It must start with { and end with }.
28705
28778
  Schema: {"passed": boolean, "findings": [{"severity": string, "category": string, "file": string, "line": number, "issue": string, "suggestion": string}]}`;
28706
28779
  }
28707
28780
  }
28708
- function buildAttemptContextBlock(priorFailures) {
28709
- if (!priorFailures || priorFailures.length === 0)
28710
- return "";
28711
- const attemptNumber = priorFailures.length + 1;
28712
- const stages = priorFailures.map((f) => f.stage).join(", ");
28713
- return `## Attempt Context
28714
- This is escalation attempt ${attemptNumber}. Prior attempts failed at stages: ${stages}.
28715
- The diff shows the NET result of all changes since story start \u2014 verify against the current codebase state.
28716
-
28717
- `;
28718
- }
28719
28781
  function buildEmbeddedDiffSection(diff) {
28720
28782
  return `## Git Diff (production code only \u2014 test files excluded)
28721
28783
 
@@ -28769,7 +28831,7 @@ Do NOT flag: style issues, naming conventions, import ordering, file length, or
28769
28831
  "passed": boolean,
28770
28832
  "findings": [
28771
28833
  {
28772
- "severity": "error" | "warn" | "info" | "unverifiable",
28834
+ "severity": "error" | "warning" | "info" | "unverifiable",
28773
28835
  "file": "path/to/file",
28774
28836
  "line": 42,
28775
28837
  "issue": "description of the issue",
@@ -28853,25 +28915,6 @@ ${testInventory.addedTestFiles.map((f) => ` - ${f}`).join(`
28853
28915
  \`\`\`diff
28854
28916
  ${diff}\`\`\`
28855
28917
 
28856
- `;
28857
- }
28858
- function buildPriorFindingsBlock(round, findings) {
28859
- const rows = findings.map((f) => {
28860
- const location = f.line !== undefined ? `${f.file}:${f.line}` : f.file;
28861
- const category = f.category ?? "\u2014";
28862
- return `| ${f.severity} | ${category} | ${location} | ${f.issue} |`;
28863
- }).join(`
28864
- `);
28865
- return `## Prior Adversarial Findings \u2014 Round ${round}
28866
-
28867
- The following issues were flagged in the previous adversarial review round.
28868
- **Verdict on each of these first \u2014 determine whether each has been fixed, partially addressed, or is still present.**
28869
- Then continue scanning for new issues.
28870
-
28871
- | Severity | Category | Location | Issue |
28872
- |:---------|:---------|:---------|:------|
28873
- ${rows}
28874
-
28875
28918
  `;
28876
28919
  }
28877
28920
 
@@ -28882,14 +28925,13 @@ class AdversarialReviewPromptBuilder {
28882
28925
  diff,
28883
28926
  storyGitRef,
28884
28927
  stat,
28885
- priorFailures,
28886
28928
  testInventory,
28887
28929
  excludePatterns,
28888
28930
  testGlobs,
28889
28931
  refExcludePatterns,
28890
- priorAdversarialFindings
28932
+ priorAdversarialIterations
28891
28933
  } = options;
28892
- const priorFindingsBlock = priorAdversarialFindings && priorAdversarialFindings.findings.length > 0 ? buildPriorFindingsBlock(priorAdversarialFindings.round, priorAdversarialFindings.findings) : "";
28934
+ const priorFindingsBlock = buildPriorIterationsBlock(priorAdversarialIterations ?? []);
28893
28935
  const storyBlock = `## Story Under Review
28894
28936
 
28895
28937
  **ID:** ${story.id}
@@ -28907,7 +28949,6 @@ ${config2.rules.map((r) => `- ${r}`).join(`
28907
28949
  `)}
28908
28950
 
28909
28951
  ` : "";
28910
- const attemptBlock = buildAttemptContextBlock(priorFailures);
28911
28952
  let diffBlock;
28912
28953
  if (mode === "ref" && storyGitRef) {
28913
28954
  diffBlock = buildAdversarialRefDiffSection(storyGitRef, stat, excludePatterns ?? [], testGlobs ?? [], refExcludePatterns ?? []);
@@ -28936,7 +28977,6 @@ ${config2.rules.map((r) => `- ${r}`).join(`
28936
28977
  `
28937
28978
 
28938
28979
  `,
28939
- attemptBlock,
28940
28980
  diffBlock
28941
28981
  ].join("");
28942
28982
  }
@@ -28998,7 +29038,7 @@ Respond with ONLY a JSON object \u2014 no preamble, no explanation outside the J
28998
29038
  "passed": true | false,
28999
29039
  "findings": [
29000
29040
  {
29001
- "severity": "error" | "warn" | "info" | "unverifiable",
29041
+ "severity": "error" | "warning" | "info" | "unverifiable",
29002
29042
  "category": "input" | "error-path" | "abandonment" | "test-gap" | "convention" | "assumption",
29003
29043
  "file": "relative/path/to/file.ts",
29004
29044
  "line": 42,
@@ -29011,15 +29051,13 @@ Respond with ONLY a JSON object \u2014 no preamble, no explanation outside the J
29011
29051
 
29012
29052
  Severity guide:
29013
29053
  - \`"error"\`: confident this will cause real failure or regression
29014
- - \`"warn"\`: fragile or incomplete but may ship without immediate breakage
29054
+ - \`"warning"\`: fragile or incomplete but may ship without immediate breakage
29015
29055
  - \`"info"\`: noteworthy but not actionable as a blocker
29016
29056
  - \`"unverifiable"\`: suspect problem but couldn't confirm from available artifacts
29017
29057
 
29018
- \`passed\` must be \`false\` if any finding has severity \`"error"\` or \`"warn"\`.
29058
+ \`passed\` must be \`false\` if any finding has severity \`"error"\` or \`"warning"\`.
29019
29059
  \`passed\` may be \`true\` with findings if all findings are \`"info"\` or \`"unverifiable"\`.`;
29020
- var init_adversarial_review_builder = __esm(() => {
29021
- init_review_builder();
29022
- });
29060
+ var init_adversarial_review_builder = () => {};
29023
29061
 
29024
29062
  // src/prompts/builders/acceptance-builder.ts
29025
29063
  class AcceptancePromptBuilder {
@@ -29036,9 +29074,6 @@ ${f.content}
29036
29074
  \`\`\``).join(`
29037
29075
 
29038
29076
  `)}` : "";
29039
- const prevFailureSection = p.previousFailure && p.previousFailure.length > 0 ? `
29040
-
29041
- Previous test failed because: ${p.previousFailure}` : "";
29042
29077
  return `You are a senior test engineer. Your task is to generate a complete acceptance test file for the "${p.featureName}" feature.
29043
29078
 
29044
29079
  ${STEP1}
@@ -29052,7 +29087,7 @@ ${STEP3_HEADER}
29052
29087
  ${STEP3_SHARED_RULES}
29053
29088
  - **File output (REQUIRED)**: Write the acceptance test file DIRECTLY to the path shown below. Do NOT output the test code in your response. After writing the file, reply with a brief confirmation.
29054
29089
  - **Path anchor (CRITICAL)**: Write the test file to this exact path: \`${p.targetTestFilePath}\`. Import from package sources using relative paths like \`../../../src/...\` (3 levels up from \`.nax/features/<name>/\` to the package root).
29055
- - **Process cwd**: When spawning child processes to invoke a CLI or binary, set the working directory to the **package root** (\`join(import.meta.dir, "../../..")\`) as your default \u2014 unless your Step 2 exploration reveals the CLI uses a different working directory convention (e.g. reads config from \`~/.config/\`, or resolves paths relative to a flag value). Always check how the CLI resolves file paths before assuming.${implSection}${prevFailureSection}`;
29090
+ - **Process cwd**: When spawning child processes to invoke a CLI or binary, set the working directory to the **package root** (\`join(import.meta.dir, "../../..")\`) as your default \u2014 unless your Step 2 exploration reveals the CLI uses a different working directory convention (e.g. reads config from \`~/.config/\`, or resolves paths relative to a flag value). Always check how the CLI resolves file paths before assuming.${implSection}`;
29056
29091
  }
29057
29092
  buildGeneratorFromSpecPrompt(p) {
29058
29093
  return `You are a senior test engineer. Your task is to generate a complete acceptance test file for the "${p.featureName}" feature.
@@ -29070,6 +29105,21 @@ ${STEP3_SHARED_RULES}
29070
29105
  - **Path anchor (CRITICAL)**: This test file will be saved at \`<repo-root>/.nax/features/${p.featureName}/${p.resolvedTestPath}\` and will ALWAYS run from the repo root. The repo root is exactly 3 \`../\` levels above \`__dirname\`: \`join(__dirname, '..', '..', '..')\`. For monorepo projects, navigate into packages from root (e.g. \`join(root, 'apps/api/src')\`).`;
29071
29106
  }
29072
29107
  buildDiagnosisPromptTemplate(p) {
29108
+ const responseSchema = `{
29109
+ "verdict": "source_bug" | "test_bug" | "both",
29110
+ "reasoning": "Your analysis explaining why this is a source_bug, test_bug, or both",
29111
+ "confidence": 0.0-1.0,
29112
+ "findings": [
29113
+ {
29114
+ "fixTarget": "source" | "test",
29115
+ "category": "stdout-capture" | "ac-mismatch" | "framework-misuse" | "missing-impl" | "import-path" | "hook-failure" | "test-runner-error" | "stub-test" | "other",
29116
+ "file": "optional/path/relative/to/workdir.ts",
29117
+ "line": 0,
29118
+ "message": "Concrete description of the issue",
29119
+ "suggestion": "Optional concrete fix suggestion"
29120
+ }
29121
+ ]
29122
+ }`;
29073
29123
  return `You are a debugging expert. An acceptance test has failed.
29074
29124
 
29075
29125
  TASK: Diagnose whether the failure is due to a bug in the SOURCE CODE or a bug in the TEST CODE.
@@ -29084,15 +29134,9 @@ ${p.testFileContent}
29084
29134
 
29085
29135
  SOURCE FILES (auto-detected from imports, up to ${p.maxFileLines} lines each):
29086
29136
  ${p.sourceFilesSection}
29087
- ${p.verdictSection}${p.previousFailureSection}
29137
+ ${p.verdictSection}
29088
29138
  Respond with ONLY a JSON object in this exact format (no markdown, no extra text):
29089
- {
29090
- "verdict": "source_bug" | "test_bug" | "both",
29091
- "reasoning": "Your analysis explaining why this is a source_bug, test_bug, or both",
29092
- "confidence": 0.0-1.0,
29093
- "testIssues": ["Issue in test code if any"],
29094
- "sourceIssues": ["Issue in source code if any"]
29095
- }`;
29139
+ ${responseSchema}`;
29096
29140
  }
29097
29141
  buildSourceFixPrompt(p) {
29098
29142
  let prompt = `ACCEPTANCE TEST FAILURE:
@@ -29104,6 +29148,8 @@ ${p.testOutput}
29104
29148
  ${p.diagnosisReasoning}
29105
29149
 
29106
29150
  `;
29151
+ if (p.priorIterationsBlock)
29152
+ prompt += p.priorIterationsBlock;
29107
29153
  prompt += `ACCEPTANCE TEST FILE: ${p.acceptanceTestPath}
29108
29154
 
29109
29155
  `;
@@ -29167,17 +29213,12 @@ ${f.content}
29167
29213
  SEMANTIC VERDICTS:
29168
29214
  ${p.semanticVerdicts.map((v) => `- ${v.storyId}: ${v.passed ? "likely test bug (semantic review confirmed AC implementation)" : "unconfirmed"}`).join(`
29169
29215
  `)}
29170
- ` : "";
29171
- const previousFailureSection = p.previousFailure && p.previousFailure.length > 0 ? `
29172
- PREVIOUS FIX ATTEMPTS:
29173
- ${p.previousFailure}
29174
29216
  ` : "";
29175
29217
  return this.buildDiagnosisPromptTemplate({
29176
29218
  truncatedOutput,
29177
29219
  testFileContent: p.testFileContent,
29178
29220
  sourceFilesSection,
29179
29221
  verdictSection,
29180
- previousFailureSection,
29181
29222
  maxFileLines: MAX_FILE_LINES
29182
29223
  });
29183
29224
  }
@@ -29277,12 +29318,8 @@ ${p.testOutput}
29277
29318
  ${p.diagnosisReasoning}
29278
29319
 
29279
29320
  `;
29280
- if (p.previousFailure && p.previousFailure.length > 0) {
29281
- prompt += `PREVIOUS FAILED ATTEMPTS:
29282
- ${p.previousFailure}
29283
-
29284
- `;
29285
- }
29321
+ if (p.priorIterationsBlock)
29322
+ prompt += p.priorIterationsBlock;
29286
29323
  prompt += `ACCEPTANCE TEST FILE: ${p.acceptanceTestPath}
29287
29324
 
29288
29325
  `;
@@ -31876,8 +31913,7 @@ var init_acceptance_generate = __esm(() => {
31876
31913
  criteriaList: input.criteriaList,
31877
31914
  frameworkOverrideLine: input.frameworkOverrideLine,
31878
31915
  targetTestFilePath: input.targetTestFilePath,
31879
- implementationContext: input.implementationContext,
31880
- previousFailure: input.previousFailure
31916
+ implementationContext: input.implementationContext
31881
31917
  });
31882
31918
  return {
31883
31919
  role: { id: "role", content: "", overridable: false },
@@ -31990,8 +32026,7 @@ var init_acceptance_diagnose = __esm(() => {
31990
32026
  testOutput: input.testOutput,
31991
32027
  testFileContent: input.testFileContent,
31992
32028
  sourceFiles: input.sourceFiles,
31993
- semanticVerdicts: input.semanticVerdicts,
31994
- previousFailure: input.previousFailure
32029
+ semanticVerdicts: input.semanticVerdicts
31995
32030
  });
31996
32031
  return {
31997
32032
  role: { id: "role", content: "", overridable: false },
@@ -32001,13 +32036,26 @@ var init_acceptance_diagnose = __esm(() => {
32001
32036
  parse(output, _input, _ctx) {
32002
32037
  const raw = tryParseLLMJson(output);
32003
32038
  if (raw && typeof raw.verdict === "string" && typeof raw.reasoning === "string" && typeof raw.confidence === "number") {
32004
- return {
32039
+ const base = {
32005
32040
  verdict: raw.verdict,
32006
32041
  reasoning: raw.reasoning,
32007
- confidence: raw.confidence,
32008
- testIssues: Array.isArray(raw.testIssues) ? raw.testIssues : undefined,
32009
- sourceIssues: Array.isArray(raw.sourceIssues) ? raw.sourceIssues : undefined
32042
+ confidence: raw.confidence
32010
32043
  };
32044
+ if (Array.isArray(raw.findings) && raw.findings.length > 0) {
32045
+ const findings = raw.findings.filter((f) => typeof f.message === "string" && typeof f.category === "string").map((f) => ({
32046
+ source: "acceptance-diagnose",
32047
+ severity: typeof f.severity === "string" ? f.severity : "error",
32048
+ category: String(f.category),
32049
+ message: String(f.message),
32050
+ fixTarget: f.fixTarget ?? undefined,
32051
+ file: typeof f.file === "string" ? f.file : undefined,
32052
+ line: typeof f.line === "number" ? f.line : undefined,
32053
+ suggestion: typeof f.suggestion === "string" ? f.suggestion : undefined
32054
+ }));
32055
+ if (findings.length > 0)
32056
+ return { ...base, findings };
32057
+ }
32058
+ return base;
32011
32059
  }
32012
32060
  return FALLBACK;
32013
32061
  }
@@ -32030,6 +32078,7 @@ var init_acceptance_fix = __esm(() => {
32030
32078
  const prompt = new AcceptancePromptBuilder().buildSourceFixPrompt({
32031
32079
  testOutput: input.testOutput,
32032
32080
  diagnosisReasoning: input.diagnosisReasoning,
32081
+ priorIterationsBlock: input.priorIterationsBlock,
32033
32082
  acceptanceTestPath: input.acceptanceTestPath,
32034
32083
  testFileContent: input.testFileContent
32035
32084
  });
@@ -32053,10 +32102,10 @@ var init_acceptance_fix = __esm(() => {
32053
32102
  const prompt = new AcceptancePromptBuilder().buildTestFixPrompt({
32054
32103
  testOutput: input.testOutput,
32055
32104
  diagnosisReasoning: input.diagnosisReasoning,
32105
+ priorIterationsBlock: input.priorIterationsBlock,
32056
32106
  failedACs: input.failedACs,
32057
32107
  acceptanceTestPath: input.acceptanceTestPath,
32058
- testFileContent: input.testFileContent ?? "",
32059
- previousFailure: input.previousFailure
32108
+ testFileContent: input.testFileContent ?? ""
32060
32109
  });
32061
32110
  return {
32062
32111
  role: { id: "role", content: "", overridable: false },
@@ -32069,32 +32118,120 @@ var init_acceptance_fix = __esm(() => {
32069
32118
  };
32070
32119
  });
32071
32120
 
32072
- // src/review/truncation.ts
32073
- function looksLikeTruncatedJson(raw) {
32074
- return raw.trimEnd().length >= MAX_AGENT_OUTPUT_CHARS - 100;
32075
- }
32076
- var init_truncation = __esm(() => {
32077
- init_adapter();
32121
// src/review/severity.ts
// Rank table used to compare review-finding severities against a blocking
// threshold (see isBlockingSeverity): higher rank = more severe. Note this is
// a different scale from SEVERITY_ORDER in src/findings/types.ts — here
// several levels share a bucket on purpose.
var SEVERITY_RANK;
var init_severity = __esm(() => {
  // "info" and "unverifiable" share the lowest rank; "low" and "warning" sit
  // one step above. Populated lazily via the bundler's __esm wrapper, so the
  // table only exists after init_severity() has run.
  SEVERITY_RANK = {
    info: 0,
    unverifiable: 0,
    low: 1,
    warning: 1,
    error: 2,
    critical: 3
  };
});
32079
32133
 
32080
- // src/operations/types.ts
32081
- function parseLlmReviewShape(raw) {
32082
- if (typeof raw !== "object" || raw === null)
32134
// src/review/semantic-helpers.ts
/**
 * Validate a parsed LLM review reply against the expected shape.
 * Returns a narrowed { passed, findings } object when `parsed` is a non-null
 * object with a boolean `passed` and an array `findings`; otherwise null.
 */
function validateLLMShape(parsed) {
  if (typeof parsed !== "object" || parsed === null) return null;
  const { passed, findings } = parsed;
  if (typeof passed !== "boolean" || !Array.isArray(findings)) return null;
  return { passed, findings };
}
32145
/**
 * Parse a raw LLM reply string into the { passed, findings } review shape.
 * Returns null when the reply is not parseable JSON or does not match the
 * expected shape; never throws (any tryParseLLMJson failure is swallowed
 * into the null fallback on purpose — callers treat null as "fail open").
 */
function parseLLMResponse(raw) {
  try {
    return validateLLMShape(tryParseLLMJson(raw));
  } catch {
    return null;
  }
}
32152
/**
 * Render semantic-review findings as a human-readable re-prompt block:
 * one "[severity] file:line — issue" header per finding followed by a
 * "Suggestion:" line, entries separated by newlines.
 * NOTE(review): leading whitespace inside this template literal was
 * reconstructed from whitespace-mangled input — confirm the exact
 * indentation of the "Suggestion:" line against the original source.
 */
function formatFindings(findings) {
  return findings.map((f) => `[${f.severity}] ${f.file}:${f.line} \u2014 ${f.issue}
Suggestion: ${f.suggestion}`).join(`
`);
}
32157
/**
 * Map a loosely-typed severity string onto the canonical set.
 * "warn" is an accepted alias for "warning"; anything unrecognized
 * (including undefined) degrades to "info".
 */
function normalizeSeverity(sev) {
  if (sev === "warn") return "warning";
  const canonical = ["critical", "error", "warning", "info", "low", "unverifiable"];
  return canonical.includes(sev) ? sev : "info";
}
32164
/**
 * True when `sev` meets or exceeds `threshold` on the SEVERITY_RANK scale.
 * Unknown severities rank 0; an unknown threshold defaults to rank 2
 * (the "error" rank), so garbage thresholds fail closed at "error".
 */
function isBlockingSeverity(sev, threshold = "error") {
  return (SEVERITY_RANK[sev] ?? 0) >= (SEVERITY_RANK[threshold] ?? 2);
}
/**
 * In "ref" diff mode, downgrade error-or-worse findings that lack verified
 * evidence to "unverifiable" so they cannot block. Any other diff mode
 * passes the findings through untouched.
 */
function sanitizeRefModeFindings(findings, diffMode) {
  if (diffMode !== "ref")
    return findings;
  return findings.map((finding) => needsDowngradeForMissingEvidence(finding) ? downgradeToUnverifiable(finding) : finding);
}
/**
 * Downgrade candidates: findings ranked at least "error" that either
 * self-admit they could not verify (phrase match) or carry no verifiedBy
 * evidence. Sub-error findings are never downgraded.
 */
function needsDowngradeForMissingEvidence(finding) {
  if ((SEVERITY_RANK[finding.severity] ?? 0) < SEVERITY_RANK.error)
    return false;
  return mentionsUnverifiedSource(finding) || !hasVerifiedEvidence(finding);
}
/**
 * Case-insensitive scan of the finding's issue + suggestion text for
 * "cannot verify"-style phrases (see UNVERIFIED_FINDING_PATTERNS below).
 */
function mentionsUnverifiedSource(finding) {
  const text = `${finding.issue} ${finding.suggestion}`.toLowerCase();
  return UNVERIFIED_FINDING_PATTERNS.some((pattern) => text.includes(pattern));
}
32181
/**
 * A finding carries verified evidence when its verifiedBy record names a
 * file and an observation, both non-blank after trimming.
 */
function hasVerifiedEvidence(finding) {
  const evidence = finding.verifiedBy;
  const fileText = evidence?.file?.trim();
  if (!fileText) return false;
  return Boolean(evidence.observed?.trim());
}
/**
 * Copy a finding with its severity forced to "unverifiable"; the input
 * finding is left untouched.
 */
function downgradeToUnverifiable(finding) {
  const downgraded = { ...finding };
  downgraded.severity = "unverifiable";
  return downgraded;
}
32191
/**
 * Convert one semantic-review LLM finding into the internal Finding record
 * used by the findings pipeline. Severity is normalized ("warn" → "warning",
 * unknown → "info"); category is intentionally blank for semantic review;
 * verifiedBy evidence, when present, is preserved under meta.
 */
function llmFindingToFinding(f) {
  return {
    source: "semantic-review",
    severity: normalizeSeverity(f.severity),
    category: "",
    file: f.file,
    line: f.line,
    message: f.issue,
    suggestion: f.suggestion ?? undefined,
    fixTarget: "source",
    meta: f.verifiedBy ? { verifiedBy: f.verifiedBy } : undefined
  };
}
/** Bulk form of llmFindingToFinding. */
function toReviewFindings(findings) {
  return findings.map(llmFindingToFinding);
}
// Lower-cased phrases that mark a finding as self-admittedly unverified;
// consumed by mentionsUnverifiedSource (the probe text is lower-cased first).
var UNVERIFIED_FINDING_PATTERNS;
var init_semantic_helpers = __esm(() => {
  init_severity();
  UNVERIFIED_FINDING_PATTERNS = [
    "cannot verify",
    "can't verify",
    "from diff alone",
    "missing from diff",
    "not found in diff",
    "not present in diff",
    "does not appear in diff"
  ];
});
32220
+
32221
+ // src/review/truncation.ts
32222
+ function looksLikeTruncatedJson(raw) {
32223
+ return raw.trimEnd().length >= MAX_AGENT_OUTPUT_CHARS - 100;
32224
+ }
32225
+ var init_truncation = __esm(() => {
32226
+ init_adapter();
32227
+ });
32091
32228
 
32092
32229
  // src/operations/semantic-review.ts
32093
32230
  var FAIL_OPEN, semanticReviewHopBody = async (initialPrompt, ctx) => {
32094
32231
  const first = await ctx.send(initialPrompt);
32095
32232
  const isTruncated = looksLikeTruncatedJson(first.output);
32096
32233
  const parsed = tryParseLLMJson(first.output);
32097
- if (!isTruncated && parsed && parseLlmReviewShape(parsed))
32234
+ if (!isTruncated && parsed && validateLLMShape(parsed))
32098
32235
  return first;
32099
32236
  const retryPrompt = isTruncated ? ReviewPromptBuilder.jsonRetryCondensed({ blockingThreshold: ctx.input.blockingThreshold }) : ReviewPromptBuilder.jsonRetry();
32100
32237
  if (isTruncated) {
@@ -32114,6 +32251,7 @@ var init_semantic_review = __esm(() => {
32114
32251
  init_config();
32115
32252
  init_logger2();
32116
32253
  init_prompts();
32254
+ init_semantic_helpers();
32117
32255
  init_truncation();
32118
32256
  FAIL_OPEN = { passed: true, findings: [], failOpen: true };
32119
32257
  semanticReviewOp = {
@@ -32131,7 +32269,7 @@ var init_semantic_review = __esm(() => {
32131
32269
  diff: input.diff,
32132
32270
  storyGitRef: input.storyGitRef,
32133
32271
  stat: input.stat,
32134
- priorFailures: input.priorFailures,
32272
+ priorSemanticIterations: input.priorSemanticIterations,
32135
32273
  excludePatterns: input.excludePatterns
32136
32274
  });
32137
32275
  const content = input.featureCtxBlock ? `${input.featureCtxBlock}${base}` : base;
@@ -32142,9 +32280,9 @@ var init_semantic_review = __esm(() => {
32142
32280
  },
32143
32281
  parse(output, _input, _ctx) {
32144
32282
  const raw = tryParseLLMJson(output);
32145
- const parsed = parseLlmReviewShape(raw);
32283
+ const parsed = validateLLMShape(raw);
32146
32284
  if (parsed)
32147
- return parsed;
32285
+ return { passed: parsed.passed, findings: parsed.findings };
32148
32286
  if (/"passed"\s*:\s*false/.test(output))
32149
32287
  return { passed: false, findings: [], looksLikeFail: true };
32150
32288
  return FAIL_OPEN;
@@ -32152,12 +32290,54 @@ var init_semantic_review = __esm(() => {
32152
32290
  };
32153
32291
  });
32154
32292
 
32293
// src/review/adversarial-helpers.ts
// Near-duplicate of the semantic-review helpers above: the bundler emits one
// copy per source module, hence the "2"-suffixed names.
/**
 * Validate a parsed adversarial-review reply: non-null object with boolean
 * `passed` and array `findings`, or null when malformed.
 */
function validateAdversarialShape(parsed) {
  if (typeof parsed !== "object" || parsed === null)
    return null;
  const obj = parsed;
  if (typeof obj.passed !== "boolean")
    return null;
  if (!Array.isArray(obj.findings))
    return null;
  return { passed: obj.passed, findings: obj.findings };
}
/**
 * Render adversarial findings as "[severity][category] file:line — issue"
 * headers plus a "Suggestion:" line per entry.
 * NOTE(review): template-literal indentation reconstructed from mangled
 * input — confirm against the original source.
 */
function formatFindings2(findings) {
  return findings.map((f) => `[${f.severity}][${f.category}] ${f.file}:${f.line} \u2014 ${f.issue}
Suggestion: ${f.suggestion}`).join(`
`);
}
/** Canonicalize severity: "warn" → "warning", unrecognized → "info". */
function normalizeSeverity2(sev) {
  if (sev === "warn")
    return "warning";
  if (sev === "critical" || sev === "error" || sev === "warning" || sev === "info" || sev === "low" || sev === "unverifiable")
    return sev;
  return "info";
}
/**
 * True when `sev` meets/exceeds `threshold` per SEVERITY_RANK
 * (unknown severity → rank 0, unknown threshold → rank 2).
 */
function isBlockingSeverity2(sev, threshold = "error") {
  return (SEVERITY_RANK[sev] ?? 0) >= (SEVERITY_RANK[threshold] ?? 2);
}
/**
 * Convert adversarial-review LLM findings to internal Finding records.
 * "test-gap" findings are aimed at the test suite (fixTarget "test");
 * all other categories leave fixTarget undefined.
 */
function toAdversarialReviewFindings(findings) {
  return findings.map((f) => ({
    source: "adversarial-review",
    severity: normalizeSeverity2(f.severity),
    category: f.category,
    file: f.file,
    line: f.line,
    message: f.issue,
    suggestion: f.suggestion,
    fixTarget: f.category === "test-gap" ? "test" : undefined
  }));
}
var init_adversarial_helpers = __esm(() => {
  init_severity();
});
32334
+
32155
32335
  // src/operations/adversarial-review.ts
32156
32336
  var FAIL_OPEN2, adversarialReviewHopBody = async (initialPrompt, ctx) => {
32157
32337
  const first = await ctx.send(initialPrompt);
32158
32338
  const isTruncated = looksLikeTruncatedJson(first.output);
32159
32339
  const parsed = tryParseLLMJson(first.output);
32160
- if (!isTruncated && parsed && parseLlmReviewShape(parsed))
32340
+ if (!isTruncated && parsed && validateAdversarialShape(parsed))
32161
32341
  return first;
32162
32342
  const retryPrompt = isTruncated ? ReviewPromptBuilder.jsonRetryCondensed({ blockingThreshold: ctx.input.blockingThreshold }) : ReviewPromptBuilder.jsonRetry();
32163
32343
  if (isTruncated) {
@@ -32177,6 +32357,7 @@ var init_adversarial_review = __esm(() => {
32177
32357
  init_config();
32178
32358
  init_logger2();
32179
32359
  init_prompts();
32360
+ init_adversarial_helpers();
32180
32361
  init_truncation();
32181
32362
  FAIL_OPEN2 = { passed: true, findings: [], failOpen: true };
32182
32363
  adversarialReviewOp = {
@@ -32194,12 +32375,11 @@ var init_adversarial_review = __esm(() => {
32194
32375
  diff: input.diff,
32195
32376
  storyGitRef: input.storyGitRef,
32196
32377
  stat: input.stat,
32197
- priorFailures: input.priorFailures,
32198
32378
  testInventory: input.testInventory,
32199
32379
  excludePatterns: input.excludePatterns,
32200
32380
  testGlobs: input.testGlobs,
32201
32381
  refExcludePatterns: input.refExcludePatterns,
32202
- priorAdversarialFindings: input.priorAdversarialFindings
32382
+ priorAdversarialIterations: input.priorAdversarialIterations
32203
32383
  });
32204
32384
  const content = input.featureCtxBlock ? `${input.featureCtxBlock}${base}` : base;
32205
32385
  return {
@@ -32209,9 +32389,9 @@ var init_adversarial_review = __esm(() => {
32209
32389
  },
32210
32390
  parse(output, _input, _ctx) {
32211
32391
  const raw = tryParseLLMJson(output);
32212
- const parsed = parseLlmReviewShape(raw);
32392
+ const parsed = validateAdversarialShape(raw);
32213
32393
  if (parsed)
32214
- return parsed;
32394
+ return { passed: parsed.passed, findings: parsed.findings };
32215
32395
  if (/"passed"\s*:\s*false/.test(output))
32216
32396
  return { passed: false, findings: [], looksLikeFail: true };
32217
32397
  return FAIL_OPEN2;
@@ -32225,6 +32405,55 @@ var init_rectify = __esm(() => {
32225
32405
  init_prompts();
32226
32406
  });
32227
32407
 
32408
// src/operations/autofix-implementer.ts
// "run" operation that hands failed review checks to the implementer agent
// for source-code rectification.
var implementerRectifyOp;
var init_autofix_implementer = __esm(() => {
  init_config();
  init_prompts();
  implementerRectifyOp = {
    kind: "run",
    name: "autofix-implementer",
    stage: "rectification",
    // Fresh session: no conversational state carried over between fix runs.
    session: { role: "implementer", lifetime: "fresh" },
    config: autofixConfigSelector,
    // Build a rectification prompt from the failed checks and the story;
    // the role slot is intentionally empty and non-overridable here.
    build(input, _ctx) {
      const prompt = RectifierPromptBuilder.reviewRectification(input.failedChecks, input.story);
      return {
        role: { id: "role", content: "", overridable: false },
        task: { id: "task", content: prompt, overridable: false }
      };
    },
    // The fix is reported as applied unconditionally; an "UNRESOLVED: ..."
    // line in the agent output is surfaced as unresolvedReason.
    // NOTE(review): with both the m and s flags, `(.+)$` captures everything
    // from "UNRESOLVED:" to the end of the output, newlines included —
    // confirm multi-line capture is intended (drop `s` for one line only).
    parse(output, _input, _ctx) {
      const match = output.match(/^UNRESOLVED:\s*(.+)$/ms);
      return { applied: true, ...match ? { unresolvedReason: match[1]?.trim() } : {} };
    }
  };
});
32432
+
32433
// src/operations/autofix-test-writer.ts
// Counterpart of autofix-implementer that asks the test-writer agent to
// rectify test-targeted findings; the agent's textual output is not parsed.
var testWriterRectifyOp;
var init_autofix_test_writer = __esm(() => {
  init_config();
  init_prompts();
  testWriterRectifyOp = {
    kind: "run",
    name: "autofix-test-writer",
    stage: "rectification",
    // Fresh session per run, mirroring implementerRectifyOp.
    session: { role: "test-writer", lifetime: "fresh" },
    config: autofixConfigSelector,
    build(input, _ctx) {
      const prompt = RectifierPromptBuilder.testWriterRectification(input.failedChecks, input.story);
      return {
        role: { id: "role", content: "", overridable: false },
        task: { id: "task", content: prompt, overridable: false }
      };
    },
    // Always reports success; regressions are expected to resurface as
    // findings on the next validation pass of the fix cycle.
    parse(_output, _input, _ctx) {
      return { applied: true };
    }
  };
});
32456
+
32228
32457
  // src/operations/debate-propose.ts
32229
32458
  var debateProposeOp;
32230
32459
  var init_debate_propose = __esm(() => {
@@ -32645,7 +32874,8 @@ async function runTddSessionOp(op, options, beforeRef, contextBundle, sessionBin
32645
32874
  break;
32646
32875
  }
32647
32876
  const interactionBridge = includeContext ? buildInteractionBridge(interactionChain, { featureName, storyId: story.id, stage: "execution" }) : undefined;
32648
- return runTddSession(role, agent, agentManager, story, config2, workdir, tier, beforeRef, includeContext ? contextMarkdown : undefined, lite, skipIsolation, constitution, featureName, interactionBridge, projectDir, includeContext ? featureContextMarkdown : undefined, contextBundle, sessionBinding, abortSignal);
32877
+ const verifierLimitedContext = role === "verifier";
32878
+ return runTddSession(role, agent, agentManager, story, config2, workdir, tier, beforeRef, includeContext ? contextMarkdown : undefined, lite, skipIsolation, verifierLimitedContext ? undefined : constitution, featureName, interactionBridge, projectDir, includeContext ? featureContextMarkdown : undefined, verifierLimitedContext ? undefined : contextBundle, sessionBinding, abortSignal);
32649
32879
  }
32650
32880
  var writeTddTestOp, implementTddOp, verifyTddOp;
32651
32881
  var init_session_op = __esm(() => {
@@ -32764,6 +32994,8 @@ var init_operations = __esm(() => {
32764
32994
  init_semantic_review();
32765
32995
  init_adversarial_review();
32766
32996
  init_rectify();
32997
+ init_autofix_implementer();
32998
+ init_autofix_test_writer();
32767
32999
  init_debate_propose();
32768
33000
  init_debate_rebut();
32769
33001
  init_write_test();
@@ -39538,6 +39770,393 @@ var init_runner2 = __esm(() => {
39538
39770
  init_logger2();
39539
39771
  });
39540
39772
 
39773
// src/findings/types.ts
/**
 * Stable identity key for a Finding, used to diff finding sets between fix
 * iterations. Optional location fields collapse to null so that two findings
 * differing only in absent-vs-undefined fields compare equal.
 */
function findingKey(f) {
  const identity = [f.source, f.file ?? null, f.line ?? null, f.rule ?? null, f.message];
  return JSON.stringify(identity);
}
39777
// Total order over Finding severities (higher = more severe), frozen so the
// table cannot be mutated at runtime. Distinct from the review helpers'
// SEVERITY_RANK, which deliberately buckets several levels together.
var SEVERITY_ORDER;
var init_types7 = __esm(() => {
  SEVERITY_ORDER = Object.freeze({
    critical: 5,
    error: 4,
    warning: 3,
    info: 2,
    low: 1,
    unverifiable: 0
  });
});
39788
+
39789
// src/findings/path-utils.ts
import { relative as relative9, resolve as resolve14 } from "path";
/**
 * Rebase a diagnostic path so it is expressed relative to `workdir`.
 * Absolute paths (leading "/") are used as-is; relative paths are first
 * resolved against `cwd`.
 */
function rebaseToWorkdir(rawPath, cwd, workdir) {
  const absolute = rawPath.startsWith("/") ? rawPath : resolve14(cwd, rawPath);
  return relative9(workdir, absolute);
}
// No lazy state in this module; initializer is a no-op kept for uniformity.
var init_path_utils = () => {};
39798
+
39799
// src/findings/adapters/lint.ts
/**
 * Adapt one linter diagnostic to the unified Finding record.
 * Severity falls back to "warning" when the linter did not classify it.
 * The diagnostic path is rebased relative to workdir; workdir is also passed
 * as the cwd argument — presumably lint emits paths relative to workdir,
 * TODO(review) confirm against the lint runner.
 */
function lintDiagnosticToFinding(d, workdir, tool) {
  return {
    source: "lint",
    tool,
    severity: d.severity ?? "warning",
    category: "lint",
    rule: d.ruleId,
    file: rebaseToWorkdir(d.file, workdir, workdir),
    line: d.line,
    column: d.column,
    message: d.message
  };
}
var init_lint = __esm(() => {
  init_path_utils(); // brings rebaseToWorkdir's module online
});
39816
+
39817
// src/findings/adapters/plugin.ts
/**
 * Adapt a plugin-reported finding to the unified Finding record.
 * The plugin's own `source` becomes the tool name (defaulting to "plugin");
 * category defaults to "general"; a doc URL, when present, is kept in meta.
 * The workdir parameter is accepted for adapter-signature symmetry but unused.
 */
function pluginToFinding(rf, _workdir) {
  const { source, severity, category, ruleId, file, line, column, endLine, endColumn, message, url } = rf;
  return {
    source: "plugin",
    tool: source ?? "plugin",
    severity,
    category: category ?? "general",
    rule: ruleId,
    file,
    line,
    column,
    endLine,
    endColumn,
    message,
    meta: url ? { url } : undefined
  };
}
39834
+
39835
// src/findings/adapters/semantic-review.ts
/**
 * Adapt a semantic-review finding (already carrying a normalized severity)
 * to the unified Finding record. A missing category becomes the empty
 * string, and semantic-review fixes always target source code.
 */
function reviewFindingToFinding(f) {
  const { severity, category, ruleId, file, line, column, endLine, endColumn, message } = f;
  return {
    source: "semantic-review",
    severity,
    category: category ?? "",
    rule: ruleId,
    file,
    line,
    column,
    endLine,
    endColumn,
    message,
    fixTarget: "source"
  };
}
39851
+
39852
// src/findings/adapters/test-runner.ts
/**
 * Pull a short excerpt (up to 5 lines) out of test-runner output, starting
 * at the first line that mentions `acId` (case-insensitive). Falls back to
 * "<acId> failed" when the id is absent or the excerpt trims to nothing.
 */
function extractExcerpt(output, acId) {
  const fallback = `${acId} failed`;
  const needle = acId.toLowerCase();
  const allLines = output.split("\n");
  const start = allLines.findIndex((line) => line.toLowerCase().includes(needle));
  if (start < 0) return fallback;
  const stop = Math.min(allLines.length, start + 5);
  const excerpt = allLines.slice(start, stop).join("\n").trim();
  return excerpt === "" ? fallback : excerpt;
}
/**
 * Wrap a failed acceptance criterion as an error-level Finding whose message
 * is an excerpt of the runner output; fixes target source code.
 */
function acFailureToFinding(acId, output) {
  const message = extractExcerpt(output, acId);
  return {
    source: "test-runner",
    severity: "error",
    category: "assertion-failure",
    rule: acId,
    message,
    fixTarget: "source"
  };
}
/**
 * Map an acceptance sentinel to a Finding. "AC-HOOK" means a beforeAll /
 * afterAll hook timed out (error); any other sentinel means the runner
 * crashed before test bodies ran (critical). Both target the test suite.
 */
function acSentinelToFinding(sentinel, _output) {
  const isHookTimeout = sentinel === "AC-HOOK";
  return {
    source: "test-runner",
    severity: isHookTimeout ? "error" : "critical",
    category: isHookTimeout ? "hook-failure" : "test-runner-error",
    message: isHookTimeout ? "beforeAll/afterAll hook timed out" : "Test runner crashed before test bodies ran",
    fixTarget: "test"
  };
}
39891
+
39892
// src/findings/adapters/typecheck.ts
/**
 * Adapt one TypeScript compiler diagnostic to the unified Finding record.
 * The numeric tsc code is prefixed as "TS<code>" (e.g. TS2322); the path is
 * rebased relative to workdir, with workdir doubling as cwd — presumably
 * tsc emits paths relative to workdir, TODO(review) confirm.
 */
function tscDiagnosticToFinding(d, workdir) {
  return {
    source: "typecheck",
    tool: "tsc",
    severity: "error",
    category: "type-error",
    rule: d.code ? `TS${d.code}` : undefined,
    file: rebaseToWorkdir(d.file, workdir, workdir),
    line: d.line,
    column: d.column,
    message: d.message
  };
}
var init_typecheck = __esm(() => {
  init_path_utils(); // brings rebaseToWorkdir's module online
});
39909
+
39910
// src/findings/adapters/index.ts
// Barrel initializer for the adapter modules that hold lazy state; the
// plugin/semantic-review/test-runner adapters are plain functions and need
// no initialization.
var init_adapters = __esm(() => {
  init_lint();
  init_typecheck();
});
39915
+
39916
// src/findings/cycle.ts
/**
 * Classify how one source's findings changed across a fix iteration by
 * comparing identity keys (findingKey):
 *   - "resolved"  nothing remains afterwards (including the nothing-before case);
 *   - "regressed" any new finding appeared, whether or not old ones cleared;
 *   - "unchanged" identical sets;
 *   - "partial"   some findings cleared and none appeared.
 *
 * Simplified from the original: the leading `after empty && before empty`
 * branch was subsumed by the `after empty` check, and the two branches that
 * both returned "regressed" collapse to a single `hasNew` test.
 */
function classifySingleSource(before, after) {
  const beforeKeys = new Set(before.map(findingKey));
  const afterKeys = new Set(after.map(findingKey));
  // Everything gone (or nothing ever existed): resolved.
  if (afterKeys.size === 0)
    return "resolved";
  const hasNew = [...afterKeys].some((k) => !beforeKeys.has(k));
  const hasResolved = [...beforeKeys].some((k) => !afterKeys.has(k));
  // A new finding is a regression regardless of what else cleared.
  if (hasNew)
    return "regressed";
  if (!hasResolved)
    return "unchanged";
  return "partial";
}
39934
/**
 * Classify a whole fix iteration by comparing findings before/after,
 * grouped per source ("lint", "typecheck", ...):
 *   - "resolved"  both sets empty, or every source individually resolved;
 *   - "regressed" findings appeared where none existed at all before, or
 *     any single source regressed;
 *   - "regressed-different-source" the fix introduced findings from a
 *     source that previously reported none;
 *   - "unchanged" every source unchanged;
 *   - "partial"   anything in between (some progress, no regressions).
 */
function classifyOutcome(before, after) {
  if (before.length === 0 && after.length === 0)
    return "resolved";
  if (before.length === 0)
    return "regressed";
  const beforeSources = new Set(before.map((f) => f.source));
  const afterSources = new Set(after.map((f) => f.source));
  const newSources = [...afterSources].filter((s) => !beforeSources.has(s));
  if (newSources.length > 0)
    return "regressed-different-source";
  // Only sources seen before remain; classify each independently.
  const sources = [...beforeSources];
  const perSource = sources.map((source) => classifySingleSource(before.filter((f) => f.source === source), after.filter((f) => f.source === source)));
  if (perSource.every((o) => o === "resolved"))
    return "resolved";
  if (perSource.some((o) => o === "regressed"))
    return "regressed";
  if (perSource.every((o) => o === "unchanged"))
    return "unchanged";
  return "partial";
}
39954
/**
 * Pick the fix strategies relevant to the current cycle state: when findings
 * exist, strategies matching at least one finding; otherwise, when a verdict
 * is pending, strategies whose optional appliesToVerdict accepts it; else none.
 */
function selectActiveStrategies(strategies, findings, verdict) {
  if (findings.length > 0) {
    const matchesAnyFinding = (s) => findings.some((f) => s.appliesTo(f));
    return strategies.filter(matchesAnyFinding);
  }
  if (verdict === undefined) return [];
  return strategies.filter((s) => s.appliesToVerdict?.(verdict) ?? false);
}
/**
 * Choose which active strategies actually run this iteration: the first
 * exclusive strategy (no coRun mode counts as exclusive) wins alone;
 * otherwise all sequential co-run strategies run together.
 */
function selectExecutionGroup(active) {
  const exclusive = active.find((s) => !s.coRun || s.coRun === "exclusive");
  return exclusive ? [exclusive] : active.filter((s) => s.coRun === "co-run-sequential");
}
/** Count how many fixes a named strategy has applied across all iterations. */
function countStrategyAttempts(iterations, strategyName) {
  let total = 0;
  for (const iter of iterations) {
    total += iter.fixesApplied.filter((fa) => fa.strategyName === strategyName).length;
  }
  return total;
}
/** Count every fix applied across all iterations, regardless of strategy. */
function countTotalAttempts(iterations) {
  let total = 0;
  for (const iter of iterations) {
    total += iter.fixesApplied.length;
  }
  return total;
}
39975
/**
 * Drive a fix/validate loop until findings are resolved or a budget / exit
 * condition fires.
 *
 * Each pass: select strategies matching the current findings (or pending
 * verdict), enforce per-strategy and total attempt caps and bail predicates,
 * run the selected execution group's fix operations, re-validate, classify
 * the outcome, and append an iteration record. Mutates cycle.iterations and
 * cycle.findings in place.
 *
 * @param cycle     cycle state: strategies, findings, verdict, iterations,
 *                  config ({ maxAttemptsTotal, validatorRetries }), validate(ctx)
 * @param ctx       operation context; storyId/packageDir are used only for
 *                  log correlation here
 * @param cycleName label included in every log line
 * @param _deps     test seam: optional callOp/now overrides (default _cycleDeps)
 * @returns { iterations, finalFindings, exitReason, costUsd } plus
 *          exhaustedStrategy or bailDetail for the corresponding exits
 */
async function runFixCycle(cycle, ctx, cycleName, _deps = {}) {
  const logger = getSafeLogger();
  const doCallOp = _deps.callOp ?? _cycleDeps.callOp;
  const now = _deps.now ?? _cycleDeps.now;
  const storyId = ctx.storyId;
  const packageDir = ctx.packageDir;
  let totalCostUsd = 0;
  for (;;) {
    // Nothing left to fix and no pending verdict: done.
    if (cycle.findings.length === 0 && cycle.verdict === undefined) {
      return { iterations: cycle.iterations, finalFindings: [], exitReason: "resolved", costUsd: totalCostUsd };
    }
    const active = selectActiveStrategies(cycle.strategies, cycle.findings, cycle.verdict);
    // No strategy claims the remaining findings: surface them unresolved.
    if (active.length === 0) {
      logger?.info("findings.cycle", "cycle exited \u2014 no matching strategy", {
        storyId,
        packageDir,
        cycleName,
        reason: "no-strategy",
        findingsCount: cycle.findings.length
      });
      return {
        iterations: cycle.iterations,
        finalFindings: cycle.findings,
        exitReason: "no-strategy",
        costUsd: totalCostUsd
      };
    }
    // Per-strategy attempt budget: stop as soon as any active strategy has
    // used up its maxAttempts.
    for (const strategy of active) {
      const attempts = countStrategyAttempts(cycle.iterations, strategy.name);
      if (attempts >= strategy.maxAttempts) {
        logger?.info("findings.cycle", "cycle exited \u2014 strategy attempt cap reached", {
          storyId,
          packageDir,
          cycleName,
          reason: "max-attempts-per-strategy",
          exhaustedStrategy: strategy.name,
          attempts,
          maxAttempts: strategy.maxAttempts
        });
        return {
          iterations: cycle.iterations,
          finalFindings: cycle.findings,
          exitReason: "max-attempts-per-strategy",
          exhaustedStrategy: strategy.name,
          costUsd: totalCostUsd
        };
      }
    }
    // Global attempt budget across all strategies.
    const totalAttempts = countTotalAttempts(cycle.iterations);
    if (totalAttempts >= cycle.config.maxAttemptsTotal) {
      logger?.info("findings.cycle", "cycle exited \u2014 total attempt cap reached", {
        storyId,
        packageDir,
        cycleName,
        reason: "max-attempts-total",
        totalAttempts,
        maxAttemptsTotal: cycle.config.maxAttemptsTotal
      });
      return {
        iterations: cycle.iterations,
        finalFindings: cycle.findings,
        exitReason: "max-attempts-total",
        costUsd: totalCostUsd
      };
    }
    // Strategy-specific bail predicates (e.g. repeated identical failures).
    for (const strategy of active) {
      const bailReason = strategy.bailWhen?.(cycle.iterations) ?? null;
      if (bailReason !== null) {
        logger?.info("findings.cycle", "cycle exited \u2014 bail predicate fired", {
          storyId,
          packageDir,
          cycleName,
          reason: "bail-when",
          strategyName: strategy.name,
          bailDetail: bailReason
        });
        return {
          iterations: cycle.iterations,
          finalFindings: cycle.findings,
          exitReason: "bail-when",
          bailDetail: bailReason,
          costUsd: totalCostUsd
        };
      }
    }
    // Run either the single exclusive strategy or the sequential co-run set,
    // each seeing only the findings it applies to.
    const group = selectExecutionGroup(active);
    const startedAt = now();
    const findingsBefore = [...cycle.findings];
    const fixesApplied = [];
    for (const strategy of group) {
      const relevantFindings = findingsBefore.filter((f) => strategy.appliesTo(f));
      const input = strategy.buildInput(relevantFindings, cycle.iterations, ctx);
      const output = await doCallOp(ctx, strategy.fixOp, input);
      const extracted = strategy.extractApplied?.(output, input) ?? {};
      fixesApplied.push({
        strategyName: strategy.name,
        op: strategy.fixOp.name,
        targetFiles: extracted.targetFiles ?? [],
        summary: extracted.summary ?? "",
        costUsd: extracted.costUsd
      });
    }
    // Re-validate, retrying validator crashes up to config.validatorRetries
    // before giving up with "validator-error".
    let findingsAfter;
    let validatorAttempt = 0;
    for (;;) {
      try {
        findingsAfter = await cycle.validate(ctx);
        break;
      } catch (err) {
        if (validatorAttempt >= cycle.config.validatorRetries) {
          logger?.error("findings.cycle", "cycle exited \u2014 validator error", {
            storyId,
            packageDir,
            cycleName,
            reason: "validator-error",
            error: errorMessage(err)
          });
          return {
            iterations: cycle.iterations,
            finalFindings: cycle.findings,
            exitReason: "validator-error",
            costUsd: totalCostUsd
          };
        }
        logger?.warn("findings.cycle", "validator retry", {
          storyId,
          packageDir,
          cycleName,
          attempt: validatorAttempt + 1,
          error: errorMessage(err)
        });
        validatorAttempt++;
      }
    }
    // Record the iteration, roll state forward, and accumulate cost.
    const outcome = classifyOutcome(findingsBefore, findingsAfter);
    const finishedAt = now();
    const iterationNum = cycle.iterations.length + 1;
    const iteration = {
      iterationNum,
      findingsBefore,
      fixesApplied,
      findingsAfter,
      outcome,
      startedAt,
      finishedAt
    };
    cycle.iterations.push(iteration);
    cycle.findings = findingsAfter;
    const iterationCostUsd = fixesApplied.reduce((sum, fa) => sum + (fa.costUsd ?? 0), 0);
    totalCostUsd += iterationCostUsd;
    logger?.info("findings.cycle", "iteration completed", {
      storyId,
      packageDir,
      cycleName,
      iterationNum,
      strategiesRan: fixesApplied.map((fa) => fa.strategyName),
      outcome,
      findingsBefore: findingsBefore.length,
      findingsAfter: findingsAfter.length,
      ...iterationCostUsd > 0 ? { costUsd: iterationCostUsd } : {}
    });
    if (outcome === "resolved") {
      return { iterations: cycle.iterations, finalFindings: [], exitReason: "resolved", costUsd: totalCostUsd };
    }
    // Otherwise loop; the attempt caps above guarantee termination.
  }
}
40141
// Default collaborators for runFixCycle, overridable per call via its _deps
// parameter (primarily a test seam).
var _cycleDeps;
var init_cycle = __esm(() => {
  init_logger2();
  init_call();
  init_types7();
  _cycleDeps = {
    callOp,
    // ISO-8601 timestamps recorded on iteration start/finish.
    now: () => new Date().toISOString()
  };
});

// src/findings/index.ts
// Barrel initializer for the findings subsystem.
var init_findings = __esm(() => {
  init_types7();
  init_adapters();
  init_path_utils();
  init_cycle();
});
40159
+
39541
40160
  // src/utils/log-test-output.ts
39542
40161
  function logTestOutput(logger, stage, output, opts = {}) {
39543
40162
  if (!logger || !output)
@@ -39719,6 +40338,7 @@ var _acceptanceStageDeps, parseTestFailures2, acceptanceStage;
39719
40338
  var init_acceptance2 = __esm(() => {
39720
40339
  init_generator();
39721
40340
  init_test_path();
40341
+ init_findings();
39722
40342
  init_logger2();
39723
40343
  init_prd();
39724
40344
  init_ac_parser();
@@ -39754,6 +40374,7 @@ var init_acceptance2 = __esm(() => {
39754
40374
  }
39755
40375
  ];
39756
40376
  const allFailedACs = [];
40377
+ const allFindings = [];
39757
40378
  const allOutputParts = [];
39758
40379
  let anyError = false;
39759
40380
  let errorExitCode = 0;
@@ -39806,11 +40427,13 @@ ${stderr}`;
39806
40427
  anyError = true;
39807
40428
  errorExitCode = exitCode;
39808
40429
  allFailedACs.push("AC-ERROR");
40430
+ allFindings.push(acSentinelToFinding("AC-ERROR", output));
39809
40431
  continue;
39810
40432
  }
39811
40433
  for (const acId of actualFailures) {
39812
40434
  if (!allFailedACs.includes(acId)) {
39813
40435
  allFailedACs.push(acId);
40436
+ allFindings.push(acId === "AC-HOOK" ? acSentinelToFinding("AC-HOOK", output) : acFailureToFinding(acId, output));
39814
40437
  }
39815
40438
  }
39816
40439
  if (actualFailures.length > 0) {
@@ -39861,6 +40484,7 @@ ${stderr}`;
39861
40484
  }
39862
40485
  ctx.acceptanceFailures = {
39863
40486
  failedACs: allFailedACs,
40487
+ findings: allFindings,
39864
40488
  testOutput: combinedOutput
39865
40489
  };
39866
40490
  if (anyError) {
@@ -40092,8 +40716,7 @@ ${stderr}` };
40092
40716
  criteriaList,
40093
40717
  frameworkOverrideLine,
40094
40718
  targetTestFilePath: testPath,
40095
- ..."implementationContext" in ctx && ctx.implementationContext ? { implementationContext: ctx.implementationContext } : {},
40096
- ..."previousFailure" in ctx && ctx.previousFailure ? { previousFailure: ctx.previousFailure } : {}
40719
+ ..."implementationContext" in ctx && ctx.implementationContext ? { implementationContext: ctx.implementationContext } : {}
40097
40720
  }, groupStoryId);
40098
40721
  const testCode = genResult.testCode;
40099
40722
  if (testCode) {
@@ -40361,10 +40984,10 @@ async function executeWithTimeout(command, timeoutSeconds, env2, options) {
40361
40984
  const timeoutMs = timeoutSeconds * 1000;
40362
40985
  let timedOut = false;
40363
40986
  const timer = { id: undefined };
40364
- const timeoutPromise = new Promise((resolve14) => {
40987
+ const timeoutPromise = new Promise((resolve15) => {
40365
40988
  timer.id = setTimeout(() => {
40366
40989
  timedOut = true;
40367
- resolve14();
40990
+ resolve15();
40368
40991
  }, timeoutMs);
40369
40992
  });
40370
40993
  const processPromise = proc.exited;
@@ -40378,8 +41001,8 @@ async function executeWithTimeout(command, timeoutSeconds, env2, options) {
40378
41001
  proc.exited.then(() => {
40379
41002
  exitedDuringGrace = true;
40380
41003
  }),
40381
- new Promise((resolve14) => {
40382
- setTimeout(resolve14, gracePeriodMs);
41004
+ new Promise((resolve15) => {
41005
+ setTimeout(resolve15, gracePeriodMs);
40383
41006
  })
40384
41007
  ]);
40385
41008
  if (!exitedDuringGrace) {
@@ -40557,7 +41180,7 @@ var init_runners = __esm(() => {
40557
41180
  });
40558
41181
 
40559
41182
  // src/verification/smart-runner.ts
40560
- import { join as join33, relative as relative9 } from "path";
41183
+ import { join as join33, relative as relative10 } from "path";
40561
41184
  function extractPatternSuffix(pattern) {
40562
41185
  const lastStar = pattern.lastIndexOf("*");
40563
41186
  if (lastStar === -1)
@@ -40665,7 +41288,7 @@ async function getChangedNonTestFiles(workdir, baseRef, packagePrefix, testFileR
40665
41288
  let effectivePrefix = packagePrefix;
40666
41289
  if (packagePrefix && repoRoot) {
40667
41290
  const gitRoot = await _gitUtilDeps.getGitRoot(workdir);
40668
- const extraPrefix2 = gitRoot && gitRoot !== repoRoot ? relative9(gitRoot, repoRoot) : "";
41291
+ const extraPrefix2 = gitRoot && gitRoot !== repoRoot ? relative10(gitRoot, repoRoot) : "";
40669
41292
  effectivePrefix = extraPrefix2 ? `${extraPrefix2}/${packagePrefix}` : packagePrefix;
40670
41293
  }
40671
41294
  const scopedRaw = effectivePrefix ? lines.filter((f) => f.startsWith(`${effectivePrefix}/`)) : lines;
@@ -40692,7 +41315,7 @@ async function getChangedTestFiles(workdir, repoRoot, baseRef, packagePrefix, te
40692
41315
  const packageDir = packagePrefix ? join33(repoRoot, packagePrefix) : undefined;
40693
41316
  const ignoreMatchers = naxIgnoreIndex?.getMatchers(packageDir) ?? await resolveNaxIgnorePatterns(repoRoot, packageDir);
40694
41317
  const gitRoot = await _gitUtilDeps.getGitRoot(workdir);
40695
- const extraPrefix = gitRoot && gitRoot !== repoRoot ? relative9(gitRoot, repoRoot) : "";
41318
+ const extraPrefix = gitRoot && gitRoot !== repoRoot ? relative10(gitRoot, repoRoot) : "";
40696
41319
  const effectivePrefix = packagePrefix ? extraPrefix ? `${extraPrefix}/${packagePrefix}` : packagePrefix : undefined;
40697
41320
  const scopedRaw = effectivePrefix ? lines.filter((f) => f.startsWith(`${effectivePrefix}/`)) : lines;
40698
41321
  const scoped = filterNaxInternalPaths(scopedRaw, ignoreMatchers);
@@ -40977,6 +41600,158 @@ var init_event_bus = __esm(() => {
40977
41600
  pipelineEventBus = new PipelineEventBus;
40978
41601
  });
40979
41602
 
41603
+ // src/pipeline/stages/autofix-cycle.ts
41604
+ import { join as join35 } from "path";
41605
+ function fixCallCtx(ctx) {
41606
+ const packageView = ctx.packageView ?? ctx.runtime.packages.repo();
41607
+ return {
41608
+ runtime: ctx.runtime,
41609
+ packageView,
41610
+ packageDir: ctx.workdir,
41611
+ storyId: ctx.story.id,
41612
+ featureName: ctx.prd.feature,
41613
+ agentName: ctx.agentManager.getDefault(),
41614
+ story: ctx.story
41615
+ };
41616
+ }
41617
+ function collectFailedChecks(ctx) {
41618
+ return (ctx.reviewResult?.checks ?? []).filter((c) => !c.success);
41619
+ }
41620
+ function collectCurrentFindings(ctx) {
41621
+ const checks3 = collectFailedChecks(ctx);
41622
+ if (checks3.length === 0)
41623
+ return [];
41624
+ return checks3.flatMap((c) => {
41625
+ if (c.findings?.length)
41626
+ return c.findings;
41627
+ return [
41628
+ {
41629
+ source: c.check === "adversarial" ? "adversarial-review" : c.check === "semantic" ? "semantic-review" : "lint",
41630
+ severity: "error",
41631
+ category: c.check,
41632
+ message: (c.output ?? c.check).slice(0, 200),
41633
+ fixTarget: "source"
41634
+ }
41635
+ ];
41636
+ });
41637
+ }
41638
+ function collectTestTargetedChecks(ctx) {
41639
+ return collectFailedChecks(ctx).filter((c) => c.findings?.some((f) => f.fixTarget === "test"));
41640
+ }
41641
+ function buildAutofixStrategies(ctx, maxAttempts) {
41642
+ const implementer = {
41643
+ name: "autofix-implementer",
41644
+ appliesTo: (f) => (f.fixTarget ?? "source") === "source",
41645
+ fixOp: implementerRectifyOp,
41646
+ maxAttempts,
41647
+ coRun: "co-run-sequential",
41648
+ buildInput: (_findings, _prior, _cycleCtx) => ({
41649
+ failedChecks: collectFailedChecks(ctx),
41650
+ story: ctx.story
41651
+ }),
41652
+ extractApplied: (output) => ({
41653
+ summary: output.unresolvedReason ?? ""
41654
+ })
41655
+ };
41656
+ const testWriter = {
41657
+ name: "autofix-test-writer",
41658
+ appliesTo: (f) => f.fixTarget === "test",
41659
+ fixOp: testWriterRectifyOp,
41660
+ maxAttempts: 1,
41661
+ coRun: "co-run-sequential",
41662
+ buildInput: (_findings, _prior, _cycleCtx) => ({
41663
+ failedChecks: collectTestTargetedChecks(ctx),
41664
+ story: ctx.story
41665
+ })
41666
+ };
41667
+ return [implementer, testWriter];
41668
+ }
41669
+ function findUnresolvedReason(result) {
41670
+ for (const iter of result.iterations) {
41671
+ for (const fa of iter.fixesApplied) {
41672
+ if (fa.strategyName === "autofix-implementer" && fa.summary) {
41673
+ return fa.summary;
41674
+ }
41675
+ }
41676
+ }
41677
+ return;
41678
+ }
41679
+ async function writeShadowReport(ctx, result, initialFindingsCount) {
41680
+ const logger = getLogger();
41681
+ const shadowDir = join35(ctx.workdir, ".nax", "cycle-shadow", ctx.story.id);
41682
+ const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
41683
+ const report = {
41684
+ storyId: ctx.story.id,
41685
+ timestamp,
41686
+ initialFindingsCount,
41687
+ exitReason: result.exitReason,
41688
+ iterations: result.iterations.length,
41689
+ finalFindingsCount: result.finalFindings.length,
41690
+ ...result.exhaustedStrategy ? { exhaustedStrategy: result.exhaustedStrategy } : {}
41691
+ };
41692
+ try {
41693
+ const file3 = join35(shadowDir, `${timestamp}.json`);
41694
+ await Bun.write(file3, JSON.stringify(report, null, 2));
41695
+ } catch (err) {
41696
+ logger.debug("autofix-cycle", "Shadow report write failed (non-fatal)", {
41697
+ storyId: ctx.story.id,
41698
+ error: String(err)
41699
+ });
41700
+ }
41701
+ }
41702
+ async function runAgentRectificationV2(ctx, _lintFixCmd, _formatFixCmd, _effectiveWorkdir) {
41703
+ const logger = getLogger();
41704
+ const storyId = ctx.story.id;
41705
+ const cycleCtx = fixCallCtx(ctx);
41706
+ const initialFindings = collectCurrentFindings(ctx);
41707
+ const maxAttempts = ctx.config.quality.autofix?.maxAttempts ?? 3;
41708
+ const maxTotalAttempts = ctx.config.quality.autofix?.maxTotalAttempts ?? 12;
41709
+ logger.info("autofix-cycle", "Starting V2 fix cycle", {
41710
+ storyId,
41711
+ initialFindingsCount: initialFindings.length,
41712
+ maxAttempts,
41713
+ maxTotalAttempts
41714
+ });
41715
+ const cycle = {
41716
+ findings: initialFindings,
41717
+ iterations: [...ctx.autofixPriorIterations ?? []],
41718
+ strategies: buildAutofixStrategies(ctx, maxAttempts),
41719
+ config: {
41720
+ maxAttemptsTotal: maxTotalAttempts,
41721
+ validatorRetries: 1
41722
+ },
41723
+ async validate(_cycleCtx) {
41724
+ await _autofixDeps.recheckReview(ctx);
41725
+ return collectCurrentFindings(ctx);
41726
+ }
41727
+ };
41728
+ const result = await runFixCycle(cycle, cycleCtx, "autofix-v2");
41729
+ ctx.autofixPriorIterations = result.iterations;
41730
+ await writeShadowReport(ctx, result, initialFindings.length);
41731
+ const unresolvedReason = findUnresolvedReason(result);
41732
+ const succeeded = result.exitReason === "resolved" || result.finalFindings.length === 0;
41733
+ logger.info("autofix-cycle", "V2 fix cycle complete", {
41734
+ storyId,
41735
+ exitReason: result.exitReason,
41736
+ iterations: result.iterations.length,
41737
+ finalFindingsCount: result.finalFindings.length,
41738
+ succeeded,
41739
+ ...unresolvedReason ? { unresolvedReason } : {}
41740
+ });
41741
+ return { succeeded, cost: 0, ...unresolvedReason ? { unresolvedReason } : {} };
41742
+ }
41743
+ var init_autofix_cycle = __esm(() => {
41744
+ init_findings();
41745
+ init_logger2();
41746
+ init_operations();
41747
+ init_autofix();
41748
+ });
41749
+
41750
+ // src/pipeline/stages/autofix-agent.ts
41751
+ var init_autofix_agent = __esm(() => {
41752
+ init_autofix_cycle();
41753
+ });
41754
+
40980
41755
  // src/review/lint-parsing/strategies/biome-json.ts
40981
41756
  function asRecord(value) {
40982
41757
  return typeof value === "object" && value !== null ? value : null;
@@ -41223,12 +41998,24 @@ function strategiesFor(format) {
41223
41998
  return [];
41224
41999
  return [eslintJsonStrategy, biomeJsonStrategy, textBlockStrategy];
41225
42000
  }
41226
- function parseLintOutput(output, format = "auto") {
42001
+ function toolForFormat(format) {
42002
+ if (format === "biome-json")
42003
+ return "biome";
42004
+ if (format === "eslint-json")
42005
+ return "eslint";
42006
+ return "text";
42007
+ }
42008
+ function parseLintOutput(output, format = "auto", opts) {
41227
42009
  if (!output.trim())
41228
42010
  return null;
41229
42011
  for (const strategy of strategiesFor(format)) {
41230
42012
  const parsed = strategy.parse(output);
41231
42013
  if (parsed && parsed.diagnostics.length > 0) {
42014
+ if (opts) {
42015
+ const tool = toolForFormat(parsed.format);
42016
+ const findings = parsed.diagnostics.map((d) => lintDiagnosticToFinding(d, opts.workdir, tool));
42017
+ return { ...parsed, findings };
42018
+ }
41232
42019
  return parsed;
41233
42020
  }
41234
42021
  }
@@ -41242,6 +42029,7 @@ function formatDiagnosticsOutput(diagnostics) {
41242
42029
  `).trim() || null;
41243
42030
  }
41244
42031
  var init_parse3 = __esm(() => {
42032
+ init_findings();
41245
42033
  init_biome_json();
41246
42034
  init_eslint_json();
41247
42035
  init_text_block();
@@ -41438,12 +42226,16 @@ function strategiesFor2(format) {
41438
42226
  return [];
41439
42227
  return [tscStrategy, typecheckTextBlockStrategy];
41440
42228
  }
41441
- function parseTypecheckOutput(output, format = "auto") {
42229
+ function parseTypecheckOutput(output, format = "auto", opts) {
41442
42230
  if (!output.trim())
41443
42231
  return null;
41444
42232
  for (const strategy of strategiesFor2(format)) {
41445
42233
  const parsed = strategy.parse(output);
41446
42234
  if (parsed && parsed.diagnostics.length > 0) {
42235
+ if (opts) {
42236
+ const findings = parsed.diagnostics.map((d) => tscDiagnosticToFinding(d, opts.workdir));
42237
+ return { ...parsed, findings };
42238
+ }
41447
42239
  return parsed;
41448
42240
  }
41449
42241
  }
@@ -41457,6 +42249,7 @@ function formatTypecheckDiagnosticsOutput(diagnostics) {
41457
42249
  `).trim() || null;
41458
42250
  }
41459
42251
  var init_parse4 = __esm(() => {
42252
+ init_findings();
41460
42253
  init_text_block2();
41461
42254
  init_tsc();
41462
42255
  });
@@ -41493,49 +42286,84 @@ function splitByStructuredFindings(check2, testFilePatterns) {
41493
42286
  };
41494
42287
  return { testFindings: toCheck(testFs), sourceFindings: toCheck(sourceFs) };
41495
42288
  }
41496
- function splitByOutputParsing(check2, testFilePatterns, format = "auto") {
41497
- const parsed = parseLintOutput(check2.output, format);
42289
+ function deriveFixTarget(file3, testFilePatterns) {
42290
+ return file3 && isTestFile2(file3, testFilePatterns) ? "test" : "source";
42291
+ }
42292
+ function splitFindingsByFixTarget(findings, diagnostics, testFilePatterns) {
42293
+ const testDiagnostics = [];
42294
+ const sourceDiagnostics = [];
42295
+ for (let i = 0;i < findings.length; i++) {
42296
+ const diagnostic = diagnostics[i];
42297
+ if (!diagnostic) {
42298
+ throw new NaxError(`findings and diagnostics arrays are not co-produced: length mismatch at index ${i}`, "INVARIANT_VIOLATION", {
42299
+ stage: "autofix-scope-split",
42300
+ index: i,
42301
+ findingsCount: findings.length,
42302
+ diagnosticsCount: diagnostics.length
42303
+ });
42304
+ }
42305
+ const f = findings[i];
42306
+ const target = f.fixTarget ?? deriveFixTarget(f.file, testFilePatterns);
42307
+ (target === "test" ? testDiagnostics : sourceDiagnostics).push(diagnostic);
42308
+ }
42309
+ return { testDiagnostics, sourceDiagnostics };
42310
+ }
42311
+ function splitByOutputParsing(check2, testFilePatterns, format = "auto", opts) {
42312
+ const parsed = parseLintOutput(check2.output, format, opts);
41498
42313
  if (!parsed) {
41499
42314
  if (check2.output.trim()) {
41500
42315
  return { testFindings: null, sourceFindings: check2 };
41501
42316
  }
41502
42317
  return { testFindings: null, sourceFindings: null };
41503
42318
  }
41504
- const testDiagnostics = parsed.diagnostics.filter((d) => isTestFile2(d.file, testFilePatterns));
41505
- const sourceDiagnostics = parsed.diagnostics.filter((d) => !isTestFile2(d.file, testFilePatterns));
42319
+ let testDiagnostics;
42320
+ let sourceDiagnostics;
42321
+ if (parsed.findings) {
42322
+ ({ testDiagnostics, sourceDiagnostics } = splitFindingsByFixTarget(parsed.findings, parsed.diagnostics, testFilePatterns));
42323
+ } else {
42324
+ testDiagnostics = parsed.diagnostics.filter((d) => isTestFile2(d.file, testFilePatterns));
42325
+ sourceDiagnostics = parsed.diagnostics.filter((d) => !isTestFile2(d.file, testFilePatterns));
42326
+ }
41506
42327
  return {
41507
42328
  testFindings: buildScopedLintCheck(check2, testDiagnostics),
41508
42329
  sourceFindings: buildScopedLintCheck(check2, sourceDiagnostics)
41509
42330
  };
41510
42331
  }
41511
- function splitByTypecheckOutputParsing(check2, testFilePatterns, format = "auto") {
41512
- const parsed = parseTypecheckOutput(check2.output, format);
42332
+ function splitByTypecheckOutputParsing(check2, testFilePatterns, format = "auto", opts) {
42333
+ const parsed = parseTypecheckOutput(check2.output, format, opts);
41513
42334
  if (!parsed) {
41514
42335
  if (check2.output.trim()) {
41515
42336
  return { testFindings: null, sourceFindings: check2 };
41516
42337
  }
41517
42338
  return { testFindings: null, sourceFindings: null };
41518
42339
  }
41519
- const testDiagnostics = parsed.diagnostics.filter((d) => isTestFile2(d.file, testFilePatterns));
41520
- const sourceDiagnostics = parsed.diagnostics.filter((d) => !isTestFile2(d.file, testFilePatterns));
42340
+ let testDiagnostics;
42341
+ let sourceDiagnostics;
42342
+ if (parsed.findings) {
42343
+ ({ testDiagnostics, sourceDiagnostics } = splitFindingsByFixTarget(parsed.findings, parsed.diagnostics, testFilePatterns));
42344
+ } else {
42345
+ testDiagnostics = parsed.diagnostics.filter((d) => isTestFile2(d.file, testFilePatterns));
42346
+ sourceDiagnostics = parsed.diagnostics.filter((d) => !isTestFile2(d.file, testFilePatterns));
42347
+ }
41521
42348
  return {
41522
42349
  testFindings: buildScopedTypecheckCheck(check2, testDiagnostics),
41523
42350
  sourceFindings: buildScopedTypecheckCheck(check2, sourceDiagnostics)
41524
42351
  };
41525
42352
  }
41526
- function splitFindingsByScope(check2, testFilePatterns, lintOutputFormat = "auto", typecheckOutputFormat = "auto") {
42353
+ function splitFindingsByScope(check2, testFilePatterns, lintOutputFormat = "auto", typecheckOutputFormat = "auto", opts) {
41527
42354
  if (check2.check === "adversarial") {
41528
42355
  return splitByStructuredFindings(check2, testFilePatterns);
41529
42356
  }
41530
42357
  if (check2.check === "lint") {
41531
- return splitByOutputParsing(check2, testFilePatterns, lintOutputFormat);
42358
+ return splitByOutputParsing(check2, testFilePatterns, lintOutputFormat, opts);
41532
42359
  }
41533
42360
  if (check2.check === "typecheck") {
41534
- return splitByTypecheckOutputParsing(check2, testFilePatterns, typecheckOutputFormat);
42361
+ return splitByTypecheckOutputParsing(check2, testFilePatterns, typecheckOutputFormat, opts);
41535
42362
  }
41536
42363
  return { testFindings: null, sourceFindings: null };
41537
42364
  }
41538
42365
  var init_autofix_scope_split = __esm(() => {
42366
+ init_errors();
41539
42367
  init_lint_parsing();
41540
42368
  init_typecheck_parsing();
41541
42369
  init_test_runners();
@@ -41591,7 +42419,7 @@ async function runTestWriterRectification(ctx, testWriterChecks, story, agentMan
41591
42419
  return 0;
41592
42420
  }
41593
42421
  }
41594
- var init_autofix_test_writer = __esm(() => {
42422
+ var init_autofix_test_writer2 = __esm(() => {
41595
42423
  init_config();
41596
42424
  init_errors();
41597
42425
  init_logger2();
@@ -41600,25 +42428,28 @@ var init_autofix_test_writer = __esm(() => {
41600
42428
  });
41601
42429
 
41602
42430
  // src/review/dialogue.ts
42431
+ function findingId(f) {
42432
+ return f.rule ?? `${f.file ?? ""}:${f.line ?? 0}:${f.message.slice(0, 40)}`;
42433
+ }
41603
42434
  function extractDeltaSummary(rawOutput, previousFindings, newFindings) {
41604
42435
  const parsed = tryParseLLMJson(rawOutput);
41605
42436
  if (parsed && typeof parsed.deltaSummary === "string" && parsed.deltaSummary.length > 0) {
41606
42437
  return parsed.deltaSummary;
41607
42438
  }
41608
- const newIds = new Set(newFindings.map((f) => f.ruleId));
41609
- const prevIds = new Set(previousFindings.map((f) => f.ruleId));
41610
- const resolved = previousFindings.filter((f) => !newIds.has(f.ruleId));
41611
- const stillPresent = newFindings.filter((f) => prevIds.has(f.ruleId));
41612
- const added = newFindings.filter((f) => !prevIds.has(f.ruleId));
42439
+ const newIds = new Set(newFindings.map(findingId));
42440
+ const prevIds = new Set(previousFindings.map(findingId));
42441
+ const resolved = previousFindings.filter((f) => !newIds.has(findingId(f)));
42442
+ const stillPresent = newFindings.filter((f) => prevIds.has(findingId(f)));
42443
+ const added = newFindings.filter((f) => !prevIds.has(findingId(f)));
41613
42444
  const parts = [];
41614
42445
  if (resolved.length > 0) {
41615
- parts.push(`Resolved: ${resolved.map((f) => f.ruleId).join(", ")}.`);
42446
+ parts.push(`Resolved: ${resolved.map(findingId).join(", ")}.`);
41616
42447
  }
41617
42448
  if (stillPresent.length > 0) {
41618
- parts.push(`Still present: ${stillPresent.map((f) => f.ruleId).join(", ")}.`);
42449
+ parts.push(`Still present: ${stillPresent.map(findingId).join(", ")}.`);
41619
42450
  }
41620
42451
  if (added.length > 0) {
41621
- parts.push(`New findings: ${added.map((f) => f.ruleId).join(", ")}.`);
42452
+ parts.push(`New findings: ${added.map(findingId).join(", ")}.`);
41622
42453
  }
41623
42454
  if (parts.length === 0) {
41624
42455
  return previousFindings.length > 0 ? "All previous findings resolved." : "No changes from previous review.";
@@ -41641,24 +42472,21 @@ function compactHistory(history) {
41641
42472
  history.push(lastReviewer);
41642
42473
  return summary;
41643
42474
  }
41644
- function mapLLMFindingToReviewFinding(f) {
42475
+ function mapLLMFindingToFinding(f) {
41645
42476
  const rawSeverity = typeof f.severity === "string" ? f.severity : "info";
41646
42477
  let severity = "info";
41647
42478
  if (rawSeverity === "warn" || rawSeverity === "warning")
41648
42479
  severity = "warning";
41649
- else if (rawSeverity === "critical" || rawSeverity === "error" || rawSeverity === "low")
42480
+ else if (rawSeverity === "critical" || rawSeverity === "error" || rawSeverity === "low" || rawSeverity === "unverifiable")
41650
42481
  severity = rawSeverity;
41651
- else if (rawSeverity === "unverifiable")
41652
- severity = "info";
41653
- else if (rawSeverity === "info")
41654
- severity = "info";
41655
42482
  return {
41656
- ruleId: typeof f.ruleId === "string" && f.ruleId ? f.ruleId : "semantic",
42483
+ source: "semantic-review",
41657
42484
  severity,
42485
+ category: "",
42486
+ rule: typeof f.ruleId === "string" && f.ruleId ? f.ruleId : undefined,
41658
42487
  file: typeof f.file === "string" ? f.file : "",
41659
42488
  line: typeof f.line === "number" ? f.line : 0,
41660
- message: typeof f.message === "string" && f.message ? f.message : typeof f.issue === "string" ? f.issue : "",
41661
- source: typeof f.source === "string" ? f.source : "semantic-review"
42489
+ message: typeof f.message === "string" && f.message ? f.message : typeof f.issue === "string" ? f.issue : ""
41662
42490
  };
41663
42491
  }
41664
42492
  function parseReviewResponse(output) {
@@ -41679,7 +42507,7 @@ function parseReviewResponse(output) {
41679
42507
  }
41680
42508
  const success2 = Boolean(parsed.passed);
41681
42509
  const rawFindings = Array.isArray(parsed.findings) ? parsed.findings : [];
41682
- const findings = rawFindings.map((f) => mapLLMFindingToReviewFinding(f));
42510
+ const findings = rawFindings.map((f) => mapLLMFindingToFinding(f));
41683
42511
  const reasoningObj = parsed.findingReasoning && typeof parsed.findingReasoning === "object" ? parsed.findingReasoning : {};
41684
42512
  const findingReasoning = new Map(Object.entries(reasoningObj));
41685
42513
  return { checkResult: { success: success2, findings }, findingReasoning };
@@ -41898,52 +42726,6 @@ var init_dialogue = __esm(() => {
41898
42726
  init_prompts();
41899
42727
  });
41900
42728
 
41901
- // src/review/severity.ts
41902
- var SEVERITY_RANK;
41903
- var init_severity = __esm(() => {
41904
- SEVERITY_RANK = {
41905
- info: 0,
41906
- unverifiable: 0,
41907
- low: 1,
41908
- warning: 1,
41909
- error: 2,
41910
- critical: 3
41911
- };
41912
- });
41913
-
41914
- // src/review/adversarial-helpers.ts
41915
- function formatFindings(findings) {
41916
- return findings.map((f) => `[${f.severity}][${f.category}] ${f.file}:${f.line} \u2014 ${f.issue}
41917
- Suggestion: ${f.suggestion}`).join(`
41918
- `);
41919
- }
41920
- function normalizeSeverity(sev) {
41921
- if (sev === "warn")
41922
- return "warning";
41923
- if (sev === "unverifiable")
41924
- return "info";
41925
- if (sev === "critical" || sev === "error" || sev === "warning" || sev === "info" || sev === "low")
41926
- return sev;
41927
- return "info";
41928
- }
41929
- function isBlockingSeverity(sev, threshold = "error") {
41930
- return (SEVERITY_RANK[sev] ?? 0) >= (SEVERITY_RANK[threshold] ?? 2);
41931
- }
41932
- function toAdversarialReviewFindings(findings) {
41933
- return findings.map((f) => ({
41934
- ruleId: "adversarial",
41935
- severity: normalizeSeverity(f.severity),
41936
- file: f.file,
41937
- line: f.line,
41938
- message: f.issue,
41939
- source: "adversarial-review",
41940
- category: f.category
41941
- }));
41942
- }
41943
- var init_adversarial_helpers = __esm(() => {
41944
- init_severity();
41945
- });
41946
-
41947
42729
  // src/review/diff-utils.ts
41948
42730
  var {spawn: spawn3 } = globalThis.Bun;
41949
42731
  async function resolveNaxIgnorePathspecExcludes(workdir, options) {
@@ -42090,7 +42872,7 @@ var init_diff_utils = __esm(() => {
42090
42872
  });
42091
42873
 
42092
42874
  // src/review/adversarial.ts
42093
- import { relative as relative10, sep as sep3 } from "path";
42875
+ import { relative as relative11, sep as sep3 } from "path";
42094
42876
  function recordAdversarialAudit(opts) {
42095
42877
  opts.runtime?.reviewAuditor.recordDecision({
42096
42878
  reviewer: "adversarial",
@@ -42123,7 +42905,7 @@ async function runAdversarialReview(opts) {
42123
42905
  projectDir,
42124
42906
  naxIgnoreIndex,
42125
42907
  runtime,
42126
- priorAdversarialFindings
42908
+ priorAdversarialIterations
42127
42909
  } = opts;
42128
42910
  const startTime = Date.now();
42129
42911
  const logger = getSafeLogger();
@@ -42161,7 +42943,7 @@ async function runAdversarialReview(opts) {
42161
42943
  let testInventory;
42162
42944
  const effectiveConfig = naxConfig ?? reviewConfigSelector.select(DEFAULT_CONFIG);
42163
42945
  const packageDirRelative = projectDir && workdir !== projectDir ? (() => {
42164
- const rel = relative10(projectDir, workdir);
42946
+ const rel = relative11(projectDir, workdir);
42165
42947
  if (rel === ".." || rel.startsWith(`..${sep3}`))
42166
42948
  return;
42167
42949
  return rel && rel !== "." ? rel : undefined;
@@ -42248,7 +43030,7 @@ async function runAdversarialReview(opts) {
42248
43030
  excludePatterns: adversarialConfig.excludePatterns,
42249
43031
  testGlobs: resolvedTestPatterns.globs,
42250
43032
  featureCtxBlock,
42251
- priorAdversarialFindings,
43033
+ priorAdversarialIterations,
42252
43034
  blockingThreshold,
42253
43035
  refExcludePatterns: effectiveRefExcludePatterns
42254
43036
  });
@@ -42333,8 +43115,8 @@ async function runAdversarialReview(opts) {
42333
43115
  findings: opResult.findings
42334
43116
  };
42335
43117
  const threshold = blockingThreshold ?? "error";
42336
- const blockingFindings = parsed.findings.filter((f) => isBlockingSeverity(f.severity, threshold));
42337
- const advisoryFindings = parsed.findings.filter((f) => !isBlockingSeverity(f.severity, threshold));
43118
+ const blockingFindings = parsed.findings.filter((f) => isBlockingSeverity2(f.severity, threshold));
43119
+ const advisoryFindings = parsed.findings.filter((f) => !isBlockingSeverity2(f.severity, threshold));
42338
43120
  if (advisoryFindings.length > 0) {
42339
43121
  logger?.debug("review", `Adversarial review: ${advisoryFindings.length} advisory findings (below threshold '${threshold}')`, {
42340
43122
  storyId: story.id,
@@ -42382,7 +43164,7 @@ async function runAdversarialReview(opts) {
42382
43164
  exitCode: 1,
42383
43165
  output: `Adversarial review failed:
42384
43166
 
42385
- ${formatFindings(blockingFindings)}`,
43167
+ ${formatFindings2(blockingFindings)}`,
42386
43168
  durationMs: durationMs2,
42387
43169
  findings: toAdversarialReviewFindings(blockingFindings),
42388
43170
  advisoryFindings: advisoryFindings.length > 0 ? toAdversarialReviewFindings(advisoryFindings) : undefined,
@@ -42507,89 +43289,6 @@ var init_language_commands = __esm(() => {
42507
43289
  };
42508
43290
  });
42509
43291
 
42510
- // src/review/semantic-helpers.ts
42511
- function validateLLMShape(parsed) {
42512
- if (typeof parsed !== "object" || parsed === null)
42513
- return null;
42514
- const obj = parsed;
42515
- if (typeof obj.passed !== "boolean")
42516
- return null;
42517
- if (!Array.isArray(obj.findings))
42518
- return null;
42519
- return { passed: obj.passed, findings: obj.findings };
42520
- }
42521
- function parseLLMResponse(raw) {
42522
- try {
42523
- return validateLLMShape(tryParseLLMJson(raw));
42524
- } catch {
42525
- return null;
42526
- }
42527
- }
42528
- function formatFindings2(findings) {
42529
- return findings.map((f) => `[${f.severity}] ${f.file}:${f.line} \u2014 ${f.issue}
42530
- Suggestion: ${f.suggestion}`).join(`
42531
- `);
42532
- }
42533
- function normalizeSeverity2(sev) {
42534
- if (sev === "warn")
42535
- return "warning";
42536
- if (sev === "unverifiable")
42537
- return "info";
42538
- if (sev === "critical" || sev === "error" || sev === "warning" || sev === "info" || sev === "low")
42539
- return sev;
42540
- return "info";
42541
- }
42542
- function isBlockingSeverity2(sev, threshold = "error") {
42543
- return (SEVERITY_RANK[sev] ?? 0) >= (SEVERITY_RANK[threshold] ?? 2);
42544
- }
42545
- function sanitizeRefModeFindings(findings, diffMode) {
42546
- if (diffMode !== "ref")
42547
- return findings;
42548
- return findings.map((finding) => needsDowngradeForMissingEvidence(finding) ? downgradeToUnverifiable(finding) : finding);
42549
- }
42550
- function needsDowngradeForMissingEvidence(finding) {
42551
- if ((SEVERITY_RANK[finding.severity] ?? 0) < SEVERITY_RANK.error)
42552
- return false;
42553
- return mentionsUnverifiedSource(finding) || !hasVerifiedEvidence(finding);
42554
- }
42555
- function mentionsUnverifiedSource(finding) {
42556
- const text = `${finding.issue} ${finding.suggestion}`.toLowerCase();
42557
- return UNVERIFIED_FINDING_PATTERNS.some((pattern) => text.includes(pattern));
42558
- }
42559
- function hasVerifiedEvidence(finding) {
42560
- const evidence = finding.verifiedBy;
42561
- return !!evidence?.file?.trim() && !!evidence.observed?.trim();
42562
- }
42563
- function downgradeToUnverifiable(finding) {
42564
- return {
42565
- ...finding,
42566
- severity: "unverifiable"
42567
- };
42568
- }
42569
- function toReviewFindings(findings) {
42570
- return findings.map((f) => ({
42571
- ruleId: "semantic",
42572
- severity: normalizeSeverity2(f.severity),
42573
- file: f.file,
42574
- line: f.line,
42575
- message: f.issue,
42576
- source: "semantic-review"
42577
- }));
42578
- }
42579
- var UNVERIFIED_FINDING_PATTERNS;
42580
- var init_semantic_helpers = __esm(() => {
42581
- init_severity();
42582
- UNVERIFIED_FINDING_PATTERNS = [
42583
- "cannot verify",
42584
- "can't verify",
42585
- "from diff alone",
42586
- "missing from diff",
42587
- "not found in diff",
42588
- "not present in diff",
42589
- "does not appear in diff"
42590
- ];
42591
- });
42592
-
42593
43292
  // src/review/semantic-debate.ts
42594
43293
  function recordSemanticDebateAudit(opts) {
42595
43294
  opts.runtime.reviewAuditor.recordDecision({
@@ -42698,7 +43397,7 @@ async function runSemanticDebate(opts) {
42698
43397
  exitCode: 1,
42699
43398
  output: `Semantic review failed:
42700
43399
 
42701
- ${findings.map((f) => `${f.ruleId}: ${f.message}`).join(`
43400
+ ${findings.map((f) => `${f.rule ?? "semantic"}: ${f.message}`).join(`
42702
43401
  `)}`,
42703
43402
  durationMs: durationMs2,
42704
43403
  findings,
@@ -42751,8 +43450,8 @@ ${findings.map((f) => `${f.ruleId}: ${f.message}`).join(`
42751
43450
  }
42752
43451
  const debateFindings = sanitizeRefModeFindings(deduped, diffMode);
42753
43452
  const debateThreshold = blockingThreshold ?? "error";
42754
- const debateBlocking = debateFindings.filter((f) => isBlockingSeverity2(f.severity, debateThreshold));
42755
- const debateAdvisory = debateFindings.filter((f) => !isBlockingSeverity2(f.severity, debateThreshold));
43453
+ const debateBlocking = debateFindings.filter((f) => isBlockingSeverity(f.severity, debateThreshold));
43454
+ const debateAdvisory = debateFindings.filter((f) => !isBlockingSeverity(f.severity, debateThreshold));
42756
43455
  const durationMs = Date.now() - startTime;
42757
43456
  if (!resolverPassed) {
42758
43457
  if (debateBlocking.length > 0) {
@@ -42778,7 +43477,7 @@ ${findings.map((f) => `${f.ruleId}: ${f.message}`).join(`
42778
43477
  exitCode: 1,
42779
43478
  output: `Semantic review failed:
42780
43479
 
42781
- ${formatFindings2(debateBlocking)}`,
43480
+ ${formatFindings(debateBlocking)}`,
42782
43481
  durationMs,
42783
43482
  findings: toReviewFindings(debateBlocking),
42784
43483
  advisoryFindings: debateAdvisory.length > 0 ? toReviewFindings(debateAdvisory) : undefined,
@@ -42840,6 +43539,7 @@ var init_semantic_debate = __esm(() => {
42840
43539
  });
42841
43540
 
42842
43541
  // src/review/semantic-evidence.ts
43542
+ import { isAbsolute as isAbsolute10 } from "path";
42843
43543
  async function substantiateSemanticEvidence(findings, diffMode, workdir, storyId) {
42844
43544
  if (diffMode !== "ref")
42845
43545
  return findings;
@@ -42853,7 +43553,9 @@ async function substantiateFinding(finding, workdir, storyId) {
42853
43553
  return finding;
42854
43554
  const file3 = finding.verifiedBy?.file?.trim() || finding.file;
42855
43555
  const contents = await readSafeFile(workdir, file3);
42856
- if (contents !== null && normalizedIncludes(contents, observed))
43556
+ if (contents === null)
43557
+ return finding;
43558
+ if (normalizedIncludes(contents, observed))
42857
43559
  return finding;
42858
43560
  _evidenceDeps.getLogger()?.warn("review", "Downgraded unsubstantiated semantic error finding", {
42859
43561
  storyId,
@@ -42867,13 +43569,21 @@ async function substantiateFinding(finding, workdir, storyId) {
42867
43569
  }
42868
43570
  async function readSafeFile(workdir, file3) {
42869
43571
  const validated = validateModulePath(file3, [workdir]);
42870
- if (!validated.valid || !validated.absolutePath)
42871
- return null;
42872
- try {
42873
- return await Bun.file(validated.absolutePath).text();
42874
- } catch {
42875
- return null;
43572
+ if (validated.valid && validated.absolutePath) {
43573
+ try {
43574
+ return await Bun.file(validated.absolutePath).text();
43575
+ } catch {
43576
+ return null;
43577
+ }
43578
+ }
43579
+ if (isAbsolute10(file3)) {
43580
+ try {
43581
+ return await Bun.file(file3).text();
43582
+ } catch {
43583
+ return null;
43584
+ }
42876
43585
  }
43586
+ return null;
42877
43587
  }
42878
43588
  function normalizedIncludes(contents, observed) {
42879
43589
  const normalizedObserved = normalizeEvidenceText(observed);
@@ -42902,7 +43612,7 @@ var init_semantic_evidence = __esm(() => {
42902
43612
  });
42903
43613
 
42904
43614
  // src/review/semantic.ts
42905
- import { relative as relative11, sep as sep4 } from "path";
43615
+ import { relative as relative12, sep as sep4 } from "path";
42906
43616
  function recordSemanticAudit(opts) {
42907
43617
  opts.runtime?.reviewAuditor.recordDecision({
42908
43618
  reviewer: "semantic",
@@ -42929,7 +43639,7 @@ async function runSemanticReview(opts) {
42929
43639
  naxConfig,
42930
43640
  featureName,
42931
43641
  resolverSession,
42932
- priorFailures,
43642
+ priorSemanticIterations,
42933
43643
  blockingThreshold,
42934
43644
  featureContextMarkdown,
42935
43645
  contextBundle,
@@ -42966,7 +43676,7 @@ async function runSemanticReview(opts) {
42966
43676
  const packageDir = workdir !== repoRoot ? workdir : undefined;
42967
43677
  const stat = await collectDiffStat(workdir, effectiveRef, { naxIgnoreIndex, packageDir });
42968
43678
  const packageDirRelative = projectDir && workdir !== projectDir ? (() => {
42969
- const rel = relative11(projectDir, workdir);
43679
+ const rel = relative12(projectDir, workdir);
42970
43680
  if (rel === ".." || rel.startsWith(`..${sep4}`))
42971
43681
  return;
42972
43682
  return rel && rel !== "." ? rel : undefined;
@@ -43037,7 +43747,7 @@ async function runSemanticReview(opts) {
43037
43747
  diff,
43038
43748
  storyGitRef: effectiveRef,
43039
43749
  stat,
43040
- priorFailures,
43750
+ priorSemanticIterations,
43041
43751
  excludePatterns: semanticConfig.excludePatterns
43042
43752
  });
43043
43753
  const prompt = featureCtxBlock ? `${featureCtxBlock}${basePrompt}` : basePrompt;
@@ -43094,7 +43804,7 @@ async function runSemanticReview(opts) {
43094
43804
  diff,
43095
43805
  storyGitRef: effectiveRef,
43096
43806
  stat,
43097
- priorFailures,
43807
+ priorSemanticIterations,
43098
43808
  excludePatterns,
43099
43809
  featureCtxBlock,
43100
43810
  blockingThreshold
@@ -43179,8 +43889,8 @@ async function runSemanticReview(opts) {
43179
43889
  const sanitizedFindings = await substantiateSemanticEvidence(sanitizeRefModeFindings(parsed.findings, diffMode), diffMode, workdir, story.id);
43180
43890
  const sanitizedParsed = { ...parsed, findings: sanitizedFindings };
43181
43891
  const threshold = blockingThreshold ?? "error";
43182
- const blockingFindings = sanitizedParsed.findings.filter((f) => isBlockingSeverity2(f.severity, threshold));
43183
- const advisoryFindings = sanitizedParsed.findings.filter((f) => !isBlockingSeverity2(f.severity, threshold));
43892
+ const blockingFindings = sanitizedParsed.findings.filter((f) => isBlockingSeverity(f.severity, threshold));
43893
+ const advisoryFindings = sanitizedParsed.findings.filter((f) => !isBlockingSeverity(f.severity, threshold));
43184
43894
  if (advisoryFindings.length > 0) {
43185
43895
  logger?.debug("review", `Semantic review: ${advisoryFindings.length} advisory findings (below threshold '${threshold}')`, {
43186
43896
  storyId: story.id,
@@ -43205,7 +43915,7 @@ async function runSemanticReview(opts) {
43205
43915
  });
43206
43916
  const output = `Semantic review failed:
43207
43917
 
43208
- ${formatFindings2(blockingFindings)}`;
43918
+ ${formatFindings(blockingFindings)}`;
43209
43919
  recordSemanticAudit({
43210
43920
  runtime,
43211
43921
  workdir,
@@ -43409,13 +44119,14 @@ async function runReview(opts) {
43409
44119
  featureName,
43410
44120
  resolverSession,
43411
44121
  priorFailures,
44122
+ priorSemanticIterations,
43412
44123
  featureContextMarkdown,
43413
44124
  contextBundles,
43414
44125
  projectDir,
43415
44126
  env: env2,
43416
44127
  naxIgnoreIndex,
43417
44128
  runtime,
43418
- priorAdversarialFindings
44129
+ priorAdversarialIterations
43419
44130
  } = opts;
43420
44131
  const startTime = Date.now();
43421
44132
  const logger = getSafeLogger();
@@ -43489,7 +44200,7 @@ Stage and commit these files before running review.`
43489
44200
  naxConfig,
43490
44201
  featureName,
43491
44202
  resolverSession,
43492
- priorFailures,
44203
+ priorSemanticIterations,
43493
44204
  blockingThreshold: config2.blockingThreshold,
43494
44205
  featureContextMarkdown,
43495
44206
  contextBundle: contextBundles?.semantic,
@@ -43537,7 +44248,7 @@ Stage and commit these files before running review.`
43537
44248
  projectDir,
43538
44249
  naxIgnoreIndex,
43539
44250
  runtime,
43540
- priorAdversarialFindings
44251
+ priorAdversarialIterations
43541
44252
  });
43542
44253
  checks3.push(result2);
43543
44254
  if (!result2.success && !firstFailure) {
@@ -43595,16 +44306,16 @@ var init_runner4 = __esm(() => {
43595
44306
 
43596
44307
  // src/review/verdict-writer.ts
43597
44308
  import { mkdir as mkdir6 } from "fs/promises";
43598
- import { join as join35 } from "path";
44309
+ import { join as join36 } from "path";
43599
44310
  async function writeReviewVerdict(entry) {
43600
44311
  const logger = getSafeLogger();
43601
44312
  try {
43602
- const projectDir = await _verdictWriterDeps.findNaxProjectRoot(entry.featureName ? join35(entry.featureName) : ".");
44313
+ const projectDir = await _verdictWriterDeps.findNaxProjectRoot(entry.featureName ? join36(entry.featureName) : ".");
43603
44314
  const baseDir = projectDir ?? ".";
43604
- const verdictDir = entry.featureName ? join35(baseDir, ".nax", "review-verdicts", entry.featureName) : join35(baseDir, ".nax", "review-verdicts", "_unknown");
44315
+ const verdictDir = entry.featureName ? join36(baseDir, ".nax", "review-verdicts", entry.featureName) : join36(baseDir, ".nax", "review-verdicts", "_unknown");
43605
44316
  await _verdictWriterDeps.mkdir(verdictDir, { recursive: true });
43606
44317
  const fileName = `${entry.storyId}.json`;
43607
- const filePath = join35(verdictDir, fileName);
44318
+ const filePath = join36(verdictDir, fileName);
43608
44319
  await _verdictWriterDeps.writeFile(filePath, JSON.stringify(entry, null, 2));
43609
44320
  logger?.debug("review", "Review verdict written", {
43610
44321
  storyId: entry.storyId,
@@ -43629,7 +44340,7 @@ var init_verdict_writer = __esm(() => {
43629
44340
  });
43630
44341
 
43631
44342
  // src/review/orchestrator.ts
43632
- import { join as join36 } from "path";
44343
+ import { join as join37 } from "path";
43633
44344
  var {spawn: spawn4 } = globalThis.Bun;
43634
44345
  async function getChangedFiles2(workdir, baseRef) {
43635
44346
  try {
@@ -43704,13 +44415,14 @@ class ReviewOrchestrator {
43704
44415
  featureName,
43705
44416
  resolverSession,
43706
44417
  priorFailures,
44418
+ priorSemanticIterations,
43707
44419
  featureContextMarkdown,
43708
44420
  contextBundles,
43709
44421
  projectDir,
43710
44422
  env: env2,
43711
44423
  naxIgnoreIndex,
43712
44424
  runtime,
43713
- priorAdversarialFindings
44425
+ priorAdversarialIterations
43714
44426
  } = opts;
43715
44427
  const logger = getSafeLogger();
43716
44428
  const hasSemantic = reviewConfig.checks.includes("semantic");
@@ -43755,12 +44467,14 @@ class ReviewOrchestrator {
43755
44467
  featureName,
43756
44468
  resolverSession,
43757
44469
  priorFailures,
44470
+ priorSemanticIterations,
43758
44471
  featureContextMarkdown,
43759
44472
  contextBundles,
43760
44473
  projectDir,
43761
44474
  env: env2,
43762
44475
  naxIgnoreIndex,
43763
- runtime
44476
+ runtime,
44477
+ priorAdversarialIterations
43764
44478
  });
43765
44479
  } else {
43766
44480
  const mechanicalCheckNames = ORDERED_MECHANICAL_REVIEW_CHECKS.filter((check2) => reviewConfig.checks.includes(check2));
@@ -43834,7 +44548,7 @@ class ReviewOrchestrator {
43834
44548
  naxConfig,
43835
44549
  featureName,
43836
44550
  resolverSession,
43837
- priorFailures,
44551
+ priorSemanticIterations,
43838
44552
  blockingThreshold: reviewConfig.blockingThreshold,
43839
44553
  featureContextMarkdown,
43840
44554
  contextBundle: contextBundles?.semantic,
@@ -43857,7 +44571,7 @@ class ReviewOrchestrator {
43857
44571
  projectDir,
43858
44572
  naxIgnoreIndex,
43859
44573
  runtime,
43860
- priorAdversarialFindings
44574
+ priorAdversarialIterations
43861
44575
  })
43862
44576
  ]);
43863
44577
  llmCheckResults = [semResult, advResult];
@@ -43877,13 +44591,14 @@ class ReviewOrchestrator {
43877
44591
  featureName,
43878
44592
  resolverSession,
43879
44593
  priorFailures,
44594
+ priorSemanticIterations,
43880
44595
  featureContextMarkdown,
43881
44596
  contextBundles,
43882
44597
  projectDir,
43883
44598
  env: env2,
43884
44599
  naxIgnoreIndex,
43885
44600
  runtime,
43886
- priorAdversarialFindings
44601
+ priorAdversarialIterations
43887
44602
  });
43888
44603
  llmCheckResults = llmResult.checks;
43889
44604
  }
@@ -43948,7 +44663,7 @@ class ReviewOrchestrator {
43948
44663
  const baseRef = storyGitRef ?? executionConfig?.storyGitRef;
43949
44664
  const changedFiles = await getChangedFiles2(workdir, baseRef);
43950
44665
  const repoRoot = projectDir ?? workdir;
43951
- const packageDir = scopePrefix ? join36(repoRoot, scopePrefix) : undefined;
44666
+ const packageDir = scopePrefix ? join37(repoRoot, scopePrefix) : undefined;
43952
44667
  const ignoreMatchers = naxIgnoreIndex?.getMatchers(packageDir) ?? await resolveNaxIgnorePatterns(repoRoot, packageDir);
43953
44668
  const visibleChangedFiles = filterNaxInternalPaths(changedFiles, ignoreMatchers);
43954
44669
  const scopedFiles = scopePrefix ? visibleChangedFiles.filter((f) => f === scopePrefix || f.startsWith(`${scopePrefix}/`)) : visibleChangedFiles;
@@ -43970,7 +44685,7 @@ class ReviewOrchestrator {
43970
44685
  passed: result.passed,
43971
44686
  output: result.output,
43972
44687
  exitCode: result.exitCode,
43973
- findings: result.findings
44688
+ findings: result.findings?.map((rf) => pluginToFinding(rf, workdir))
43974
44689
  });
43975
44690
  if (!result.passed) {
43976
44691
  builtIn.pluginReviewers = pluginResults;
@@ -44031,32 +44746,60 @@ class ReviewOrchestrator {
44031
44746
  featureName: ctx.prd.feature,
44032
44747
  resolverSession,
44033
44748
  priorFailures: ctx.story.priorFailures,
44749
+ priorSemanticIterations: ctx.priorSemanticIterations,
44034
44750
  featureContextMarkdown: ctx.featureContextMarkdown,
44035
44751
  contextBundles,
44036
44752
  projectDir: ctx.projectDir,
44037
44753
  env: ctx.worktreeDependencyContext?.env,
44038
44754
  naxIgnoreIndex: ctx.naxIgnoreIndex,
44039
44755
  runtime: ctx.runtime,
44040
- priorAdversarialFindings: ctx.priorAdversarialFindings
44756
+ priorAdversarialIterations: ctx.priorAdversarialIterations
44041
44757
  });
44042
44758
  const advCheck = result.builtIn.checks?.find((c) => c.check === "adversarial");
44043
44759
  if (advCheck) {
44044
- if (!advCheck.success && (advCheck.findings?.length ?? 0) > 0) {
44045
- ctx.priorAdversarialFindings = {
44046
- round: (ctx.priorAdversarialFindings?.round ?? 0) + 1,
44047
- findings: (advCheck.findings ?? []).map((f) => ({
44048
- severity: f.severity,
44049
- category: f.category,
44050
- file: f.file,
44051
- line: f.line,
44052
- issue: f.message
44053
- }))
44760
+ if (!advCheck.success && !advCheck.skipped) {
44761
+ const prior = ctx.priorAdversarialIterations ?? [];
44762
+ const findingsBefore = prior.length > 0 ? prior[prior.length - 1].findingsAfter ?? [] : [];
44763
+ const findingsAfter = advCheck.findings ?? [];
44764
+ const now = new Date().toISOString();
44765
+ const newIteration = {
44766
+ iterationNum: prior.length + 1,
44767
+ findingsBefore,
44768
+ fixesApplied: [],
44769
+ findingsAfter,
44770
+ outcome: classifyOutcome(findingsBefore, findingsAfter),
44771
+ startedAt: now,
44772
+ finishedAt: now
44054
44773
  };
44774
+ ctx.priorAdversarialIterations = [...prior, newIteration];
44055
44775
  } else if (advCheck.success && !advCheck.skipped) {
44056
- ctx.priorAdversarialFindings = undefined;
44776
+ ctx.priorAdversarialIterations = undefined;
44057
44777
  }
44058
44778
  } else if (retrySkipChecks?.has("adversarial")) {
44059
- ctx.priorAdversarialFindings = undefined;
44779
+ ctx.priorAdversarialIterations = undefined;
44780
+ }
44781
+ const semCheck = result.builtIn.checks?.find((c) => c.check === "semantic");
44782
+ if (semCheck) {
44783
+ if (!semCheck.success && !semCheck.skipped) {
44784
+ const prior = ctx.priorSemanticIterations ?? [];
44785
+ const findingsBefore = prior.length > 0 ? prior[prior.length - 1].findingsAfter ?? [] : [];
44786
+ const findingsAfter = semCheck.findings ?? [];
44787
+ const now = new Date().toISOString();
44788
+ const newIteration = {
44789
+ iterationNum: prior.length + 1,
44790
+ findingsBefore,
44791
+ fixesApplied: [],
44792
+ findingsAfter,
44793
+ outcome: classifyOutcome(findingsBefore, findingsAfter),
44794
+ startedAt: now,
44795
+ finishedAt: now
44796
+ };
44797
+ ctx.priorSemanticIterations = [...prior, newIteration];
44798
+ } else if (semCheck.success && !semCheck.skipped) {
44799
+ ctx.priorSemanticIterations = undefined;
44800
+ }
44801
+ } else if (retrySkipChecks?.has("semantic")) {
44802
+ ctx.priorSemanticIterations = undefined;
44060
44803
  }
44061
44804
  return result;
44062
44805
  }
@@ -44064,6 +44807,7 @@ class ReviewOrchestrator {
44064
44807
  var _orchestratorDeps2, reviewOrchestrator;
44065
44808
  var init_orchestrator2 = __esm(() => {
44066
44809
  init_engine();
44810
+ init_findings();
44067
44811
  init_logger2();
44068
44812
  init_path_filters();
44069
44813
  init_adversarial();
@@ -44201,11 +44945,9 @@ var init_review = __esm(() => {
44201
44945
  return { action: "continue", cost: reviewCost };
44202
44946
  }
44203
44947
  if (!result.success) {
44204
- const pluginFindings = result.builtIn.pluginReviewers?.flatMap((pr) => pr.findings ?? []) ?? [];
44205
44948
  const semanticFindings = (result.builtIn.checks ?? []).filter((c) => c.check === "semantic" && !c.success && c.findings?.length).flatMap((c) => c.findings ?? []);
44206
- const allFindings = [...pluginFindings, ...semanticFindings];
44207
- if (allFindings.length > 0) {
44208
- ctx.reviewFindings = allFindings;
44949
+ if (semanticFindings.length > 0) {
44950
+ ctx.reviewFindings = semanticFindings;
44209
44951
  }
44210
44952
  if (result.pluginFailed) {
44211
44953
  if (ctx.interaction && isTriggerEnabled("security-review", ctx.config)) {
@@ -44238,481 +44980,6 @@ var init_review = __esm(() => {
44238
44980
  createReviewerSession
44239
44981
  };
44240
44982
  });
44241
- // src/review/index.ts
44242
- var init_review2 = __esm(() => {
44243
- init_adversarial();
44244
- init_categorization();
44245
- init_diff_utils();
44246
- init_runner4();
44247
- });
44248
-
44249
- // src/verification/shared-rectification-loop.ts
44250
- function buildProgressivePromptPreamble(opts) {
44251
- const rethinkAt = Math.min(opts.rethinkAtAttempt ?? 2, opts.maxAttempts);
44252
- const urgencyAt = Math.min(opts.urgencyAtAttempt ?? 3, opts.maxAttempts);
44253
- const shouldRethink = opts.attempt >= rethinkAt;
44254
- const shouldUrgency = opts.attempt >= urgencyAt;
44255
- if (!shouldRethink && !shouldUrgency) {
44256
- return "";
44257
- }
44258
- if (shouldUrgency) {
44259
- opts.logger?.info(opts.stage, "Progressive prompt escalation: urgency + rethink injected", {
44260
- attempt: opts.attempt,
44261
- rethinkAtAttempt: rethinkAt,
44262
- urgencyAtAttempt: urgencyAt,
44263
- maxAttempts: opts.maxAttempts
44264
- });
44265
- } else {
44266
- opts.logger?.info(opts.stage, "Progressive prompt escalation: rethink injected", {
44267
- attempt: opts.attempt,
44268
- rethinkAtAttempt: rethinkAt,
44269
- maxAttempts: opts.maxAttempts
44270
- });
44271
- }
44272
- const urgencySection = shouldUrgency ? opts.urgencySection : "";
44273
- const rethinkSection = shouldRethink ? opts.rethinkSection : "";
44274
- return `${urgencySection}${rethinkSection}`;
44275
- }
44276
- async function runRetryLoop(input) {
44277
- let currentFailure = input.failure;
44278
- const previous = [...input.previousAttempts];
44279
- for (let attempt = 1;attempt <= input.maxAttempts; attempt++) {
44280
- const prompt = await Promise.resolve(input.buildPrompt(currentFailure, previous));
44281
- const result = await input.execute(prompt);
44282
- const outcome = await input.verify(result);
44283
- previous.push({ attempt, result });
44284
- if (outcome.passed) {
44285
- return { outcome: "fixed", result, attempts: attempt };
44286
- }
44287
- currentFailure = outcome.newFailure;
44288
- if (input.shouldAbort?.(currentFailure, attempt)) {
44289
- return { outcome: "aborted", attempts: attempt };
44290
- }
44291
- }
44292
- return { outcome: "exhausted", attempts: input.maxAttempts, finalFailure: currentFailure };
44293
- }
44294
-
44295
- // src/pipeline/stages/autofix-agent.ts
44296
- var exports_autofix_agent = {};
44297
- __export(exports_autofix_agent, {
44298
- runAgentRectification: () => runAgentRectification
44299
- });
44300
- function collectFailedChecks(ctx) {
44301
- return (ctx.reviewResult?.checks ?? []).filter((c) => !c.success);
44302
- }
44303
- function getCheckSignature(checks3) {
44304
- return [...new Set(checks3.map((check2) => check2.check))].sort().join("|");
44305
- }
44306
- function buildAutofixEscalationPreamble(attempt, maxAttempts, rethinkAtAttempt, urgencyAtAttempt) {
44307
- return buildProgressivePromptPreamble({
44308
- attempt,
44309
- maxAttempts,
44310
- rethinkAtAttempt,
44311
- urgencyAtAttempt,
44312
- stage: "autofix",
44313
- logger: getLogger(),
44314
- urgencySection: `## Final Autofix Attempt Before Escalation
44315
-
44316
- This is attempt ${attempt}. If the review still fails after this, autofix will escalate instead of retrying.
44317
- A different approach is required. Do not repeat the same fix.
44318
-
44319
- `,
44320
- rethinkSection: `## Previous Attempt Did Not Fix the Failures
44321
-
44322
- Your previous fix attempt (attempt ${attempt}) did not resolve the quality errors. Rethink your approach.
44323
-
44324
- - Do not repeat the same edit pattern.
44325
- - Re-read the failing diagnostics carefully.
44326
- - Try a fundamentally different fix strategy if the earlier one did not work.
44327
-
44328
- `
44329
- });
44330
- }
44331
- async function runAgentRectification(ctx, lintFixCmd, formatFixCmd, effectiveWorkdir) {
44332
- const logger = getLogger();
44333
- const maxPerCycle = ctx.config.quality.autofix?.maxAttempts ?? 2;
44334
- const maxTotal = ctx.config.quality.autofix?.maxTotalAttempts ?? 10;
44335
- const rethinkAtAttempt = ctx.config.quality.autofix?.rethinkAtAttempt ?? 2;
44336
- const urgencyAtAttempt = ctx.config.quality.autofix?.urgencyAtAttempt ?? 3;
44337
- const consumed = ctx.autofixAttempt ?? 0;
44338
- const failedChecks = collectFailedChecks(ctx);
44339
- if (failedChecks.length === 0) {
44340
- logger.debug("autofix", "No failed checks found \u2014 skipping agent rectification", { storyId: ctx.story.id });
44341
- return { succeeded: false, cost: 0 };
44342
- }
44343
- if (consumed >= maxTotal) {
44344
- logger.warn("autofix", "Global autofix budget exhausted \u2014 escalating", {
44345
- storyId: ctx.story.id,
44346
- totalAttempts: consumed,
44347
- maxTotalAttempts: maxTotal
44348
- });
44349
- return { succeeded: false, cost: 0 };
44350
- }
44351
- const remainingBudget = maxTotal - consumed;
44352
- const maxAttempts = Math.min(maxPerCycle, remainingBudget);
44353
- if (!ctx.agentManager) {
44354
- logger.error("autofix", "Agent manager unavailable \u2014 cannot run agent rectification", { storyId: ctx.story.id });
44355
- return { succeeded: false, cost: 0 };
44356
- }
44357
- if (!ctx.runtime) {
44358
- throw new NaxError("runtime required \u2014 legacy agentManager.run path removed (ADR-019 Wave 3, issue #762)", "DISPATCH_NO_RUNTIME", { stage: "rectification", storyId: ctx.story.id });
44359
- }
44360
- const { agentManager } = ctx;
44361
- const { runtime } = ctx;
44362
- let implementerChecks = failedChecks;
44363
- let testWriterChecks = [];
44364
- const stageTestFilePatterns = typeof ctx.rootConfig.execution?.smartTestRunner === "object" ? ctx.rootConfig.execution.smartTestRunner?.testFilePatterns : undefined;
44365
- const lintOutputFormat = ctx.config.quality.lintOutput?.format ?? "auto";
44366
- const typecheckOutputFormat = ctx.config.quality.typecheckOutput?.format ?? "auto";
44367
- for (const check2 of failedChecks) {
44368
- if (check2.check === "adversarial" || check2.check === "lint" || check2.check === "typecheck") {
44369
- const { testFindings, sourceFindings } = splitFindingsByScope(check2, stageTestFilePatterns, lintOutputFormat, typecheckOutputFormat);
44370
- if (testFindings || sourceFindings) {
44371
- if (testFindings)
44372
- testWriterChecks = [...testWriterChecks, testFindings];
44373
- if (sourceFindings) {
44374
- implementerChecks = implementerChecks.map((c) => c === check2 ? sourceFindings : c);
44375
- } else {
44376
- implementerChecks = implementerChecks.filter((c) => c !== check2);
44377
- }
44378
- }
44379
- }
44380
- }
44381
- let autofixCostAccum = 0;
44382
- if (testWriterChecks.length > 0) {
44383
- if (ctx.routing.testStrategy === "no-test") {
44384
- logger.warn("autofix", "Skipping test-writer rectification (no-test strategy)", {
44385
- storyId: ctx.story.id,
44386
- checks: testWriterChecks.map((c) => c.check)
44387
- });
44388
- } else {
44389
- logger.info("autofix", "Routing test-file findings to test-writer session", {
44390
- storyId: ctx.story.id,
44391
- checks: testWriterChecks.map((c) => c.check)
44392
- });
44393
- autofixCostAccum += await _autofixDeps.runTestWriterRectification(ctx, testWriterChecks, ctx.story, agentManager);
44394
- }
44395
- }
44396
- if (implementerChecks.length === 0) {
44397
- logger.info("autofix", "All findings routed to test-writer \u2014 skipping implementer loop", {
44398
- storyId: ctx.story.id
44399
- });
44400
- return { succeeded: false, cost: autofixCostAccum };
44401
- }
44402
- let unresolvedReason;
44403
- let autofixBeforeRef;
44404
- const implementerSession = formatSessionName({
44405
- workdir: ctx.workdir,
44406
- featureName: ctx.prd.feature,
44407
- storyId: ctx.story.id,
44408
- role: "implementer"
44409
- });
44410
- let sessionConfirmedOpen = consumed === 0;
44411
- logger.info("autofix", "Starting agent rectification for review failures", {
44412
- storyId: ctx.story.id,
44413
- failedChecks: implementerChecks.map((check2) => check2.check),
44414
- maxAttempts,
44415
- totalUsed: consumed,
44416
- maxTotalAttempts: maxTotal
44417
- });
44418
- const initialFailure = {
44419
- checks: implementerChecks,
44420
- checkSignature: getCheckSignature(implementerChecks)
44421
- };
44422
- let currentAttempt = 0;
44423
- let currentConsecutiveNoOps = 0;
44424
- let currentCheckSignatureChanged = false;
44425
- let failOpenAborted = false;
44426
- let heldHandle;
44427
- const outcome = await runRetryLoop({
44428
- stage: "rectification",
44429
- storyId: ctx.story.id,
44430
- packageDir: ctx.workdir,
44431
- maxAttempts,
44432
- failure: initialFailure,
44433
- previousAttempts: [],
44434
- shouldAbort: (_failure, _attempt) => failOpenAborted,
44435
- buildPrompt: (failure, previous) => {
44436
- currentAttempt = previous.length + 1;
44437
- const lastResult = previous[previous.length - 1]?.result;
44438
- const lastWasNoOp = lastResult?.noOp ?? false;
44439
- currentConsecutiveNoOps = lastResult?.consecutiveNoOps ?? 0;
44440
- currentCheckSignatureChanged = failure.checkSignature !== initialFailure.checkSignature;
44441
- logger.debug("autofix", `Building prompt for attempt ${consumed + currentAttempt}/${maxTotal}`, {
44442
- storyId: ctx.story.id,
44443
- lastWasNoOp,
44444
- consecutiveNoOps: currentConsecutiveNoOps
44445
- });
44446
- if (lastWasNoOp) {
44447
- return RectifierPromptBuilder.noOpReprompt(failure.checks, currentConsecutiveNoOps, MAX_CONSECUTIVE_NOOP_REPROMPTS);
44448
- }
44449
- if (currentAttempt === 1 && sessionConfirmedOpen) {
44450
- return RectifierPromptBuilder.firstAttemptDelta(failure.checks, maxAttempts);
44451
- }
44452
- const isSessionContinuation = currentAttempt > 1 && sessionConfirmedOpen;
44453
- if (isSessionContinuation) {
44454
- if (currentCheckSignatureChanged) {
44455
- const attemptsRemaining = Math.max(1, maxAttempts - currentAttempt + 1);
44456
- return RectifierPromptBuilder.firstAttemptDelta(failure.checks, attemptsRemaining);
44457
- }
44458
- return RectifierPromptBuilder.continuation(failure.checks, currentAttempt, Math.min(rethinkAtAttempt, maxAttempts), Math.min(urgencyAtAttempt, maxAttempts));
44459
- }
44460
- let prompt = RectifierPromptBuilder.reviewRectification(failure.checks, ctx.story);
44461
- const escalationPreamble = buildAutofixEscalationPreamble(currentAttempt, maxAttempts, rethinkAtAttempt, urgencyAtAttempt);
44462
- if (escalationPreamble) {
44463
- prompt = `${escalationPreamble}${prompt}`;
44464
- }
44465
- return prompt;
44466
- },
44467
- execute: async (prompt) => {
44468
- logger.info("autofix", `Agent rectification attempt ${consumed + currentAttempt}/${maxTotal}`, {
44469
- storyId: ctx.story.id
44470
- });
44471
- autofixBeforeRef = await _autofixDeps.captureGitRef(ctx.workdir);
44472
- ctx.autofixAttempt = consumed + currentAttempt;
44473
- const modelTier = ctx.story.routing?.modelTier ?? ctx.rootConfig.autoMode.escalation.tierOrder[0]?.tier ?? "balanced";
44474
- const defaultAgent = agentManager.getDefault();
44475
- const modelDef = resolveModelForAgent(ctx.rootConfig.models, ctx.routing.agent ?? defaultAgent, modelTier, defaultAgent);
44476
- let result;
44477
- try {
44478
- if (!heldHandle) {
44479
- heldHandle = await runtime.sessionManager.openSession(implementerSession, {
44480
- agentName: defaultAgent,
44481
- role: "implementer",
44482
- workdir: ctx.workdir,
44483
- pipelineStage: "rectification",
44484
- modelDef,
44485
- timeoutSeconds: ctx.config.execution.sessionTimeoutSeconds,
44486
- featureName: ctx.prd.feature,
44487
- storyId: ctx.story.id,
44488
- signal: runtime.signal
44489
- });
44490
- }
44491
- const turn = await agentManager.runAsSession(defaultAgent, heldHandle, prompt, {
44492
- storyId: ctx.story.id,
44493
- featureName: ctx.prd.feature,
44494
- workdir: ctx.workdir,
44495
- projectDir: ctx.projectDir,
44496
- pipelineStage: "rectification",
44497
- sessionRole: "implementer",
44498
- signal: runtime.signal,
44499
- maxTurns: ctx.config.agent?.maxInteractionTurns
44500
- });
44501
- result = {
44502
- success: true,
44503
- exitCode: 0,
44504
- output: turn.output,
44505
- rateLimited: false,
44506
- durationMs: 0,
44507
- estimatedCostUsd: turn.estimatedCostUsd,
44508
- ...turn.exactCostUsd !== undefined && { exactCostUsd: turn.exactCostUsd },
44509
- ...turn.tokenUsage && { tokenUsage: turn.tokenUsage },
44510
- ...heldHandle.protocolIds && { protocolIds: heldHandle.protocolIds }
44511
- };
44512
- sessionConfirmedOpen = true;
44513
- } catch (err) {
44514
- sessionConfirmedOpen = false;
44515
- if (heldHandle) {
44516
- const stale = heldHandle;
44517
- heldHandle = undefined;
44518
- await runtime.sessionManager.closeSession(stale).catch(() => {});
44519
- }
44520
- throw err;
44521
- }
44522
- autofixCostAccum += result.estimatedCostUsd ?? 0;
44523
- if (ctx.sessionManager && ctx.sessionId && result.protocolIds) {
44524
- try {
44525
- const desc = ctx.sessionManager.get(ctx.sessionId);
44526
- if (desc) {
44527
- ctx.sessionManager.bindHandle(ctx.sessionId, implementerSession, result.protocolIds);
44528
- }
44529
- } catch {}
44530
- }
44531
- if (result.output) {
44532
- const unresolvedMatch = UNRESOLVED_REGEX.exec(result.output);
44533
- if (unresolvedMatch) {
44534
- unresolvedReason = (unresolvedMatch[1] ?? "reviewer findings contradicted each other").trim();
44535
- logger.warn("autofix", "Implementer signalled reviewer contradiction \u2014 escalating", {
44536
- storyId: ctx.story.id,
44537
- unresolvedReason
44538
- });
44539
- throw new Error("AUTOFIX_UNRESOLVED");
44540
- }
44541
- }
44542
- if (ctx.reviewerSession && result.output) {
44543
- const maxClarifications = ctx.config.review?.dialogue?.maxClarificationsPerAttempt ?? 3;
44544
- let clarifyCount = 0;
44545
- const clarifyRegex = new RegExp(CLARIFY_REGEX.source, `${CLARIFY_REGEX.flags}g`);
44546
- let match;
44547
- while ((match = clarifyRegex.exec(result.output)) !== null) {
44548
- if (clarifyCount >= maxClarifications)
44549
- break;
44550
- const question = match[1]?.trim() ?? "";
44551
- if (!question)
44552
- continue;
44553
- try {
44554
- await ctx.reviewerSession.clarify(question);
44555
- clarifyCount++;
44556
- } catch (err) {
44557
- logger.debug("autofix", "reviewerSession.clarify() failed \u2014 proceeding without clarification", {
44558
- storyId: ctx.story.id
44559
- });
44560
- }
44561
- }
44562
- }
44563
- const refAfterAttempt = await _autofixDeps.captureGitRef(ctx.workdir);
44564
- const sourceFilesChanged = autofixBeforeRef === undefined || refAfterAttempt === undefined || autofixBeforeRef !== refAfterAttempt;
44565
- const noOp = !sourceFilesChanged;
44566
- const checkSignatureChanged = false;
44567
- const newConsecutiveNoOps = noOp ? currentConsecutiveNoOps + 1 : 0;
44568
- return {
44569
- agentSuccess: result.success,
44570
- cost: result.estimatedCostUsd ?? 0,
44571
- checkSignatureChanged,
44572
- noOp,
44573
- consecutiveNoOps: newConsecutiveNoOps
44574
- };
44575
- },
44576
- verify: async (result) => {
44577
- const failingChecks = (ctx.reviewResult?.checks ?? []).filter((c) => !c.success);
44578
- const hasMechanicalFailure = failingChecks.some((c) => !LLM_REVIEW_CHECKS.has(c.check));
44579
- const recheckWorthwhile = !result.noOp || hasMechanicalFailure;
44580
- const passed = recheckWorthwhile ? await _autofixDeps.recheckReview(ctx) : false;
44581
- if (passed) {
44582
- if (result.noOp) {
44583
- logger.info("autofix", `[OK] Checks pass without new commit on attempt ${consumed + currentAttempt} (transient or already resolved)`, { storyId: ctx.story.id });
44584
- } else {
44585
- logger.info("autofix", `[OK] Agent rectification succeeded on attempt ${consumed + currentAttempt}`, {
44586
- storyId: ctx.story.id
44587
- });
44588
- }
44589
- return { passed: true };
44590
- }
44591
- if (result.consecutiveNoOps > MAX_CONSECUTIVE_NOOP_REPROMPTS) {
44592
- logger.warn("autofix", "No source changes (no-op limit reached) \u2014 counting as consumed attempt", {
44593
- storyId: ctx.story.id,
44594
- attemptsRemaining: maxAttempts - currentAttempt
44595
- });
44596
- const passedChecks = (ctx.reviewResult?.checks ?? []).filter((c) => c.success && !c.skipped).map((c) => c.check);
44597
- if (passedChecks.length > 0) {
44598
- ctx.retrySkipChecks = new Set(passedChecks);
44599
- logger.debug("autofix", "No source changes \u2014 skipping already-passed checks on recheck", {
44600
- storyId: ctx.story.id,
44601
- skippedChecks: passedChecks
44602
- });
44603
- }
44604
- return {
44605
- passed: false,
44606
- newFailure: initialFailure
44607
- };
44608
- }
44609
- if (result.noOp) {
44610
- logger.info("autofix", "No source changes and checks still failing \u2014 re-prompting with stronger directive (counts as consumed attempt)", {
44611
- storyId: ctx.story.id,
44612
- noOpCount: `${result.consecutiveNoOps}/${MAX_CONSECUTIVE_NOOP_REPROMPTS}`,
44613
- attemptsRemaining: maxAttempts - currentAttempt
44614
- });
44615
- return {
44616
- passed: false,
44617
- newFailure: initialFailure
44618
- };
44619
- }
44620
- const updatedFailed = collectFailedChecks(ctx);
44621
- const hasNewLintFailure = updatedFailed.some((c) => c.check === "lint");
44622
- if (hasNewLintFailure && (lintFixCmd || formatFixCmd)) {
44623
- if (lintFixCmd) {
44624
- logger.debug("autofix", "Agent introduced lint errors \u2014 running lintFix before next attempt", {
44625
- storyId: ctx.story.id
44626
- });
44627
- pipelineEventBus.emit({ type: "autofix:started", storyId: ctx.story.id, command: lintFixCmd });
44628
- await _autofixDeps.runQualityCommand({
44629
- commandName: "lintFix",
44630
- command: lintFixCmd,
44631
- workdir: effectiveWorkdir,
44632
- storyId: ctx.story.id
44633
- });
44634
- }
44635
- if (formatFixCmd) {
44636
- pipelineEventBus.emit({ type: "autofix:started", storyId: ctx.story.id, command: formatFixCmd });
44637
- await _autofixDeps.runQualityCommand({
44638
- commandName: "formatFix",
44639
- command: formatFixCmd,
44640
- workdir: effectiveWorkdir,
44641
- storyId: ctx.story.id
44642
- });
44643
- }
44644
- const mechPassed = await _autofixDeps.recheckReview(ctx);
44645
- pipelineEventBus.emit({ type: "autofix:completed", storyId: ctx.story.id, fixed: mechPassed });
44646
- if (mechPassed) {
44647
- logger.info("autofix", `[OK] Mechanical fix resolved agent-introduced lint errors on attempt ${consumed + currentAttempt}`, {
44648
- storyId: ctx.story.id
44649
- });
44650
- return { passed: true };
44651
- }
44652
- }
44653
- if (updatedFailed.length > 0) {
44654
- const updatedCheckSignature = getCheckSignature(updatedFailed);
44655
- currentCheckSignatureChanged = updatedCheckSignature !== initialFailure.checkSignature;
44656
- logger.warn("autofix", `Agent rectification still failing after attempt ${consumed + currentAttempt}`, {
44657
- storyId: ctx.story.id,
44658
- attemptsRemaining: maxAttempts - currentAttempt,
44659
- globalBudgetRemaining: maxTotal - (consumed + currentAttempt)
44660
- });
44661
- return {
44662
- passed: false,
44663
- newFailure: {
44664
- checks: updatedFailed,
44665
- checkSignature: updatedCheckSignature
44666
- }
44667
- };
44668
- }
44669
- const isFailOpenOnly = (ctx.reviewResult?.checks ?? []).some((c) => c.failOpen);
44670
- if (isFailOpenOnly) {
44671
- failOpenAborted = true;
44672
- }
44673
- logger.warn("autofix", isFailOpenOnly ? "Adversarial timed out during recheck (fail-open) \u2014 aborting retry to avoid stale re-prompt" : "Agent rectification exhausted \u2014 no failed checks detected after recheck", {
44674
- storyId: ctx.story.id,
44675
- attemptsUsed: currentAttempt,
44676
- globalBudgetUsed: consumed + currentAttempt,
44677
- maxTotalAttempts: maxTotal
44678
- });
44679
- return {
44680
- passed: false,
44681
- newFailure: initialFailure
44682
- };
44683
- }
44684
- }).catch((error48) => {
44685
- if (error48 instanceof Error && error48.message === "AUTOFIX_AGENT_NOT_FOUND") {
44686
- return { outcome: "exhausted", attempts: 0, finalFailure: initialFailure };
44687
- }
44688
- if (error48 instanceof Error && error48.message === "AUTOFIX_UNRESOLVED") {
44689
- return { outcome: "exhausted", attempts: 0, finalFailure: initialFailure };
44690
- }
44691
- throw error48;
44692
- }).finally(async () => {
44693
- if (heldHandle) {
44694
- const stale = heldHandle;
44695
- heldHandle = undefined;
44696
- await runtime.sessionManager.closeSession(stale).catch(() => {});
44697
- }
44698
- });
44699
- const succeeded = outcome.outcome === "fixed";
44700
- return { succeeded, cost: autofixCostAccum, unresolvedReason };
44701
- }
44702
- var CLARIFY_REGEX, UNRESOLVED_REGEX, MAX_CONSECUTIVE_NOOP_REPROMPTS = 1;
44703
- var init_autofix_agent = __esm(() => {
44704
- init_config();
44705
- init_errors();
44706
- init_logger2();
44707
- init_prompts();
44708
- init_review2();
44709
- init_naming();
44710
- init_event_bus();
44711
- init_autofix();
44712
- init_autofix_scope_split();
44713
- CLARIFY_REGEX = /^CLARIFY:\s*(.+)$/ms;
44714
- UNRESOLVED_REGEX = /^UNRESOLVED:\s*(.+)$/ms;
44715
- });
44716
44983
 
44717
44984
  // src/pipeline/stages/autofix.ts
44718
44985
  async function recheckReview(ctx) {
@@ -44729,10 +44996,10 @@ var autofixStage, _autofixDeps;
44729
44996
  var init_autofix = __esm(() => {
44730
44997
  init_logger2();
44731
44998
  init_quality();
44732
- init_git();
44733
44999
  init_event_bus();
45000
+ init_autofix_agent();
44734
45001
  init_autofix_scope_split();
44735
- init_autofix_test_writer();
45002
+ init_autofix_test_writer2();
44736
45003
  autofixStage = {
44737
45004
  name: "autofix",
44738
45005
  enabled(ctx) {
@@ -44823,7 +45090,7 @@ var init_autofix = __esm(() => {
44823
45090
  if (ctx.routing.testStrategy === "no-test") {
44824
45091
  const failedChecks = (reviewResult.checks ?? []).filter((c) => !c.success);
44825
45092
  if (failedChecks.length > 0 && failedChecks.every((c) => {
44826
- const { testFindings, sourceFindings } = splitFindingsByScope(c, testFilePatterns, lintOutputFormat, typecheckOutputFormat);
45093
+ const { testFindings, sourceFindings } = splitFindingsByScope(c, testFilePatterns, lintOutputFormat, typecheckOutputFormat, { workdir: ctx.workdir });
44827
45094
  return testFindings !== null && sourceFindings === null;
44828
45095
  })) {
44829
45096
  const skippedFindingCount = failedChecks.flatMap((c) => c.findings ?? []).length;
@@ -44896,8 +45163,7 @@ var init_autofix = __esm(() => {
44896
45163
  _autofixDeps = {
44897
45164
  runQualityCommand,
44898
45165
  recheckReview,
44899
- captureGitRef,
44900
- runAgentRectification: (ctx, lintFixCmd, formatFixCmd, effectiveWorkdir) => Promise.resolve().then(() => (init_autofix_agent(), exports_autofix_agent)).then(({ runAgentRectification: runAgentRectification2 }) => runAgentRectification2(ctx, lintFixCmd, formatFixCmd, effectiveWorkdir)),
45166
+ runAgentRectification: runAgentRectificationV2,
44901
45167
  runTestWriterRectification: (ctx, testWriterChecks, story, agentManager) => runTestWriterRectification(ctx, testWriterChecks, story, agentManager)
44902
45168
  };
44903
45169
  });
@@ -44910,6 +45176,17 @@ async function persistSemanticVerdict(featureDir, storyId, verdict) {
44910
45176
  const filePath = path8.join(dir, `${storyId}.json`);
44911
45177
  await _semanticVerdictDeps.writeFile(filePath, JSON.stringify(verdict, null, 2));
44912
45178
  }
45179
+ function migrateSemanticVerdict(verdict) {
45180
+ if (!verdict.findings?.length)
45181
+ return verdict;
45182
+ const first = verdict.findings[0];
45183
+ if ("source" in first)
45184
+ return verdict;
45185
+ return {
45186
+ ...verdict,
45187
+ findings: verdict.findings.map((f) => reviewFindingToFinding(f))
45188
+ };
45189
+ }
44913
45190
  async function loadSemanticVerdicts(featureDir) {
44914
45191
  const dir = path8.join(featureDir, "semantic-verdicts");
44915
45192
  let files;
@@ -44927,7 +45204,8 @@ async function loadSemanticVerdicts(featureDir) {
44927
45204
  const filePath = path8.join(dir, file3);
44928
45205
  const content = await _semanticVerdictDeps.readFile(filePath);
44929
45206
  try {
44930
- results.push(JSON.parse(content));
45207
+ const parsed = JSON.parse(content);
45208
+ results.push(migrateSemanticVerdict(parsed));
44931
45209
  } catch {
44932
45210
  _semanticVerdictDeps.logDebug(`Skipping invalid JSON in semantic-verdicts/${file3}`);
44933
45211
  }
@@ -44936,6 +45214,7 @@ async function loadSemanticVerdicts(featureDir) {
44936
45214
  }
44937
45215
  var _semanticVerdictDeps;
44938
45216
  var init_semantic_verdict = __esm(() => {
45217
+ init_findings();
44939
45218
  init_logger2();
44940
45219
  _semanticVerdictDeps = {
44941
45220
  mkdirp: async (dir) => {
@@ -45088,10 +45367,10 @@ var init_effectiveness = __esm(() => {
45088
45367
 
45089
45368
  // src/execution/progress.ts
45090
45369
  import { appendFile as appendFile3, mkdir as mkdir7 } from "fs/promises";
45091
- import { join as join37 } from "path";
45370
+ import { join as join38 } from "path";
45092
45371
  async function appendProgress(featureDir, storyId, status, message) {
45093
45372
  await mkdir7(featureDir, { recursive: true });
45094
- const progressPath = join37(featureDir, "progress.txt");
45373
+ const progressPath = join38(featureDir, "progress.txt");
45095
45374
  const timestamp = new Date().toISOString();
45096
45375
  const entry = `[${timestamp}] ${storyId} \u2014 ${status.toUpperCase()} \u2014 ${message}
45097
45376
  `;
@@ -45223,7 +45502,7 @@ var init_completion = __esm(() => {
45223
45502
 
45224
45503
  // src/constitution/loader.ts
45225
45504
  import { existsSync as existsSync19 } from "fs";
45226
- import { join as join38 } from "path";
45505
+ import { join as join39 } from "path";
45227
45506
  function truncateToTokens(text, maxTokens) {
45228
45507
  const maxChars = maxTokens * 3;
45229
45508
  if (text.length <= maxChars) {
@@ -45245,7 +45524,7 @@ async function loadConstitution(projectDir, config2) {
45245
45524
  }
45246
45525
  let combinedContent = "";
45247
45526
  if (!config2.skipGlobal) {
45248
- const globalPath = join38(globalConfigDir(), config2.path);
45527
+ const globalPath = join39(globalConfigDir(), config2.path);
45249
45528
  if (existsSync19(globalPath)) {
45250
45529
  const validatedPath = validateFilePath(globalPath, globalConfigDir());
45251
45530
  const globalFile = Bun.file(validatedPath);
@@ -45255,7 +45534,7 @@ async function loadConstitution(projectDir, config2) {
45255
45534
  }
45256
45535
  }
45257
45536
  }
45258
- const projectPath = join38(projectDir, config2.path);
45537
+ const projectPath = join39(projectDir, config2.path);
45259
45538
  if (existsSync19(projectPath)) {
45260
45539
  const validatedPath = validateFilePath(projectPath, projectDir);
45261
45540
  const projectFile = Bun.file(validatedPath);
@@ -45813,14 +46092,14 @@ async function closeAllRunSessions(sessionManager, agentGetFn) {
45813
46092
 
45814
46093
  // src/context/greenfield.ts
45815
46094
  import { readdir as readdir2 } from "fs/promises";
45816
- import { join as join39 } from "path";
46095
+ import { join as join40 } from "path";
45817
46096
  async function scanForTestFiles(dir, testPatterns, isRootCall = true) {
45818
46097
  const results = [];
45819
46098
  const ignoreDirs = new Set(["node_modules", "dist", "build", ".next", ".git"]);
45820
46099
  try {
45821
46100
  const entries = await readdir2(dir, { withFileTypes: true });
45822
46101
  for (const entry of entries) {
45823
- const fullPath = join39(dir, entry.name);
46102
+ const fullPath = join40(dir, entry.name);
45824
46103
  if (entry.isDirectory()) {
45825
46104
  if (ignoreDirs.has(entry.name))
45826
46105
  continue;
@@ -45882,6 +46161,26 @@ function shouldRetryRectification(state, config2) {
45882
46161
  return true;
45883
46162
  }
45884
46163
 
46164
+ // src/verification/shared-rectification-loop.ts
46165
+ async function runRetryLoop(input) {
46166
+ let currentFailure = input.failure;
46167
+ const previous = [...input.previousAttempts];
46168
+ for (let attempt = 1;attempt <= input.maxAttempts; attempt++) {
46169
+ const prompt = await Promise.resolve(input.buildPrompt(currentFailure, previous));
46170
+ const result = await input.execute(prompt);
46171
+ const outcome = await input.verify(result);
46172
+ previous.push({ attempt, result });
46173
+ if (outcome.passed) {
46174
+ return { outcome: "fixed", result, attempts: attempt };
46175
+ }
46176
+ currentFailure = outcome.newFailure;
46177
+ if (input.shouldAbort?.(currentFailure, attempt)) {
46178
+ return { outcome: "aborted", attempts: attempt };
46179
+ }
46180
+ }
46181
+ return { outcome: "exhausted", attempts: input.maxAttempts, finalFailure: currentFailure };
46182
+ }
46183
+
45885
46184
  // src/verification/index.ts
45886
46185
  var init_verification = __esm(() => {
45887
46186
  init_executor();
@@ -46441,26 +46740,7 @@ function categorizeVerdict(verdict, testsPass) {
46441
46740
  reviewReason: `Tests failing: ${verdict.tests.failCount} failure(s). ${verdict.reasoning}`
46442
46741
  };
46443
46742
  }
46444
- if (!verdict.acceptanceCriteria.allMet) {
46445
- const unmet = verdict.acceptanceCriteria.criteria.filter((c) => !c.met).map((c) => c.criterion);
46446
- return {
46447
- success: false,
46448
- failureCategory: "verifier-rejected",
46449
- reviewReason: `Acceptance criteria not met: ${unmet.join("; ")}`
46450
- };
46451
- }
46452
- if (verdict.quality.rating === "poor") {
46453
- return {
46454
- success: false,
46455
- failureCategory: "verifier-rejected",
46456
- reviewReason: `Poor code quality: ${verdict.quality.issues.join("; ")}`
46457
- };
46458
- }
46459
- return {
46460
- success: false,
46461
- failureCategory: "verifier-rejected",
46462
- reviewReason: verdict.reasoning || "Verifier rejected without specific reason"
46463
- };
46743
+ return { success: true };
46464
46744
  }
46465
46745
  var init_verdict = __esm(() => {
46466
46746
  init_verdict_reader();
@@ -46637,9 +46917,9 @@ async function runThreeSessionTdd(options) {
46637
46917
  let allSuccessful = sessions.every((s) => s.success);
46638
46918
  let finalFailureCategory;
46639
46919
  if (verdict !== null) {
46640
- const categorization2 = categorizeVerdict(verdict, verdict.tests.allPassing);
46641
- if (categorization2.success) {
46642
- logger.info("tdd", "[OK] Verifier verdict: approved", {
46920
+ const categorization = categorizeVerdict(verdict, verdict.tests.allPassing);
46921
+ if (categorization.success) {
46922
+ logger.info("tdd", "[OK] Verifier verdict: accepted", {
46643
46923
  storyId: story.id,
46644
46924
  verdictApproved: verdict.approved,
46645
46925
  testsAllPassing: verdict.tests.allPassing
@@ -46651,13 +46931,13 @@ async function runThreeSessionTdd(options) {
46651
46931
  logger.warn("tdd", "[WARN] Verifier verdict: rejected", {
46652
46932
  storyId: story.id,
46653
46933
  verdictApproved: verdict.approved,
46654
- failureCategory: categorization2.failureCategory,
46655
- reviewReason: categorization2.reviewReason
46934
+ failureCategory: categorization.failureCategory,
46935
+ reviewReason: categorization.reviewReason
46656
46936
  });
46657
46937
  allSuccessful = false;
46658
- finalFailureCategory = categorization2.failureCategory;
46938
+ finalFailureCategory = categorization.failureCategory;
46659
46939
  needsHumanReview = true;
46660
- reviewReason = categorization2.reviewReason;
46940
+ reviewReason = categorization.reviewReason;
46661
46941
  }
46662
46942
  } else {
46663
46943
  if (!allSuccessful) {
@@ -48222,16 +48502,16 @@ class AcceptanceStrategy {
48222
48502
  }, timeoutMs);
48223
48503
  const exitCode = await Promise.race([
48224
48504
  proc.exited,
48225
- new Promise((resolve14) => setTimeout(() => resolve14(124), timeoutMs + 6000))
48505
+ new Promise((resolve15) => setTimeout(() => resolve15(124), timeoutMs + 6000))
48226
48506
  ]);
48227
48507
  clearTimeout(timeoutId);
48228
48508
  const stdout = await Promise.race([
48229
48509
  new Response(proc.stdout).text(),
48230
- new Promise((resolve14) => setTimeout(() => resolve14(""), 3000))
48510
+ new Promise((resolve15) => setTimeout(() => resolve15(""), 3000))
48231
48511
  ]);
48232
48512
  const stderr = await Promise.race([
48233
48513
  new Response(proc.stderr).text(),
48234
- new Promise((resolve14) => setTimeout(() => resolve14(""), 3000))
48514
+ new Promise((resolve15) => setTimeout(() => resolve15(""), 3000))
48235
48515
  ]);
48236
48516
  const durationMs = Date.now() - start;
48237
48517
  if (timedOut || exitCode === 124) {
@@ -48835,7 +49115,7 @@ __export(exports_init_context, {
48835
49115
  });
48836
49116
  import { existsSync as existsSync22 } from "fs";
48837
49117
  import { mkdir as mkdir8 } from "fs/promises";
48838
- import { basename as basename5, join as join43 } from "path";
49118
+ import { basename as basename5, join as join44 } from "path";
48839
49119
  async function findFiles(dir, maxFiles = 200) {
48840
49120
  try {
48841
49121
  const proc = Bun.spawnSync([
@@ -48863,7 +49143,7 @@ async function findFiles(dir, maxFiles = 200) {
48863
49143
  return [];
48864
49144
  }
48865
49145
  async function readPackageManifest(projectRoot) {
48866
- const packageJsonPath = join43(projectRoot, "package.json");
49146
+ const packageJsonPath = join44(projectRoot, "package.json");
48867
49147
  if (!existsSync22(packageJsonPath)) {
48868
49148
  return null;
48869
49149
  }
@@ -48881,7 +49161,7 @@ async function readPackageManifest(projectRoot) {
48881
49161
  }
48882
49162
  }
48883
49163
  async function readReadmeSnippet(projectRoot) {
48884
- const readmePath = join43(projectRoot, "README.md");
49164
+ const readmePath = join44(projectRoot, "README.md");
48885
49165
  if (!existsSync22(readmePath)) {
48886
49166
  return null;
48887
49167
  }
@@ -48899,7 +49179,7 @@ async function detectEntryPoints(projectRoot) {
48899
49179
  const candidates = ["src/index.ts", "src/main.ts", "main.go", "src/lib.rs"];
48900
49180
  const found = [];
48901
49181
  for (const candidate of candidates) {
48902
- const path13 = join43(projectRoot, candidate);
49182
+ const path13 = join44(projectRoot, candidate);
48903
49183
  if (existsSync22(path13)) {
48904
49184
  found.push(candidate);
48905
49185
  }
@@ -48910,7 +49190,7 @@ async function detectConfigFiles(projectRoot) {
48910
49190
  const candidates = ["tsconfig.json", "biome.json", "turbo.json", ".env.example"];
48911
49191
  const found = [];
48912
49192
  for (const candidate of candidates) {
48913
- const path13 = join43(projectRoot, candidate);
49193
+ const path13 = join44(projectRoot, candidate);
48914
49194
  if (existsSync22(path13)) {
48915
49195
  found.push(candidate);
48916
49196
  }
@@ -49071,8 +49351,8 @@ function generatePackageContextTemplate(packagePath) {
49071
49351
  }
49072
49352
  async function initPackage(repoRoot, packagePath, force = false) {
49073
49353
  const logger = getLogger();
49074
- const naxDir = join43(repoRoot, ".nax", "mono", packagePath);
49075
- const contextPath = join43(naxDir, "context.md");
49354
+ const naxDir = join44(repoRoot, ".nax", "mono", packagePath);
49355
+ const contextPath = join44(naxDir, "context.md");
49076
49356
  if (existsSync22(contextPath) && !force) {
49077
49357
  logger.info("init", "Package context.md already exists (use --force to overwrite)", { path: contextPath });
49078
49358
  return;
@@ -49086,8 +49366,8 @@ async function initPackage(repoRoot, packagePath, force = false) {
49086
49366
  }
49087
49367
  async function initContext(projectRoot, options = {}) {
49088
49368
  const logger = getLogger();
49089
- const naxDir = join43(projectRoot, ".nax");
49090
- const contextPath = join43(naxDir, "context.md");
49369
+ const naxDir = join44(projectRoot, ".nax");
49370
+ const contextPath = join44(naxDir, "context.md");
49091
49371
  if (existsSync22(contextPath) && !options.force) {
49092
49372
  logger.info("init", "context.md already exists, skipping (use --force to overwrite)", { path: contextPath });
49093
49373
  return;
@@ -49694,19 +49974,19 @@ var init_command_argv = __esm(() => {
49694
49974
  });
49695
49975
 
49696
49976
  // src/hooks/runner.ts
49697
- import { join as join60 } from "path";
49977
+ import { join as join61 } from "path";
49698
49978
  async function loadHooksConfig(projectDir, globalDir) {
49699
49979
  let globalHooks = { hooks: {} };
49700
49980
  let projectHooks = { hooks: {} };
49701
49981
  let skipGlobal = false;
49702
- const projectPath = join60(projectDir, "hooks.json");
49982
+ const projectPath = join61(projectDir, "hooks.json");
49703
49983
  const projectData = await loadJsonFile(projectPath, "hooks");
49704
49984
  if (projectData) {
49705
49985
  projectHooks = projectData;
49706
49986
  skipGlobal = projectData.skipGlobal ?? false;
49707
49987
  }
49708
49988
  if (!skipGlobal && globalDir) {
49709
- const globalPath = join60(globalDir, "hooks.json");
49989
+ const globalPath = join61(globalDir, "hooks.json");
49710
49990
  const globalData = await loadJsonFile(globalPath, "hooks");
49711
49991
  if (globalData) {
49712
49992
  globalHooks = globalData;
@@ -49861,7 +50141,7 @@ var package_default;
49861
50141
  var init_package = __esm(() => {
49862
50142
  package_default = {
49863
50143
  name: "@nathapp/nax",
49864
- version: "0.64.1",
50144
+ version: "0.64.2-canary.1",
49865
50145
  description: "AI Coding Agent Orchestrator \u2014 loops until done",
49866
50146
  type: "module",
49867
50147
  bin: {
@@ -49945,8 +50225,8 @@ var init_version = __esm(() => {
49945
50225
  NAX_VERSION = package_default.version;
49946
50226
  NAX_COMMIT = (() => {
49947
50227
  try {
49948
- if (/^[0-9a-f]{6,10}$/.test("8afbec51"))
49949
- return "8afbec51";
50228
+ if (/^[0-9a-f]{6,10}$/.test("b8feb3bf"))
50229
+ return "b8feb3bf";
49950
50230
  } catch {}
49951
50231
  try {
49952
50232
  const result = Bun.spawnSync(["git", "rev-parse", "--short", "HEAD"], {
@@ -50392,7 +50672,7 @@ async function loadAcceptanceTestContent2(featureDir, testPaths, configuredTestP
50392
50672
  function buildResult(success2, prd, totalCost, iterations, storiesCompleted, prdDirty, failedACs, retries) {
50393
50673
  return { success: success2, prd, totalCost, iterations, storiesCompleted, prdDirty, failedACs, retries };
50394
50674
  }
50395
- async function regenerateAcceptanceTest(testPath, acceptanceContext, previousFailure) {
50675
+ async function regenerateAcceptanceTest(testPath, acceptanceContext) {
50396
50676
  const logger = getSafeLogger();
50397
50677
  const bakPath = `${testPath}.bak`;
50398
50678
  const content = await Bun.file(testPath).text();
@@ -50440,8 +50720,7 @@ async function regenerateAcceptanceTest(testPath, acceptanceContext, previousFai
50440
50720
  }
50441
50721
  const contextForSetup = {
50442
50722
  ...acceptanceContext,
50443
- ...implementationContext ? { implementationContext } : {},
50444
- ...previousFailure ? { previousFailure } : {}
50723
+ ...implementationContext ? { implementationContext } : {}
50445
50724
  };
50446
50725
  await _regenerateDeps.acceptanceSetupExecute(contextForSetup);
50447
50726
  if (!await Bun.file(testPath).exists()) {
@@ -50474,7 +50753,7 @@ var init_acceptance_helpers = __esm(() => {
50474
50753
  });
50475
50754
 
50476
50755
  // src/execution/lifecycle/acceptance-fix.ts
50477
- function fixCallCtx(ctx) {
50756
+ function fixCallCtx2(ctx) {
50478
50757
  if (!ctx.runtime) {
50479
50758
  throw new NaxError("runtime required for acceptance fix callOp", "CALL_OP_NO_RUNTIME", { stage: "acceptance" });
50480
50759
  }
@@ -50489,7 +50768,7 @@ function fixCallCtx(ctx) {
50489
50768
  }
50490
50769
  async function resolveAcceptanceDiagnosis(opts) {
50491
50770
  const logger = getSafeLogger();
50492
- const { ctx, failures, totalACs, strategy, semanticVerdicts, diagnosisOpts, previousFailure } = opts;
50771
+ const { ctx, failures, totalACs, strategy, semanticVerdicts, diagnosisOpts } = opts;
50493
50772
  const storyId = diagnosisOpts.storyId;
50494
50773
  if (strategy === "implement-only") {
50495
50774
  logger?.info("acceptance.diagnosis", "Fast path: implement-only strategy \u2192 source_bug", { storyId });
@@ -50525,74 +50804,21 @@ async function resolveAcceptanceDiagnosis(opts) {
50525
50804
  };
50526
50805
  }
50527
50806
  const sourceFiles = await loadSourceFilesForDiagnosis(diagnosisOpts.testFileContent, diagnosisOpts.workdir);
50528
- return await _applyFixDeps.callOp(fixCallCtx(ctx), acceptanceDiagnoseOp, {
50807
+ return await _diagnosisDeps.callOp(fixCallCtx2(ctx), acceptanceDiagnoseOp, {
50529
50808
  testOutput: diagnosisOpts.testOutput,
50530
50809
  testFileContent: diagnosisOpts.testFileContent,
50531
50810
  sourceFiles,
50532
- semanticVerdicts,
50533
- previousFailure
50811
+ semanticVerdicts
50534
50812
  });
50535
50813
  }
50536
- async function applyFix(opts) {
50537
- const logger = getSafeLogger();
50538
- const { ctx, failures, diagnosis, previousFailure } = opts;
50539
- const storyId = ctx.prd.userStories[0]?.id ?? "unknown";
50540
- if (!ctx.runtime) {
50541
- logger?.error("acceptance.applyFix", "Runtime not found", { storyId });
50542
- return { cost: 0 };
50543
- }
50544
- const testPaths = ctx.acceptanceTestPaths;
50545
- let testFileContent = "";
50546
- let acceptanceTestPath = "";
50547
- if (testPaths && testPaths.length > 0) {
50548
- const pathStrings = testPaths.map((p) => typeof p === "string" ? p : p.testPath);
50549
- const moduleEntries = await loadAcceptanceTestContent(pathStrings);
50550
- if (moduleEntries.length > 0) {
50551
- testFileContent = moduleEntries[0].content;
50552
- acceptanceTestPath = moduleEntries[0].testPath;
50553
- }
50554
- } else if (ctx.featureDir) {
50555
- const fallbackPath = resolveAcceptanceFeatureTestPath(ctx.featureDir, ctx.config.acceptance.testPath, ctx.config.project?.language);
50556
- const moduleEntries = await loadAcceptanceTestContent(fallbackPath);
50557
- if (moduleEntries.length > 0) {
50558
- testFileContent = moduleEntries[0].content;
50559
- acceptanceTestPath = moduleEntries[0].testPath;
50560
- }
50561
- }
50562
- const callCtx = fixCallCtx(ctx);
50563
- if (diagnosis.verdict === "source_bug" || diagnosis.verdict === "both") {
50564
- logger?.info("acceptance.applyFix", "Applying source fix", { storyId, verdict: diagnosis.verdict });
50565
- await _applyFixDeps.callOp(callCtx, acceptanceFixSourceOp, {
50566
- testOutput: failures.testOutput,
50567
- diagnosisReasoning: diagnosis.reasoning,
50568
- acceptanceTestPath,
50569
- testFileContent
50570
- });
50571
- logger?.info("acceptance.source-fix", "Source fix completed", { storyId });
50572
- }
50573
- if (diagnosis.verdict === "test_bug" || diagnosis.verdict === "both") {
50574
- logger?.info("acceptance.applyFix", "Applying test fix", { storyId, verdict: diagnosis.verdict });
50575
- await _applyFixDeps.callOp(callCtx, acceptanceFixTestOp, {
50576
- testOutput: failures.testOutput,
50577
- diagnosisReasoning: diagnosis.reasoning,
50578
- failedACs: failures.failedACs,
50579
- acceptanceTestPath,
50580
- testFileContent,
50581
- previousFailure
50582
- });
50583
- logger?.info("acceptance.test-fix", "Test fix completed", { storyId });
50584
- }
50585
- return { cost: 0 };
50586
- }
50587
- var _applyFixDeps;
50814
+ var _diagnosisDeps;
50588
50815
  var init_acceptance_fix2 = __esm(() => {
50589
- init_test_path();
50590
50816
  init_errors();
50591
50817
  init_logger2();
50592
50818
  init_operations();
50593
50819
  init_call();
50594
50820
  init_acceptance_helpers();
50595
- _applyFixDeps = {
50821
+ _diagnosisDeps = {
50596
50822
  callOp
50597
50823
  };
50598
50824
  });
@@ -50601,6 +50827,7 @@ var init_acceptance_fix2 = __esm(() => {
50601
50827
  var exports_acceptance_loop = {};
50602
50828
  __export(exports_acceptance_loop, {
50603
50829
  runAcceptanceLoop: () => runAcceptanceLoop,
50830
+ runAcceptanceFixCycle: () => runAcceptanceFixCycle,
50604
50831
  regenerateAcceptanceTest: () => regenerateAcceptanceTest,
50605
50832
  loadSpecContent: () => loadSpecContent,
50606
50833
  loadAcceptanceTestContent: () => loadAcceptanceTestContent2,
@@ -50608,48 +50835,157 @@ __export(exports_acceptance_loop, {
50608
50835
  isStubTestFile: () => isStubTestFile,
50609
50836
  buildResult: () => buildResult,
50610
50837
  _regenerateDeps: () => _regenerateDeps,
50611
- _acceptanceLoopDeps: () => _acceptanceLoopDeps
50612
- });
50838
+ _acceptanceLoopDeps: () => _acceptanceLoopDeps,
50839
+ _acceptanceFixCycleDeps: () => _acceptanceFixCycleDeps
50840
+ });
50841
+ function convertFailuresToFindings(failedACs, testOutput) {
50842
+ return failedACs.map((ac) => {
50843
+ if (ac === "AC-HOOK" || ac === "AC-ERROR") {
50844
+ return acSentinelToFinding(ac, testOutput);
50845
+ }
50846
+ return acFailureToFinding(ac, testOutput);
50847
+ });
50848
+ }
50849
+ function findingsForDiagnosis(failedACs, testOutput, diagnosis) {
50850
+ if (diagnosis.findings && diagnosis.findings.length > 0)
50851
+ return diagnosis.findings;
50852
+ const findings = convertFailuresToFindings(failedACs, testOutput);
50853
+ const isTestRunnerSentinel = (f) => f.category === "hook-failure" || f.category === "test-runner-error";
50854
+ if (diagnosis.verdict === "source_bug") {
50855
+ return findings.map((f) => isTestRunnerSentinel(f) ? f : { ...f, fixTarget: "source" });
50856
+ }
50857
+ if (diagnosis.verdict === "test_bug")
50858
+ return findings.map((f) => ({ ...f, fixTarget: "test" }));
50859
+ return findings.flatMap((f) => isTestRunnerSentinel(f) ? [f] : [
50860
+ { ...f, fixTarget: "source" },
50861
+ { ...f, fixTarget: "test" }
50862
+ ]);
50863
+ }
50864
+ function buildFixCycleCtx(ctx, runtime, storyId) {
50865
+ return {
50866
+ runtime,
50867
+ packageView: runtime.packages.resolve(ctx.workdir),
50868
+ packageDir: ctx.workdir,
50869
+ storyId,
50870
+ featureName: ctx.feature,
50871
+ agentName: ctx.agentManager?.getDefault() ?? "claude"
50872
+ };
50873
+ }
50874
+ function buildAcceptanceContext(ctx, prd) {
50875
+ const firstStory = prd.userStories[0];
50876
+ return {
50877
+ config: ctx.config,
50878
+ rootConfig: ctx.config,
50879
+ prd,
50880
+ story: firstStory,
50881
+ stories: [firstStory],
50882
+ routing: {
50883
+ complexity: "simple",
50884
+ modelTier: "balanced",
50885
+ testStrategy: "test-after",
50886
+ reasoning: "Acceptance validation"
50887
+ },
50888
+ projectDir: ctx.workdir,
50889
+ workdir: ctx.workdir,
50890
+ naxIgnoreIndex: ctx.naxIgnoreIndex,
50891
+ featureDir: ctx.featureDir,
50892
+ hooks: ctx.hooks,
50893
+ plugins: ctx.pluginRegistry,
50894
+ agentGetFn: ctx.agentGetFn,
50895
+ agentManager: ctx.agentManager,
50896
+ sessionManager: ctx.sessionManager,
50897
+ acceptanceTestPaths: ctx.acceptanceTestPaths,
50898
+ runtime: ctx.runtime,
50899
+ abortSignal: ctx.abortSignal
50900
+ };
50901
+ }
50902
+ async function runAcceptanceTestsOnce(ctx, prd) {
50903
+ const acceptanceContext = buildAcceptanceContext(ctx, prd);
50904
+ const { acceptanceStage: acceptanceStage2 } = await Promise.resolve().then(() => (init_acceptance2(), exports_acceptance));
50905
+ const result = await acceptanceStage2.execute(acceptanceContext);
50906
+ if (result.action !== "fail")
50907
+ return { passed: true, failedACs: [], testOutput: "" };
50908
+ const failures = acceptanceContext.acceptanceFailures;
50909
+ if (!failures || failures.failedACs.length === 0)
50910
+ return { passed: true, failedACs: [], testOutput: "" };
50911
+ return { passed: false, failedACs: failures.failedACs, testOutput: failures.testOutput };
50912
+ }
50913
+ async function runAcceptanceFixCycle(ctx, prd, initialFailures, diagnosis, testFileContent, acceptanceTestPath) {
50914
+ const runtime = ctx.runtime;
50915
+ if (!runtime) {
50916
+ return { iterations: [], finalFindings: [], exitReason: "no-strategy" };
50917
+ }
50918
+ let currentTestOutput = initialFailures.testOutput;
50919
+ let currentFailedACs = initialFailures.failedACs;
50920
+ const storyId = prd.userStories[0]?.id ?? "unknown";
50921
+ const cycleCtx = buildFixCycleCtx(ctx, runtime, storyId);
50922
+ const cycle = {
50923
+ findings: findingsForDiagnosis(initialFailures.failedACs, initialFailures.testOutput, diagnosis),
50924
+ iterations: [],
50925
+ strategies: [
50926
+ {
50927
+ name: "acceptance-source-fix",
50928
+ appliesTo: (f) => f.fixTarget === "source",
50929
+ appliesToVerdict: (v) => v === "source_bug" || v === "both",
50930
+ fixOp: acceptanceFixSourceOp,
50931
+ buildInput: (_findings, priorIterations, _ctx) => ({
50932
+ testOutput: currentTestOutput,
50933
+ diagnosisReasoning: diagnosis.reasoning,
50934
+ priorIterationsBlock: buildPriorIterationsBlock(priorIterations),
50935
+ acceptanceTestPath,
50936
+ testFileContent
50937
+ }),
50938
+ maxAttempts: 3,
50939
+ coRun: "co-run-sequential"
50940
+ },
50941
+ {
50942
+ name: "acceptance-test-fix",
50943
+ appliesTo: (f) => f.fixTarget === "test",
50944
+ appliesToVerdict: (v) => v === "test_bug" || v === "both",
50945
+ fixOp: acceptanceFixTestOp,
50946
+ buildInput: (_findings, priorIterations, _ctx) => ({
50947
+ testOutput: currentTestOutput,
50948
+ diagnosisReasoning: diagnosis.reasoning,
50949
+ priorIterationsBlock: buildPriorIterationsBlock(priorIterations),
50950
+ failedACs: currentFailedACs,
50951
+ acceptanceTestPath,
50952
+ testFileContent
50953
+ }),
50954
+ maxAttempts: 3,
50955
+ coRun: "co-run-sequential"
50956
+ }
50957
+ ],
50958
+ validate: async (_ctx) => {
50959
+ const result = await runAcceptanceTestsOnce(ctx, prd);
50960
+ if (result.passed)
50961
+ return [];
50962
+ currentTestOutput = result.testOutput;
50963
+ currentFailedACs = result.failedACs;
50964
+ return findingsForDiagnosis(result.failedACs, result.testOutput, diagnosis);
50965
+ },
50966
+ config: {
50967
+ maxAttemptsTotal: ctx.config.acceptance.maxRetries,
50968
+ validatorRetries: 1
50969
+ },
50970
+ verdict: diagnosis.verdict
50971
+ };
50972
+ return _acceptanceFixCycleDeps.runFixCycle(cycle, cycleCtx, "acceptance");
50973
+ }
50613
50974
  async function runAcceptanceLoop(ctx) {
50614
50975
  const logger = getSafeLogger();
50615
50976
  const maxRetries = ctx.config.acceptance.maxRetries;
50616
50977
  let acceptanceRetries = 0;
50617
50978
  let stubRegenCount = 0;
50618
- let previousFailure = "";
50619
50979
  const prd = ctx.prd;
50620
50980
  let totalCost = ctx.totalCost;
50621
50981
  const iterations = ctx.iterations;
50622
50982
  const storiesCompleted = ctx.storiesCompleted;
50623
50983
  const prdDirty = false;
50624
50984
  logger?.info("acceptance", "All stories complete, running acceptance validation");
50985
+ const { acceptanceStage: acceptanceStage2 } = await Promise.resolve().then(() => (init_acceptance2(), exports_acceptance));
50625
50986
  while (acceptanceRetries < maxRetries) {
50626
50987
  const firstStory = prd.userStories[0];
50627
- const acceptanceContext = {
50628
- config: ctx.config,
50629
- rootConfig: ctx.config,
50630
- prd,
50631
- story: firstStory,
50632
- stories: [firstStory],
50633
- routing: {
50634
- complexity: "simple",
50635
- modelTier: "balanced",
50636
- testStrategy: "test-after",
50637
- reasoning: "Acceptance validation"
50638
- },
50639
- projectDir: ctx.workdir,
50640
- workdir: ctx.workdir,
50641
- naxIgnoreIndex: ctx.naxIgnoreIndex,
50642
- featureDir: ctx.featureDir,
50643
- hooks: ctx.hooks,
50644
- plugins: ctx.pluginRegistry,
50645
- agentGetFn: ctx.agentGetFn,
50646
- agentManager: ctx.agentManager,
50647
- sessionManager: ctx.sessionManager,
50648
- acceptanceTestPaths: ctx.acceptanceTestPaths,
50649
- runtime: ctx.runtime,
50650
- abortSignal: ctx.abortSignal
50651
- };
50652
- const { acceptanceStage: acceptanceStage2 } = await Promise.resolve().then(() => (init_acceptance2(), exports_acceptance));
50988
+ const acceptanceContext = buildAcceptanceContext(ctx, prd);
50653
50989
  const acceptanceResult = await acceptanceStage2.execute(acceptanceContext);
50654
50990
  if (acceptanceResult.action === "continue") {
50655
50991
  logger?.info("acceptance", "Acceptance validation passed!");
@@ -50711,6 +51047,7 @@ async function runAcceptanceLoop(ctx) {
50711
51047
  }
50712
51048
  const testEntries = ctx.acceptanceTestPaths ? await loadAcceptanceTestContent(ctx.acceptanceTestPaths.map((p) => p.testPath)) : [];
50713
51049
  const testFileContent = testEntries[0]?.content ?? "";
51050
+ const acceptanceTestPath = testEntries[0]?.testPath ?? ctx.acceptanceTestPaths?.[0]?.testPath ?? "";
50714
51051
  const strategy = ctx.config.acceptance.fix?.strategy ?? "diagnose-first";
50715
51052
  const diagnosis = await resolveAcceptanceDiagnosis({
50716
51053
  ctx,
@@ -50723,8 +51060,7 @@ async function runAcceptanceLoop(ctx) {
50723
51060
  testFileContent,
50724
51061
  workdir: ctx.workdir,
50725
51062
  storyId: firstStory?.id
50726
- },
50727
- previousFailure
51063
+ }
50728
51064
  });
50729
51065
  logger?.info("acceptance.diagnosis", "Diagnosis resolved", {
50730
51066
  storyId: firstStory?.id,
@@ -50732,28 +51068,22 @@ async function runAcceptanceLoop(ctx) {
50732
51068
  confidence: diagnosis.confidence,
50733
51069
  attempt: acceptanceRetries
50734
51070
  });
50735
- const fixResult = await applyFix({
50736
- ctx,
50737
- failures,
50738
- diagnosis,
50739
- previousFailure
50740
- });
50741
- totalCost += fixResult.cost;
50742
- previousFailure += `
50743
- ---
50744
- Attempt ${acceptanceRetries}/${maxRetries}: verdict=${diagnosis.verdict}, confidence=${diagnosis.confidence}
50745
- Reasoning: ${diagnosis.reasoning}
50746
- Failed ACs: ${failures.failedACs.join(", ")}
50747
- `;
51071
+ const cycleResult = await runAcceptanceFixCycle(ctx, prd, failures, diagnosis, testFileContent, acceptanceTestPath);
51072
+ totalCost += cycleResult.costUsd ?? 0;
51073
+ const success2 = cycleResult.exitReason === "resolved" || cycleResult.finalFindings.length === 0;
51074
+ return buildResult(success2, prd, totalCost, iterations, storiesCompleted, prdDirty, success2 ? undefined : cycleResult.finalFindings.map((f) => f.message), acceptanceRetries + cycleResult.iterations.length);
50748
51075
  }
50749
51076
  return buildResult(false, prd, totalCost, iterations, storiesCompleted, prdDirty);
50750
51077
  }
50751
- var _acceptanceLoopDeps, MAX_STUB_REGENS = 2;
51078
+ var _acceptanceLoopDeps, _acceptanceFixCycleDeps, MAX_STUB_REGENS = 2;
50752
51079
  var init_acceptance_loop = __esm(() => {
50753
51080
  init_semantic_verdict();
50754
51081
  init_test_path();
51082
+ init_findings();
50755
51083
  init_hooks();
50756
51084
  init_logger2();
51085
+ init_operations();
51086
+ init_prompts();
50757
51087
  init_helpers();
50758
51088
  init_acceptance_fix2();
50759
51089
  init_acceptance_helpers();
@@ -50761,19 +51091,22 @@ var init_acceptance_loop = __esm(() => {
50761
51091
  _acceptanceLoopDeps = {
50762
51092
  loadSemanticVerdicts
50763
51093
  };
51094
+ _acceptanceFixCycleDeps = {
51095
+ runFixCycle
51096
+ };
50764
51097
  });
50765
51098
 
50766
51099
  // src/session/scratch-purge.ts
50767
51100
  import { mkdir as mkdir10, rename, rm } from "fs/promises";
50768
- import { dirname as dirname9, join as join61 } from "path";
51101
+ import { dirname as dirname9, join as join62 } from "path";
50769
51102
  async function purgeStaleScratch(projectDir, featureName, retentionDays, archiveInsteadOfDelete = false) {
50770
- const sessionsDir = join61(projectDir, ".nax", "features", featureName, "sessions");
51103
+ const sessionsDir = join62(projectDir, ".nax", "features", featureName, "sessions");
50771
51104
  const sessionIds = await _scratchPurgeDeps.listSessionDirs(sessionsDir);
50772
51105
  const cutoffMs = _scratchPurgeDeps.now() - retentionDays * 86400000;
50773
51106
  let purged = 0;
50774
51107
  for (const sessionId of sessionIds) {
50775
- const sessionDir = join61(sessionsDir, sessionId);
50776
- const descriptorPath = join61(sessionDir, "descriptor.json");
51108
+ const sessionDir = join62(sessionsDir, sessionId);
51109
+ const descriptorPath = join62(sessionDir, "descriptor.json");
50777
51110
  if (!await _scratchPurgeDeps.fileExists(descriptorPath))
50778
51111
  continue;
50779
51112
  let lastActivityAt;
@@ -50789,7 +51122,7 @@ async function purgeStaleScratch(projectDir, featureName, retentionDays, archive
50789
51122
  if (new Date(lastActivityAt).getTime() >= cutoffMs)
50790
51123
  continue;
50791
51124
  if (archiveInsteadOfDelete) {
50792
- const archiveDest = join61(projectDir, ".nax", "features", featureName, "_archive", "sessions", sessionId);
51125
+ const archiveDest = join62(projectDir, ".nax", "features", featureName, "_archive", "sessions", sessionId);
50793
51126
  await _scratchPurgeDeps.move(sessionDir, archiveDest);
50794
51127
  } else {
50795
51128
  await _scratchPurgeDeps.remove(sessionDir);
@@ -51452,12 +51785,12 @@ var DEFAULT_MAX_BATCH_SIZE = 4;
51452
51785
  // src/pipeline/subscribers/events-writer.ts
51453
51786
  import { appendFile as appendFile4, mkdir as mkdir11 } from "fs/promises";
51454
51787
  import { homedir as homedir5 } from "os";
51455
- import { basename as basename9, join as join62 } from "path";
51788
+ import { basename as basename9, join as join63 } from "path";
51456
51789
  function wireEventsWriter(bus, feature, runId, workdir) {
51457
51790
  const logger = getSafeLogger();
51458
51791
  const project = basename9(workdir);
51459
- const eventsDir = join62(homedir5(), ".nax", "events", project);
51460
- const eventsFile = join62(eventsDir, "events.jsonl");
51792
+ const eventsDir = join63(homedir5(), ".nax", "events", project);
51793
+ const eventsFile = join63(eventsDir, "events.jsonl");
51461
51794
  let dirReady = false;
51462
51795
  const write = (line) => {
51463
51796
  return (async () => {
@@ -51638,12 +51971,12 @@ var init_interaction2 = __esm(() => {
51638
51971
  // src/pipeline/subscribers/registry.ts
51639
51972
  import { mkdir as mkdir12, writeFile } from "fs/promises";
51640
51973
  import { homedir as homedir6 } from "os";
51641
- import { basename as basename10, join as join63 } from "path";
51974
+ import { basename as basename10, join as join64 } from "path";
51642
51975
  function wireRegistry(bus, feature, runId, workdir) {
51643
51976
  const logger = getSafeLogger();
51644
51977
  const project = basename10(workdir);
51645
- const runDir = join63(homedir6(), ".nax", "runs", `${project}-${feature}-${runId}`);
51646
- const metaFile = join63(runDir, "meta.json");
51978
+ const runDir = join64(homedir6(), ".nax", "runs", `${project}-${feature}-${runId}`);
51979
+ const metaFile = join64(runDir, "meta.json");
51647
51980
  const unsub = bus.on("run:started", (_ev) => {
51648
51981
  return (async () => {
51649
51982
  try {
@@ -51653,8 +51986,8 @@ function wireRegistry(bus, feature, runId, workdir) {
51653
51986
  project,
51654
51987
  feature,
51655
51988
  workdir,
51656
- statusPath: join63(workdir, ".nax", "features", feature, "status.json"),
51657
- eventsDir: join63(workdir, ".nax", "features", feature, "runs"),
51989
+ statusPath: join64(workdir, ".nax", "features", feature, "status.json"),
51990
+ eventsDir: join64(workdir, ".nax", "features", feature, "runs"),
51658
51991
  registeredAt: new Date().toISOString()
51659
51992
  };
51660
51993
  await writeFile(metaFile, JSON.stringify(meta3, null, 2));
@@ -51888,7 +52221,7 @@ function buildPreviewRouting(story, config2) {
51888
52221
 
51889
52222
  // src/worktree/types.ts
51890
52223
  var WorktreeDependencyPreparationError;
51891
- var init_types7 = __esm(() => {
52224
+ var init_types8 = __esm(() => {
51892
52225
  WorktreeDependencyPreparationError = class WorktreeDependencyPreparationError extends Error {
51893
52226
  mode;
51894
52227
  failureCategory = "dependency-prep";
@@ -51902,7 +52235,7 @@ var init_types7 = __esm(() => {
51902
52235
 
51903
52236
  // src/worktree/dependencies.ts
51904
52237
  import { existsSync as existsSync30 } from "fs";
51905
- import { join as join64 } from "path";
52238
+ import { join as join65 } from "path";
51906
52239
  async function prepareWorktreeDependencies(options) {
51907
52240
  const mode = options.config.execution.worktreeDependencies.mode;
51908
52241
  const resolvedCwd = resolveDependencyCwd(options);
@@ -51916,7 +52249,7 @@ async function prepareWorktreeDependencies(options) {
51916
52249
  }
51917
52250
  }
51918
52251
  function resolveDependencyCwd(options) {
51919
- return options.storyWorkdir ? join64(options.worktreeRoot, options.storyWorkdir) : options.worktreeRoot;
52252
+ return options.storyWorkdir ? join65(options.worktreeRoot, options.storyWorkdir) : options.worktreeRoot;
51920
52253
  }
51921
52254
  function resolveInheritedDependencies(options, resolvedCwd) {
51922
52255
  if (hasDependencyManifests(options.worktreeRoot, resolvedCwd)) {
@@ -51926,7 +52259,7 @@ function resolveInheritedDependencies(options, resolvedCwd) {
51926
52259
  }
51927
52260
  function hasDependencyManifests(worktreeRoot, resolvedCwd) {
51928
52261
  const directories = resolvedCwd === worktreeRoot ? [worktreeRoot] : [worktreeRoot, resolvedCwd];
51929
- return directories.some((directory) => PHASE_ONE_INHERIT_UNSUPPORTED_FILES.some((filename) => _worktreeDependencyDeps.existsSync(join64(directory, filename))));
52262
+ return directories.some((directory) => PHASE_ONE_INHERIT_UNSUPPORTED_FILES.some((filename) => _worktreeDependencyDeps.existsSync(join65(directory, filename))));
51930
52263
  }
51931
52264
  async function provisionDependencies(config2, worktreeRoot, resolvedCwd) {
51932
52265
  const setupCommand = config2.execution.worktreeDependencies.setupCommand;
@@ -51958,7 +52291,7 @@ var PHASE_ONE_INHERIT_UNSUPPORTED_FILES, _worktreeDependencyDeps;
51958
52291
  var init_dependencies = __esm(() => {
51959
52292
  init_bun_deps();
51960
52293
  init_command_argv();
51961
- init_types7();
52294
+ init_types8();
51962
52295
  PHASE_ONE_INHERIT_UNSUPPORTED_FILES = [
51963
52296
  "package.json",
51964
52297
  "bun.lock",
@@ -51990,13 +52323,13 @@ __export(exports_manager, {
51990
52323
  });
51991
52324
  import { existsSync as existsSync31, symlinkSync } from "fs";
51992
52325
  import { mkdir as mkdir13 } from "fs/promises";
51993
- import { join as join65 } from "path";
52326
+ import { join as join66 } from "path";
51994
52327
 
51995
52328
  class WorktreeManager {
51996
52329
  async ensureGitExcludes(projectRoot) {
51997
52330
  const logger = getSafeLogger();
51998
- const infoDir = join65(projectRoot, ".git", "info");
51999
- const excludePath = join65(infoDir, "exclude");
52331
+ const infoDir = join66(projectRoot, ".git", "info");
52332
+ const excludePath = join66(infoDir, "exclude");
52000
52333
  try {
52001
52334
  await mkdir13(infoDir, { recursive: true });
52002
52335
  let existing = "";
@@ -52023,7 +52356,7 @@ ${missing.join(`
52023
52356
  }
52024
52357
  async create(projectRoot, storyId) {
52025
52358
  validateStoryId(storyId);
52026
- const worktreePath = join65(projectRoot, ".nax-wt", storyId);
52359
+ const worktreePath = join66(projectRoot, ".nax-wt", storyId);
52027
52360
  const branchName = `nax/${storyId}`;
52028
52361
  try {
52029
52362
  const pruneProc = _managerDeps.spawn(["git", "worktree", "prune"], {
@@ -52064,9 +52397,9 @@ ${missing.join(`
52064
52397
  }
52065
52398
  throw new Error(`Failed to create worktree: ${String(error48)}`);
52066
52399
  }
52067
- const envSource = join65(projectRoot, ".env");
52400
+ const envSource = join66(projectRoot, ".env");
52068
52401
  if (existsSync31(envSource)) {
52069
- const envTarget = join65(worktreePath, ".env");
52402
+ const envTarget = join66(worktreePath, ".env");
52070
52403
  try {
52071
52404
  symlinkSync(envSource, envTarget, "file");
52072
52405
  } catch (error48) {
@@ -52077,7 +52410,7 @@ ${missing.join(`
52077
52410
  }
52078
52411
  async remove(projectRoot, storyId) {
52079
52412
  validateStoryId(storyId);
52080
- const worktreePath = join65(projectRoot, ".nax-wt", storyId);
52413
+ const worktreePath = join66(projectRoot, ".nax-wt", storyId);
52081
52414
  const branchName = `nax/${storyId}`;
52082
52415
  try {
52083
52416
  const proc = _managerDeps.spawn(["git", "worktree", "remove", worktreePath, "--force"], {
@@ -52789,10 +53122,10 @@ var init_merge_conflict_rectify = __esm(() => {
52789
53122
  });
52790
53123
 
52791
53124
  // src/execution/pipeline-result-handler.ts
52792
- import { join as join66 } from "path";
53125
+ import { join as join67 } from "path";
52793
53126
  async function removeWorktreeDirectory(projectRoot, storyId) {
52794
53127
  const logger = getSafeLogger();
52795
- const worktreePath = join66(projectRoot, ".nax-wt", storyId);
53128
+ const worktreePath = join67(projectRoot, ".nax-wt", storyId);
52796
53129
  try {
52797
53130
  const proc = _resultHandlerDeps.spawn(["git", "worktree", "remove", worktreePath, "--force"], {
52798
53131
  cwd: projectRoot,
@@ -53003,7 +53336,7 @@ var init_pipeline_result_handler = __esm(() => {
53003
53336
 
53004
53337
  // src/execution/iteration-runner.ts
53005
53338
  import { existsSync as existsSync32 } from "fs";
53006
- import { join as join67 } from "path";
53339
+ import { join as join68 } from "path";
53007
53340
  async function runIteration(ctx, prd, selection, iterations, totalCost, allStoryMetrics) {
53008
53341
  const { story, storiesToExecute, routing, isBatchExecution } = selection;
53009
53342
  if (ctx.dryRun) {
@@ -53028,7 +53361,7 @@ async function runIteration(ctx, prd, selection, iterations, totalCost, allStory
53028
53361
  const storyStartTime = Date.now();
53029
53362
  let effectiveWorkdir = ctx.workdir;
53030
53363
  if (ctx.config.execution.storyIsolation === "worktree") {
53031
- const worktreePath = join67(ctx.workdir, ".nax-wt", story.id);
53364
+ const worktreePath = join68(ctx.workdir, ".nax-wt", story.id);
53032
53365
  const worktreeExists = _iterationRunnerDeps.existsSync(worktreePath);
53033
53366
  if (!worktreeExists) {
53034
53367
  await _iterationRunnerDeps.worktreeManager.ensureGitExcludes(ctx.workdir);
@@ -53048,7 +53381,7 @@ async function runIteration(ctx, prd, selection, iterations, totalCost, allStory
53048
53381
  }
53049
53382
  const accumulatedAttemptCost = (story.priorFailures || []).reduce((sum, f) => sum + (f.cost || 0), 0);
53050
53383
  const profileOverride = ctx.config.profile && ctx.config.profile !== "default" ? { profile: ctx.config.profile } : undefined;
53051
- const effectiveConfig = story.workdir ? await _iterationRunnerDeps.loadConfigForWorkdir(join67(ctx.workdir, ".nax", "config.json"), story.workdir, profileOverride) : ctx.config;
53384
+ const effectiveConfig = story.workdir ? await _iterationRunnerDeps.loadConfigForWorkdir(join68(ctx.workdir, ".nax", "config.json"), story.workdir, profileOverride) : ctx.config;
53052
53385
  let dependencyContext;
53053
53386
  if (ctx.config.execution.storyIsolation === "worktree") {
53054
53387
  try {
@@ -53075,7 +53408,7 @@ async function runIteration(ctx, prd, selection, iterations, totalCost, allStory
53075
53408
  };
53076
53409
  }
53077
53410
  }
53078
- const resolvedWorkdir = dependencyContext?.cwd ? dependencyContext.cwd : ctx.config.execution.storyIsolation === "worktree" ? story.workdir ? join67(effectiveWorkdir, story.workdir) : effectiveWorkdir : story.workdir ? join67(ctx.workdir, story.workdir) : ctx.workdir;
53411
+ const resolvedWorkdir = dependencyContext?.cwd ? dependencyContext.cwd : ctx.config.execution.storyIsolation === "worktree" ? story.workdir ? join68(effectiveWorkdir, story.workdir) : effectiveWorkdir : story.workdir ? join68(ctx.workdir, story.workdir) : ctx.workdir;
53079
53412
  const pipelineContext = {
53080
53413
  config: effectiveConfig,
53081
53414
  rootConfig: ctx.config,
@@ -53272,7 +53605,7 @@ __export(exports_parallel_worker, {
53272
53605
  executeParallelBatch: () => executeParallelBatch,
53273
53606
  _parallelWorkerDeps: () => _parallelWorkerDeps
53274
53607
  });
53275
- import { join as join68 } from "path";
53608
+ import { join as join69 } from "path";
53276
53609
  async function executeStoryInWorktree(story, worktreePath, dependencyContext, context, routing, eventEmitter) {
53277
53610
  const logger = getSafeLogger();
53278
53611
  try {
@@ -53292,7 +53625,7 @@ async function executeStoryInWorktree(story, worktreePath, dependencyContext, co
53292
53625
  story,
53293
53626
  stories: [story],
53294
53627
  projectDir: context.projectDir,
53295
- workdir: dependencyContext.cwd ?? (story.workdir ? join68(worktreePath, story.workdir) : worktreePath),
53628
+ workdir: dependencyContext.cwd ?? (story.workdir ? join69(worktreePath, story.workdir) : worktreePath),
53296
53629
  worktreeDependencyContext: dependencyContext,
53297
53630
  routing,
53298
53631
  storyGitRef: storyGitRef ?? undefined
@@ -54053,16 +54386,16 @@ var init_unified_executor = __esm(() => {
54053
54386
  });
54054
54387
 
54055
54388
  // src/project/detector.ts
54056
- import { join as join69 } from "path";
54389
+ import { join as join70 } from "path";
54057
54390
  async function detectLanguage(workdir, pkg) {
54058
54391
  const deps = _detectorDeps;
54059
- if (await deps.fileExists(join69(workdir, "go.mod")))
54392
+ if (await deps.fileExists(join70(workdir, "go.mod")))
54060
54393
  return "go";
54061
- if (await deps.fileExists(join69(workdir, "Cargo.toml")))
54394
+ if (await deps.fileExists(join70(workdir, "Cargo.toml")))
54062
54395
  return "rust";
54063
- if (await deps.fileExists(join69(workdir, "pyproject.toml")))
54396
+ if (await deps.fileExists(join70(workdir, "pyproject.toml")))
54064
54397
  return "python";
54065
- if (await deps.fileExists(join69(workdir, "requirements.txt")))
54398
+ if (await deps.fileExists(join70(workdir, "requirements.txt")))
54066
54399
  return "python";
54067
54400
  if (pkg != null) {
54068
54401
  const allDeps = {
@@ -54122,18 +54455,18 @@ async function detectLintTool(workdir, language) {
54122
54455
  if (language === "python")
54123
54456
  return "ruff";
54124
54457
  const deps = _detectorDeps;
54125
- if (await deps.fileExists(join69(workdir, "biome.json")))
54458
+ if (await deps.fileExists(join70(workdir, "biome.json")))
54126
54459
  return "biome";
54127
- if (await deps.fileExists(join69(workdir, ".eslintrc")))
54460
+ if (await deps.fileExists(join70(workdir, ".eslintrc")))
54128
54461
  return "eslint";
54129
- if (await deps.fileExists(join69(workdir, ".eslintrc.js")))
54462
+ if (await deps.fileExists(join70(workdir, ".eslintrc.js")))
54130
54463
  return "eslint";
54131
- if (await deps.fileExists(join69(workdir, ".eslintrc.json")))
54464
+ if (await deps.fileExists(join70(workdir, ".eslintrc.json")))
54132
54465
  return "eslint";
54133
54466
  return;
54134
54467
  }
54135
54468
  async function detectProjectProfile(workdir, existing) {
54136
- const pkg = await _detectorDeps.readJson(join69(workdir, "package.json"));
54469
+ const pkg = await _detectorDeps.readJson(join70(workdir, "package.json"));
54137
54470
  const language = existing.language !== undefined ? existing.language : await detectLanguage(workdir, pkg);
54138
54471
  const type = existing.type !== undefined ? existing.type : detectType(pkg);
54139
54472
  const testFramework = existing.testFramework !== undefined ? existing.testFramework : await detectTestFramework(workdir, language, pkg);
@@ -54170,7 +54503,7 @@ var init_project = __esm(() => {
54170
54503
 
54171
54504
  // src/execution/status-file.ts
54172
54505
  import { rename as rename2, unlink as unlink3 } from "fs/promises";
54173
- import { resolve as resolve15 } from "path";
54506
+ import { resolve as resolve16 } from "path";
54174
54507
  function countProgress(prd) {
54175
54508
  const stories = prd.userStories;
54176
54509
  const passed = stories.filter((s) => s.status === "passed").length;
@@ -54215,7 +54548,7 @@ function buildStatusSnapshot(state) {
54215
54548
  return snapshot;
54216
54549
  }
54217
54550
  async function writeStatusFile(filePath, status) {
54218
- const resolvedPath = resolve15(filePath);
54551
+ const resolvedPath = resolve16(filePath);
54219
54552
  if (filePath.includes("../") || filePath.includes("..\\")) {
54220
54553
  throw new Error("Invalid status file path: path traversal detected");
54221
54554
  }
@@ -54229,7 +54562,7 @@ async function writeStatusFile(filePath, status) {
54229
54562
  var init_status_file = () => {};
54230
54563
 
54231
54564
  // src/execution/status-writer.ts
54232
- import { join as join70 } from "path";
54565
+ import { join as join71 } from "path";
54233
54566
 
54234
54567
  class StatusWriter {
54235
54568
  statusFile;
@@ -54348,7 +54681,7 @@ class StatusWriter {
54348
54681
  if (!this._prd)
54349
54682
  return;
54350
54683
  const safeLogger = getSafeLogger();
54351
- const featureStatusPath = join70(featureDir, "status.json");
54684
+ const featureStatusPath = join71(featureDir, "status.json");
54352
54685
  const write = async () => {
54353
54686
  try {
54354
54687
  const base = this.getSnapshot(totalCost, iterations);
@@ -54560,7 +54893,7 @@ __export(exports_run_initialization, {
54560
54893
  initializeRun: () => initializeRun,
54561
54894
  _reconcileDeps: () => _reconcileDeps
54562
54895
  });
54563
- import { join as join71 } from "path";
54896
+ import { join as join72 } from "path";
54564
54897
  async function reconcileState(prd, prdPath, workdir, config2) {
54565
54898
  const logger = getSafeLogger();
54566
54899
  let reconciledCount = 0;
@@ -54577,7 +54910,7 @@ async function reconcileState(prd, prdPath, workdir, config2) {
54577
54910
  });
54578
54911
  continue;
54579
54912
  }
54580
- const effectiveWorkdir = story.workdir ? join71(workdir, story.workdir) : workdir;
54913
+ const effectiveWorkdir = story.workdir ? join72(workdir, story.workdir) : workdir;
54581
54914
  try {
54582
54915
  const reviewResult = await _reconcileDeps.runReview(config2.review, effectiveWorkdir, config2.execution);
54583
54916
  if (!reviewResult.success) {
@@ -55451,14 +55784,14 @@ See https://react.dev/link/invalid-hook-call for tips about how to debug and fix
55451
55784
  prevActScopeDepth !== actScopeDepth - 1 && console.error("You seem to have overlapping act() calls, this is not supported. Be sure to await previous act() calls before making a new one. ");
55452
55785
  actScopeDepth = prevActScopeDepth;
55453
55786
  }
55454
- function recursivelyFlushAsyncActWork(returnValue, resolve16, reject) {
55787
+ function recursivelyFlushAsyncActWork(returnValue, resolve17, reject) {
55455
55788
  var queue = ReactSharedInternals.actQueue;
55456
55789
  if (queue !== null)
55457
55790
  if (queue.length !== 0)
55458
55791
  try {
55459
55792
  flushActQueue(queue);
55460
55793
  enqueueTask(function() {
55461
- return recursivelyFlushAsyncActWork(returnValue, resolve16, reject);
55794
+ return recursivelyFlushAsyncActWork(returnValue, resolve17, reject);
55462
55795
  });
55463
55796
  return;
55464
55797
  } catch (error48) {
@@ -55466,7 +55799,7 @@ See https://react.dev/link/invalid-hook-call for tips about how to debug and fix
55466
55799
  }
55467
55800
  else
55468
55801
  ReactSharedInternals.actQueue = null;
55469
- 0 < ReactSharedInternals.thrownErrors.length ? (queue = aggregateErrors(ReactSharedInternals.thrownErrors), ReactSharedInternals.thrownErrors.length = 0, reject(queue)) : resolve16(returnValue);
55802
+ 0 < ReactSharedInternals.thrownErrors.length ? (queue = aggregateErrors(ReactSharedInternals.thrownErrors), ReactSharedInternals.thrownErrors.length = 0, reject(queue)) : resolve17(returnValue);
55470
55803
  }
55471
55804
  function flushActQueue(queue) {
55472
55805
  if (!isFlushing) {
@@ -55642,14 +55975,14 @@ See https://react.dev/link/invalid-hook-call for tips about how to debug and fix
55642
55975
  didAwaitActCall || didWarnNoAwaitAct || (didWarnNoAwaitAct = true, console.error("You called act(async () => ...) without await. This could lead to unexpected testing behaviour, interleaving multiple act calls and mixing their scopes. You should - await act(async () => ...);"));
55643
55976
  });
55644
55977
  return {
55645
- then: function(resolve16, reject) {
55978
+ then: function(resolve17, reject) {
55646
55979
  didAwaitActCall = true;
55647
55980
  thenable.then(function(returnValue) {
55648
55981
  popActScope(prevActQueue, prevActScopeDepth);
55649
55982
  if (prevActScopeDepth === 0) {
55650
55983
  try {
55651
55984
  flushActQueue(queue), enqueueTask(function() {
55652
- return recursivelyFlushAsyncActWork(returnValue, resolve16, reject);
55985
+ return recursivelyFlushAsyncActWork(returnValue, resolve17, reject);
55653
55986
  });
55654
55987
  } catch (error$0) {
55655
55988
  ReactSharedInternals.thrownErrors.push(error$0);
@@ -55660,7 +55993,7 @@ See https://react.dev/link/invalid-hook-call for tips about how to debug and fix
55660
55993
  reject(_thrownError);
55661
55994
  }
55662
55995
  } else
55663
- resolve16(returnValue);
55996
+ resolve17(returnValue);
55664
55997
  }, function(error48) {
55665
55998
  popActScope(prevActQueue, prevActScopeDepth);
55666
55999
  0 < ReactSharedInternals.thrownErrors.length ? (error48 = aggregateErrors(ReactSharedInternals.thrownErrors), ReactSharedInternals.thrownErrors.length = 0, reject(error48)) : reject(error48);
@@ -55676,11 +56009,11 @@ See https://react.dev/link/invalid-hook-call for tips about how to debug and fix
55676
56009
  if (0 < ReactSharedInternals.thrownErrors.length)
55677
56010
  throw callback = aggregateErrors(ReactSharedInternals.thrownErrors), ReactSharedInternals.thrownErrors.length = 0, callback;
55678
56011
  return {
55679
- then: function(resolve16, reject) {
56012
+ then: function(resolve17, reject) {
55680
56013
  didAwaitActCall = true;
55681
56014
  prevActScopeDepth === 0 ? (ReactSharedInternals.actQueue = queue, enqueueTask(function() {
55682
- return recursivelyFlushAsyncActWork(returnValue$jscomp$0, resolve16, reject);
55683
- })) : resolve16(returnValue$jscomp$0);
56015
+ return recursivelyFlushAsyncActWork(returnValue$jscomp$0, resolve17, reject);
56016
+ })) : resolve17(returnValue$jscomp$0);
55684
56017
  }
55685
56018
  };
55686
56019
  };
@@ -58522,8 +58855,8 @@ It can also happen if the client has a browser extension installed which messes
58522
58855
  currentEntangledActionThenable = {
58523
58856
  status: "pending",
58524
58857
  value: undefined,
58525
- then: function(resolve16) {
58526
- entangledListeners.push(resolve16);
58858
+ then: function(resolve17) {
58859
+ entangledListeners.push(resolve17);
58527
58860
  }
58528
58861
  };
58529
58862
  }
@@ -58547,8 +58880,8 @@ It can also happen if the client has a browser extension installed which messes
58547
58880
  status: "pending",
58548
58881
  value: null,
58549
58882
  reason: null,
58550
- then: function(resolve16) {
58551
- listeners.push(resolve16);
58883
+ then: function(resolve17) {
58884
+ listeners.push(resolve17);
58552
58885
  }
58553
58886
  };
58554
58887
  thenable.then(function() {
@@ -85940,7 +86273,7 @@ var require_jsx_dev_runtime = __commonJS((exports, module) => {
85940
86273
  init_source();
85941
86274
  import { existsSync as existsSync34, mkdirSync as mkdirSync7 } from "fs";
85942
86275
  import { homedir as homedir8 } from "os";
85943
- import { join as join73 } from "path";
86276
+ import { join as join74 } from "path";
85944
86277
 
85945
86278
  // node_modules/commander/esm.mjs
85946
86279
  var import__ = __toESM(require_commander(), 1);
@@ -86836,7 +87169,7 @@ async function runsShowCommand(options) {
86836
87169
  // src/cli/prompts-main.ts
86837
87170
  init_logger2();
86838
87171
  import { existsSync as existsSync20, mkdirSync as mkdirSync3 } from "fs";
86839
- import { join as join41 } from "path";
87172
+ import { join as join42 } from "path";
86840
87173
 
86841
87174
  // src/pipeline/index.ts
86842
87175
  init_runner2();
@@ -86912,7 +87245,7 @@ function buildFrontmatter(story, ctx, role) {
86912
87245
 
86913
87246
  // src/cli/prompts-tdd.ts
86914
87247
  init_prompts();
86915
- import { join as join40 } from "path";
87248
+ import { join as join41 } from "path";
86916
87249
  async function handleThreeSessionTddPrompts(story, ctx, outputDir, logger) {
86917
87250
  const [testWriterPrompt, implementerPrompt, verifierPrompt] = await Promise.all([
86918
87251
  TddPromptBuilder.for("test-writer", { isolation: "strict" }).withLoader(ctx.workdir, ctx.config).story(story).context(ctx.contextMarkdown).constitution(ctx.constitution?.content).testCommand(ctx.config.quality?.commands?.test).build(),
@@ -86931,7 +87264,7 @@ ${frontmatter}---
86931
87264
 
86932
87265
  ${session.prompt}`;
86933
87266
  if (outputDir) {
86934
- const promptFile = join40(outputDir, `${story.id}.${session.role}.md`);
87267
+ const promptFile = join41(outputDir, `${story.id}.${session.role}.md`);
86935
87268
  await Bun.write(promptFile, fullOutput);
86936
87269
  logger.info("cli", "Written TDD prompt file", {
86937
87270
  storyId: story.id,
@@ -86947,7 +87280,7 @@ ${"=".repeat(80)}`);
86947
87280
  }
86948
87281
  }
86949
87282
  if (outputDir && ctx.contextMarkdown) {
86950
- const contextFile = join40(outputDir, `${story.id}.context.md`);
87283
+ const contextFile = join41(outputDir, `${story.id}.context.md`);
86951
87284
  const frontmatter = buildFrontmatter(story, ctx);
86952
87285
  const contextOutput = `---
86953
87286
  ${frontmatter}---
@@ -86961,12 +87294,12 @@ ${ctx.contextMarkdown}`;
86961
87294
  async function promptsCommand(options) {
86962
87295
  const logger = getLogger();
86963
87296
  const { feature, workdir, config: config2, storyId, outputDir } = options;
86964
- const naxDir = join41(workdir, ".nax");
87297
+ const naxDir = join42(workdir, ".nax");
86965
87298
  if (!existsSync20(naxDir)) {
86966
87299
  throw new Error(`.nax directory not found. Run 'nax init' first in ${workdir}`);
86967
87300
  }
86968
- const featureDir = join41(naxDir, "features", feature);
86969
- const prdPath = join41(featureDir, "prd.json");
87301
+ const featureDir = join42(naxDir, "features", feature);
87302
+ const prdPath = join42(featureDir, "prd.json");
86970
87303
  if (!existsSync20(prdPath)) {
86971
87304
  throw new Error(`Feature "${feature}" not found or missing prd.json`);
86972
87305
  }
@@ -87033,10 +87366,10 @@ ${frontmatter}---
87033
87366
 
87034
87367
  ${ctx.prompt}`;
87035
87368
  if (outputDir) {
87036
- const promptFile = join41(outputDir, `${story.id}.prompt.md`);
87369
+ const promptFile = join42(outputDir, `${story.id}.prompt.md`);
87037
87370
  await Bun.write(promptFile, fullOutput);
87038
87371
  if (ctx.contextMarkdown) {
87039
- const contextFile = join41(outputDir, `${story.id}.context.md`);
87372
+ const contextFile = join42(outputDir, `${story.id}.context.md`);
87040
87373
  const contextOutput = `---
87041
87374
  ${frontmatter}---
87042
87375
 
@@ -87064,7 +87397,7 @@ ${"=".repeat(80)}`);
87064
87397
  // src/cli/prompts-init.ts
87065
87398
  init_role_task();
87066
87399
  import { existsSync as existsSync21, mkdirSync as mkdirSync4 } from "fs";
87067
- import { join as join42 } from "path";
87400
+ import { join as join43 } from "path";
87068
87401
  var TEMPLATE_ROLES = [
87069
87402
  { file: "test-writer.md", role: "test-writer" },
87070
87403
  { file: "implementer.md", role: "implementer", variant: "standard" },
@@ -87088,9 +87421,9 @@ var TEMPLATE_HEADER = `<!--
87088
87421
  `;
87089
87422
  async function promptsInitCommand(options) {
87090
87423
  const { workdir, force = false, autoWireConfig = true } = options;
87091
- const templatesDir = join42(workdir, ".nax", "templates");
87424
+ const templatesDir = join43(workdir, ".nax", "templates");
87092
87425
  mkdirSync4(templatesDir, { recursive: true });
87093
- const existingFiles = TEMPLATE_ROLES.map((t) => t.file).filter((f) => existsSync21(join42(templatesDir, f)));
87426
+ const existingFiles = TEMPLATE_ROLES.map((t) => t.file).filter((f) => existsSync21(join43(templatesDir, f)));
87094
87427
  if (existingFiles.length > 0 && !force) {
87095
87428
  console.warn(`[WARN] nax/templates/ already contains files: ${existingFiles.join(", ")}. No files overwritten.
87096
87429
  Pass --force to overwrite existing templates.`);
@@ -87098,7 +87431,7 @@ async function promptsInitCommand(options) {
87098
87431
  }
87099
87432
  const written = [];
87100
87433
  for (const template of TEMPLATE_ROLES) {
87101
- const filePath = join42(templatesDir, template.file);
87434
+ const filePath = join43(templatesDir, template.file);
87102
87435
  const roleBody = template.role === "implementer" ? buildRoleTaskSection(template.role, template.variant) : buildRoleTaskSection(template.role);
87103
87436
  const content = TEMPLATE_HEADER + roleBody;
87104
87437
  await Bun.write(filePath, content);
@@ -87114,7 +87447,7 @@ async function promptsInitCommand(options) {
87114
87447
  return written;
87115
87448
  }
87116
87449
  async function autoWirePromptsConfig(workdir) {
87117
- const configPath = join42(workdir, "nax.config.json");
87450
+ const configPath = join43(workdir, "nax.config.json");
87118
87451
  if (!existsSync21(configPath)) {
87119
87452
  const exampleConfig = JSON.stringify({
87120
87453
  prompts: {
@@ -87280,7 +87613,7 @@ init_config();
87280
87613
  init_logger2();
87281
87614
  init_prd();
87282
87615
  import { existsSync as existsSync23, readdirSync as readdirSync6 } from "fs";
87283
- import { join as join46 } from "path";
87616
+ import { join as join47 } from "path";
87284
87617
 
87285
87618
  // src/cli/diagnose-analysis.ts
87286
87619
  function detectFailurePattern(story, _prd, status) {
@@ -87479,7 +87812,7 @@ function isProcessAlive2(pid) {
87479
87812
  }
87480
87813
  }
87481
87814
  async function loadStatusFile2(workdir) {
87482
- const statusPath = join46(workdir, ".nax", "status.json");
87815
+ const statusPath = join47(workdir, ".nax", "status.json");
87483
87816
  if (!existsSync23(statusPath))
87484
87817
  return null;
87485
87818
  try {
@@ -87507,7 +87840,7 @@ async function countCommitsSince(workdir, since) {
87507
87840
  }
87508
87841
  }
87509
87842
  async function checkLock(workdir) {
87510
- const lockFile = Bun.file(join46(workdir, "nax.lock"));
87843
+ const lockFile = Bun.file(join47(workdir, "nax.lock"));
87511
87844
  if (!await lockFile.exists())
87512
87845
  return { lockPresent: false };
87513
87846
  try {
@@ -87525,8 +87858,8 @@ async function diagnoseCommand(options = {}) {
87525
87858
  const logger = getLogger();
87526
87859
  const workdir = options.workdir ?? process.cwd();
87527
87860
  const naxSubdir = findProjectDir(workdir);
87528
- let projectDir = naxSubdir ? join46(naxSubdir, "..") : null;
87529
- if (!projectDir && existsSync23(join46(workdir, ".nax"))) {
87861
+ let projectDir = naxSubdir ? join47(naxSubdir, "..") : null;
87862
+ if (!projectDir && existsSync23(join47(workdir, ".nax"))) {
87530
87863
  projectDir = workdir;
87531
87864
  }
87532
87865
  if (!projectDir)
@@ -87537,7 +87870,7 @@ async function diagnoseCommand(options = {}) {
87537
87870
  if (status2) {
87538
87871
  feature = status2.run.feature;
87539
87872
  } else {
87540
- const featuresDir = join46(projectDir, ".nax", "features");
87873
+ const featuresDir = join47(projectDir, ".nax", "features");
87541
87874
  if (!existsSync23(featuresDir))
87542
87875
  throw new Error("No features found in project");
87543
87876
  const features = readdirSync6(featuresDir, { withFileTypes: true }).filter((e) => e.isDirectory()).map((e) => e.name);
@@ -87547,8 +87880,8 @@ async function diagnoseCommand(options = {}) {
87547
87880
  logger.info("diagnose", "No feature specified, using first found", { feature });
87548
87881
  }
87549
87882
  }
87550
- const featureDir = join46(projectDir, ".nax", "features", feature);
87551
- const prdPath = join46(featureDir, "prd.json");
87883
+ const featureDir = join47(projectDir, ".nax", "features", feature);
87884
+ const prdPath = join47(featureDir, "prd.json");
87552
87885
  if (!existsSync23(prdPath))
87553
87886
  throw new Error(`Feature not found: ${feature}`);
87554
87887
  const prd = await loadPRD(prdPath);
@@ -87592,7 +87925,7 @@ init_source();
87592
87925
  init_loader();
87593
87926
  init_generator2();
87594
87927
  import { existsSync as existsSync24 } from "fs";
87595
- import { join as join47 } from "path";
87928
+ import { join as join48 } from "path";
87596
87929
  var VALID_AGENTS = ["claude", "codex", "opencode", "cursor", "windsurf", "aider", "gemini"];
87597
87930
  async function generateCommand(options) {
87598
87931
  const workdir = options.dir ?? process.cwd();
@@ -87635,7 +87968,7 @@ async function generateCommand(options) {
87635
87968
  return;
87636
87969
  }
87637
87970
  if (options.package) {
87638
- const packageDir = join47(workdir, options.package);
87971
+ const packageDir = join48(workdir, options.package);
87639
87972
  if (dryRun) {
87640
87973
  console.log(source_default.yellow("\u26A0 Dry run \u2014 no files will be written"));
87641
87974
  }
@@ -87655,8 +87988,8 @@ async function generateCommand(options) {
87655
87988
  process.exit(1);
87656
87989
  return;
87657
87990
  }
87658
- const contextPath = options.context ? join47(workdir, options.context) : join47(workdir, ".nax/context.md");
87659
- const outputDir = options.output ? join47(workdir, options.output) : workdir;
87991
+ const contextPath = options.context ? join48(workdir, options.context) : join48(workdir, ".nax/context.md");
87992
+ const outputDir = options.output ? join48(workdir, options.output) : workdir;
87660
87993
  const autoInject = !options.noAutoInject;
87661
87994
  if (!existsSync24(contextPath)) {
87662
87995
  console.error(source_default.red(`\u2717 Context file not found: ${contextPath}`));
@@ -87762,7 +88095,7 @@ async function generateCommand(options) {
87762
88095
  // src/cli/config-display.ts
87763
88096
  init_loader();
87764
88097
  import { existsSync as existsSync26 } from "fs";
87765
- import { join as join49 } from "path";
88098
+ import { join as join50 } from "path";
87766
88099
 
87767
88100
  // src/cli/config-descriptions.ts
87768
88101
  var FIELD_DESCRIPTIONS = {
@@ -88003,7 +88336,7 @@ function deepEqual(a, b) {
88003
88336
  init_defaults();
88004
88337
  init_loader();
88005
88338
  import { existsSync as existsSync25 } from "fs";
88006
- import { join as join48 } from "path";
88339
+ import { join as join49 } from "path";
88007
88340
  async function loadConfigFile(path15) {
88008
88341
  if (!existsSync25(path15))
88009
88342
  return null;
@@ -88025,7 +88358,7 @@ async function loadProjectConfig() {
88025
88358
  const projectDir = findProjectDir();
88026
88359
  if (!projectDir)
88027
88360
  return null;
88028
- const projectPath = join48(projectDir, "config.json");
88361
+ const projectPath = join49(projectDir, "config.json");
88029
88362
  return await loadConfigFile(projectPath);
88030
88363
  }
88031
88364
 
@@ -88085,7 +88418,7 @@ async function configCommand(config2, options = {}) {
88085
88418
  function determineConfigSources() {
88086
88419
  const globalPath = globalConfigPath();
88087
88420
  const projectDir = findProjectDir();
88088
- const projectPath = projectDir ? join49(projectDir, "config.json") : null;
88421
+ const projectPath = projectDir ? join50(projectDir, "config.json") : null;
88089
88422
  return {
88090
88423
  global: fileExists(globalPath) ? globalPath : null,
88091
88424
  project: projectPath && fileExists(projectPath) ? projectPath : null
@@ -88234,15 +88567,15 @@ init_paths();
88234
88567
  init_profile();
88235
88568
  import { mkdirSync as mkdirSync5 } from "fs";
88236
88569
  import { readdirSync as readdirSync7 } from "fs";
88237
- import { join as join50 } from "path";
88570
+ import { join as join51 } from "path";
88238
88571
  var _profileCLIDeps = {
88239
88572
  env: process.env
88240
88573
  };
88241
88574
  var SENSITIVE_KEY_PATTERN = /key|token|secret|password|credential/i;
88242
88575
  var VAR_PATTERN = /\$[A-Za-z_][A-Za-z0-9_]*/;
88243
88576
  async function profileListCommand(startDir) {
88244
- const globalProfilesDir = join50(globalConfigDir(), "profiles");
88245
- const projectProfilesDir = join50(projectConfigDir(startDir), "profiles");
88577
+ const globalProfilesDir = join51(globalConfigDir(), "profiles");
88578
+ const projectProfilesDir = join51(projectConfigDir(startDir), "profiles");
88246
88579
  const globalProfiles = scanProfileDir(globalProfilesDir);
88247
88580
  const projectProfiles = scanProfileDir(projectProfilesDir);
88248
88581
  const activeProfile = await resolveProfileName({}, _profileCLIDeps.env, startDir);
@@ -88301,7 +88634,7 @@ function maskProfileValues(obj) {
88301
88634
  return result;
88302
88635
  }
88303
88636
  async function profileUseCommand(profileName, startDir) {
88304
- const configPath = join50(projectConfigDir(startDir), "config.json");
88637
+ const configPath = join51(projectConfigDir(startDir), "config.json");
88305
88638
  const configFile = Bun.file(configPath);
88306
88639
  let existing = {};
88307
88640
  if (await configFile.exists()) {
@@ -88320,8 +88653,8 @@ async function profileCurrentCommand(startDir) {
88320
88653
  return resolveProfileName({}, _profileCLIDeps.env, startDir);
88321
88654
  }
88322
88655
  async function profileCreateCommand(profileName, startDir) {
88323
- const profilesDir = join50(projectConfigDir(startDir), "profiles");
88324
- const profilePath = join50(profilesDir, `${profileName}.json`);
88656
+ const profilesDir = join51(projectConfigDir(startDir), "profiles");
88657
+ const profilePath = join51(profilesDir, `${profileName}.json`);
88325
88658
  const profileFile = Bun.file(profilePath);
88326
88659
  if (await profileFile.exists()) {
88327
88660
  throw new Error(`Profile "${profileName}" already exists at ${profilePath}`);
@@ -88443,7 +88776,7 @@ async function contextInspectCommand(options) {
88443
88776
  init_canonical_loader();
88444
88777
  init_errors();
88445
88778
  import { mkdir as mkdir9 } from "fs/promises";
88446
- import { basename as basename8, join as join51 } from "path";
88779
+ import { basename as basename8, join as join52 } from "path";
88447
88780
  var _rulesCLIDeps = {
88448
88781
  readFile: async (path15) => Bun.file(path15).text(),
88449
88782
  writeFile: async (path15, content) => {
@@ -88452,7 +88785,7 @@ var _rulesCLIDeps = {
88452
88785
  fileExists: async (path15) => Bun.file(path15).exists(),
88453
88786
  globInDir: (dir) => {
88454
88787
  try {
88455
- return [...new Bun.Glob("*.md").scanSync({ cwd: dir })].sort().map((f) => join51(dir, f));
88788
+ return [...new Bun.Glob("*.md").scanSync({ cwd: dir })].sort().map((f) => join52(dir, f));
88456
88789
  } catch {
88457
88790
  return [];
88458
88791
  }
@@ -88501,7 +88834,7 @@ ${r.content}`).join(`
88501
88834
  `);
88502
88835
  const shimContent = `${header + body}
88503
88836
  `;
88504
- const shimPath = join51(workdir, shimFileName);
88837
+ const shimPath = join52(workdir, shimFileName);
88505
88838
  if (options.dryRun) {
88506
88839
  console.log(`[dry-run] Would write ${shimPath} (${shimContent.length} bytes)`);
88507
88840
  return;
@@ -88530,14 +88863,14 @@ function neutralizeContent(content) {
88530
88863
  }
88531
88864
  async function collectMigrationSources(workdir) {
88532
88865
  const sources = [];
88533
- const claudeMdPath = join51(workdir, "CLAUDE.md");
88866
+ const claudeMdPath = join52(workdir, "CLAUDE.md");
88534
88867
  if (await _rulesCLIDeps.fileExists(claudeMdPath)) {
88535
88868
  const content = await _rulesCLIDeps.readFile(claudeMdPath);
88536
88869
  if (content.trim()) {
88537
88870
  sources.push({ sourcePath: claudeMdPath, targetFileName: "project-conventions.md", content });
88538
88871
  }
88539
88872
  }
88540
- const rulesDir = join51(workdir, ".claude", "rules");
88873
+ const rulesDir = join52(workdir, ".claude", "rules");
88541
88874
  const ruleFiles = _rulesCLIDeps.globInDir(rulesDir);
88542
88875
  for (const filePath of ruleFiles) {
88543
88876
  try {
@@ -88557,7 +88890,7 @@ async function rulesMigrateCommand(options) {
88557
88890
  console.log("[WARN] No source files found (checked CLAUDE.md and .claude/rules/*.md). Nothing to migrate.");
88558
88891
  return;
88559
88892
  }
88560
- const targetDir = join51(workdir, CANONICAL_RULES_DIR);
88893
+ const targetDir = join52(workdir, CANONICAL_RULES_DIR);
88561
88894
  if (!options.dryRun) {
88562
88895
  try {
88563
88896
  await _rulesCLIDeps.mkdir(targetDir);
@@ -88568,7 +88901,7 @@ async function rulesMigrateCommand(options) {
88568
88901
  let written = 0;
88569
88902
  let skipped = 0;
88570
88903
  for (const { sourcePath, targetFileName, content } of sources) {
88571
- const targetPath = join51(targetDir, targetFileName);
88904
+ const targetPath = join52(targetDir, targetFileName);
88572
88905
  if (!force && !options.dryRun && await _rulesCLIDeps.fileExists(targetPath)) {
88573
88906
  console.log(`[skip] ${targetFileName} already exists (use --force to overwrite)`);
88574
88907
  skipped++;
@@ -88607,7 +88940,7 @@ function collectCanonicalRuleRoots(workdir) {
88607
88940
  const packageRel = normalized.slice(0, idx);
88608
88941
  if (!packageRel)
88609
88942
  continue;
88610
- roots.add(join51(workdir, packageRel));
88943
+ roots.add(join52(workdir, packageRel));
88611
88944
  }
88612
88945
  return [...roots].sort();
88613
88946
  }
@@ -88628,7 +88961,7 @@ init_config();
88628
88961
  init_logger2();
88629
88962
  init_detect2();
88630
88963
  init_workspace();
88631
- import { join as join52 } from "path";
88964
+ import { join as join53 } from "path";
88632
88965
  function resolveEffective(detected, configPatterns) {
88633
88966
  if (configPatterns !== undefined)
88634
88967
  return "config";
@@ -88713,7 +89046,7 @@ async function detectCommand(options) {
88713
89046
  const rootDetected = detectionMap[""] ?? { patterns: [], confidence: "empty", sources: [] };
88714
89047
  const pkgEntries = await Promise.all(packageDirs.map(async (dir) => {
88715
89048
  const det = detectionMap[dir] ?? { patterns: [], confidence: "empty", sources: [] };
88716
- const pkgConfigPath = join52(workdir, ".nax", "mono", dir, "config.json");
89049
+ const pkgConfigPath = join53(workdir, ".nax", "mono", dir, "config.json");
88717
89050
  const pkgRaw = await loadRawConfig(pkgConfigPath);
88718
89051
  const pkgPatterns = deepGet(pkgRaw, TEST_PATTERNS_KEY);
88719
89052
  const effective = Array.isArray(pkgPatterns) ? pkgPatterns : undefined;
@@ -88767,13 +89100,13 @@ async function detectCommand(options) {
88767
89100
  if (rootDetected.confidence === "empty") {
88768
89101
  console.log(source_default.yellow(" root: skipped (empty detection)"));
88769
89102
  } else {
88770
- const rootConfigPath = join52(workdir, ".nax", "config.json");
89103
+ const rootConfigPath = join53(workdir, ".nax", "config.json");
88771
89104
  try {
88772
89105
  const status = await applyToConfig(rootConfigPath, rootDetected.patterns, options.force ?? false);
88773
89106
  if (status === "skipped") {
88774
89107
  console.log(source_default.dim(" root: skipped (testFilePatterns already set; use --force to overwrite)"));
88775
89108
  } else {
88776
- console.log(source_default.green(` root: ${status} \u2192 ${join52(".nax", "config.json")}`));
89109
+ console.log(source_default.green(` root: ${status} \u2192 ${join53(".nax", "config.json")}`));
88777
89110
  }
88778
89111
  } catch (err) {
88779
89112
  console.error(source_default.red(` root: write failed \u2014 ${err.message}`));
@@ -88786,13 +89119,13 @@ async function detectCommand(options) {
88786
89119
  console.log(source_default.dim(` ${dir}: skipped (empty detection)`));
88787
89120
  continue;
88788
89121
  }
88789
- const pkgConfigPath = join52(workdir, ".nax", "mono", dir, "config.json");
89122
+ const pkgConfigPath = join53(workdir, ".nax", "mono", dir, "config.json");
88790
89123
  try {
88791
89124
  const status = await applyToConfig(pkgConfigPath, det.patterns, options.force ?? false);
88792
89125
  if (status === "skipped") {
88793
89126
  console.log(source_default.dim(` ${dir}: skipped (already set)`));
88794
89127
  } else {
88795
- console.log(source_default.green(` ${dir}: ${status} \u2192 ${join52(".nax", "mono", dir, "config.json")}`));
89128
+ console.log(source_default.green(` ${dir}: ${status} \u2192 ${join53(".nax", "mono", dir, "config.json")}`));
88796
89129
  }
88797
89130
  } catch (err) {
88798
89131
  console.error(source_default.red(` ${dir}: write failed \u2014 ${err.message}`));
@@ -88814,24 +89147,24 @@ async function diagnose(options) {
88814
89147
 
88815
89148
  // src/commands/logs.ts
88816
89149
  import { existsSync as existsSync28 } from "fs";
88817
- import { join as join56 } from "path";
89150
+ import { join as join57 } from "path";
88818
89151
 
88819
89152
  // src/commands/logs-formatter.ts
88820
89153
  init_source();
88821
89154
  init_formatter();
88822
89155
  import { readdirSync as readdirSync9 } from "fs";
88823
- import { join as join55 } from "path";
89156
+ import { join as join56 } from "path";
88824
89157
 
88825
89158
  // src/commands/logs-reader.ts
88826
89159
  import { existsSync as existsSync27, readdirSync as readdirSync8 } from "fs";
88827
89160
  import { readdir as readdir4 } from "fs/promises";
88828
- import { join as join54 } from "path";
89161
+ import { join as join55 } from "path";
88829
89162
 
88830
89163
  // src/utils/paths.ts
88831
89164
  import { homedir as homedir4 } from "os";
88832
- import { join as join53 } from "path";
89165
+ import { join as join54 } from "path";
88833
89166
  function getRunsDir() {
88834
- return process.env.NAX_RUNS_DIR ?? join53(homedir4(), ".nax", "runs");
89167
+ return process.env.NAX_RUNS_DIR ?? join54(homedir4(), ".nax", "runs");
88835
89168
  }
88836
89169
 
88837
89170
  // src/commands/logs-reader.ts
@@ -88848,7 +89181,7 @@ async function resolveRunFileFromRegistry(runId) {
88848
89181
  }
88849
89182
  let matched = null;
88850
89183
  for (const entry of entries) {
88851
- const metaPath = join54(runsDir, entry, "meta.json");
89184
+ const metaPath = join55(runsDir, entry, "meta.json");
88852
89185
  try {
88853
89186
  const meta3 = await Bun.file(metaPath).json();
88854
89187
  if (meta3.runId === runId || meta3.runId.startsWith(runId)) {
@@ -88870,14 +89203,14 @@ async function resolveRunFileFromRegistry(runId) {
88870
89203
  return null;
88871
89204
  }
88872
89205
  const specificFile = files.find((f) => f === `${matched.runId}.jsonl`);
88873
- return join54(matched.eventsDir, specificFile ?? files[0]);
89206
+ return join55(matched.eventsDir, specificFile ?? files[0]);
88874
89207
  }
88875
89208
  async function selectRunFile(runsDir) {
88876
89209
  const files = readdirSync8(runsDir).filter((f) => f.endsWith(".jsonl") && f !== "latest.jsonl").sort().reverse();
88877
89210
  if (files.length === 0) {
88878
89211
  return null;
88879
89212
  }
88880
- return join54(runsDir, files[0]);
89213
+ return join55(runsDir, files[0]);
88881
89214
  }
88882
89215
  async function extractRunSummary(filePath) {
88883
89216
  const file3 = Bun.file(filePath);
@@ -88962,7 +89295,7 @@ Runs:
88962
89295
  console.log(source_default.gray(" Timestamp Stories Duration Cost Status"));
88963
89296
  console.log(source_default.gray(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
88964
89297
  for (const file3 of files) {
88965
- const filePath = join55(runsDir, file3);
89298
+ const filePath = join56(runsDir, file3);
88966
89299
  const summary = await extractRunSummary(filePath);
88967
89300
  const timestamp = file3.replace(".jsonl", "");
88968
89301
  const stories = summary ? `${summary.passed}/${summary.total}` : "?/?";
@@ -89076,7 +89409,7 @@ async function logsCommand(options) {
89076
89409
  return;
89077
89410
  }
89078
89411
  const resolved = resolveProject({ dir: options.dir });
89079
- const naxDir = join56(resolved.projectDir, ".nax");
89412
+ const naxDir = join57(resolved.projectDir, ".nax");
89080
89413
  const configPath = resolved.configPath;
89081
89414
  const configFile = Bun.file(configPath);
89082
89415
  const config2 = await configFile.json();
@@ -89084,8 +89417,8 @@ async function logsCommand(options) {
89084
89417
  if (!featureName) {
89085
89418
  throw new Error("No feature specified in config.json");
89086
89419
  }
89087
- const featureDir = join56(naxDir, "features", featureName);
89088
- const runsDir = join56(featureDir, "runs");
89420
+ const featureDir = join57(naxDir, "features", featureName);
89421
+ const runsDir = join57(featureDir, "runs");
89089
89422
  if (!existsSync28(runsDir)) {
89090
89423
  throw new Error(`No runs directory found for feature: ${featureName}`);
89091
89424
  }
@@ -89110,7 +89443,7 @@ init_config();
89110
89443
  init_prd();
89111
89444
  init_precheck();
89112
89445
  import { existsSync as existsSync29 } from "fs";
89113
- import { join as join57 } from "path";
89446
+ import { join as join58 } from "path";
89114
89447
  async function precheckCommand(options) {
89115
89448
  const resolved = resolveProject({
89116
89449
  dir: options.dir,
@@ -89132,9 +89465,9 @@ async function precheckCommand(options) {
89132
89465
  process.exit(1);
89133
89466
  }
89134
89467
  }
89135
- const naxDir = join57(resolved.projectDir, ".nax");
89136
- const featureDir = join57(naxDir, "features", featureName);
89137
- const prdPath = join57(featureDir, "prd.json");
89468
+ const naxDir = join58(resolved.projectDir, ".nax");
89469
+ const featureDir = join58(naxDir, "features", featureName);
89470
+ const prdPath = join58(featureDir, "prd.json");
89138
89471
  if (!existsSync29(featureDir)) {
89139
89472
  console.error(source_default.red(`Feature not found: ${featureName}`));
89140
89473
  process.exit(1);
@@ -89156,7 +89489,7 @@ async function precheckCommand(options) {
89156
89489
  // src/commands/runs.ts
89157
89490
  init_source();
89158
89491
  import { readdir as readdir5 } from "fs/promises";
89159
- import { join as join58 } from "path";
89492
+ import { join as join59 } from "path";
89160
89493
  var DEFAULT_LIMIT = 20;
89161
89494
  var _runsCmdDeps = {
89162
89495
  getRunsDir
@@ -89211,7 +89544,7 @@ async function runsCommand(options = {}) {
89211
89544
  }
89212
89545
  const rows = [];
89213
89546
  for (const entry of entries) {
89214
- const metaPath = join58(runsDir, entry, "meta.json");
89547
+ const metaPath = join59(runsDir, entry, "meta.json");
89215
89548
  let meta3;
89216
89549
  try {
89217
89550
  meta3 = await Bun.file(metaPath).json();
@@ -89288,7 +89621,7 @@ async function runsCommand(options = {}) {
89288
89621
 
89289
89622
  // src/commands/unlock.ts
89290
89623
  init_source();
89291
- import { join as join59 } from "path";
89624
+ import { join as join60 } from "path";
89292
89625
  function isProcessAlive3(pid) {
89293
89626
  try {
89294
89627
  process.kill(pid, 0);
@@ -89303,7 +89636,7 @@ function formatLockAge(ageMs) {
89303
89636
  }
89304
89637
  async function unlockCommand(options) {
89305
89638
  const workdir = options.dir ?? process.cwd();
89306
- const lockPath = join59(workdir, "nax.lock");
89639
+ const lockPath = join60(workdir, "nax.lock");
89307
89640
  const lockFile = Bun.file(lockPath);
89308
89641
  const exists = await lockFile.exists();
89309
89642
  if (!exists) {
@@ -95285,8 +95618,8 @@ class Ink {
95285
95618
  }
95286
95619
  }
95287
95620
  async waitUntilExit() {
95288
- this.exitPromise ||= new Promise((resolve16, reject2) => {
95289
- this.resolveExitPromise = resolve16;
95621
+ this.exitPromise ||= new Promise((resolve17, reject2) => {
95622
+ this.resolveExitPromise = resolve17;
95290
95623
  this.rejectExitPromise = reject2;
95291
95624
  });
95292
95625
  if (!this.beforeExitHandler) {
@@ -97115,7 +97448,7 @@ async function promptForConfirmation(question) {
97115
97448
  if (!process.stdin.isTTY) {
97116
97449
  return true;
97117
97450
  }
97118
- return new Promise((resolve16) => {
97451
+ return new Promise((resolve17) => {
97119
97452
  process.stdout.write(source_default.bold(`${question} [Y/n] `));
97120
97453
  process.stdin.setRawMode(true);
97121
97454
  process.stdin.resume();
@@ -97128,9 +97461,9 @@ async function promptForConfirmation(question) {
97128
97461
  process.stdout.write(`
97129
97462
  `);
97130
97463
  if (answer === "n") {
97131
- resolve16(false);
97464
+ resolve17(false);
97132
97465
  } else {
97133
- resolve16(true);
97466
+ resolve17(true);
97134
97467
  }
97135
97468
  };
97136
97469
  process.stdin.on("data", handler);
@@ -97159,15 +97492,15 @@ Next: nax generate --package ${options.package}`));
97159
97492
  }
97160
97493
  return;
97161
97494
  }
97162
- const naxDir = join73(workdir, ".nax");
97495
+ const naxDir = join74(workdir, ".nax");
97163
97496
  if (existsSync34(naxDir) && !options.force) {
97164
97497
  console.log(source_default.yellow("nax already initialized. Use --force to overwrite."));
97165
97498
  return;
97166
97499
  }
97167
- mkdirSync7(join73(naxDir, "features"), { recursive: true });
97168
- mkdirSync7(join73(naxDir, "hooks"), { recursive: true });
97169
- await Bun.write(join73(naxDir, "config.json"), JSON.stringify(DEFAULT_CONFIG, null, 2));
97170
- await Bun.write(join73(naxDir, "hooks.json"), JSON.stringify({
97500
+ mkdirSync7(join74(naxDir, "features"), { recursive: true });
97501
+ mkdirSync7(join74(naxDir, "hooks"), { recursive: true });
97502
+ await Bun.write(join74(naxDir, "config.json"), JSON.stringify(DEFAULT_CONFIG, null, 2));
97503
+ await Bun.write(join74(naxDir, "hooks.json"), JSON.stringify({
97171
97504
  hooks: {
97172
97505
  "on-start": { command: 'echo "nax started: $NAX_FEATURE"', enabled: false },
97173
97506
  "on-complete": { command: 'echo "nax complete: $NAX_FEATURE"', enabled: false },
@@ -97175,12 +97508,12 @@ Next: nax generate --package ${options.package}`));
97175
97508
  "on-error": { command: 'echo "nax error: $NAX_REASON"', enabled: false }
97176
97509
  }
97177
97510
  }, null, 2));
97178
- await Bun.write(join73(naxDir, ".gitignore"), `# nax temp files
97511
+ await Bun.write(join74(naxDir, ".gitignore"), `# nax temp files
97179
97512
  *.tmp
97180
97513
  .paused.json
97181
97514
  .nax-verifier-verdict.json
97182
97515
  `);
97183
- await Bun.write(join73(naxDir, "context.md"), `# Project Context
97516
+ await Bun.write(join74(naxDir, "context.md"), `# Project Context
97184
97517
 
97185
97518
  This document defines coding standards, architectural decisions, and forbidden patterns for this project.
97186
97519
  Run \`nax generate\` to regenerate agent config files (CLAUDE.md, AGENTS.md, .cursorrules, etc.) from this file.
@@ -97310,8 +97643,8 @@ program2.command("run").description("Run the orchestration loop for a feature").
97310
97643
  console.error(source_default.red("nax not initialized. Run: nax init"));
97311
97644
  process.exit(1);
97312
97645
  }
97313
- const featureDir = join73(naxDir, "features", options.feature);
97314
- const prdPath = join73(featureDir, "prd.json");
97646
+ const featureDir = join74(naxDir, "features", options.feature);
97647
+ const prdPath = join74(featureDir, "prd.json");
97315
97648
  if (options.plan && options.from) {
97316
97649
  if (existsSync34(prdPath) && !options.force) {
97317
97650
  console.error(source_default.red(`Error: prd.json already exists for feature "${options.feature}".`));
@@ -97333,10 +97666,10 @@ program2.command("run").description("Run the orchestration loop for a feature").
97333
97666
  }
97334
97667
  }
97335
97668
  try {
97336
- const planLogDir = join73(featureDir, "plan");
97669
+ const planLogDir = join74(featureDir, "plan");
97337
97670
  mkdirSync7(planLogDir, { recursive: true });
97338
97671
  const planLogId = new Date().toISOString().replace(/:/g, "-").replace(/\..+/, "");
97339
- const planLogPath = join73(planLogDir, `${planLogId}.jsonl`);
97672
+ const planLogPath = join74(planLogDir, `${planLogId}.jsonl`);
97340
97673
  initLogger({ level: "info", filePath: planLogPath, useChalk: false, headless: true });
97341
97674
  console.log(source_default.dim(` [Plan log: ${planLogPath}]`));
97342
97675
  console.log(source_default.dim(" [Planning phase: generating PRD from spec]"));
@@ -97380,10 +97713,10 @@ program2.command("run").description("Run the orchestration loop for a feature").
97380
97713
  process.exit(1);
97381
97714
  }
97382
97715
  resetLogger();
97383
- const runsDir = join73(featureDir, "runs");
97716
+ const runsDir = join74(featureDir, "runs");
97384
97717
  mkdirSync7(runsDir, { recursive: true });
97385
97718
  const runId = new Date().toISOString().replace(/:/g, "-").replace(/\..+/, "");
97386
- const logFilePath = join73(runsDir, `${runId}.jsonl`);
97719
+ const logFilePath = join74(runsDir, `${runId}.jsonl`);
97387
97720
  const isTTY = process.stdout.isTTY ?? false;
97388
97721
  const headlessFlag = options.headless ?? false;
97389
97722
  const headlessEnv = process.env.NAX_HEADLESS === "1";
@@ -97400,7 +97733,7 @@ program2.command("run").description("Run the orchestration loop for a feature").
97400
97733
  config2.agent.default = options.agent;
97401
97734
  }
97402
97735
  config2.execution.maxIterations = Number.parseInt(options.maxIterations, 10);
97403
- const globalNaxDir = join73(homedir8(), ".nax");
97736
+ const globalNaxDir = join74(homedir8(), ".nax");
97404
97737
  const hooks = await loadHooksConfig(naxDir, globalNaxDir);
97405
97738
  const eventEmitter = new PipelineEventEmitter;
97406
97739
  let tuiInstance;
@@ -97423,7 +97756,7 @@ program2.command("run").description("Run the orchestration loop for a feature").
97423
97756
  } else {
97424
97757
  console.log(source_default.dim(" [Headless mode \u2014 pipe output]"));
97425
97758
  }
97426
- const statusFilePath = join73(workdir, ".nax", "status.json");
97759
+ const statusFilePath = join74(workdir, ".nax", "status.json");
97427
97760
  let parallel;
97428
97761
  if (options.parallel !== undefined) {
97429
97762
  parallel = Number.parseInt(options.parallel, 10);
@@ -97449,7 +97782,7 @@ program2.command("run").description("Run the orchestration loop for a feature").
97449
97782
  headless: useHeadless,
97450
97783
  skipPrecheck: options.skipPrecheck ?? false
97451
97784
  });
97452
- const latestSymlink = join73(runsDir, "latest.jsonl");
97785
+ const latestSymlink = join74(runsDir, "latest.jsonl");
97453
97786
  try {
97454
97787
  if (existsSync34(latestSymlink)) {
97455
97788
  Bun.spawnSync(["rm", latestSymlink]);
@@ -97487,9 +97820,9 @@ features.command("create <name>").description("Create a new feature").option("-d
97487
97820
  console.error(source_default.red("nax not initialized. Run: nax init"));
97488
97821
  process.exit(1);
97489
97822
  }
97490
- const featureDir = join73(naxDir, "features", name);
97823
+ const featureDir = join74(naxDir, "features", name);
97491
97824
  mkdirSync7(featureDir, { recursive: true });
97492
- await Bun.write(join73(featureDir, "spec.md"), `# Feature: ${name}
97825
+ await Bun.write(join74(featureDir, "spec.md"), `# Feature: ${name}
97493
97826
 
97494
97827
  ## Overview
97495
97828
 
@@ -97522,7 +97855,7 @@ features.command("create <name>").description("Create a new feature").option("-d
97522
97855
 
97523
97856
  <!-- What this feature explicitly does NOT cover. -->
97524
97857
  `);
97525
- await Bun.write(join73(featureDir, "progress.txt"), `# Progress: ${name}
97858
+ await Bun.write(join74(featureDir, "progress.txt"), `# Progress: ${name}
97526
97859
 
97527
97860
  Created: ${new Date().toISOString()}
97528
97861
 
@@ -97548,7 +97881,7 @@ features.command("list").description("List all features").option("-d, --dir <pat
97548
97881
  console.error(source_default.red("nax not initialized."));
97549
97882
  process.exit(1);
97550
97883
  }
97551
- const featuresDir = join73(naxDir, "features");
97884
+ const featuresDir = join74(naxDir, "features");
97552
97885
  if (!existsSync34(featuresDir)) {
97553
97886
  console.log(source_default.dim("No features yet."));
97554
97887
  return;
@@ -97563,7 +97896,7 @@ features.command("list").description("List all features").option("-d, --dir <pat
97563
97896
  Features:
97564
97897
  `));
97565
97898
  for (const name of entries) {
97566
- const prdPath = join73(featuresDir, name, "prd.json");
97899
+ const prdPath = join74(featuresDir, name, "prd.json");
97567
97900
  if (existsSync34(prdPath)) {
97568
97901
  const prd = await loadPRD(prdPath);
97569
97902
  const c = countStories(prd);
@@ -97598,10 +97931,10 @@ Use: nax plan -f <feature> --from <spec>`));
97598
97931
  cliOverrides.profile = options.profile;
97599
97932
  }
97600
97933
  const config2 = await loadConfig(workdir, cliOverrides);
97601
- const featureLogDir = join73(naxDir, "features", options.feature, "plan");
97934
+ const featureLogDir = join74(naxDir, "features", options.feature, "plan");
97602
97935
  mkdirSync7(featureLogDir, { recursive: true });
97603
97936
  const planLogId = new Date().toISOString().replace(/:/g, "-").replace(/\..+/, "");
97604
- const planLogPath = join73(featureLogDir, `${planLogId}.jsonl`);
97937
+ const planLogPath = join74(featureLogDir, `${planLogId}.jsonl`);
97605
97938
  initLogger({ level: "info", filePath: planLogPath, useChalk: false, headless: true });
97606
97939
  console.log(source_default.dim(` [Plan log: ${planLogPath}]`));
97607
97940
  try {