@nathapp/nax 0.60.0-canary.1 → 0.60.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/nax.js +873 -1038
  2. package/package.json +1 -1
package/dist/nax.js CHANGED
@@ -18334,7 +18334,11 @@ var init_schemas3 = __esm(() => {
18334
18334
  fixModel: "balanced",
18335
18335
  strategy: "diagnose-first",
18336
18336
  maxRetries: 2
18337
- })
18337
+ }),
18338
+ suggestedTestPath: exports_external.string().min(1).optional(),
18339
+ hardening: exports_external.object({
18340
+ enabled: exports_external.boolean().default(true)
18341
+ }).optional().default({ enabled: true })
18338
18342
  });
18339
18343
  TestCoverageConfigSchema = exports_external.object({
18340
18344
  enabled: exports_external.boolean().default(true),
@@ -18650,7 +18654,7 @@ var init_schemas3 = __esm(() => {
18650
18654
  }),
18651
18655
  acceptance: AcceptanceConfigSchema.default({
18652
18656
  enabled: true,
18653
- maxRetries: 2,
18657
+ maxRetries: 3,
18654
18658
  generateTests: true,
18655
18659
  testPath: ".nax-acceptance.test.ts",
18656
18660
  model: "fast",
@@ -18663,7 +18667,8 @@ var init_schemas3 = __esm(() => {
18663
18667
  fixModel: "balanced",
18664
18668
  strategy: "diagnose-first",
18665
18669
  maxRetries: 2
18666
- }
18670
+ },
18671
+ hardening: { enabled: true }
18667
18672
  }),
18668
18673
  context: ContextConfigSchema.default({
18669
18674
  fileInjection: "disabled",
@@ -21929,6 +21934,130 @@ ${c.output}`).join(`
21929
21934
  buildDebaterLabel(debater) {
21930
21935
  return debater.persona ? `${debater.agent} (${debater.persona})` : debater.agent;
21931
21936
  }
21937
+ buildReviewPrompt(diff, story) {
21938
+ const criteria = story.acceptanceCriteria.map((c) => `- ${c}`).join(`
21939
+ `);
21940
+ return [
21941
+ `Review the following code diff for story ${story.id}: ${story.title}`,
21942
+ "",
21943
+ "## Acceptance Criteria",
21944
+ criteria,
21945
+ "",
21946
+ "## Diff",
21947
+ diff,
21948
+ "",
21949
+ "Also flag any changes in the diff not required by the acceptance criteria above as out-of-scope findings.",
21950
+ "Respond with JSON: { passed: boolean, findings: [...], findingReasoning: { [id]: string } }"
21951
+ ].join(`
21952
+ `);
21953
+ }
21954
+ buildReReviewPrompt(updatedDiff, previousFindings) {
21955
+ const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.ruleId}: ${f.message}`).join(`
21956
+ `) : "(none)";
21957
+ return [
21958
+ "This is a follow-up re-review. Please review the updated diff below.",
21959
+ "",
21960
+ "## Previous Findings",
21961
+ findingsList,
21962
+ "",
21963
+ "## Updated Diff",
21964
+ updatedDiff,
21965
+ "",
21966
+ "Respond with JSON: { passed: boolean, findings: [...], findingReasoning: { [id]: string }, deltaSummary: string }",
21967
+ "deltaSummary should describe which previous findings are resolved vs still present."
21968
+ ].join(`
21969
+ `);
21970
+ }
21971
+ buildResolverPrompt(proposals, critiques, diff, story, resolverContext) {
21972
+ const criteria = story.acceptanceCriteria.map((c) => `- ${c}`).join(`
21973
+ `);
21974
+ const framing = this.buildResolverFraming(resolverContext);
21975
+ const voteTally = this.buildVoteTallyLine(resolverContext);
21976
+ const proposalsSection = this.buildLabeledProposalsSection(proposals);
21977
+ const critiquesSection = this.buildLabeledCritiquesSection(critiques);
21978
+ return [
21979
+ framing,
21980
+ "",
21981
+ `## Story ${story.id}: ${story.title}`,
21982
+ "",
21983
+ "## Acceptance Criteria",
21984
+ criteria,
21985
+ "",
21986
+ "## Debater Proposals",
21987
+ proposalsSection,
21988
+ critiquesSection,
21989
+ "",
21990
+ "## Diff",
21991
+ diff,
21992
+ voteTally,
21993
+ "",
21994
+ "Respond with JSON: { passed: boolean, findings: [...], findingReasoning: { [id]: string } }"
21995
+ ].filter((line) => line !== undefined).join(`
21996
+ `);
21997
+ }
21998
+ buildReResolverPrompt(proposals, critiques, updatedDiff, previousFindings, resolverContext) {
21999
+ const framing = this.buildResolverFraming(resolverContext);
22000
+ const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.ruleId}: ${f.message}`).join(`
22001
+ `) : "(none)";
22002
+ const proposalsSection = this.buildLabeledProposalsSection(proposals);
22003
+ const critiquesSection = this.buildLabeledCritiquesSection(critiques);
22004
+ return [
22005
+ `${framing} This is a re-review after implementer changes.`,
22006
+ "",
22007
+ "## Previous Findings",
22008
+ findingsList,
22009
+ "",
22010
+ "## Updated Debater Proposals",
22011
+ proposalsSection,
22012
+ critiquesSection,
22013
+ "",
22014
+ "## Updated Diff",
22015
+ updatedDiff,
22016
+ "",
22017
+ "Respond with JSON: { passed: boolean, findings: [...], findingReasoning: { [id]: string }, deltaSummary: string }",
22018
+ "deltaSummary should describe which previous findings are resolved vs still present."
22019
+ ].filter((line) => line !== undefined).join(`
22020
+ `);
22021
+ }
22022
+ buildResolverFraming(ctx) {
22023
+ switch (ctx.resolverType) {
22024
+ case "majority-fail-closed":
22025
+ case "majority-fail-open":
22026
+ return "You are the authoritative reviewer resolving a debate. A preliminary vote was taken \u2014 see tally below. Verify disputed findings using tools (READ files, GREP for usage) and give your final verdict.";
22027
+ case "synthesis":
22028
+ return "You are a synthesis reviewer. Synthesize the debater proposals into a single, coherent, tool-verified verdict. Use READ and GREP to verify claims before ruling.";
22029
+ case "custom":
22030
+ return "You are the judge. Evaluate the debater proposals independently. Verify claims with tools (READ, GREP) and give your final authoritative verdict.";
22031
+ default:
22032
+ return "You are the reviewer. Evaluate the debater proposals and give your final authoritative verdict.";
22033
+ }
22034
+ }
22035
+ buildVoteTallyLine(ctx) {
22036
+ if (!ctx.majorityVote)
22037
+ return "";
22038
+ const { passCount, failCount } = ctx.majorityVote;
22039
+ const failOpenNote = ctx.resolverType === "majority-fail-open" ? " (unparseable proposals count as pass)" : " (unparseable proposals count as fail)";
22040
+ return `
22041
+
22042
+ The preliminary majority vote is: **${passCount} passed, ${failCount} failed**${failOpenNote}. Verify the failing findings with tools before giving your authoritative verdict.`;
22043
+ }
22044
+ buildLabeledProposalsSection(proposals) {
22045
+ return proposals.map((p) => `### ${p.debater}
22046
+ ${p.output}`).join(`
22047
+
22048
+ `);
22049
+ }
22050
+ buildLabeledCritiquesSection(critiques) {
22051
+ if (critiques.length === 0)
22052
+ return "";
22053
+ return `
22054
+
22055
+ ## Critiques
22056
+ ${critiques.map((c, i) => `### Critique ${i + 1}
22057
+ ${c}`).join(`
22058
+
22059
+ `)}`;
22060
+ }
21932
22061
  }
21933
22062
  var init_prompt_builder = __esm(() => {
21934
22063
  init_personas();
@@ -22566,7 +22695,20 @@ Do NOT output the JSON to the conversation. Write the file, then reply with a br
22566
22695
  logger?.warn("debate", "hybrid mode requires sessionMode: stateful for plan \u2014 running as panel");
22567
22696
  }
22568
22697
  const resolverTimeoutMs = (ctx.stageConfig.timeoutSeconds ?? 600) * 1000;
22569
- const planSynthesisSuffix = "IMPORTANT: Your response must be a single valid JSON object in PRD format (with project, feature, branchName, userStories array, etc.). Do NOT wrap it in markdown fences. Output raw JSON only.";
22698
+ const specAnchor = opts.specContent ? `
22699
+
22700
+ ## Original Spec
22701
+
22702
+ ${opts.specContent}
22703
+
22704
+ ## Synthesis Rules \u2014 Acceptance Criteria
22705
+
22706
+ The spec above is the authoritative source for acceptance criteria.
22707
+ - Each story's \`acceptanceCriteria\` array MUST contain only criteria that are explicitly stated or directly implied by the spec.
22708
+ - If a debater proposed criteria beyond the spec (edge cases, error handling, implementation details), place those in a separate \`suggestedCriteria\` array on the same story object.
22709
+ - Never silently merge debater-invented criteria into \`acceptanceCriteria\`. The distinction matters: \`acceptanceCriteria\` drives automated testing; \`suggestedCriteria\` is logged for human review.
22710
+ - Preserve the spec's AC wording. You may refine for clarity but must not change semantics.` : "";
22711
+ const planSynthesisSuffix = `IMPORTANT: Your response must be a single valid JSON object in PRD format (with project, feature, branchName, userStories array, etc.). Do NOT wrap it in markdown fences. Output raw JSON only.${specAnchor}`;
22570
22712
  const outcome = await resolveOutcome(proposalOutputs, critiqueOutputs, ctx.stageConfig, ctx.config, ctx.storyId, resolverTimeoutMs, opts.workdir, opts.feature, undefined, undefined, planSynthesisSuffix, successful.map((p) => p.debater));
22571
22713
  const winningOutput = outcome.output ?? successful[0].output;
22572
22714
  const proposals = successful.map((p) => ({ debater: p.debater, output: p.output }));
@@ -24929,6 +25071,24 @@ function resolveAcceptanceTestCandidates(options) {
24929
25071
  return [];
24930
25072
  return [resolveAcceptanceFeatureTestPath(options.featureDir, options.testPathConfig, options.language)];
24931
25073
  }
25074
+ function suggestedTestFilename(language) {
25075
+ switch (language?.toLowerCase()) {
25076
+ case "go":
25077
+ return ".nax-suggested_test.go";
25078
+ case "python":
25079
+ return ".nax-suggested.test.py";
25080
+ case "rust":
25081
+ return ".nax-suggested.rs";
25082
+ default:
25083
+ return ".nax-suggested.test.ts";
25084
+ }
25085
+ }
25086
+ function resolveSuggestedTestFile(language, testPathConfig) {
25087
+ return testPathConfig ?? suggestedTestFilename(language);
25088
+ }
25089
+ function resolveSuggestedPackageFeatureTestPath(packageDir, featureName, testPathConfig, language) {
25090
+ return path.join(packageDir, ".nax", "features", featureName, resolveSuggestedTestFile(language, testPathConfig));
25091
+ }
24932
25092
  async function findExistingAcceptanceTestPath(options) {
24933
25093
  const candidates = resolveAcceptanceTestCandidates(options);
24934
25094
  for (const testPath of candidates) {
@@ -25795,7 +25955,7 @@ Rules:
25795
25955
  - Every test MUST have real assertions that PASS when the feature is correctly implemented and FAIL when it is broken
25796
25956
  - **Prefer behavioral tests** \u2014 import functions and call them rather than reading source files. For example, to verify "getPostRunActions() returns empty array", import PluginRegistry and call getPostRunActions(), don't grep the source file for the method name.
25797
25957
  - **File output (REQUIRED)**: Write the acceptance test file DIRECTLY to the path shown below. Do NOT output the test code in your response. After writing the file, reply with a brief confirmation.
25798
- - **Path anchor (CRITICAL)**: Write the test file to this exact path: \`${join16(options.workdir, ".nax", "features", options.featureName, resolveAcceptanceTestFile2(options.language, options.config?.acceptance?.testPath))}\`. Import from package sources using relative paths like \`../../../src/...\` (3 levels up from \`.nax/features/<name>/\` to the package root).
25958
+ - **Path anchor (CRITICAL)**: Write the test file to this exact path: \`${options.targetTestFile ?? join16(options.workdir, ".nax", "features", options.featureName, resolveAcceptanceTestFile2(options.language, options.config?.acceptance?.testPath))}\`. Import from package sources using relative paths like \`../../../src/...\` (3 levels up from \`.nax/features/<name>/\` to the package root).
25799
25959
  - **Process cwd**: When spawning child processes to invoke a CLI or binary, set the working directory to the **package root** (\`join(import.meta.dir, "../../..")\`) as your default \u2014 unless your Step 2 exploration reveals the CLI uses a different working directory convention (e.g. reads config from \`~/.config/\`, or resolves paths relative to a flag value). Always check how the CLI resolves file paths before assuming.`;
25800
25960
  const implementationSection = options.implementationContext && options.implementationContext.length > 0 ? `
25801
25961
 
@@ -25828,7 +25988,7 @@ Previous test failed because: ${options.previousFailure}` : "";
25828
25988
  outputPreview: rawOutput.slice(0, 300)
25829
25989
  });
25830
25990
  if (!testCode) {
25831
- const targetPath = join16(options.workdir, ".nax", "features", options.featureName, resolveAcceptanceTestFile2(options.language, options.config?.acceptance?.testPath));
25991
+ const targetPath = options.targetTestFile ?? join16(options.workdir, ".nax", "features", options.featureName, resolveAcceptanceTestFile2(options.language, options.config?.acceptance?.testPath));
25832
25992
  const backupPath = `${targetPath}.llm-recovery.bak`;
25833
25993
  let recoveryFailed = false;
25834
25994
  logger.debug("acceptance", "BUG-076 recovery: checking for agent-written file", {
@@ -26193,10 +26353,304 @@ function logTestOutput(logger, stage, output, opts = {}) {
26193
26353
  });
26194
26354
  }
26195
26355
 
26356
+ // src/acceptance/refinement.ts
26357
+ var exports_refinement = {};
26358
+ __export(exports_refinement, {
26359
+ refineAcceptanceCriteria: () => refineAcceptanceCriteria,
26360
+ parseRefinementResponse: () => parseRefinementResponse,
26361
+ buildRefinementPrompt: () => buildRefinementPrompt,
26362
+ _refineDeps: () => _refineDeps
26363
+ });
26364
+ function buildRefinementPrompt(criteria, codebaseContext, options) {
26365
+ const criteriaList = criteria.map((c, i) => `${i + 1}. ${c}`).join(`
26366
+ `);
26367
+ const strategySection = buildStrategySection(options);
26368
+ const refinedExample = buildRefinedExample(options?.testStrategy);
26369
+ const codebaseSection = codebaseContext ? `CODEBASE CONTEXT:
26370
+ ${codebaseContext}
26371
+ ` : "";
26372
+ const core2 = `You are an acceptance criteria refinement assistant. Your task is to convert raw acceptance criteria into concrete, machine-verifiable assertions.
26373
+
26374
+ ${codebaseSection}${strategySection}ACCEPTANCE CRITERIA TO REFINE:
26375
+ ${criteriaList}
26376
+
26377
+ For each criterion, produce a refined version that is concrete and automatically testable where possible.
26378
+ Respond with a JSON array:
26379
+ [{
26380
+ "original": "<exact original criterion text>",
26381
+ "refined": "<concrete, machine-verifiable description>",
26382
+ "testable": true,
26383
+ "storyId": ""
26384
+ }]
26385
+
26386
+ Rules:
26387
+ - "original" must match the input criterion text exactly
26388
+ - "refined" must be a concrete assertion (e.g., ${refinedExample})
26389
+ - "testable" is false only if the criterion cannot be automatically verified (e.g., "UX feels responsive", "design looks good")
26390
+ - "storyId" leave as empty string \u2014 it will be assigned by the caller`;
26391
+ return wrapJsonPrompt(core2);
26392
+ }
26393
+ function buildStrategySection(options) {
26394
+ if (!options?.testStrategy) {
26395
+ return "";
26396
+ }
26397
+ const framework = options.testFramework ? ` Use ${options.testFramework} testing library syntax.` : "";
26398
+ switch (options.testStrategy) {
26399
+ case "component":
26400
+ return `
26401
+ TEST STRATEGY: component
26402
+ Focus assertions on rendered output visible on screen \u2014 text content, visible elements, and screen state.
26403
+ Assert what the user sees rendered in the component, not what internal functions produce.${framework}
26404
+ `;
26405
+ case "cli":
26406
+ return `
26407
+ TEST STRATEGY: cli
26408
+ Focus assertions on stdout and stderr text output from the CLI command.
26409
+ Assert about terminal output content, exit codes, and standard output/standard error streams.${framework}
26410
+ `;
26411
+ case "e2e":
26412
+ return `
26413
+ TEST STRATEGY: e2e
26414
+ Focus assertions on HTTP response content \u2014 status codes, response bodies, and endpoint behavior.
26415
+ Assert about HTTP responses, status codes, and API endpoint output.${framework}
26416
+ `;
26417
+ default:
26418
+ return framework ? `
26419
+ TEST FRAMEWORK: ${options.testFramework}
26420
+ ` : "";
26421
+ }
26422
+ }
26423
+ function buildRefinedExample(testStrategy) {
26424
+ switch (testStrategy) {
26425
+ case "component":
26426
+ return '"Text content visible on screen matches expected", "Rendered output contains expected element"';
26427
+ case "cli":
26428
+ return '"stdout contains expected text", "stderr is empty on success", "exit code is 0"';
26429
+ case "e2e":
26430
+ return '"HTTP status 200 returned", "Response body contains expected field", "Endpoint returns JSON"';
26431
+ default:
26432
+ return '"Array of length N returned", "HTTP status 200 returned"';
26433
+ }
26434
+ }
26435
+ function parseRefinementResponse(response, criteria) {
26436
+ if (!response || !response.trim()) {
26437
+ return fallbackCriteria(criteria);
26438
+ }
26439
+ try {
26440
+ const fromFence = extractJsonFromMarkdown(response);
26441
+ const cleaned = stripTrailingCommas(fromFence !== response ? fromFence : response);
26442
+ const parsed = JSON.parse(cleaned);
26443
+ if (!Array.isArray(parsed)) {
26444
+ return fallbackCriteria(criteria);
26445
+ }
26446
+ return parsed.map((item, i) => ({
26447
+ original: typeof item.original === "string" && item.original.length > 0 ? item.original : criteria[i] ?? "",
26448
+ refined: typeof item.refined === "string" && item.refined.length > 0 ? item.refined : criteria[i] ?? "",
26449
+ testable: typeof item.testable === "boolean" ? item.testable : true,
26450
+ storyId: typeof item.storyId === "string" ? item.storyId : ""
26451
+ }));
26452
+ } catch {
26453
+ return fallbackCriteria(criteria);
26454
+ }
26455
+ }
26456
+ async function refineAcceptanceCriteria(criteria, context) {
26457
+ if (criteria.length === 0) {
26458
+ return [];
26459
+ }
26460
+ const { storyId, featureName, workdir, codebaseContext, config: config2, testStrategy, testFramework } = context;
26461
+ const logger = getLogger();
26462
+ const modelTier = config2.acceptance?.model ?? "fast";
26463
+ const modelDef = resolveModelForAgent(config2.models, config2.autoMode.defaultAgent, modelTier, config2.autoMode.defaultAgent);
26464
+ const prompt = buildRefinementPrompt(criteria, codebaseContext, { testStrategy, testFramework });
26465
+ let response;
26466
+ try {
26467
+ const completeResult = await _refineDeps.adapter.complete(prompt, {
26468
+ jsonMode: true,
26469
+ maxTokens: 4096,
26470
+ model: modelDef.model,
26471
+ config: config2,
26472
+ featureName,
26473
+ storyId,
26474
+ workdir,
26475
+ sessionRole: "refine",
26476
+ timeoutMs: config2.acceptance?.timeoutMs ?? 120000
26477
+ });
26478
+ response = typeof completeResult === "string" ? completeResult : completeResult.output;
26479
+ } catch (error48) {
26480
+ const reason = errorMessage(error48);
26481
+ logger.warn("refinement", "adapter.complete() failed, falling back to original criteria", {
26482
+ storyId,
26483
+ error: reason
26484
+ });
26485
+ return fallbackCriteria(criteria, storyId);
26486
+ }
26487
+ const parsed = parseRefinementResponse(response, criteria);
26488
+ return parsed.map((item) => ({
26489
+ ...item,
26490
+ storyId: item.storyId || storyId
26491
+ }));
26492
+ }
26493
+ function fallbackCriteria(criteria, storyId = "") {
26494
+ return criteria.map((c) => ({
26495
+ original: c,
26496
+ refined: c,
26497
+ testable: true,
26498
+ storyId
26499
+ }));
26500
+ }
26501
+ var _refineDeps;
26502
+ var init_refinement = __esm(() => {
26503
+ init_registry();
26504
+ init_config();
26505
+ init_logger2();
26506
+ _refineDeps = {
26507
+ adapter: {
26508
+ complete: async (...args) => {
26509
+ const options = args[1];
26510
+ const config2 = options?.config;
26511
+ if (!config2)
26512
+ throw new Error("Refinement adapter requires config");
26513
+ const adapter = createAgentRegistry(config2).getAgent(config2.autoMode.defaultAgent);
26514
+ if (!adapter)
26515
+ throw new Error(`Agent "${config2.autoMode.defaultAgent}" not found`);
26516
+ return adapter.complete(...args);
26517
+ }
26518
+ }
26519
+ };
26520
+ });
26521
+
26522
+ // src/acceptance/hardening.ts
26523
+ var exports_hardening = {};
26524
+ __export(exports_hardening, {
26525
+ runHardeningPass: () => runHardeningPass,
26526
+ _hardeningDeps: () => _hardeningDeps
26527
+ });
26528
+ async function runHardeningPass(ctx) {
26529
+ const logger = getSafeLogger();
26530
+ const result = { promoted: [], discarded: [], costUsd: 0 };
26531
+ const storiesWithSuggested = ctx.prd.userStories.filter((s) => s.suggestedCriteria && s.suggestedCriteria.length > 0);
26532
+ if (storiesWithSuggested.length === 0)
26533
+ return result;
26534
+ logger?.info("acceptance", "Starting hardening pass", {
26535
+ storyId: storiesWithSuggested[0].id,
26536
+ storiesWithSuggested: storiesWithSuggested.length,
26537
+ totalSuggestedACs: storiesWithSuggested.reduce((n, s) => n + (s.suggestedCriteria?.length ?? 0), 0)
26538
+ });
26539
+ try {
26540
+ const allRefined = [];
26541
+ for (const story of storiesWithSuggested) {
26542
+ const criteria = story.suggestedCriteria ?? [];
26543
+ const refined = await _hardeningDeps.refine(criteria, {
26544
+ storyId: story.id,
26545
+ featureName: ctx.prd.feature,
26546
+ workdir: ctx.workdir,
26547
+ codebaseContext: "",
26548
+ config: ctx.config
26549
+ });
26550
+ allRefined.push(...refined);
26551
+ }
26552
+ const language = ctx.config.project?.language;
26553
+ const suggestedTestPath = resolveSuggestedPackageFeatureTestPath(ctx.workdir, ctx.prd.feature, ctx.config.acceptance?.suggestedTestPath, language);
26554
+ let modelDef;
26555
+ try {
26556
+ modelDef = resolveModelForAgent(ctx.config.models, ctx.config.autoMode?.defaultAgent ?? "claude", ctx.config.acceptance?.model ?? "fast", ctx.config.autoMode?.defaultAgent ?? "claude");
26557
+ } catch {
26558
+ modelDef = { provider: "anthropic", model: "claude-haiku-4-5-20251001" };
26559
+ }
26560
+ const genResult = await _hardeningDeps.generate(storiesWithSuggested, allRefined, {
26561
+ featureName: ctx.prd.feature,
26562
+ workdir: ctx.workdir,
26563
+ featureDir: ctx.featureDir,
26564
+ codebaseContext: "",
26565
+ modelTier: ctx.config.acceptance?.model ?? "fast",
26566
+ modelDef,
26567
+ config: ctx.config,
26568
+ language,
26569
+ targetTestFile: suggestedTestPath
26570
+ });
26571
+ if (genResult.testCode) {
26572
+ await _hardeningDeps.writeFile(suggestedTestPath, genResult.testCode);
26573
+ }
26574
+ const testCmd = buildAcceptanceRunCommand(suggestedTestPath, ctx.config.project?.testFramework, ctx.config.acceptance?.command);
26575
+ const proc = _hardeningDeps.spawn(testCmd, {
26576
+ cwd: ctx.workdir,
26577
+ stdout: "pipe",
26578
+ stderr: "pipe"
26579
+ });
26580
+ const [exitCode, stdout, stderr] = await Promise.all([
26581
+ proc.exited,
26582
+ new Response(proc.stdout).text(),
26583
+ new Response(proc.stderr).text()
26584
+ ]);
26585
+ const output = `${stdout}
26586
+ ${stderr}`;
26587
+ const failedACs = parseTestFailures(output);
26588
+ const failedSet = new Set(failedACs.map((ac) => ac.toUpperCase()));
26589
+ let acIndex = 0;
26590
+ for (const story of storiesWithSuggested) {
26591
+ const suggested = story.suggestedCriteria ?? [];
26592
+ const toPromote = [];
26593
+ const toDiscard = [];
26594
+ for (const criterion of suggested) {
26595
+ acIndex++;
26596
+ const acId = `AC-${acIndex}`;
26597
+ if (failedSet.has(acId) || exitCode !== 0 && failedACs.length === 0) {
26598
+ toDiscard.push(criterion);
26599
+ } else {
26600
+ toPromote.push(criterion);
26601
+ }
26602
+ }
26603
+ if (toPromote.length > 0) {
26604
+ story.acceptanceCriteria = [...story.acceptanceCriteria, ...toPromote];
26605
+ result.promoted.push(...toPromote);
26606
+ }
26607
+ result.discarded.push(...toDiscard);
26608
+ story.suggestedCriteria = toDiscard.length > 0 ? toDiscard : undefined;
26609
+ }
26610
+ if (result.promoted.length > 0) {
26611
+ await _hardeningDeps.savePRD(ctx.prd, ctx.prdPath);
26612
+ }
26613
+ logger?.info("acceptance", "Hardening pass complete", {
26614
+ storyId: storiesWithSuggested[0].id,
26615
+ promoted: result.promoted.length,
26616
+ discarded: result.discarded.length,
26617
+ costUsd: result.costUsd
26618
+ });
26619
+ } catch (err) {
26620
+ logger?.warn("acceptance", "Hardening pass failed (non-blocking)", {
26621
+ storyId: storiesWithSuggested[0].id,
26622
+ error: err instanceof Error ? err.message : String(err)
26623
+ });
26624
+ }
26625
+ return result;
26626
+ }
26627
+ var _hardeningDeps;
26628
+ var init_hardening = __esm(() => {
26629
+ init_config();
26630
+ init_logger2();
26631
+ init_acceptance();
26632
+ init_prd();
26633
+ init_generator();
26634
+ init_generator();
26635
+ init_refinement();
26636
+ init_test_path();
26637
+ _hardeningDeps = {
26638
+ refine: refineAcceptanceCriteria,
26639
+ generate: generateFromPRD,
26640
+ savePRD,
26641
+ spawn: Bun.spawn,
26642
+ writeFile: async (p, c) => {
26643
+ await Bun.write(p, c);
26644
+ }
26645
+ };
26646
+ });
26647
+
26196
26648
  // src/pipeline/stages/acceptance.ts
26197
26649
  var exports_acceptance = {};
26198
26650
  __export(exports_acceptance, {
26199
- acceptanceStage: () => acceptanceStage
26651
+ parseTestFailures: () => parseTestFailures,
26652
+ acceptanceStage: () => acceptanceStage,
26653
+ _acceptanceStageDeps: () => _acceptanceStageDeps
26200
26654
  });
26201
26655
  function parseTestFailures(output) {
26202
26656
  const failedACs = [];
@@ -26220,12 +26674,18 @@ function areAllStoriesComplete(ctx) {
26220
26674
  const totalComplete = counts.passed + counts.failed + counts.skipped;
26221
26675
  return totalComplete === counts.total;
26222
26676
  }
26223
- var acceptanceStage;
26677
+ var _acceptanceStageDeps, acceptanceStage;
26224
26678
  var init_acceptance = __esm(() => {
26225
26679
  init_generator();
26226
26680
  init_test_path();
26227
26681
  init_logger2();
26228
26682
  init_prd();
26683
+ _acceptanceStageDeps = {
26684
+ runHardeningPass: async (ctx) => {
26685
+ const { runHardeningPass: runHardeningPass2 } = await Promise.resolve().then(() => (init_hardening(), exports_hardening));
26686
+ return runHardeningPass2(ctx);
26687
+ }
26688
+ };
26229
26689
  acceptanceStage = {
26230
26690
  name: "acceptance",
26231
26691
  enabled(ctx) {
@@ -26323,6 +26783,31 @@ ${stderr}`;
26323
26783
  `);
26324
26784
  if (allFailedACs.length === 0) {
26325
26785
  logger.info("acceptance", "All acceptance tests passed", { storyId: ctx.story.id });
26786
+ const hardeningEnabled = ctx.config.acceptance?.hardening?.enabled !== false;
26787
+ const hasAnySuggested = ctx.prd.userStories.some((s) => s.suggestedCriteria && s.suggestedCriteria.length > 0);
26788
+ if (hardeningEnabled && hasAnySuggested && ctx.featureDir) {
26789
+ try {
26790
+ const prdPath = ctx.prdPath ?? `${ctx.featureDir}/prd.json`;
26791
+ const result = await _acceptanceStageDeps.runHardeningPass({
26792
+ prd: ctx.prd,
26793
+ prdPath,
26794
+ featureDir: ctx.featureDir,
26795
+ workdir: ctx.workdir,
26796
+ config: ctx.config,
26797
+ agentGetFn: ctx.agentGetFn
26798
+ });
26799
+ logger.info("acceptance", "Hardening pass complete", {
26800
+ storyId: ctx.story.id,
26801
+ promoted: result.promoted.length,
26802
+ discarded: result.discarded.length
26803
+ });
26804
+ } catch (err) {
26805
+ logger.warn("acceptance", "Hardening pass failed (non-blocking)", {
26806
+ storyId: ctx.story.id,
26807
+ error: err instanceof Error ? err.message : String(err)
26808
+ });
26809
+ }
26810
+ }
26326
26811
  return { action: "continue" };
26327
26812
  }
26328
26813
  ctx.acceptanceFailures = {
@@ -26343,172 +26828,6 @@ ${stderr}`;
26343
26828
  };
26344
26829
  });
26345
26830
 
26346
- // src/acceptance/refinement.ts
26347
- var exports_refinement = {};
26348
- __export(exports_refinement, {
26349
- refineAcceptanceCriteria: () => refineAcceptanceCriteria,
26350
- parseRefinementResponse: () => parseRefinementResponse,
26351
- buildRefinementPrompt: () => buildRefinementPrompt,
26352
- _refineDeps: () => _refineDeps
26353
- });
26354
- function buildRefinementPrompt(criteria, codebaseContext, options) {
26355
- const criteriaList = criteria.map((c, i) => `${i + 1}. ${c}`).join(`
26356
- `);
26357
- const strategySection = buildStrategySection(options);
26358
- const refinedExample = buildRefinedExample(options?.testStrategy);
26359
- const codebaseSection = codebaseContext ? `CODEBASE CONTEXT:
26360
- ${codebaseContext}
26361
- ` : "";
26362
- const core2 = `You are an acceptance criteria refinement assistant. Your task is to convert raw acceptance criteria into concrete, machine-verifiable assertions.
26363
-
26364
- ${codebaseSection}${strategySection}ACCEPTANCE CRITERIA TO REFINE:
26365
- ${criteriaList}
26366
-
26367
- For each criterion, produce a refined version that is concrete and automatically testable where possible.
26368
- Respond with a JSON array:
26369
- [{
26370
- "original": "<exact original criterion text>",
26371
- "refined": "<concrete, machine-verifiable description>",
26372
- "testable": true,
26373
- "storyId": ""
26374
- }]
26375
-
26376
- Rules:
26377
- - "original" must match the input criterion text exactly
26378
- - "refined" must be a concrete assertion (e.g., ${refinedExample})
26379
- - "testable" is false only if the criterion cannot be automatically verified (e.g., "UX feels responsive", "design looks good")
26380
- - "storyId" leave as empty string \u2014 it will be assigned by the caller`;
26381
- return wrapJsonPrompt(core2);
26382
- }
26383
- function buildStrategySection(options) {
26384
- if (!options?.testStrategy) {
26385
- return "";
26386
- }
26387
- const framework = options.testFramework ? ` Use ${options.testFramework} testing library syntax.` : "";
26388
- switch (options.testStrategy) {
26389
- case "component":
26390
- return `
26391
- TEST STRATEGY: component
26392
- Focus assertions on rendered output visible on screen \u2014 text content, visible elements, and screen state.
26393
- Assert what the user sees rendered in the component, not what internal functions produce.${framework}
26394
- `;
26395
- case "cli":
26396
- return `
26397
- TEST STRATEGY: cli
26398
- Focus assertions on stdout and stderr text output from the CLI command.
26399
- Assert about terminal output content, exit codes, and standard output/standard error streams.${framework}
26400
- `;
26401
- case "e2e":
26402
- return `
26403
- TEST STRATEGY: e2e
26404
- Focus assertions on HTTP response content \u2014 status codes, response bodies, and endpoint behavior.
26405
- Assert about HTTP responses, status codes, and API endpoint output.${framework}
26406
- `;
26407
- default:
26408
- return framework ? `
26409
- TEST FRAMEWORK: ${options.testFramework}
26410
- ` : "";
26411
- }
26412
- }
26413
- function buildRefinedExample(testStrategy) {
26414
- switch (testStrategy) {
26415
- case "component":
26416
- return '"Text content visible on screen matches expected", "Rendered output contains expected element"';
26417
- case "cli":
26418
- return '"stdout contains expected text", "stderr is empty on success", "exit code is 0"';
26419
- case "e2e":
26420
- return '"HTTP status 200 returned", "Response body contains expected field", "Endpoint returns JSON"';
26421
- default:
26422
- return '"Array of length N returned", "HTTP status 200 returned"';
26423
- }
26424
- }
26425
- function parseRefinementResponse(response, criteria) {
26426
- if (!response || !response.trim()) {
26427
- return fallbackCriteria(criteria);
26428
- }
26429
- try {
26430
- const fromFence = extractJsonFromMarkdown(response);
26431
- const cleaned = stripTrailingCommas(fromFence !== response ? fromFence : response);
26432
- const parsed = JSON.parse(cleaned);
26433
- if (!Array.isArray(parsed)) {
26434
- return fallbackCriteria(criteria);
26435
- }
26436
- return parsed.map((item, i) => ({
26437
- original: typeof item.original === "string" && item.original.length > 0 ? item.original : criteria[i] ?? "",
26438
- refined: typeof item.refined === "string" && item.refined.length > 0 ? item.refined : criteria[i] ?? "",
26439
- testable: typeof item.testable === "boolean" ? item.testable : true,
26440
- storyId: typeof item.storyId === "string" ? item.storyId : ""
26441
- }));
26442
- } catch {
26443
- return fallbackCriteria(criteria);
26444
- }
26445
- }
26446
- async function refineAcceptanceCriteria(criteria, context) {
26447
- if (criteria.length === 0) {
26448
- return [];
26449
- }
26450
- const { storyId, featureName, workdir, codebaseContext, config: config2, testStrategy, testFramework } = context;
26451
- const logger = getLogger();
26452
- const modelTier = config2.acceptance?.model ?? "fast";
26453
- const modelDef = resolveModelForAgent(config2.models, config2.autoMode.defaultAgent, modelTier, config2.autoMode.defaultAgent);
26454
- const prompt = buildRefinementPrompt(criteria, codebaseContext, { testStrategy, testFramework });
26455
- let response;
26456
- try {
26457
- const completeResult = await _refineDeps.adapter.complete(prompt, {
26458
- jsonMode: true,
26459
- maxTokens: 4096,
26460
- model: modelDef.model,
26461
- config: config2,
26462
- featureName,
26463
- storyId,
26464
- workdir,
26465
- sessionRole: "refine",
26466
- timeoutMs: config2.acceptance?.timeoutMs ?? 120000
26467
- });
26468
- response = typeof completeResult === "string" ? completeResult : completeResult.output;
26469
- } catch (error48) {
26470
- const reason = errorMessage(error48);
26471
- logger.warn("refinement", "adapter.complete() failed, falling back to original criteria", {
26472
- storyId,
26473
- error: reason
26474
- });
26475
- return fallbackCriteria(criteria, storyId);
26476
- }
26477
- const parsed = parseRefinementResponse(response, criteria);
26478
- return parsed.map((item) => ({
26479
- ...item,
26480
- storyId: item.storyId || storyId
26481
- }));
26482
- }
26483
- function fallbackCriteria(criteria, storyId = "") {
26484
- return criteria.map((c) => ({
26485
- original: c,
26486
- refined: c,
26487
- testable: true,
26488
- storyId
26489
- }));
26490
- }
26491
- var _refineDeps;
26492
- var init_refinement = __esm(() => {
26493
- init_registry();
26494
- init_config();
26495
- init_logger2();
26496
- _refineDeps = {
26497
- adapter: {
26498
- complete: async (...args) => {
26499
- const options = args[1];
26500
- const config2 = options?.config;
26501
- if (!config2)
26502
- throw new Error("Refinement adapter requires config");
26503
- const adapter = createAgentRegistry(config2).getAgent(config2.autoMode.defaultAgent);
26504
- if (!adapter)
26505
- throw new Error(`Agent "${config2.autoMode.defaultAgent}" not found`);
26506
- return adapter.complete(...args);
26507
- }
26508
- }
26509
- };
26510
- });
26511
-
26512
26831
  // src/pipeline/stages/acceptance-setup.ts
26513
26832
  var exports_acceptance_setup = {};
26514
26833
  __export(exports_acceptance_setup, {
@@ -26734,7 +27053,8 @@ ${stderr}` };
26734
27053
  testStrategy: ctx.config.acceptance.testStrategy,
26735
27054
  testFramework: ctx.config.acceptance.testFramework,
26736
27055
  adapter: agent ?? undefined,
26737
- ..."implementationContext" in ctx && ctx.implementationContext ? { implementationContext: ctx.implementationContext } : {}
27056
+ ..."implementationContext" in ctx && ctx.implementationContext ? { implementationContext: ctx.implementationContext } : {},
27057
+ ..."previousFailure" in ctx && ctx.previousFailure ? { previousFailure: ctx.previousFailure } : {}
26738
27058
  });
26739
27059
  await _acceptanceSetupDeps.writeFile(testPath, result.testCode);
26740
27060
  }
@@ -27132,132 +27452,6 @@ var init_agents = __esm(() => {
27132
27452
  init_errors();
27133
27453
  });
27134
27454
 
27135
- // src/review/dialogue-prompts.ts
27136
- function buildReviewPrompt(diff, story, _semanticConfig) {
27137
- const criteria = story.acceptanceCriteria.map((c) => `- ${c}`).join(`
27138
- `);
27139
- return [
27140
- `Review the following code diff for story ${story.id}: ${story.title}`,
27141
- "",
27142
- "## Acceptance Criteria",
27143
- criteria,
27144
- "",
27145
- "## Diff",
27146
- diff,
27147
- "",
27148
- "Also flag any changes in the diff not required by the acceptance criteria above as out-of-scope findings.",
27149
- "Respond with JSON: { passed: boolean, findings: [...], findingReasoning: { [id]: string } }"
27150
- ].join(`
27151
- `);
27152
- }
27153
- function buildReReviewPrompt(updatedDiff, previousFindings) {
27154
- const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.ruleId}: ${f.message}`).join(`
27155
- `) : "(none)";
27156
- return [
27157
- "This is a follow-up re-review. Please review the updated diff below.",
27158
- "",
27159
- "## Previous Findings",
27160
- findingsList,
27161
- "",
27162
- "## Updated Diff",
27163
- updatedDiff,
27164
- "",
27165
- "Respond with JSON: { passed: boolean, findings: [...], findingReasoning: { [id]: string }, deltaSummary: string }",
27166
- "deltaSummary should describe which previous findings are resolved vs still present."
27167
- ].join(`
27168
- `);
27169
- }
27170
- function buildProposalsSection2(proposals) {
27171
- return proposals.map((p) => `### ${p.debater}
27172
- ${p.output}`).join(`
27173
-
27174
- `);
27175
- }
27176
- function buildCritiquesSection(critiques) {
27177
- if (critiques.length === 0)
27178
- return "";
27179
- return `
27180
-
27181
- ## Critiques
27182
- ${critiques.map((c, i) => `### Critique ${i + 1}
27183
- ${c}`).join(`
27184
-
27185
- `)}`;
27186
- }
27187
- function buildVoteTallyLine(ctx) {
27188
- if (!ctx.majorityVote)
27189
- return "";
27190
- const { passCount, failCount } = ctx.majorityVote;
27191
- const failOpenNote = ctx.resolverType === "majority-fail-open" ? " (unparseable proposals count as pass)" : " (unparseable proposals count as fail)";
27192
- return `
27193
-
27194
- The preliminary majority vote is: **${passCount} passed, ${failCount} failed**${failOpenNote}. Verify the failing findings with tools before giving your authoritative verdict.`;
27195
- }
27196
- function buildResolverFraming(ctx) {
27197
- switch (ctx.resolverType) {
27198
- case "majority-fail-closed":
27199
- case "majority-fail-open":
27200
- return "You are the authoritative reviewer resolving a debate. A preliminary vote was taken \u2014 see tally below. Verify disputed findings using tools (READ files, GREP for usage) and give your final verdict.";
27201
- case "synthesis":
27202
- return "You are a synthesis reviewer. Synthesize the debater proposals into a single, coherent, tool-verified verdict. Use READ and GREP to verify claims before ruling.";
27203
- case "custom":
27204
- return "You are the judge. Evaluate the debater proposals independently. Verify claims with tools (READ, GREP) and give your final authoritative verdict.";
27205
- default:
27206
- return "You are the reviewer. Evaluate the debater proposals and give your final authoritative verdict.";
27207
- }
27208
- }
27209
- function buildDebateResolverPrompt(proposals, critiques, diff, story, _semanticConfig, resolverContext) {
27210
- const criteria = story.acceptanceCriteria.map((c) => `- ${c}`).join(`
27211
- `);
27212
- const framing = buildResolverFraming(resolverContext);
27213
- const voteTally = buildVoteTallyLine(resolverContext);
27214
- const proposalsSection = buildProposalsSection2(proposals);
27215
- const critiquesSection = buildCritiquesSection(critiques);
27216
- return [
27217
- framing,
27218
- "",
27219
- `## Story ${story.id}: ${story.title}`,
27220
- "",
27221
- "## Acceptance Criteria",
27222
- criteria,
27223
- "",
27224
- "## Debater Proposals",
27225
- proposalsSection,
27226
- critiquesSection,
27227
- "",
27228
- "## Diff",
27229
- diff,
27230
- voteTally,
27231
- "",
27232
- "Respond with JSON: { passed: boolean, findings: [...], findingReasoning: { [id]: string } }"
27233
- ].filter((line) => line !== undefined).join(`
27234
- `);
27235
- }
27236
- function buildDebateReReviewPrompt(proposals, critiques, updatedDiff, previousFindings, resolverContext) {
27237
- const framing = buildResolverFraming(resolverContext);
27238
- const findingsList = previousFindings.length > 0 ? previousFindings.map((f) => `- ${f.ruleId}: ${f.message}`).join(`
27239
- `) : "(none)";
27240
- const proposalsSection = buildProposalsSection2(proposals);
27241
- const critiquesSection = buildCritiquesSection(critiques);
27242
- return [
27243
- `${framing} This is a re-review after implementer changes.`,
27244
- "",
27245
- "## Previous Findings",
27246
- findingsList,
27247
- "",
27248
- "## Updated Debater Proposals",
27249
- proposalsSection,
27250
- critiquesSection,
27251
- "",
27252
- "## Updated Diff",
27253
- updatedDiff,
27254
- "",
27255
- "Respond with JSON: { passed: boolean, findings: [...], findingReasoning: { [id]: string }, deltaSummary: string }",
27256
- "deltaSummary should describe which previous findings are resolved vs still present."
27257
- ].filter((line) => line !== undefined).join(`
27258
- `);
27259
- }
27260
-
27261
27455
  // src/review/dialogue.ts
27262
27456
  function extractDeltaSummary(rawOutput, previousFindings, newFindings) {
27263
27457
  const parsed = tryParseLLMJson(rawOutput);
@@ -27333,6 +27527,7 @@ function createReviewerSession(agent, storyId, workdir, featureName, _config) {
27333
27527
  generation: 1,
27334
27528
  pendingCompactionContext: null
27335
27529
  };
27530
+ const promptBuilder = new DebatePromptBuilder({ taskContext: "", outputFormat: "", stage: "review" }, { debaters: [], sessionMode: "stateful" });
27336
27531
  function resolveRunParams(semanticConfig) {
27337
27532
  const modelTier = semanticConfig.modelTier;
27338
27533
  const defaultAgent = _config.autoMode?.defaultAgent ?? "claude";
@@ -27367,7 +27562,7 @@ ${prompt}`,
27367
27562
  if (!active) {
27368
27563
  throw new NaxError(`[dialogue] ReviewerSession for story ${storyId} has been destroyed`, "REVIEWER_SESSION_DESTROYED", { stage: "review", storyId, featureName });
27369
27564
  }
27370
- const prompt = buildReviewPrompt(diff, story, semanticConfig);
27565
+ const prompt = promptBuilder.buildReviewPrompt(diff, story);
27371
27566
  const { modelTier, modelDef, timeoutSeconds } = resolveRunParams(semanticConfig);
27372
27567
  const { effectivePrompt, acpSessionName } = buildEffectiveRunArgs(prompt);
27373
27568
  const result = await agent.run({
@@ -27405,7 +27600,7 @@ ${prompt}`,
27405
27600
  });
27406
27601
  }
27407
27602
  const previousFindings = lastCheckResult.checkResult.findings;
27408
- const prompt = buildReReviewPrompt(updatedDiff, previousFindings);
27603
+ const prompt = promptBuilder.buildReReviewPrompt(updatedDiff, previousFindings);
27409
27604
  const { modelTier, modelDef, timeoutSeconds } = resolveRunParams(lastSemanticConfig);
27410
27605
  const { effectivePrompt, acpSessionName } = buildEffectiveRunArgs(prompt);
27411
27606
  const result = await agent.run({
@@ -27465,7 +27660,7 @@ ${prompt}`,
27465
27660
  if (!active) {
27466
27661
  throw new NaxError(`[dialogue] ReviewerSession for story ${storyId} has been destroyed`, "REVIEWER_SESSION_DESTROYED", { stage: "review", storyId, featureName });
27467
27662
  }
27468
- const prompt = buildDebateResolverPrompt(proposals, critiques, diff, story, semanticConfig, resolverContext);
27663
+ const prompt = promptBuilder.buildResolverPrompt(proposals, critiques, diff, story, resolverContext);
27469
27664
  const { modelTier, modelDef, timeoutSeconds } = resolveRunParams(semanticConfig);
27470
27665
  const { effectivePrompt, acpSessionName } = buildEffectiveRunArgs(prompt);
27471
27666
  const result = await agent.run({
@@ -27500,7 +27695,7 @@ ${prompt}`,
27500
27695
  throw new NaxError(`[dialogue] reReviewDebate() called before any resolveDebate() on story ${storyId}`, "NO_REVIEW_RESULT", { stage: "review", storyId });
27501
27696
  }
27502
27697
  const previousFindings = lastCheckResult.checkResult.findings;
27503
- const prompt = buildDebateReReviewPrompt(proposals, critiques, updatedDiff, previousFindings, resolverContext);
27698
+ const prompt = promptBuilder.buildReResolverPrompt(proposals, critiques, updatedDiff, previousFindings, resolverContext);
27504
27699
  const { modelTier, modelDef, timeoutSeconds } = resolveRunParams(lastSemanticConfig);
27505
27700
  const { effectivePrompt, acpSessionName } = buildEffectiveRunArgs(prompt);
27506
27701
  const result = await agent.run({
@@ -27552,6 +27747,7 @@ ${prompt}`,
27552
27747
  };
27553
27748
  }
27554
27749
  var init_dialogue = __esm(() => {
27750
+ init_prompt_builder();
27555
27751
  init_errors();
27556
27752
  });
27557
27753
 
@@ -36376,7 +36572,7 @@ var package_default;
36376
36572
  var init_package = __esm(() => {
36377
36573
  package_default = {
36378
36574
  name: "@nathapp/nax",
36379
- version: "0.60.0-canary.1",
36575
+ version: "0.60.1",
36380
36576
  description: "AI Coding Agent Orchestrator \u2014 loops until done",
36381
36577
  type: "module",
36382
36578
  bin: {
@@ -36456,8 +36652,8 @@ var init_version = __esm(() => {
36456
36652
  NAX_VERSION = package_default.version;
36457
36653
  NAX_COMMIT = (() => {
36458
36654
  try {
36459
- if (/^[0-9a-f]{6,10}$/.test("1de1f9cc"))
36460
- return "1de1f9cc";
36655
+ if (/^[0-9a-f]{6,10}$/.test("2b74a9ef"))
36656
+ return "2b74a9ef";
36461
36657
  } catch {}
36462
36658
  try {
36463
36659
  const result = Bun.spawnSync(["git", "rev-parse", "--short", "HEAD"], {
@@ -36773,196 +36969,6 @@ var init_crash_recovery = __esm(() => {
36773
36969
  init_crash_heartbeat();
36774
36970
  });
36775
36971
 
36776
- // src/acceptance/fix-generator.ts
36777
- function findRelatedStories(failedAC, prd) {
36778
- const relatedStoryIds = [];
36779
- for (const story of prd.userStories) {
36780
- for (const ac of story.acceptanceCriteria) {
36781
- if (ac.includes(failedAC)) {
36782
- relatedStoryIds.push(story.id);
36783
- break;
36784
- }
36785
- }
36786
- }
36787
- if (relatedStoryIds.length > 0) {
36788
- return relatedStoryIds;
36789
- }
36790
- const passedStories = prd.userStories.filter((s) => s.status === "passed").map((s) => s.id);
36791
- return passedStories.slice(0, 5);
36792
- }
36793
- function groupACsByRelatedStories(failedACs, prd) {
36794
- const groups = new Map;
36795
- for (const ac of failedACs) {
36796
- const related = findRelatedStories(ac, prd);
36797
- const key = [...related].sort().join(",");
36798
- if (!groups.has(key)) {
36799
- groups.set(key, { acs: [], relatedStories: related });
36800
- }
36801
- groups.get(key)?.acs.push(ac);
36802
- }
36803
- const result = Array.from(groups.values());
36804
- while (result.length > MAX_FIX_STORIES) {
36805
- result.sort((a, b) => a.acs.length - b.acs.length);
36806
- const smallest = result.shift();
36807
- if (!smallest)
36808
- break;
36809
- result[0].acs.push(...smallest.acs);
36810
- for (const s of smallest.relatedStories) {
36811
- if (!result[0].relatedStories.includes(s)) {
36812
- result[0].relatedStories.push(s);
36813
- }
36814
- }
36815
- }
36816
- return result;
36817
- }
36818
- function buildFixPrompt(batchedACs, acTextMap, testOutput, relatedStories, prd, testFilePath) {
36819
- const acList = batchedACs.map((ac) => `${ac}: ${acTextMap[ac] || "No description available"}`).join(`
36820
- `);
36821
- const relatedStoriesText = relatedStories.map((id) => {
36822
- const story = prd.userStories.find((s) => s.id === id);
36823
- if (!story)
36824
- return "";
36825
- return `${story.id}: ${story.title}
36826
- ${story.description}`;
36827
- }).filter(Boolean).join(`
36828
-
36829
- `);
36830
- const testFileSection = testFilePath ? `
36831
- ACCEPTANCE TEST FILE: ${testFilePath}
36832
- (Read this file first to understand what each test expects)
36833
- ` : "";
36834
- return `You are a debugging expert. Feature acceptance tests have failed.${testFileSection}
36835
- FAILED ACCEPTANCE CRITERIA (${batchedACs.length} total):
36836
- ${acList}
36837
-
36838
- TEST FAILURE OUTPUT:
36839
- ${testOutput.slice(0, 2000)}
36840
-
36841
- RELATED STORIES (implemented this functionality):
36842
- ${relatedStoriesText}
36843
-
36844
- Your task: Generate a fix description that will make these acceptance tests pass.
36845
-
36846
- Requirements:
36847
- 1. Read the acceptance test file first to understand what each failing test expects
36848
- 2. Identify the root cause based on the test failure output
36849
- 3. Find and fix the relevant implementation code (do NOT modify the test file)
36850
- 4. Write a clear, actionable fix description (2-4 sentences)
36851
- 5. Reference the relevant story IDs if needed
36852
-
36853
- Respond with ONLY the fix description (no JSON, no markdown, just the description text).`;
36854
- }
36855
- async function generateFixStories(adapter, options) {
36856
- const { failedACs, testOutput, prd, specContent, modelDef, testFilePath } = options;
36857
- const logger = getLogger();
36858
- const acTextMap = parseACTextFromSpec(specContent);
36859
- const groups = groupACsByRelatedStories(failedACs, prd);
36860
- const fixStories = [];
36861
- for (let i = 0;i < groups.length; i++) {
36862
- const { acs: batchedACs, relatedStories } = groups[i];
36863
- if (relatedStories.length === 0) {
36864
- logger.warn("acceptance", "[WARN] No related stories found for AC group \u2014 skipping", { batchedACs });
36865
- continue;
36866
- }
36867
- logger.info("acceptance", "Generating fix for AC group", { batchedACs });
36868
- const prompt = buildFixPrompt(batchedACs, acTextMap, testOutput, relatedStories, prd, testFilePath);
36869
- const relatedStory = prd.userStories.find((s) => relatedStories.includes(s.id) && s.workdir);
36870
- const workdir = relatedStory?.workdir;
36871
- try {
36872
- const fixResult = await adapter.complete(prompt, {
36873
- model: modelDef.model,
36874
- config: options.config,
36875
- featureName: options.prd.feature,
36876
- workdir: options.workdir,
36877
- sessionRole: "fix-gen",
36878
- timeoutMs: options.timeoutMs ?? options.config?.acceptance?.timeoutMs ?? 1800000
36879
- });
36880
- fixStories.push({
36881
- id: `US-FIX-${String(i + 1).padStart(3, "0")}`,
36882
- title: `Fix: ${batchedACs.join(", ")} \u2014 ${(acTextMap[batchedACs[0]] || "").slice(0, 40)}`,
36883
- failedAC: batchedACs[0],
36884
- batchedACs,
36885
- testOutput,
36886
- relatedStories,
36887
- description: typeof fixResult === "string" ? fixResult : fixResult.output,
36888
- testFilePath,
36889
- workdir
36890
- });
36891
- logger.info("acceptance", "[OK] Generated fix story", { storyId: fixStories[fixStories.length - 1].id });
36892
- } catch (error48) {
36893
- logger.warn("acceptance", "[WARN] Error generating fix", {
36894
- batchedACs,
36895
- error: error48.message
36896
- });
36897
- fixStories.push({
36898
- id: `US-FIX-${String(i + 1).padStart(3, "0")}`,
36899
- title: `Fix: ${batchedACs.join(", ")}`,
36900
- failedAC: batchedACs[0],
36901
- batchedACs,
36902
- testOutput,
36903
- relatedStories,
36904
- description: `Fix the implementation to make ${batchedACs.join(", ")} pass. Related stories: ${relatedStories.join(", ")}.`,
36905
- testFilePath,
36906
- workdir
36907
- });
36908
- }
36909
- }
36910
- return fixStories;
36911
- }
36912
- function parseACTextFromSpec(specContent) {
36913
- const map2 = {};
36914
- const lines = specContent.split(`
36915
- `);
36916
- for (const line of lines) {
36917
- const acMatch = line.match(/^\s*-?\s*(?:\[.\])?\s*(AC-\d+):\s*(.+)$/i);
36918
- if (acMatch) {
36919
- const id = acMatch[1].toUpperCase();
36920
- const text = acMatch[2].trim();
36921
- map2[id] = text;
36922
- }
36923
- }
36924
- return map2;
36925
- }
36926
- function convertFixStoryToUserStory(fixStory) {
36927
- const batchedACs = fixStory.batchedACs ?? [fixStory.failedAC];
36928
- const acList = batchedACs.join(", ");
36929
- const truncatedOutput = fixStory.testOutput.slice(0, 1000);
36930
- const testFilePath = fixStory.testFilePath ?? resolveAcceptanceTestFile();
36931
- const enrichedDescription = [
36932
- fixStory.description,
36933
- "",
36934
- `ACCEPTANCE TEST FILE: ${testFilePath}`,
36935
- `FAILED ACCEPTANCE CRITERIA: ${acList}`,
36936
- "",
36937
- "TEST FAILURE OUTPUT:",
36938
- truncatedOutput,
36939
- "",
36940
- "Instructions: Read the acceptance test file first to understand what each failing test expects.",
36941
- "Then find the relevant source code and fix the implementation.",
36942
- "Do NOT modify the test file."
36943
- ].join(`
36944
- `);
36945
- return {
36946
- id: fixStory.id,
36947
- title: fixStory.title,
36948
- description: enrichedDescription,
36949
- acceptanceCriteria: batchedACs.map((ac) => `Fix ${ac}`),
36950
- tags: ["fix", "acceptance-failure"],
36951
- dependencies: fixStory.relatedStories,
36952
- status: "pending",
36953
- passes: false,
36954
- escalations: [],
36955
- attempts: 0,
36956
- contextFiles: [],
36957
- workdir: fixStory.workdir
36958
- };
36959
- }
36960
- var MAX_FIX_STORIES = 8;
36961
- var init_fix_generator = __esm(() => {
36962
- init_logger2();
36963
- init_test_path();
36964
- });
36965
-
36966
36972
  // src/acceptance/content-loader.ts
36967
36973
  async function loadAcceptanceTestContent(pathsOrFallback) {
36968
36974
  if (!pathsOrFallback)
@@ -36984,13 +36990,6 @@ async function loadAcceptanceTestContent(pathsOrFallback) {
36984
36990
  return [];
36985
36991
  }
36986
36992
 
36987
- // src/acceptance/index.ts
36988
- var init_acceptance4 = __esm(() => {
36989
- init_refinement();
36990
- init_generator();
36991
- init_fix_generator();
36992
- });
36993
-
36994
36993
  // src/acceptance/fix-diagnosis.ts
36995
36994
  function parseImportStatements(content) {
36996
36995
  const importRegex = /import\s+(?:{[^}]+}|[^;]+)\s+from\s+["']([^"']+)["']/g;
@@ -37040,6 +37039,13 @@ ${f.content}
37040
37039
  SEMANTIC VERDICTS:
37041
37040
  ${lines.join(`
37042
37041
  `)}
37042
+ `;
37043
+ }
37044
+ let previousFailureSection = "";
37045
+ if (options.previousFailure && options.previousFailure.length > 0) {
37046
+ previousFailureSection = `
37047
+ PREVIOUS FIX ATTEMPTS:
37048
+ ${options.previousFailure}
37043
37049
  `;
37044
37050
  }
37045
37051
  return `You are a debugging expert. An acceptance test has failed.
@@ -37056,7 +37062,7 @@ ${options.testFileContent}
37056
37062
 
37057
37063
  SOURCE FILES (auto-detected from imports, up to ${MAX_FILE_LINES} lines each):
37058
37064
  ${sourceFilesSection}
37059
- ${verdictSection}
37065
+ ${verdictSection}${previousFailureSection}
37060
37066
  Respond with ONLY a JSON object in this exact format (no markdown, no extra text):
37061
37067
  {
37062
37068
  "verdict": "source_bug" | "test_bug" | "both",
@@ -37082,7 +37088,8 @@ async function diagnoseAcceptanceFailure(agent, options) {
37082
37088
  testOutput,
37083
37089
  testFileContent,
37084
37090
  sourceFiles: validSourceFiles,
37085
- semanticVerdicts: options.semanticVerdicts
37091
+ semanticVerdicts: options.semanticVerdicts,
37092
+ previousFailure: options.previousFailure
37086
37093
  });
37087
37094
  try {
37088
37095
  const timeoutSeconds = (config2.acceptance?.timeoutMs ?? 120000) / 1000;
@@ -37167,7 +37174,7 @@ async function executeSourceFix(agent, options) {
37167
37174
  if (!agent) {
37168
37175
  throw new Error("[fix-executor] agent is required");
37169
37176
  }
37170
- const { testOutput, testFileContent, diagnosis, config: config2, workdir, featureName, storyId, acceptanceTestPath } = options;
37177
+ const { config: config2, workdir, featureName, storyId } = options;
37171
37178
  const modelDef = resolveModelForAgent(config2.models, config2.autoMode.defaultAgent, config2.acceptance.fix.fixModel, config2.autoMode.defaultAgent);
37172
37179
  const sessionName = buildSessionName(workdir, featureName, storyId, "source-fix");
37173
37180
  const prompt = buildSourceFixPrompt(options);
@@ -37191,23 +37198,77 @@ async function executeSourceFix(agent, options) {
37191
37198
  cost: result.estimatedCost
37192
37199
  };
37193
37200
  }
37201
+ function buildTestFixPrompt(options) {
37202
+ const { testOutput, diagnosis, acceptanceTestPath, testFileContent, failedACs, previousFailure } = options;
37203
+ let prompt = `ACCEPTANCE TEST BUG \u2014 surgical fix required.
37204
+
37205
+ `;
37206
+ prompt += `FAILING ACS: ${failedACs.join(", ")}
37207
+
37208
+ `;
37209
+ prompt += `TEST OUTPUT:
37210
+ ${testOutput}
37211
+
37212
+ `;
37213
+ if (diagnosis.reasoning) {
37214
+ prompt += `DIAGNOSIS:
37215
+ ${diagnosis.reasoning}
37216
+
37217
+ `;
37218
+ }
37219
+ if (previousFailure && previousFailure.length > 0) {
37220
+ prompt += `PREVIOUS FAILED ATTEMPTS:
37221
+ ${previousFailure}
37222
+
37223
+ `;
37224
+ }
37225
+ prompt += `ACCEPTANCE TEST FILE: ${acceptanceTestPath}
37226
+
37227
+ `;
37228
+ prompt += `\`\`\`typescript
37229
+ ${testFileContent}
37230
+ \`\`\`
37231
+
37232
+ `;
37233
+ prompt += "Fix ONLY the failing test assertions for the ACs listed above. ";
37234
+ prompt += "Do NOT modify passing tests. Do NOT modify source code. ";
37235
+ prompt += "Edit the test file in place.";
37236
+ return prompt;
37237
+ }
37238
+ async function executeTestFix(agent, options) {
37239
+ if (!agent) {
37240
+ throw new Error("[fix-executor] agent is required");
37241
+ }
37242
+ const { config: config2, workdir, featureName, storyId } = options;
37243
+ const modelDef = resolveModelForAgent(config2.models, config2.autoMode.defaultAgent, config2.acceptance.fix.fixModel, config2.autoMode.defaultAgent);
37244
+ const sessionName = buildSessionName(workdir, featureName, storyId, "test-fix");
37245
+ const prompt = buildTestFixPrompt(options);
37246
+ const timeoutSeconds = config2.execution?.sessionTimeoutSeconds ?? 3600;
37247
+ const runOptions = {
37248
+ prompt,
37249
+ workdir,
37250
+ modelTier: undefined,
37251
+ modelDef,
37252
+ timeoutSeconds,
37253
+ sessionRole: "test-fix",
37254
+ acpSessionName: sessionName,
37255
+ featureName,
37256
+ storyId,
37257
+ config: config2,
37258
+ pipelineStage: "acceptance"
37259
+ };
37260
+ const result = await agent.run(runOptions);
37261
+ return {
37262
+ success: result.success,
37263
+ cost: result.estimatedCost
37264
+ };
37265
+ }
37194
37266
  var init_fix_executor = __esm(() => {
37195
37267
  init_adapter();
37196
37268
  });
37197
37269
 
37198
- // src/execution/lifecycle/acceptance-loop.ts
37199
- var exports_acceptance_loop = {};
37200
- __export(exports_acceptance_loop, {
37201
- runFixRouting: () => runFixRouting,
37202
- runAcceptanceLoop: () => runAcceptanceLoop,
37203
- regenerateAcceptanceTest: () => regenerateAcceptanceTest,
37204
- loadAcceptanceTestContent: () => loadAcceptanceTestContent2,
37205
- isTestLevelFailure: () => isTestLevelFailure,
37206
- isStubTestFile: () => isStubTestFile,
37207
- _regenerateDeps: () => _regenerateDeps,
37208
- _acceptanceLoopDeps: () => _acceptanceLoopDeps
37209
- });
37210
- import path15, { join as join43 } from "path";
37270
+ // src/execution/lifecycle/acceptance-helpers.ts
37271
+ import path15 from "path";
37211
37272
  function isStubTestFile(content) {
37212
37273
  return /expect\s*\(\s*true\s*\)\s*\.\s*toBe\s*\(\s*(?:false|true)\s*\)/.test(content);
37213
37274
  }
@@ -37254,73 +37315,7 @@ async function loadAcceptanceTestContent2(featureDir, testPaths, configuredTestP
37254
37315
  function buildResult(success2, prd, totalCost, iterations, storiesCompleted, prdDirty, failedACs, retries) {
37255
37316
  return { success: success2, prd, totalCost, iterations, storiesCompleted, prdDirty, failedACs, retries };
37256
37317
  }
37257
- async function generateAndAddFixStories(ctx, failures, prd) {
37258
- const logger = getSafeLogger();
37259
- const agent = (ctx.agentGetFn ?? _acceptanceLoopDeps.getAgent)(ctx.config.autoMode.defaultAgent);
37260
- if (!agent) {
37261
- logger?.error("acceptance", "Agent not found, cannot generate fix stories");
37262
- return null;
37263
- }
37264
- const modelDef = resolveModelForAgent(ctx.config.models, ctx.config.autoMode.defaultAgent, ctx.config.analyze.model, ctx.config.autoMode.defaultAgent);
37265
- const testFilePath = ctx.featureDir ? resolveAcceptanceFeatureTestPath(ctx.featureDir, ctx.config.acceptance.testPath, ctx.config.project?.language) : undefined;
37266
- const fixStories = await generateFixStories(agent, {
37267
- failedACs: failures.failedACs,
37268
- testOutput: failures.testOutput,
37269
- prd,
37270
- specContent: await loadSpecContent(ctx.featureDir),
37271
- workdir: ctx.workdir,
37272
- modelDef,
37273
- config: ctx.config,
37274
- testFilePath,
37275
- timeoutMs: ctx.config.acceptance?.timeoutMs
37276
- });
37277
- if (fixStories.length === 0) {
37278
- logger?.error("acceptance", "Failed to generate fix stories");
37279
- return null;
37280
- }
37281
- logger?.info("acceptance", `Generated ${fixStories.length} fix stories`);
37282
- for (const fixStory of fixStories) {
37283
- const userStory = convertFixStoryToUserStory(fixStory);
37284
- prd.userStories.push(userStory);
37285
- logger?.debug("acceptance", `Fix story added: ${userStory.id}: ${userStory.title}`);
37286
- }
37287
- return fixStories;
37288
- }
37289
- async function executeFixStory(ctx, story, prd, iterations) {
37290
- const logger = getSafeLogger();
37291
- const routing = await resolveRouting(story, ctx.config, ctx.pluginRegistry);
37292
- logger?.info("acceptance", `Starting fix story: ${story.id}`, { storyId: story.id, storyTitle: story.title });
37293
- await fireHook(ctx.hooks, "on-story-start", hookCtx(ctx.feature, {
37294
- storyId: story.id,
37295
- model: routing.modelTier,
37296
- agent: ctx.config.autoMode.defaultAgent,
37297
- iteration: iterations
37298
- }), ctx.workdir);
37299
- const fixEffectiveConfig = story.workdir ? await loadConfigForWorkdir(join43(ctx.workdir, ".nax", "config.json"), story.workdir) : ctx.config;
37300
- const fixContext = {
37301
- config: fixEffectiveConfig,
37302
- rootConfig: ctx.config,
37303
- prd,
37304
- story,
37305
- stories: [story],
37306
- routing,
37307
- projectDir: ctx.workdir,
37308
- workdir: story.workdir ? join43(ctx.workdir, story.workdir) : ctx.workdir,
37309
- featureDir: ctx.featureDir,
37310
- hooks: ctx.hooks,
37311
- plugins: ctx.pluginRegistry,
37312
- storyStartTime: new Date().toISOString(),
37313
- agentGetFn: ctx.agentGetFn
37314
- };
37315
- const result = await runPipeline(defaultPipeline, fixContext, ctx.eventEmitter);
37316
- logger?.info("acceptance", `Fix story ${story.id} ${result.success ? "passed" : "failed"}`);
37317
- return {
37318
- success: result.success,
37319
- cost: result.context.agentResult?.estimatedCost || 0,
37320
- metrics: result.context.storyMetrics
37321
- };
37322
- }
37323
- async function regenerateAcceptanceTest(testPath, acceptanceContext) {
37318
+ async function regenerateAcceptanceTest(testPath, acceptanceContext, previousFailure) {
37324
37319
  const logger = getSafeLogger();
37325
37320
  const bakPath = `${testPath}.bak`;
37326
37321
  const content = await Bun.file(testPath).text();
@@ -37364,7 +37359,8 @@ async function regenerateAcceptanceTest(testPath, acceptanceContext) {
37364
37359
  }
37365
37360
  const contextForSetup = {
37366
37361
  ...acceptanceContext,
37367
- ...implementationContext ? { implementationContext } : {}
37362
+ ...implementationContext ? { implementationContext } : {},
37363
+ ...previousFailure ? { previousFailure } : {}
37368
37364
  };
37369
37365
  await _regenerateDeps.acceptanceSetupExecute(contextForSetup);
37370
37366
  if (!await Bun.file(testPath).exists()) {
@@ -37374,300 +37370,179 @@ async function regenerateAcceptanceTest(testPath, acceptanceContext) {
37374
37370
  logger?.info("acceptance", "Acceptance test regenerated successfully");
37375
37371
  return true;
37376
37372
  }
37377
- async function runFixRouting(options) {
37373
+ var _regenerateDeps;
37374
+ var init_acceptance_helpers = __esm(() => {
37375
+ init_logger2();
37376
+ _regenerateDeps = {
37377
+ spawnGitDiff: async (workdir, gitRef) => {
37378
+ const proc = Bun.spawn(["git", "diff", "--name-only", gitRef], {
37379
+ cwd: workdir,
37380
+ stdout: "pipe",
37381
+ stderr: "pipe"
37382
+ });
37383
+ const [, stdout] = await Promise.all([proc.exited, new Response(proc.stdout).text()]);
37384
+ return stdout.trim();
37385
+ },
37386
+ readFile: async (filePath) => Bun.file(filePath).text(),
37387
+ acceptanceSetupExecute: async (ctx) => {
37388
+ const { acceptanceSetupStage: acceptanceSetupStage2 } = await Promise.resolve().then(() => (init_acceptance_setup(), exports_acceptance_setup));
37389
+ await acceptanceSetupStage2.execute(ctx);
37390
+ }
37391
+ };
37392
+ });
37393
+
37394
+ // src/execution/lifecycle/acceptance-fix.ts
37395
+ async function resolveAcceptanceDiagnosis(opts) {
37378
37396
  const logger = getSafeLogger();
37379
- const { ctx, failures, acceptanceContext } = options;
37380
- const prd = options.prd ?? ctx.prd;
37381
- const semanticVerdicts = options.semanticVerdicts;
37382
- if (semanticVerdicts && semanticVerdicts.length > 0 && semanticVerdicts.every((v) => v.passed)) {
37383
- const verdictCount = semanticVerdicts.length;
37384
- const storyId2 = acceptanceContext?.story?.id ?? prd?.userStories?.[0]?.id ?? "unknown";
37385
- logger?.info("acceptance", "All semantic verdicts passed \u2014 routing to test regeneration", {
37386
- storyId: storyId2,
37387
- verdictCount
37397
+ const { agent, failures, totalACs, strategy, semanticVerdicts, diagnosisOpts, previousFailure } = opts;
37398
+ const storyId = diagnosisOpts.storyId;
37399
+ if (strategy === "implement-only") {
37400
+ logger?.info("acceptance.diagnosis", "Fast path: implement-only strategy \u2192 source_bug", { storyId });
37401
+ return {
37402
+ verdict: "source_bug",
37403
+ reasoning: "implement-only strategy \u2014 skipping diagnosis",
37404
+ confidence: 1
37405
+ };
37406
+ }
37407
+ if (semanticVerdicts.length > 0 && semanticVerdicts.every((v) => v.passed)) {
37408
+ logger?.info("acceptance.diagnosis", "Fast path: all semantic verdicts passed \u2192 test_bug", {
37409
+ storyId,
37410
+ verdictCount: semanticVerdicts.length
37411
+ });
37412
+ return {
37413
+ verdict: "test_bug",
37414
+ reasoning: `Semantic review confirmed all ${semanticVerdicts.length} ACs are implemented \u2014 failure is a test generation issue`,
37415
+ confidence: 1
37416
+ };
37417
+ }
37418
+ if (isTestLevelFailure(failures.failedACs, totalACs)) {
37419
+ logger?.info("acceptance.diagnosis", "Fast path: test-level failure heuristic \u2192 test_bug", {
37420
+ storyId,
37421
+ failedCount: failures.failedACs.length,
37422
+ totalACs
37388
37423
  });
37389
- if (!ctx.featureDir || !acceptanceContext) {
37390
- logger?.warn("acceptance", "Cannot regenerate test \u2014 featureDir or acceptanceContext missing", { storyId: storyId2 });
37391
- return {
37392
- fixed: false,
37393
- cost: 0,
37394
- prdDirty: false,
37395
- verdict: "test_bug",
37396
- confidence: 1,
37397
- reasoning: "Semantic review confirmed all ACs are implemented \u2014 acceptance test failure is a test generation issue"
37398
- };
37399
- }
37400
- const regenOutcome = await _acceptanceLoopDeps.executeTestRegen(ctx, acceptanceContext);
37401
- logger?.info("acceptance.test-regen", "Test regeneration completed", { storyId: storyId2, outcome: regenOutcome });
37402
- if (regenOutcome === "passed") {
37403
- return { fixed: true, cost: 0, prdDirty: true };
37404
- }
37405
37424
  return {
37406
- fixed: false,
37407
- cost: 0,
37408
- prdDirty: regenOutcome !== "no_test_file",
37409
37425
  verdict: "test_bug",
37410
- confidence: 1,
37411
- reasoning: "Semantic review confirmed all ACs are implemented \u2014 acceptance test failure is a test generation issue"
37426
+ reasoning: `Test-level failure: ${failures.failedACs.length}/${totalACs} ACs failed (>80% threshold or AC-ERROR sentinel)`,
37427
+ confidence: 0.9
37412
37428
  };
37413
37429
  }
37430
+ return await diagnoseAcceptanceFailure(agent, {
37431
+ ...diagnosisOpts,
37432
+ semanticVerdicts,
37433
+ previousFailure
37434
+ });
37435
+ }
37436
+ async function applyFix(opts) {
37437
+ const logger = getSafeLogger();
37438
+ const { ctx, failures, diagnosis, previousFailure } = opts;
37439
+ const storyId = ctx.prd.userStories[0]?.id ?? "unknown";
37414
37440
  const agentName = ctx.config.autoMode.defaultAgent;
37415
- const agent = (ctx.agentGetFn ?? _acceptanceLoopDeps.getAgent)(agentName);
37416
- const strategy = ctx.config.acceptance.fix?.strategy ?? "diagnose-first";
37417
- const fixMaxRetries = ctx.config.acceptance.fix?.maxRetries ?? 2;
37441
+ const agent = (ctx.agentGetFn ?? _applyFixDeps.getAgent)(agentName);
37442
+ if (!agent) {
37443
+ logger?.error("acceptance.applyFix", "Agent not found", { storyId, agentName });
37444
+ return { cost: 0 };
37445
+ }
37418
37446
  const testPaths = ctx.acceptanceTestPaths;
37419
- let testEntries;
37447
+ let testFileContent = "";
37448
+ let acceptanceTestPath = "";
37420
37449
  if (testPaths && testPaths.length > 0) {
37421
37450
  const pathStrings = testPaths.map((p) => typeof p === "string" ? p : p.testPath);
37422
37451
  const moduleEntries = await loadAcceptanceTestContent(pathStrings);
37423
- testEntries = moduleEntries.map((e) => ({ content: e.content, path: e.testPath }));
37424
- } else {
37425
- const fallbackPath = ctx.featureDir ? resolveAcceptanceFeatureTestPath(ctx.featureDir, ctx.config.acceptance.testPath, ctx.config.project?.language) : undefined;
37426
- const moduleEntries = await loadAcceptanceTestContent(fallbackPath);
37427
- testEntries = moduleEntries.map((e) => ({ content: e.content, path: e.testPath }));
37428
- }
37429
- const primaryEntry = testEntries[0] ?? { content: "", path: "" };
37430
- const testFileContent = primaryEntry.content;
37431
- const acceptanceTestPath = primaryEntry.path;
37432
- const firstStory = prd?.userStories?.[0];
37433
- const storyId = firstStory?.id ?? "unknown";
37434
- if (failures.failedACs.length === 0) {
37435
- return { fixed: true, cost: 0, prdDirty: false };
37436
- }
37437
- if (strategy === "implement-only") {
37438
- logger?.info("acceptance", "Strategy is implement-only \u2014 executing source fix directly");
37439
- if (!agent) {
37440
- logger?.error("acceptance", "Agent not found for fix routing");
37441
- return { fixed: false, cost: 0, prdDirty: false };
37442
- }
37443
- let fixAttempts = 0;
37444
- while (fixAttempts < fixMaxRetries) {
37445
- fixAttempts++;
37446
- logger?.info("acceptance", `Source fix attempt ${fixAttempts}/${fixMaxRetries}`);
37447
- const defaultDiagnosis = {
37448
- verdict: "source_bug",
37449
- reasoning: "implement-only strategy \u2014 skipping diagnosis",
37450
- confidence: 1
37451
- };
37452
- const fixResult = await executeSourceFix(agent, {
37453
- testOutput: failures.testOutput,
37454
- testFileContent,
37455
- diagnosis: defaultDiagnosis,
37456
- config: ctx.config,
37457
- workdir: ctx.workdir,
37458
- featureName: ctx.feature,
37459
- storyId,
37460
- acceptanceTestPath
37461
- });
37462
- logger?.info("acceptance.source-fix", "Source fix completed", {
37463
- success: fixResult.success,
37464
- cost: fixResult.cost,
37465
- attempt: fixAttempts
37466
- });
37467
- if (fixResult.success) {
37468
- return { fixed: true, cost: fixResult.cost, prdDirty: false };
37469
- }
37470
- logger?.warn("acceptance.source-fix", "Source fix attempt failed", {
37471
- attempt: fixAttempts,
37472
- maxRetries: fixMaxRetries,
37473
- cost: fixResult.cost,
37474
- willRetry: fixAttempts < fixMaxRetries
37475
- });
37476
- if (fixAttempts >= fixMaxRetries) {
37477
- logger?.error("acceptance", `Source fix failed after ${fixMaxRetries} attempts`);
37478
- break;
37479
- }
37452
+ if (moduleEntries.length > 0) {
37453
+ testFileContent = moduleEntries[0].content;
37454
+ acceptanceTestPath = moduleEntries[0].testPath;
37480
37455
  }
37481
- return { fixed: false, cost: 0, prdDirty: false };
37482
- }
37483
- logger?.info("acceptance", "Strategy is diagnose-first \u2014 running diagnosis");
37484
- const diagnosis = await diagnoseAcceptanceFailure(agent, {
37485
- testOutput: failures.testOutput,
37486
- testFileContent,
37487
- config: ctx.config,
37488
- workdir: ctx.workdir,
37489
- featureName: ctx.feature,
37490
- storyId,
37491
- semanticVerdicts: options.semanticVerdicts
37492
- });
37493
- const diagnosisCost = diagnosis.cost ?? 0;
37494
- logger?.info("acceptance.diagnosis", "Diagnosis complete", {
37495
- verdict: diagnosis.verdict,
37496
- confidence: diagnosis.confidence,
37497
- reasoning: diagnosis.reasoning
37498
- });
37499
- if (diagnosis.verdict === "source_bug") {
37500
- logger?.info("acceptance", "Diagnosis: source_bug \u2014 executing source fix");
37501
- if (!agent) {
37502
- logger?.error("acceptance", "Agent not found for source fix execution");
37503
- return { fixed: false, cost: diagnosisCost, prdDirty: false };
37504
- }
37505
- let fixAttempts = 0;
37506
- while (fixAttempts < fixMaxRetries) {
37507
- fixAttempts++;
37508
- logger?.info("acceptance", `Source fix attempt ${fixAttempts}/${fixMaxRetries}`);
37509
- const fixResult = await executeSourceFix(agent, {
37510
- testOutput: failures.testOutput,
37511
- testFileContent,
37512
- diagnosis,
37513
- config: ctx.config,
37514
- workdir: ctx.workdir,
37515
- featureName: ctx.feature,
37516
- storyId,
37517
- acceptanceTestPath
37518
- });
37519
- logger?.info("acceptance.source-fix", "Source fix completed", {
37520
- success: fixResult.success,
37521
- cost: fixResult.cost,
37522
- attempt: fixAttempts
37523
- });
37524
- if (fixResult.success) {
37525
- return { fixed: true, cost: fixResult.cost + diagnosisCost, prdDirty: false };
37526
- }
37527
- logger?.warn("acceptance.source-fix", "Source fix attempt failed", {
37528
- attempt: fixAttempts,
37529
- maxRetries: fixMaxRetries,
37530
- cost: fixResult.cost,
37531
- willRetry: fixAttempts < fixMaxRetries
37532
- });
37533
- if (fixAttempts >= fixMaxRetries) {
37534
- logger?.error("acceptance", `Source fix failed after ${fixMaxRetries} attempts`);
37535
- break;
37536
- }
37456
+ } else if (ctx.featureDir) {
37457
+ const fallbackPath = resolveAcceptanceFeatureTestPath(ctx.featureDir, ctx.config.acceptance.testPath, ctx.config.project?.language);
37458
+ const moduleEntries = await loadAcceptanceTestContent(fallbackPath);
37459
+ if (moduleEntries.length > 0) {
37460
+ testFileContent = moduleEntries[0].content;
37461
+ acceptanceTestPath = moduleEntries[0].testPath;
37537
37462
  }
37538
- return { fixed: false, cost: diagnosisCost, prdDirty: false };
37539
37463
  }
37540
- if (diagnosis.verdict === "test_bug") {
37541
- logger?.info("acceptance", "Diagnosis: test_bug \u2014 regenerating acceptance test");
37542
- if (!ctx.featureDir) {
37543
- logger?.error("acceptance", "Cannot regenerate test without featureDir");
37544
- return { fixed: false, cost: diagnosisCost, prdDirty: false };
37545
- }
37546
- const testPath = await findExistingAcceptanceTestPath({
37547
- acceptanceTestPaths: ctx.acceptanceTestPaths,
37548
- featureDir: ctx.featureDir,
37549
- testPathConfig: ctx.config.acceptance.testPath,
37550
- language: ctx.config.project?.language
37464
+ let totalCost = 0;
37465
+ if (diagnosis.verdict === "source_bug" || diagnosis.verdict === "both") {
37466
+ logger?.info("acceptance.applyFix", "Applying source fix", { storyId, verdict: diagnosis.verdict });
37467
+ const sourceResult = await _applyFixDeps.executeSourceFix(agent, {
37468
+ testOutput: failures.testOutput,
37469
+ testFileContent,
37470
+ diagnosis,
37471
+ config: ctx.config,
37472
+ workdir: ctx.workdir,
37473
+ featureName: ctx.feature,
37474
+ storyId,
37475
+ acceptanceTestPath
37551
37476
  });
37552
- if (!testPath) {
37553
- logger?.error("acceptance", "Acceptance test file not found for regeneration", {
37554
- candidates: resolveAcceptanceTestCandidates({
37555
- acceptanceTestPaths: ctx.acceptanceTestPaths,
37556
- featureDir: ctx.featureDir,
37557
- testPathConfig: ctx.config.acceptance.testPath,
37558
- language: ctx.config.project?.language
37559
- })
37560
- });
37561
- return { fixed: false, cost: diagnosisCost, prdDirty: false };
37562
- }
37563
- const regenerated = await regenerateAcceptanceTest(testPath, acceptanceContext);
37564
- logger?.info("acceptance.test-regen", "Test regeneration completed", {
37565
- outcome: regenerated ? "success" : "failure"
37477
+ totalCost += sourceResult.cost;
37478
+ logger?.info("acceptance.source-fix", "Source fix completed", {
37479
+ storyId,
37480
+ success: sourceResult.success,
37481
+ cost: sourceResult.cost
37566
37482
  });
37567
- if (!regenerated) {
37568
- return { fixed: false, cost: diagnosisCost, prdDirty: false };
37569
- }
37570
- const { acceptanceStage: acceptanceStage2 } = await Promise.resolve().then(() => (init_acceptance(), exports_acceptance));
37571
- const acceptanceResult = await acceptanceStage2.execute(acceptanceContext);
37572
- if (acceptanceResult.action === "continue") {
37573
- logger?.info("acceptance", "Acceptance passed after test regeneration");
37574
- return { fixed: true, cost: diagnosisCost, prdDirty: true };
37575
- }
37576
- logger?.warn("acceptance", "Acceptance still failing after test regeneration");
37577
- return { fixed: false, cost: diagnosisCost, prdDirty: true };
37578
37483
  }
37579
- if (diagnosis.verdict === "both") {
37580
- logger?.info("acceptance", "Diagnosis: both \u2014 executing source fix then regenerating test if needed");
37581
- if (!agent) {
37582
- logger?.error("acceptance", "Agent not found for source fix execution");
37583
- return { fixed: false, cost: diagnosisCost, prdDirty: false };
37584
- }
37585
- let sourceFixSuccess = false;
37586
- let sourceFixCost = 0;
37587
- let fixAttempts = 0;
37588
- while (fixAttempts < fixMaxRetries && !sourceFixSuccess) {
37589
- fixAttempts++;
37590
- logger?.info("acceptance", `Source fix attempt ${fixAttempts}/${fixMaxRetries}`);
37591
- const fixResult = await executeSourceFix(agent, {
37592
- testOutput: failures.testOutput,
37593
- testFileContent,
37594
- diagnosis,
37595
- config: ctx.config,
37596
- workdir: ctx.workdir,
37597
- featureName: ctx.feature,
37598
- storyId,
37599
- acceptanceTestPath
37600
- });
37601
- logger?.info("acceptance.source-fix", "Source fix completed", {
37602
- success: fixResult.success,
37603
- cost: fixResult.cost,
37604
- attempt: fixAttempts
37605
- });
37606
- sourceFixSuccess = fixResult.success;
37607
- sourceFixCost += fixResult.cost;
37608
- if (fixResult.success) {
37609
- break;
37610
- }
37611
- logger?.warn("acceptance.source-fix", "Source fix attempt failed", {
37612
- attempt: fixAttempts,
37613
- maxRetries: fixMaxRetries,
37614
- cost: fixResult.cost,
37615
- willRetry: fixAttempts < fixMaxRetries
37616
- });
37617
- if (fixAttempts >= fixMaxRetries) {
37618
- logger?.error("acceptance", `Source fix failed after ${fixMaxRetries} attempts`);
37619
- break;
37620
- }
37621
- }
37622
- if (!sourceFixSuccess) {
37623
- return { fixed: false, cost: sourceFixCost + diagnosisCost, prdDirty: false };
37624
- }
37625
- logger?.info("acceptance", "Source fix succeeded \u2014 re-running acceptance to verify");
37626
- const { acceptanceStage: acceptanceStage2 } = await Promise.resolve().then(() => (init_acceptance(), exports_acceptance));
37627
- const acceptanceResult = await acceptanceStage2.execute(acceptanceContext);
37628
- if (acceptanceResult.action === "continue") {
37629
- logger?.info("acceptance", "Acceptance passed after source fix");
37630
- return { fixed: true, cost: sourceFixCost + diagnosisCost, prdDirty: false };
37631
- }
37632
- logger?.info("acceptance", "Acceptance still failing after source fix \u2014 regenerating test");
37633
- if (!ctx.featureDir) {
37634
- logger?.error("acceptance", "Cannot regenerate test without featureDir");
37635
- return { fixed: false, cost: sourceFixCost + diagnosisCost, prdDirty: false };
37636
- }
37637
- const testPath = await findExistingAcceptanceTestPath({
37638
- acceptanceTestPaths: ctx.acceptanceTestPaths,
37639
- featureDir: ctx.featureDir,
37640
- testPathConfig: ctx.config.acceptance.testPath,
37641
- language: ctx.config.project?.language
37484
+ if (diagnosis.verdict === "test_bug" || diagnosis.verdict === "both") {
37485
+ logger?.info("acceptance.applyFix", "Applying test fix", { storyId, verdict: diagnosis.verdict });
37486
+ const testResult = await _applyFixDeps.executeTestFix(agent, {
37487
+ testOutput: failures.testOutput,
37488
+ testFileContent,
37489
+ failedACs: failures.failedACs,
37490
+ diagnosis,
37491
+ config: ctx.config,
37492
+ workdir: ctx.workdir,
37493
+ featureName: ctx.feature,
37494
+ storyId,
37495
+ acceptanceTestPath,
37496
+ previousFailure
37642
37497
  });
37643
- if (!testPath) {
37644
- logger?.error("acceptance", "Acceptance test file not found for regeneration", {
37645
- candidates: resolveAcceptanceTestCandidates({
37646
- acceptanceTestPaths: ctx.acceptanceTestPaths,
37647
- featureDir: ctx.featureDir,
37648
- testPathConfig: ctx.config.acceptance.testPath,
37649
- language: ctx.config.project?.language
37650
- })
37651
- });
37652
- return { fixed: false, cost: sourceFixCost + diagnosisCost, prdDirty: false };
37653
- }
37654
- const regenerated = await regenerateAcceptanceTest(testPath, acceptanceContext);
37655
- logger?.info("acceptance.test-regen", "Test regeneration completed", {
37656
- outcome: regenerated ? "success" : "failure"
37498
+ totalCost += testResult.cost;
37499
+ logger?.info("acceptance.test-fix", "Test fix completed", {
37500
+ storyId,
37501
+ success: testResult.success,
37502
+ cost: testResult.cost
37657
37503
  });
37658
- return { fixed: regenerated, cost: sourceFixCost + diagnosisCost, prdDirty: regenerated };
37659
37504
  }
37660
- return { fixed: false, cost: diagnosisCost, prdDirty: false };
37505
+ return { cost: totalCost };
37661
37506
  }
37507
+ var _applyFixDeps;
37508
+ var init_acceptance_fix = __esm(() => {
37509
+ init_fix_diagnosis();
37510
+ init_fix_executor();
37511
+ init_test_path();
37512
+ init_registry();
37513
+ init_logger2();
37514
+ init_acceptance_helpers();
37515
+ _applyFixDeps = {
37516
+ getAgent,
37517
+ executeSourceFix,
37518
+ executeTestFix
37519
+ };
37520
+ });
37521
+
37522
+ // src/execution/lifecycle/acceptance-loop.ts
37523
+ var exports_acceptance_loop = {};
37524
+ __export(exports_acceptance_loop, {
37525
+ runAcceptanceLoop: () => runAcceptanceLoop,
37526
+ regenerateAcceptanceTest: () => regenerateAcceptanceTest,
37527
+ loadSpecContent: () => loadSpecContent,
37528
+ loadAcceptanceTestContent: () => loadAcceptanceTestContent2,
37529
+ isTestLevelFailure: () => isTestLevelFailure,
37530
+ isStubTestFile: () => isStubTestFile,
37531
+ buildResult: () => buildResult,
37532
+ _regenerateDeps: () => _regenerateDeps,
37533
+ _acceptanceLoopDeps: () => _acceptanceLoopDeps
37534
+ });
37662
37535
  async function runAcceptanceLoop(ctx) {
37663
37536
  const logger = getSafeLogger();
37664
37537
  const maxRetries = ctx.config.acceptance.maxRetries;
37665
37538
  let acceptanceRetries = 0;
37666
- let prd = ctx.prd;
37539
+ let stubRegenCount = 0;
37540
+ let previousFailure = "";
37541
+ const prd = ctx.prd;
37667
37542
  let totalCost = ctx.totalCost;
37668
- let iterations = ctx.iterations;
37669
- let storiesCompleted = ctx.storiesCompleted;
37670
- let prdDirty = false;
37543
+ const iterations = ctx.iterations;
37544
+ const storiesCompleted = ctx.storiesCompleted;
37545
+ const prdDirty = false;
37671
37546
  logger?.info("acceptance", "All stories complete, running acceptance validation");
37672
37547
  while (acceptanceRetries < maxRetries) {
37673
37548
  const firstStory = prd.userStories[0];
@@ -37704,18 +37579,16 @@ async function runAcceptanceLoop(ctx) {
37704
37579
  const failures = acceptanceContext.acceptanceFailures;
37705
37580
  if (!failures || failures.failedACs.length === 0) {
37706
37581
  logger?.error("acceptance", "Acceptance tests failed but no specific failures detected");
37707
- logger?.warn("acceptance", "Manual intervention required");
37708
37582
  await fireHook(ctx.hooks, "on-pause", hookCtx(ctx.feature, { reason: "Acceptance tests failed (no failures detected)", cost: totalCost }), ctx.workdir);
37709
37583
  return buildResult(false, prd, totalCost, iterations, storiesCompleted, prdDirty);
37710
37584
  }
37711
37585
  acceptanceRetries++;
37712
37586
  logger?.warn("acceptance", `Acceptance retry ${acceptanceRetries}/${maxRetries}`, {
37587
+ storyId: firstStory?.id,
37713
37588
  failedACs: failures.failedACs
37714
37589
  });
37715
37590
  if (acceptanceRetries >= maxRetries) {
37716
- logger?.error("acceptance", "Max acceptance retries reached");
37717
- logger?.warn("acceptance", "Manual intervention required");
37718
- logger?.debug("acceptance", 'Run: nax accept --override AC-N "reason" to skip specific ACs');
37591
+ logger?.error("acceptance", "Max acceptance retries reached", { storyId: firstStory?.id });
37719
37592
  await fireHook(ctx.hooks, "on-pause", hookCtx(ctx.feature, {
37720
37593
  reason: `Acceptance validation failed after ${maxRetries} retries: ${failures.failedACs.join(", ")}`,
37721
37594
  cost: totalCost
@@ -37729,141 +37602,87 @@ async function runAcceptanceLoop(ctx) {
37729
37602
  testPathConfig: ctx.config.acceptance.testPath,
37730
37603
  language: ctx.config.project?.language
37731
37604
  });
37732
- if (existingStubPath) {
37733
- const testContent = await Bun.file(existingStubPath).text();
37734
- if (isStubTestFile(testContent)) {
37735
- logger?.warn("acceptance", "Stub tests detected \u2014 re-generating acceptance tests", {
37736
- testPath: existingStubPath
37605
+ if (existingStubPath && isStubTestFile(await Bun.file(existingStubPath).text())) {
37606
+ if (stubRegenCount >= MAX_STUB_REGENS) {
37607
+ logger?.error("acceptance", "Acceptance test generator cannot produce real tests \u2014 giving up", {
37608
+ storyId: firstStory?.id,
37609
+ stubRegenCount
37737
37610
  });
37738
- const { unlink: unlink3 } = await import("fs/promises");
37739
- await unlink3(existingStubPath);
37740
- const { acceptanceSetupStage: acceptanceSetupStage2 } = await Promise.resolve().then(() => (init_acceptance_setup(), exports_acceptance_setup));
37741
- await acceptanceSetupStage2.execute(acceptanceContext);
37742
- const newContent = await Bun.file(existingStubPath).text();
37743
- if (isStubTestFile(newContent)) {
37744
- logger?.error("acceptance", "Acceptance test generation failed after retry \u2014 manual implementation required");
37745
- return buildResult(false, prd, totalCost, iterations, storiesCompleted, prdDirty, failures.failedACs, acceptanceRetries);
37746
- }
37747
- continue;
37748
- }
37749
- }
37750
- }
37751
- const totalACs = prd.userStories.filter((s) => !s.id.startsWith("US-FIX-")).flatMap((s) => s.acceptanceCriteria).length;
37752
- if (ctx.featureDir && isTestLevelFailure(failures.failedACs, totalACs)) {
37753
- logger?.warn("acceptance", `Test-level failure detected (${failures.failedACs.length}/${totalACs} ACs failed) \u2014 regenerating acceptance test`);
37754
- const testPath = await findExistingAcceptanceTestPath({
37755
- acceptanceTestPaths: ctx.acceptanceTestPaths,
37756
- featureDir: ctx.featureDir,
37757
- testPathConfig: ctx.config.acceptance.testPath,
37758
- language: ctx.config.project?.language
37759
- });
37760
- if (testPath) {
37761
- const regenerated = await regenerateAcceptanceTest(testPath, acceptanceContext);
37762
- if (!regenerated) {
37763
37611
  return buildResult(false, prd, totalCost, iterations, storiesCompleted, prdDirty, failures.failedACs, acceptanceRetries);
37764
37612
  }
37613
+ stubRegenCount++;
37614
+ logger?.warn("acceptance", "Stub test detected \u2014 full regen", {
37615
+ storyId: firstStory?.id,
37616
+ attempt: stubRegenCount,
37617
+ maxStubRegens: MAX_STUB_REGENS
37618
+ });
37619
+ await regenerateAcceptanceTest(existingStubPath, acceptanceContext);
37765
37620
  continue;
37766
37621
  }
37767
37622
  }
37768
- const strategy = ctx.config.acceptance.fix?.strategy ?? "diagnose-first";
37769
- if (strategy === "diagnose-first" || strategy === "implement-only") {
37770
- logger?.info("acceptance", `Running fix routing with strategy: ${strategy}`);
37771
- const semanticVerdicts = ctx.featureDir ? await _acceptanceLoopDeps.loadSemanticVerdicts(ctx.featureDir) : [];
37772
- const fixResult = await runFixRouting({
37773
- ctx,
37774
- failures,
37775
- prd,
37776
- acceptanceContext,
37777
- semanticVerdicts
37778
- });
37779
- totalCost += fixResult.cost;
37780
- if (fixResult.fixed) {
37781
- logger?.info("acceptance", "Fix succeeded \u2014 re-running acceptance tests...");
37782
- continue;
37783
- }
37784
- logger?.error("acceptance", "Fix routing failed to resolve acceptance failures");
37785
- return buildResult(false, prd, totalCost, iterations, storiesCompleted, prdDirty, failures.failedACs, acceptanceRetries);
37786
- }
37787
- logger?.info("acceptance", "Generating fix stories...");
37788
- const fixStories = await generateAndAddFixStories(ctx, failures, prd);
37789
- if (!fixStories) {
37623
+ const semanticVerdicts = ctx.featureDir ? await _acceptanceLoopDeps.loadSemanticVerdicts(ctx.featureDir) : [];
37624
+ const totalACs = prd.userStories.filter((s) => !s.id.startsWith("US-FIX-")).flatMap((s) => s.acceptanceCriteria).length;
37625
+ const agentName = ctx.config.autoMode.defaultAgent;
37626
+ const agent = (ctx.agentGetFn ?? _acceptanceLoopDeps.getAgent)(agentName);
37627
+ if (!agent) {
37628
+ logger?.error("acceptance", "Agent not found for diagnosis", { storyId: firstStory?.id, agentName });
37790
37629
  return buildResult(false, prd, totalCost, iterations, storiesCompleted, prdDirty, failures.failedACs, acceptanceRetries);
37791
37630
  }
37792
- await savePRD(prd, ctx.prdPath);
37793
- prdDirty = true;
37794
- logger?.info("acceptance", "Running fix stories...");
37795
- for (const fixStory of fixStories) {
37796
- const userStory = prd.userStories.find((s) => s.id === fixStory.id);
37797
- if (!userStory || userStory.status !== "pending")
37798
- continue;
37799
- iterations++;
37800
- const result = await executeFixStory(ctx, userStory, prd, iterations);
37801
- prd = await loadPRD(ctx.prdPath);
37802
- if (result.success) {
37803
- storiesCompleted++;
37804
- totalCost += result.cost;
37805
- if (result.metrics)
37806
- ctx.allStoryMetrics.push(...result.metrics);
37807
- }
37808
- await savePRD(prd, ctx.prdPath);
37809
- prdDirty = true;
37810
- }
37811
- logger?.info("acceptance", "Re-running acceptance tests...");
37631
+ const testEntries = ctx.acceptanceTestPaths ? await loadAcceptanceTestContent(ctx.acceptanceTestPaths.map((p) => p.testPath)) : [];
37632
+ const testFileContent = testEntries[0]?.content ?? "";
37633
+ const strategy = ctx.config.acceptance.fix?.strategy ?? "diagnose-first";
37634
+ const diagnosis = await resolveAcceptanceDiagnosis({
37635
+ agent,
37636
+ failures,
37637
+ totalACs,
37638
+ strategy,
37639
+ semanticVerdicts,
37640
+ diagnosisOpts: {
37641
+ testOutput: failures.testOutput,
37642
+ testFileContent,
37643
+ config: ctx.config,
37644
+ workdir: ctx.workdir,
37645
+ featureName: ctx.feature,
37646
+ storyId: firstStory?.id
37647
+ },
37648
+ previousFailure
37649
+ });
37650
+ logger?.info("acceptance.diagnosis", "Diagnosis resolved", {
37651
+ storyId: firstStory?.id,
37652
+ verdict: diagnosis.verdict,
37653
+ confidence: diagnosis.confidence,
37654
+ attempt: acceptanceRetries
37655
+ });
37656
+ const fixResult = await applyFix({
37657
+ ctx,
37658
+ failures,
37659
+ diagnosis,
37660
+ previousFailure
37661
+ });
37662
+ totalCost += fixResult.cost;
37663
+ previousFailure += `
37664
+ ---
37665
+ Attempt ${acceptanceRetries}/${maxRetries}: verdict=${diagnosis.verdict}, confidence=${diagnosis.confidence}
37666
+ Reasoning: ${diagnosis.reasoning}
37667
+ Failed ACs: ${failures.failedACs.join(", ")}
37668
+ `;
37812
37669
  }
37813
37670
  return buildResult(false, prd, totalCost, iterations, storiesCompleted, prdDirty);
37814
37671
  }
37815
- var _acceptanceLoopDeps, _regenerateDeps;
37672
+ var _acceptanceLoopDeps, MAX_STUB_REGENS = 2;
37816
37673
  var init_acceptance_loop = __esm(() => {
37817
- init_acceptance4();
37818
- init_fix_diagnosis();
37819
- init_fix_executor();
37820
37674
  init_semantic_verdict();
37821
37675
  init_test_path();
37822
37676
  init_registry();
37823
- init_config();
37824
- init_loader();
37825
37677
  init_hooks();
37826
37678
  init_logger2();
37827
- init_runner();
37828
- init_stages();
37829
- init_prd();
37830
- init_routing();
37831
37679
  init_helpers();
37680
+ init_acceptance_fix();
37681
+ init_acceptance_helpers();
37682
+ init_acceptance_helpers();
37832
37683
  _acceptanceLoopDeps = {
37833
37684
  getAgent,
37834
- loadSemanticVerdicts,
37835
- executeTestRegen: async (ctx, acceptanceContext) => {
37836
- const testPath = await findExistingAcceptanceTestPath({
37837
- acceptanceTestPaths: ctx.acceptanceTestPaths,
37838
- featureDir: ctx.featureDir,
37839
- testPathConfig: ctx.config.acceptance.testPath,
37840
- language: ctx.config.project?.language
37841
- });
37842
- if (!testPath)
37843
- return "no_test_file";
37844
- const regenerated = await regenerateAcceptanceTest(testPath, acceptanceContext);
37845
- if (!regenerated)
37846
- return "failed";
37847
- const { acceptanceStage: acceptanceStage2 } = await Promise.resolve().then(() => (init_acceptance(), exports_acceptance));
37848
- const result = await acceptanceStage2.execute(acceptanceContext);
37849
- return result.action === "continue" ? "passed" : "failed";
37850
- }
37851
- };
37852
- _regenerateDeps = {
37853
- spawnGitDiff: async (workdir, gitRef) => {
37854
- const proc = Bun.spawn(["git", "diff", "--name-only", gitRef], {
37855
- cwd: workdir,
37856
- stdout: "pipe",
37857
- stderr: "pipe"
37858
- });
37859
- const [, stdout] = await Promise.all([proc.exited, new Response(proc.stdout).text()]);
37860
- return stdout.trim();
37861
- },
37862
- readFile: async (filePath) => Bun.file(filePath).text(),
37863
- acceptanceSetupExecute: async (ctx) => {
37864
- const { acceptanceSetupStage: acceptanceSetupStage2 } = await Promise.resolve().then(() => (init_acceptance_setup(), exports_acceptance_setup));
37865
- await acceptanceSetupStage2.execute(ctx);
37866
- }
37685
+ loadSemanticVerdicts
37867
37686
  };
37868
37687
  });
37869
37688
 
@@ -38347,12 +38166,12 @@ var init_headless_formatter = __esm(() => {
38347
38166
  // src/pipeline/subscribers/events-writer.ts
38348
38167
  import { appendFile as appendFile3, mkdir as mkdir3 } from "fs/promises";
38349
38168
  import { homedir as homedir5 } from "os";
38350
- import { basename as basename6, join as join44 } from "path";
38169
+ import { basename as basename6, join as join43 } from "path";
38351
38170
  function wireEventsWriter(bus, feature, runId, workdir) {
38352
38171
  const logger = getSafeLogger();
38353
38172
  const project = basename6(workdir);
38354
- const eventsDir = join44(homedir5(), ".nax", "events", project);
38355
- const eventsFile = join44(eventsDir, "events.jsonl");
38173
+ const eventsDir = join43(homedir5(), ".nax", "events", project);
38174
+ const eventsFile = join43(eventsDir, "events.jsonl");
38356
38175
  let dirReady = false;
38357
38176
  const write = (line) => {
38358
38177
  return (async () => {
@@ -38533,12 +38352,12 @@ var init_interaction2 = __esm(() => {
38533
38352
  // src/pipeline/subscribers/registry.ts
38534
38353
  import { mkdir as mkdir4, writeFile } from "fs/promises";
38535
38354
  import { homedir as homedir6 } from "os";
38536
- import { basename as basename7, join as join45 } from "path";
38355
+ import { basename as basename7, join as join44 } from "path";
38537
38356
  function wireRegistry(bus, feature, runId, workdir) {
38538
38357
  const logger = getSafeLogger();
38539
38358
  const project = basename7(workdir);
38540
- const runDir = join45(homedir6(), ".nax", "runs", `${project}-${feature}-${runId}`);
38541
- const metaFile = join45(runDir, "meta.json");
38359
+ const runDir = join44(homedir6(), ".nax", "runs", `${project}-${feature}-${runId}`);
38360
+ const metaFile = join44(runDir, "meta.json");
38542
38361
  const unsub = bus.on("run:started", (_ev) => {
38543
38362
  return (async () => {
38544
38363
  try {
@@ -38548,8 +38367,8 @@ function wireRegistry(bus, feature, runId, workdir) {
38548
38367
  project,
38549
38368
  feature,
38550
38369
  workdir,
38551
- statusPath: join45(workdir, ".nax", "features", feature, "status.json"),
38552
- eventsDir: join45(workdir, ".nax", "features", feature, "runs"),
38370
+ statusPath: join44(workdir, ".nax", "features", feature, "status.json"),
38371
+ eventsDir: join44(workdir, ".nax", "features", feature, "runs"),
38553
38372
  registeredAt: new Date().toISOString()
38554
38373
  };
38555
38374
  await writeFile(metaFile, JSON.stringify(meta3, null, 2));
@@ -39206,7 +39025,7 @@ var init_pipeline_result_handler = __esm(() => {
39206
39025
  });
39207
39026
 
39208
39027
  // src/execution/iteration-runner.ts
39209
- import { join as join46 } from "path";
39028
+ import { join as join45 } from "path";
39210
39029
  async function runIteration(ctx, prd, selection, iterations, totalCost, allStoryMetrics) {
39211
39030
  const logger = getSafeLogger();
39212
39031
  const { story, storiesToExecute, routing, isBatchExecution } = selection;
@@ -39241,7 +39060,7 @@ async function runIteration(ctx, prd, selection, iterations, totalCost, allStory
39241
39060
  }
39242
39061
  }
39243
39062
  const accumulatedAttemptCost = (story.priorFailures || []).reduce((sum, f) => sum + (f.cost || 0), 0);
39244
- const effectiveConfig = story.workdir ? await _iterationRunnerDeps.loadConfigForWorkdir(join46(ctx.workdir, ".nax", "config.json"), story.workdir) : ctx.config;
39063
+ const effectiveConfig = story.workdir ? await _iterationRunnerDeps.loadConfigForWorkdir(join45(ctx.workdir, ".nax", "config.json"), story.workdir) : ctx.config;
39245
39064
  const pipelineContext = {
39246
39065
  config: effectiveConfig,
39247
39066
  rootConfig: ctx.config,
@@ -39250,7 +39069,7 @@ async function runIteration(ctx, prd, selection, iterations, totalCost, allStory
39250
39069
  stories: storiesToExecute,
39251
39070
  routing,
39252
39071
  projectDir: ctx.workdir,
39253
- workdir: story.workdir ? join46(ctx.workdir, story.workdir) : ctx.workdir,
39072
+ workdir: story.workdir ? join45(ctx.workdir, story.workdir) : ctx.workdir,
39254
39073
  prdPath: ctx.prdPath,
39255
39074
  featureDir: ctx.featureDir,
39256
39075
  hooks: ctx.hooks,
@@ -39406,7 +39225,7 @@ __export(exports_parallel_worker, {
39406
39225
  executeStoryInWorktree: () => executeStoryInWorktree,
39407
39226
  executeParallelBatch: () => executeParallelBatch
39408
39227
  });
39409
- import { join as join47 } from "path";
39228
+ import { join as join46 } from "path";
39410
39229
  async function executeStoryInWorktree(story, worktreePath, context, routing, eventEmitter) {
39411
39230
  const logger = getSafeLogger();
39412
39231
  try {
@@ -39426,7 +39245,7 @@ async function executeStoryInWorktree(story, worktreePath, context, routing, eve
39426
39245
  story,
39427
39246
  stories: [story],
39428
39247
  projectDir: context.projectDir,
39429
- workdir: story.workdir ? join47(worktreePath, story.workdir) : worktreePath,
39248
+ workdir: story.workdir ? join46(worktreePath, story.workdir) : worktreePath,
39430
39249
  routing,
39431
39250
  storyGitRef: storyGitRef ?? undefined
39432
39251
  };
@@ -39515,13 +39334,13 @@ __export(exports_manager, {
39515
39334
  });
39516
39335
  import { existsSync as existsSync29, symlinkSync } from "fs";
39517
39336
  import { mkdir as mkdir5 } from "fs/promises";
39518
- import { join as join48 } from "path";
39337
+ import { join as join47 } from "path";
39519
39338
 
39520
39339
  class WorktreeManager {
39521
39340
  async ensureGitExcludes(projectRoot) {
39522
39341
  const logger = getSafeLogger();
39523
- const infoDir = join48(projectRoot, ".git", "info");
39524
- const excludePath = join48(infoDir, "exclude");
39342
+ const infoDir = join47(projectRoot, ".git", "info");
39343
+ const excludePath = join47(infoDir, "exclude");
39525
39344
  try {
39526
39345
  await mkdir5(infoDir, { recursive: true });
39527
39346
  let existing = "";
@@ -39548,7 +39367,7 @@ ${missing.join(`
39548
39367
  }
39549
39368
  async create(projectRoot, storyId) {
39550
39369
  validateStoryId(storyId);
39551
- const worktreePath = join48(projectRoot, ".nax-wt", storyId);
39370
+ const worktreePath = join47(projectRoot, ".nax-wt", storyId);
39552
39371
  const branchName = `nax/${storyId}`;
39553
39372
  try {
39554
39373
  const pruneProc = _managerDeps.spawn(["git", "worktree", "prune"], {
@@ -39589,9 +39408,9 @@ ${missing.join(`
39589
39408
  }
39590
39409
  throw new Error(`Failed to create worktree: ${String(error48)}`);
39591
39410
  }
39592
- const nodeModulesSource = join48(projectRoot, "node_modules");
39411
+ const nodeModulesSource = join47(projectRoot, "node_modules");
39593
39412
  if (existsSync29(nodeModulesSource)) {
39594
- const nodeModulesTarget = join48(worktreePath, "node_modules");
39413
+ const nodeModulesTarget = join47(worktreePath, "node_modules");
39595
39414
  try {
39596
39415
  symlinkSync(nodeModulesSource, nodeModulesTarget, "dir");
39597
39416
  } catch (error48) {
@@ -39599,9 +39418,9 @@ ${missing.join(`
39599
39418
  throw new Error(`Failed to symlink node_modules: ${errorMessage(error48)}`);
39600
39419
  }
39601
39420
  }
39602
- const envSource = join48(projectRoot, ".env");
39421
+ const envSource = join47(projectRoot, ".env");
39603
39422
  if (existsSync29(envSource)) {
39604
- const envTarget = join48(worktreePath, ".env");
39423
+ const envTarget = join47(worktreePath, ".env");
39605
39424
  try {
39606
39425
  symlinkSync(envSource, envTarget, "file");
39607
39426
  } catch (error48) {
@@ -39612,7 +39431,7 @@ ${missing.join(`
39612
39431
  }
39613
39432
  async remove(projectRoot, storyId) {
39614
39433
  validateStoryId(storyId);
39615
- const worktreePath = join48(projectRoot, ".nax-wt", storyId);
39434
+ const worktreePath = join47(projectRoot, ".nax-wt", storyId);
39616
39435
  const branchName = `nax/${storyId}`;
39617
39436
  try {
39618
39437
  const proc = _managerDeps.spawn(["git", "worktree", "remove", worktreePath, "--force"], {
@@ -40554,16 +40373,16 @@ var init_unified_executor = __esm(() => {
40554
40373
  });
40555
40374
 
40556
40375
  // src/project/detector.ts
40557
- import { join as join49 } from "path";
40376
+ import { join as join48 } from "path";
40558
40377
  async function detectLanguage(workdir, pkg) {
40559
40378
  const deps = _detectorDeps;
40560
- if (await deps.fileExists(join49(workdir, "go.mod")))
40379
+ if (await deps.fileExists(join48(workdir, "go.mod")))
40561
40380
  return "go";
40562
- if (await deps.fileExists(join49(workdir, "Cargo.toml")))
40381
+ if (await deps.fileExists(join48(workdir, "Cargo.toml")))
40563
40382
  return "rust";
40564
- if (await deps.fileExists(join49(workdir, "pyproject.toml")))
40383
+ if (await deps.fileExists(join48(workdir, "pyproject.toml")))
40565
40384
  return "python";
40566
- if (await deps.fileExists(join49(workdir, "requirements.txt")))
40385
+ if (await deps.fileExists(join48(workdir, "requirements.txt")))
40567
40386
  return "python";
40568
40387
  if (pkg != null) {
40569
40388
  const allDeps = {
@@ -40623,18 +40442,18 @@ async function detectLintTool(workdir, language) {
40623
40442
  if (language === "python")
40624
40443
  return "ruff";
40625
40444
  const deps = _detectorDeps;
40626
- if (await deps.fileExists(join49(workdir, "biome.json")))
40445
+ if (await deps.fileExists(join48(workdir, "biome.json")))
40627
40446
  return "biome";
40628
- if (await deps.fileExists(join49(workdir, ".eslintrc")))
40447
+ if (await deps.fileExists(join48(workdir, ".eslintrc")))
40629
40448
  return "eslint";
40630
- if (await deps.fileExists(join49(workdir, ".eslintrc.js")))
40449
+ if (await deps.fileExists(join48(workdir, ".eslintrc.js")))
40631
40450
  return "eslint";
40632
- if (await deps.fileExists(join49(workdir, ".eslintrc.json")))
40451
+ if (await deps.fileExists(join48(workdir, ".eslintrc.json")))
40633
40452
  return "eslint";
40634
40453
  return;
40635
40454
  }
40636
40455
  async function detectProjectProfile(workdir, existing) {
40637
- const pkg = await _detectorDeps.readJson(join49(workdir, "package.json"));
40456
+ const pkg = await _detectorDeps.readJson(join48(workdir, "package.json"));
40638
40457
  const language = existing.language !== undefined ? existing.language : await detectLanguage(workdir, pkg);
40639
40458
  const type = existing.type !== undefined ? existing.type : detectType(pkg);
40640
40459
  const testFramework = existing.testFramework !== undefined ? existing.testFramework : await detectTestFramework(workdir, language, pkg);
@@ -40730,7 +40549,7 @@ async function writeStatusFile(filePath, status) {
40730
40549
  var init_status_file = () => {};
40731
40550
 
40732
40551
  // src/execution/status-writer.ts
40733
- import { join as join50 } from "path";
40552
+ import { join as join49 } from "path";
40734
40553
 
40735
40554
  class StatusWriter {
40736
40555
  statusFile;
@@ -40844,7 +40663,7 @@ class StatusWriter {
40844
40663
  if (!this._prd)
40845
40664
  return;
40846
40665
  const safeLogger = getSafeLogger();
40847
- const featureStatusPath = join50(featureDir, "status.json");
40666
+ const featureStatusPath = join49(featureDir, "status.json");
40848
40667
  const write = async () => {
40849
40668
  try {
40850
40669
  const base = this.getSnapshot(totalCost, iterations);
@@ -41055,7 +40874,7 @@ __export(exports_run_initialization, {
41055
40874
  initializeRun: () => initializeRun,
41056
40875
  _reconcileDeps: () => _reconcileDeps
41057
40876
  });
41058
- import { join as join51 } from "path";
40877
+ import { join as join50 } from "path";
41059
40878
  async function reconcileState(prd, prdPath, workdir, config2) {
41060
40879
  const logger = getSafeLogger();
41061
40880
  let reconciledCount = 0;
@@ -41073,7 +40892,7 @@ async function reconcileState(prd, prdPath, workdir, config2) {
41073
40892
  });
41074
40893
  continue;
41075
40894
  }
41076
- const effectiveWorkdir = story.workdir ? join51(workdir, story.workdir) : workdir;
40895
+ const effectiveWorkdir = story.workdir ? join50(workdir, story.workdir) : workdir;
41077
40896
  try {
41078
40897
  const reviewResult = await _reconcileDeps.runReview(config2.review, effectiveWorkdir, config2.execution);
41079
40898
  if (!reviewResult.success) {
@@ -72288,7 +72107,7 @@ var require_jsx_dev_runtime = __commonJS((exports, module) => {
72288
72107
  init_source();
72289
72108
  import { existsSync as existsSync31, mkdirSync as mkdirSync7 } from "fs";
72290
72109
  import { homedir as homedir8 } from "os";
72291
- import { join as join53 } from "path";
72110
+ import { join as join52 } from "path";
72292
72111
 
72293
72112
  // node_modules/commander/esm.mjs
72294
72113
  var import__ = __toESM(require_commander(), 1);
@@ -73029,6 +72848,20 @@ function validateStory(raw, index, allIds) {
73029
72848
  throw new Error(`[schema] story[${index}].acceptanceCriteria[${i}] must be a string`);
73030
72849
  }
73031
72850
  }
72851
+ let suggestedCriteria;
72852
+ if (s.suggestedCriteria !== undefined && s.suggestedCriteria !== null) {
72853
+ if (!Array.isArray(s.suggestedCriteria)) {
72854
+ throw new Error(`[schema] story[${index}].suggestedCriteria must be an array when present`);
72855
+ }
72856
+ if (s.suggestedCriteria.length > 0) {
72857
+ for (let i = 0;i < s.suggestedCriteria.length; i++) {
72858
+ if (typeof s.suggestedCriteria[i] !== "string") {
72859
+ throw new Error(`[schema] story[${index}].suggestedCriteria[${i}] must be a string`);
72860
+ }
72861
+ }
72862
+ suggestedCriteria = s.suggestedCriteria;
72863
+ }
72864
+ }
73032
72865
  const routing = typeof s.routing === "object" && s.routing !== null ? s.routing : {};
73033
72866
  const rawComplexity = routing.complexity ?? s.complexity;
73034
72867
  if (rawComplexity === undefined || rawComplexity === null) {
@@ -73096,7 +72929,8 @@ function validateStory(raw, index, allIds) {
73096
72929
  ...noTestJustification !== undefined ? { noTestJustification } : {}
73097
72930
  },
73098
72931
  ...workdir !== undefined ? { workdir } : {},
73099
- ...contextFiles.length > 0 ? { contextFiles } : {}
72932
+ ...contextFiles.length > 0 ? { contextFiles } : {},
72933
+ ...suggestedCriteria !== undefined ? { suggestedCriteria } : {}
73100
72934
  };
73101
72935
  }
73102
72936
  function sanitizeInvalidEscapes(text) {
@@ -73234,7 +73068,8 @@ async function planCommand(workdir, config2, options) {
73234
73068
  outputDir,
73235
73069
  timeoutSeconds,
73236
73070
  dangerouslySkipPermissions: resolvedPerm.skipPermissions,
73237
- maxInteractionTurns: config2?.agent?.maxInteractionTurns
73071
+ maxInteractionTurns: config2?.agent?.maxInteractionTurns,
73072
+ specContent
73238
73073
  });
73239
73074
  if (debateResult.outcome !== "failed" && debateResult.output) {
73240
73075
  rawResponse = debateResult.output;
@@ -84304,15 +84139,15 @@ Next: nax generate --package ${options.package}`));
84304
84139
  }
84305
84140
  return;
84306
84141
  }
84307
- const naxDir = join53(workdir, ".nax");
84142
+ const naxDir = join52(workdir, ".nax");
84308
84143
  if (existsSync31(naxDir) && !options.force) {
84309
84144
  console.log(source_default.yellow("nax already initialized. Use --force to overwrite."));
84310
84145
  return;
84311
84146
  }
84312
- mkdirSync7(join53(naxDir, "features"), { recursive: true });
84313
- mkdirSync7(join53(naxDir, "hooks"), { recursive: true });
84314
- await Bun.write(join53(naxDir, "config.json"), JSON.stringify(DEFAULT_CONFIG, null, 2));
84315
- await Bun.write(join53(naxDir, "hooks.json"), JSON.stringify({
84147
+ mkdirSync7(join52(naxDir, "features"), { recursive: true });
84148
+ mkdirSync7(join52(naxDir, "hooks"), { recursive: true });
84149
+ await Bun.write(join52(naxDir, "config.json"), JSON.stringify(DEFAULT_CONFIG, null, 2));
84150
+ await Bun.write(join52(naxDir, "hooks.json"), JSON.stringify({
84316
84151
  hooks: {
84317
84152
  "on-start": { command: 'echo "nax started: $NAX_FEATURE"', enabled: false },
84318
84153
  "on-complete": { command: 'echo "nax complete: $NAX_FEATURE"', enabled: false },
@@ -84320,12 +84155,12 @@ Next: nax generate --package ${options.package}`));
84320
84155
  "on-error": { command: 'echo "nax error: $NAX_REASON"', enabled: false }
84321
84156
  }
84322
84157
  }, null, 2));
84323
- await Bun.write(join53(naxDir, ".gitignore"), `# nax temp files
84158
+ await Bun.write(join52(naxDir, ".gitignore"), `# nax temp files
84324
84159
  *.tmp
84325
84160
  .paused.json
84326
84161
  .nax-verifier-verdict.json
84327
84162
  `);
84328
- await Bun.write(join53(naxDir, "context.md"), `# Project Context
84163
+ await Bun.write(join52(naxDir, "context.md"), `# Project Context
84329
84164
 
84330
84165
  This document defines coding standards, architectural decisions, and forbidden patterns for this project.
84331
84166
  Run \`nax generate\` to regenerate agent config files (CLAUDE.md, AGENTS.md, .cursorrules, etc.) from this file.
@@ -84455,8 +84290,8 @@ program2.command("run").description("Run the orchestration loop for a feature").
84455
84290
  console.error(source_default.red("nax not initialized. Run: nax init"));
84456
84291
  process.exit(1);
84457
84292
  }
84458
- const featureDir = join53(naxDir, "features", options.feature);
84459
- const prdPath = join53(featureDir, "prd.json");
84293
+ const featureDir = join52(naxDir, "features", options.feature);
84294
+ const prdPath = join52(featureDir, "prd.json");
84460
84295
  if (options.plan && options.from) {
84461
84296
  if (existsSync31(prdPath) && !options.force) {
84462
84297
  console.error(source_default.red(`Error: prd.json already exists for feature "${options.feature}".`));
@@ -84478,10 +84313,10 @@ program2.command("run").description("Run the orchestration loop for a feature").
84478
84313
  }
84479
84314
  }
84480
84315
  try {
84481
- const planLogDir = join53(featureDir, "plan");
84316
+ const planLogDir = join52(featureDir, "plan");
84482
84317
  mkdirSync7(planLogDir, { recursive: true });
84483
84318
  const planLogId = new Date().toISOString().replace(/:/g, "-").replace(/\..+/, "");
84484
- const planLogPath = join53(planLogDir, `${planLogId}.jsonl`);
84319
+ const planLogPath = join52(planLogDir, `${planLogId}.jsonl`);
84485
84320
  initLogger({ level: "info", filePath: planLogPath, useChalk: false, headless: true });
84486
84321
  console.log(source_default.dim(` [Plan log: ${planLogPath}]`));
84487
84322
  console.log(source_default.dim(" [Planning phase: generating PRD from spec]"));
@@ -84525,10 +84360,10 @@ program2.command("run").description("Run the orchestration loop for a feature").
84525
84360
  process.exit(1);
84526
84361
  }
84527
84362
  resetLogger();
84528
- const runsDir = join53(featureDir, "runs");
84363
+ const runsDir = join52(featureDir, "runs");
84529
84364
  mkdirSync7(runsDir, { recursive: true });
84530
84365
  const runId = new Date().toISOString().replace(/:/g, "-").replace(/\..+/, "");
84531
- const logFilePath = join53(runsDir, `${runId}.jsonl`);
84366
+ const logFilePath = join52(runsDir, `${runId}.jsonl`);
84532
84367
  const isTTY = process.stdout.isTTY ?? false;
84533
84368
  const headlessFlag = options.headless ?? false;
84534
84369
  const headlessEnv = process.env.NAX_HEADLESS === "1";
@@ -84544,7 +84379,7 @@ program2.command("run").description("Run the orchestration loop for a feature").
84544
84379
  config2.autoMode.defaultAgent = options.agent;
84545
84380
  }
84546
84381
  config2.execution.maxIterations = Number.parseInt(options.maxIterations, 10);
84547
- const globalNaxDir = join53(homedir8(), ".nax");
84382
+ const globalNaxDir = join52(homedir8(), ".nax");
84548
84383
  const hooks = await loadHooksConfig(naxDir, globalNaxDir);
84549
84384
  const eventEmitter = new PipelineEventEmitter;
84550
84385
  let tuiInstance;
@@ -84567,7 +84402,7 @@ program2.command("run").description("Run the orchestration loop for a feature").
84567
84402
  } else {
84568
84403
  console.log(source_default.dim(" [Headless mode \u2014 pipe output]"));
84569
84404
  }
84570
- const statusFilePath = join53(workdir, ".nax", "status.json");
84405
+ const statusFilePath = join52(workdir, ".nax", "status.json");
84571
84406
  let parallel;
84572
84407
  if (options.parallel !== undefined) {
84573
84408
  parallel = Number.parseInt(options.parallel, 10);
@@ -84593,7 +84428,7 @@ program2.command("run").description("Run the orchestration loop for a feature").
84593
84428
  headless: useHeadless,
84594
84429
  skipPrecheck: options.skipPrecheck ?? false
84595
84430
  });
84596
- const latestSymlink = join53(runsDir, "latest.jsonl");
84431
+ const latestSymlink = join52(runsDir, "latest.jsonl");
84597
84432
  try {
84598
84433
  if (existsSync31(latestSymlink)) {
84599
84434
  Bun.spawnSync(["rm", latestSymlink]);
@@ -84631,9 +84466,9 @@ features.command("create <name>").description("Create a new feature").option("-d
84631
84466
  console.error(source_default.red("nax not initialized. Run: nax init"));
84632
84467
  process.exit(1);
84633
84468
  }
84634
- const featureDir = join53(naxDir, "features", name);
84469
+ const featureDir = join52(naxDir, "features", name);
84635
84470
  mkdirSync7(featureDir, { recursive: true });
84636
- await Bun.write(join53(featureDir, "spec.md"), `# Feature: ${name}
84471
+ await Bun.write(join52(featureDir, "spec.md"), `# Feature: ${name}
84637
84472
 
84638
84473
  ## Overview
84639
84474
 
@@ -84666,7 +84501,7 @@ features.command("create <name>").description("Create a new feature").option("-d
84666
84501
 
84667
84502
  <!-- What this feature explicitly does NOT cover. -->
84668
84503
  `);
84669
- await Bun.write(join53(featureDir, "progress.txt"), `# Progress: ${name}
84504
+ await Bun.write(join52(featureDir, "progress.txt"), `# Progress: ${name}
84670
84505
 
84671
84506
  Created: ${new Date().toISOString()}
84672
84507
 
@@ -84692,7 +84527,7 @@ features.command("list").description("List all features").option("-d, --dir <pat
84692
84527
  console.error(source_default.red("nax not initialized."));
84693
84528
  process.exit(1);
84694
84529
  }
84695
- const featuresDir = join53(naxDir, "features");
84530
+ const featuresDir = join52(naxDir, "features");
84696
84531
  if (!existsSync31(featuresDir)) {
84697
84532
  console.log(source_default.dim("No features yet."));
84698
84533
  return;
@@ -84707,7 +84542,7 @@ features.command("list").description("List all features").option("-d, --dir <pat
84707
84542
  Features:
84708
84543
  `));
84709
84544
  for (const name of entries) {
84710
- const prdPath = join53(featuresDir, name, "prd.json");
84545
+ const prdPath = join52(featuresDir, name, "prd.json");
84711
84546
  if (existsSync31(prdPath)) {
84712
84547
  const prd = await loadPRD(prdPath);
84713
84548
  const c = countStories(prd);
@@ -84742,10 +84577,10 @@ Use: nax plan -f <feature> --from <spec>`));
84742
84577
  cliOverrides.profile = options.profile;
84743
84578
  }
84744
84579
  const config2 = await loadConfig(workdir, cliOverrides);
84745
- const featureLogDir = join53(naxDir, "features", options.feature, "plan");
84580
+ const featureLogDir = join52(naxDir, "features", options.feature, "plan");
84746
84581
  mkdirSync7(featureLogDir, { recursive: true });
84747
84582
  const planLogId = new Date().toISOString().replace(/:/g, "-").replace(/\..+/, "");
84748
- const planLogPath = join53(featureLogDir, `${planLogId}.jsonl`);
84583
+ const planLogPath = join52(featureLogDir, `${planLogId}.jsonl`);
84749
84584
  initLogger({ level: "info", filePath: planLogPath, useChalk: false, headless: true });
84750
84585
  console.log(source_default.dim(` [Plan log: ${planLogPath}]`));
84751
84586
  try {