@nathapp/nax 0.57.2 → 0.57.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/nax.js +642 -351
  2. package/package.json +1 -1
package/dist/nax.js CHANGED
@@ -3044,16 +3044,6 @@ var init_logger = __esm(() => {
3044
3044
  });
3045
3045
 
3046
3046
  // src/logger/index.ts
3047
- var exports_logger = {};
3048
- __export(exports_logger, {
3049
- resetLogger: () => resetLogger,
3050
- initLogger: () => initLogger,
3051
- getSafeLogger: () => getSafeLogger,
3052
- getLogger: () => getLogger,
3053
- formatJsonl: () => formatJsonl,
3054
- formatConsole: () => formatConsole,
3055
- Logger: () => Logger
3056
- });
3057
3047
  var init_logger2 = __esm(() => {
3058
3048
  init_logger();
3059
3049
  init_formatters();
@@ -3304,6 +3294,61 @@ var init_test_strategy = __esm(() => {
3304
3294
 
3305
3295
  // src/agents/shared/decompose.ts
3306
3296
  function buildDecomposePrompt(options) {
3297
+ if (options.targetStory) {
3298
+ return buildPlanModeDecomposePrompt(options);
3299
+ }
3300
+ return buildSpecDecomposePrompt(options);
3301
+ }
3302
+ function buildPlanModeDecomposePrompt(options) {
3303
+ const targetStory = options.targetStory;
3304
+ const siblings = options.siblings ?? [];
3305
+ const siblingsSummary = siblings.length > 0 ? `
3306
+ ## Sibling Stories
3307
+
3308
+ ${siblings.map((s) => `- ${s.id}: ${s.title}`).join(`
3309
+ `)}
3310
+ ` : "";
3311
+ const maxAcCount = options.config?.precheck?.storySizeGate?.maxAcCount;
3312
+ const acConstraint = maxAcCount != null ? `
3313
+ ## Acceptance Criteria Constraint
3314
+
3315
+ Every sub-story must have at most ${maxAcCount} acceptance criteria. If a story would exceed this limit, split it into additional sub-stories instead of adding more ACs.
3316
+ ` : "";
3317
+ return `You are a senior software architect decomposing a complex user story into smaller, implementable sub-stories.
3318
+
3319
+ ## Target Story
3320
+
3321
+ ${JSON.stringify(targetStory, null, 2)}${siblingsSummary}
3322
+ ## Codebase Context
3323
+
3324
+ ${options.codebaseContext}
3325
+ ${acConstraint}
3326
+ ${COMPLEXITY_GUIDE}
3327
+
3328
+ ${TEST_STRATEGY_GUIDE}
3329
+
3330
+ ${GROUPING_RULES}
3331
+
3332
+ ## Output
3333
+
3334
+ Return a JSON array of sub-stories (no markdown code fences, no explanation \u2014 JSON array only):
3335
+
3336
+ [{
3337
+ "id": "string \u2014 e.g. ${targetStory.id}-A",
3338
+ "title": "string",
3339
+ "description": "string",
3340
+ "acceptanceCriteria": ["string \u2014 behavioral, testable criteria"],
3341
+ "contextFiles": ["string \u2014 required, non-empty list of key source files"],
3342
+ "tags": ["string"],
3343
+ "dependencies": ["string"],
3344
+ "complexity": "simple | medium | complex | expert",
3345
+ "reasoning": "string",
3346
+ "estimatedLOC": 0,
3347
+ "risks": ["string"],
3348
+ "testStrategy": "no-test | tdd-simple | three-session-tdd-lite | three-session-tdd | test-after"
3349
+ }]`;
3350
+ }
3351
+ function buildSpecDecomposePrompt(options) {
3307
3352
  return `You are a requirements analyst. Break down the following feature specification into user stories and classify each story's complexity.
3308
3353
 
3309
3354
  CODEBASE CONTEXT:
@@ -3614,9 +3659,11 @@ class SpawnAcpSession {
3614
3659
  try {
3615
3660
  proc.stdin?.write(text);
3616
3661
  proc.stdin?.end();
3617
- const exitCode = await proc.exited;
3618
- const stdout = await new Response(proc.stdout).text();
3619
- const stderr = await new Response(proc.stderr).text();
3662
+ const [exitCode, stdout, stderr] = await Promise.all([
3663
+ proc.exited,
3664
+ new Response(proc.stdout).text(),
3665
+ new Response(proc.stderr).text()
3666
+ ]);
3620
3667
  if (exitCode !== 0) {
3621
3668
  getSafeLogger()?.warn("acp-adapter", `Session prompt exited with code ${exitCode}`, {
3622
3669
  exitCode,
@@ -3646,6 +3693,21 @@ class SpawnAcpSession {
3646
3693
  await this.pidRegistry?.unregister(processPid);
3647
3694
  }
3648
3695
  }
3696
+ async trackedSpawn(cmd, opts) {
3697
+ const proc = _spawnClientDeps.spawn(cmd, { stdout: "pipe", stderr: "pipe", ...opts });
3698
+ const pid = proc.pid;
3699
+ await this.pidRegistry?.register(pid);
3700
+ try {
3701
+ const [exitCode, stdout, stderr] = await Promise.all([
3702
+ proc.exited,
3703
+ new Response(proc.stdout).text(),
3704
+ new Response(proc.stderr).text()
3705
+ ]);
3706
+ return { exitCode, stdout, stderr };
3707
+ } finally {
3708
+ await this.pidRegistry?.unregister(pid);
3709
+ }
3710
+ }
3649
3711
  async close(options) {
3650
3712
  if (this.activeProc) {
3651
3713
  try {
@@ -3656,10 +3718,8 @@ class SpawnAcpSession {
3656
3718
  }
3657
3719
  const cmd = ["acpx", "--cwd", this.cwd, this.agentName, "sessions", "close", this.sessionName];
3658
3720
  getSafeLogger()?.debug("acp-adapter", `Closing session: ${this.sessionName}`);
3659
- const proc = _spawnClientDeps.spawn(cmd, { stdout: "pipe", stderr: "pipe" });
3660
- const exitCode = await proc.exited;
3721
+ const { exitCode, stderr } = await this.trackedSpawn(cmd);
3661
3722
  if (exitCode !== 0) {
3662
- const stderr = await new Response(proc.stderr).text();
3663
3723
  getSafeLogger()?.warn("acp-adapter", "Failed to close session", {
3664
3724
  sessionName: this.sessionName,
3665
3725
  stderr: stderr.slice(0, 200)
@@ -3667,8 +3727,7 @@ class SpawnAcpSession {
3667
3727
  }
3668
3728
  if (options?.forceTerminate) {
3669
3729
  try {
3670
- const stopProc = _spawnClientDeps.spawn(["acpx", this.agentName, "stop"], { stdout: "pipe", stderr: "pipe" });
3671
- await stopProc.exited;
3730
+ await this.trackedSpawn(["acpx", this.agentName, "stop"]);
3672
3731
  } catch (err) {
3673
3732
  getSafeLogger()?.debug("acp-adapter", "acpx stop failed (swallowed)", { cause: String(err) });
3674
3733
  }
@@ -3683,8 +3742,7 @@ class SpawnAcpSession {
3683
3742
  }
3684
3743
  const cmd = ["acpx", this.agentName, "cancel"];
3685
3744
  getSafeLogger()?.debug("acp-adapter", `Cancelling active prompt: ${this.sessionName}`);
3686
- const proc = _spawnClientDeps.spawn(cmd, { stdout: "pipe", stderr: "pipe" });
3687
- await proc.exited;
3745
+ await this.trackedSpawn(cmd);
3688
3746
  }
3689
3747
  }
3690
3748
 
@@ -3710,14 +3768,27 @@ class SpawnAcpClient {
3710
3768
  this.pidRegistry = pidRegistry;
3711
3769
  }
3712
3770
  async start() {}
3771
+ async trackedSpawn(cmd) {
3772
+ const proc = _spawnClientDeps.spawn(cmd, { stdout: "pipe", stderr: "pipe" });
3773
+ const pid = proc.pid;
3774
+ await this.pidRegistry?.register(pid);
3775
+ try {
3776
+ const [exitCode, stdout, stderr] = await Promise.all([
3777
+ proc.exited,
3778
+ new Response(proc.stdout).text(),
3779
+ new Response(proc.stderr).text()
3780
+ ]);
3781
+ return { exitCode, stdout, stderr };
3782
+ } finally {
3783
+ await this.pidRegistry?.unregister(pid);
3784
+ }
3785
+ }
3713
3786
  async createSession(opts) {
3714
3787
  const sessionName = opts.sessionName || `nax-${Date.now()}`;
3715
3788
  const cmd = ["acpx", "--cwd", this.cwd, opts.agentName, "sessions", "ensure", "--name", sessionName];
3716
3789
  getSafeLogger()?.debug("acp-adapter", `Ensuring session: ${sessionName}`);
3717
- const proc = _spawnClientDeps.spawn(cmd, { stdout: "pipe", stderr: "pipe" });
3718
- const exitCode = await proc.exited;
3790
+ const { exitCode, stderr } = await this.trackedSpawn(cmd);
3719
3791
  if (exitCode !== 0) {
3720
- const stderr = await new Response(proc.stderr).text();
3721
3792
  throw new Error(`[acp-adapter] Failed to create session: ${stderr || `exit code ${exitCode}`}`);
3722
3793
  }
3723
3794
  return new SpawnAcpSession({
@@ -3733,8 +3804,7 @@ class SpawnAcpClient {
3733
3804
  }
3734
3805
  async loadSession(sessionName, agentName, permissionMode) {
3735
3806
  const cmd = ["acpx", "--cwd", this.cwd, agentName, "sessions", "ensure", "--name", sessionName];
3736
- const proc = _spawnClientDeps.spawn(cmd, { stdout: "pipe", stderr: "pipe" });
3737
- const exitCode = await proc.exited;
3807
+ const { exitCode } = await this.trackedSpawn(cmd);
3738
3808
  if (exitCode !== 0) {
3739
3809
  return null;
3740
3810
  }
@@ -18082,6 +18152,7 @@ var init_schemas3 = __esm(() => {
18082
18152
  command: exports_external.string().optional(),
18083
18153
  model: exports_external.enum(["fast", "balanced", "powerful"]).default("fast"),
18084
18154
  refinement: exports_external.boolean().default(true),
18155
+ refinementConcurrency: exports_external.number().int().min(1).max(10).default(3),
18085
18156
  redGate: exports_external.boolean().default(true),
18086
18157
  testStrategy: exports_external.enum(["unit", "component", "cli", "e2e", "snapshot"]).optional(),
18087
18158
  testFramework: exports_external.string().min(1, "acceptance.testFramework must be non-empty").optional(),
@@ -18285,8 +18356,6 @@ var init_schemas3 = __esm(() => {
18285
18356
  maxRectificationAttempts: 3
18286
18357
  },
18287
18358
  contextProviderTokenBudget: 2000,
18288
- lintCommand: null,
18289
- typecheckCommand: null,
18290
18359
  dangerouslySkipPermissions: true,
18291
18360
  permissionProfile: "unrestricted",
18292
18361
  smartTestRunner: true
@@ -18392,6 +18461,7 @@ var init_schemas3 = __esm(() => {
18392
18461
  testPath: ".nax-acceptance.test.ts",
18393
18462
  model: "fast",
18394
18463
  refinement: true,
18464
+ refinementConcurrency: 3,
18395
18465
  redGate: true,
18396
18466
  timeoutMs: 1800000,
18397
18467
  fix: {
@@ -18641,7 +18711,7 @@ async function readAcpSession(workdir, featureName, storyId) {
18641
18711
  return null;
18642
18712
  }
18643
18713
  }
18644
- async function sweepFeatureSessions(workdir, featureName) {
18714
+ async function sweepFeatureSessions(workdir, featureName, pidRegistry) {
18645
18715
  const path = acpSessionsPath(workdir, featureName);
18646
18716
  let sessions;
18647
18717
  try {
@@ -18665,7 +18735,7 @@ async function sweepFeatureSessions(workdir, featureName) {
18665
18735
  }
18666
18736
  for (const [agentName, sessionNames] of byAgent) {
18667
18737
  const cmdStr = `acpx ${agentName}`;
18668
- const client = _acpAdapterDeps.createClient(cmdStr, workdir);
18738
+ const client = _acpAdapterDeps.createClient(cmdStr, workdir, undefined, pidRegistry);
18669
18739
  try {
18670
18740
  await client.start();
18671
18741
  for (const sessionName of sessionNames) {
@@ -18690,7 +18760,7 @@ async function sweepFeatureSessions(workdir, featureName) {
18690
18760
  logger?.warn("acp-adapter", "[sweep] Failed to clear sidecar after sweep", { error: String(err) });
18691
18761
  }
18692
18762
  }
18693
- async function sweepStaleFeatureSessions(workdir, featureName, maxAgeMs = MAX_SESSION_AGE_MS) {
18763
+ async function sweepStaleFeatureSessions(workdir, featureName, maxAgeMs = MAX_SESSION_AGE_MS, pidRegistry) {
18694
18764
  const path = acpSessionsPath(workdir, featureName);
18695
18765
  const file3 = Bun.file(path);
18696
18766
  if (!await file3.exists())
@@ -18702,7 +18772,7 @@ async function sweepStaleFeatureSessions(workdir, featureName, maxAgeMs = MAX_SE
18702
18772
  featureName,
18703
18773
  ageMs
18704
18774
  });
18705
- await sweepFeatureSessions(workdir, featureName);
18775
+ await sweepFeatureSessions(workdir, featureName, pidRegistry);
18706
18776
  }
18707
18777
  function extractOutput(response) {
18708
18778
  if (!response)
@@ -19202,7 +19272,9 @@ class AcpAgentAdapter {
19202
19272
  jsonMode: true,
19203
19273
  config: options.config,
19204
19274
  workdir: options.workdir,
19205
- sessionRole: "decompose"
19275
+ featureName: options.featureName,
19276
+ storyId: options.storyId,
19277
+ sessionRole: options.sessionRole ?? "decompose"
19206
19278
  });
19207
19279
  output = completeResult.output;
19208
19280
  } catch (err) {
@@ -20040,17 +20112,27 @@ class ClaudeCodeAdapter {
20040
20112
  cmd.splice(cmd.length - 2, 0, "--dangerously-skip-permissions");
20041
20113
  }
20042
20114
  const pidRegistry = this.getPidRegistry(options.workdir);
20115
+ const env2 = this.buildAllowedEnv({
20116
+ workdir: options.workdir,
20117
+ modelDef,
20118
+ prompt: "",
20119
+ modelTier: options.modelTier || "balanced",
20120
+ timeoutSeconds: 600
20121
+ });
20122
+ if (options.featureName) {
20123
+ env2.NAX_FEATURE_NAME = options.featureName;
20124
+ }
20125
+ if (options.storyId) {
20126
+ env2.NAX_STORY_ID = options.storyId;
20127
+ }
20128
+ if (options.sessionRole) {
20129
+ env2.NAX_SESSION_ROLE = options.sessionRole;
20130
+ }
20043
20131
  const proc = _decomposeDeps.spawn(cmd, {
20044
20132
  cwd: options.workdir,
20045
20133
  stdout: "pipe",
20046
20134
  stderr: "inherit",
20047
- env: this.buildAllowedEnv({
20048
- workdir: options.workdir,
20049
- modelDef,
20050
- prompt: "",
20051
- modelTier: options.modelTier || "balanced",
20052
- timeoutSeconds: 600
20053
- })
20135
+ env: env2
20054
20136
  });
20055
20137
  await pidRegistry.register(proc.pid);
20056
20138
  const DECOMPOSE_TIMEOUT_MS = 300000;
@@ -20997,6 +21079,50 @@ function errorMessage(err) {
20997
21079
  return err instanceof Error ? err.message : String(err);
20998
21080
  }
20999
21081
 
21082
+ // src/utils/llm-json.ts
21083
+ function extractJsonFromMarkdown(text) {
21084
+ const match = text.match(/```(?:json)?\s*\n([\s\S]*?)\n?\s*```/);
21085
+ if (match) {
21086
+ return match[1] ?? text;
21087
+ }
21088
+ return text;
21089
+ }
21090
+ function stripTrailingCommas(text) {
21091
+ return text.replace(/,\s*([}\]])/g, "$1");
21092
+ }
21093
+ function extractJsonObject(text) {
21094
+ const objStart = text.indexOf("{");
21095
+ const arrStart = text.indexOf("[");
21096
+ let start;
21097
+ let closeChar;
21098
+ if (objStart === -1 && arrStart === -1)
21099
+ return null;
21100
+ if (objStart === -1) {
21101
+ start = arrStart;
21102
+ closeChar = "]";
21103
+ } else if (arrStart === -1) {
21104
+ start = objStart;
21105
+ closeChar = "}";
21106
+ } else if (objStart < arrStart) {
21107
+ start = objStart;
21108
+ closeChar = "}";
21109
+ } else {
21110
+ start = arrStart;
21111
+ closeChar = "]";
21112
+ }
21113
+ const end = text.lastIndexOf(closeChar);
21114
+ if (end <= start)
21115
+ return null;
21116
+ return text.slice(start, end + 1);
21117
+ }
21118
+ function wrapJsonPrompt(prompt) {
21119
+ return `IMPORTANT: Your entire response must be a single JSON object or array. Do not explain your reasoning. Do not use markdown formatting. Output ONLY the JSON.
21120
+
21121
+ ${prompt.trim()}
21122
+
21123
+ YOUR RESPONSE MUST START WITH { OR [ AND END WITH } OR ]. No other text.`;
21124
+ }
21125
+
21000
21126
  // src/acceptance/refinement.ts
21001
21127
  var exports_refinement = {};
21002
21128
  __export(exports_refinement, {
@@ -21010,7 +21136,7 @@ function buildRefinementPrompt(criteria, codebaseContext, options) {
21010
21136
  `);
21011
21137
  const strategySection = buildStrategySection(options);
21012
21138
  const refinedExample = buildRefinedExample(options?.testStrategy);
21013
- return `You are an acceptance criteria refinement assistant. Your task is to convert raw acceptance criteria into concrete, machine-verifiable assertions.
21139
+ const core2 = `You are an acceptance criteria refinement assistant. Your task is to convert raw acceptance criteria into concrete, machine-verifiable assertions.
21014
21140
 
21015
21141
  CODEBASE CONTEXT:
21016
21142
  ${codebaseContext}
@@ -21019,7 +21145,7 @@ ACCEPTANCE CRITERIA TO REFINE:
21019
21145
  ${criteriaList}
21020
21146
 
21021
21147
  For each criterion, produce a refined version that is concrete and automatically testable where possible.
21022
- Respond with ONLY a JSON array (no markdown code fences):
21148
+ Respond with a JSON array:
21023
21149
  [{
21024
21150
  "original": "<exact original criterion text>",
21025
21151
  "refined": "<concrete, machine-verifiable description>",
@@ -21031,8 +21157,8 @@ Rules:
21031
21157
  - "original" must match the input criterion text exactly
21032
21158
  - "refined" must be a concrete assertion (e.g., ${refinedExample})
21033
21159
  - "testable" is false only if the criterion cannot be automatically verified (e.g., "UX feels responsive", "design looks good")
21034
- - "storyId" leave as empty string \u2014 it will be assigned by the caller
21035
- - Respond with ONLY the JSON array`;
21160
+ - "storyId" leave as empty string \u2014 it will be assigned by the caller`;
21161
+ return wrapJsonPrompt(core2);
21036
21162
  }
21037
21163
  function buildStrategySection(options) {
21038
21164
  if (!options?.testStrategy) {
@@ -21081,7 +21207,9 @@ function parseRefinementResponse(response, criteria) {
21081
21207
  return fallbackCriteria(criteria);
21082
21208
  }
21083
21209
  try {
21084
- const parsed = JSON.parse(response);
21210
+ const fromFence = extractJsonFromMarkdown(response);
21211
+ const cleaned = stripTrailingCommas(fromFence !== response ? fromFence : response);
21212
+ const parsed = JSON.parse(cleaned);
21085
21213
  if (!Array.isArray(parsed)) {
21086
21214
  return fallbackCriteria(criteria);
21087
21215
  }
@@ -21188,6 +21316,9 @@ function skeletonImportLine(testFramework) {
21188
21316
  }
21189
21317
  return `import { describe, test, expect } from "bun:test";`;
21190
21318
  }
21319
+ function hasLikelyTestContent(content) {
21320
+ return /\b(?:describe|test|it|expect)\s*\(/.test(content) || /func\s+Test\w+\s*\(/.test(content) || /def\s+test_\w+/.test(content) || /#\[test\]/.test(content);
21321
+ }
21191
21322
  function acceptanceTestFilename(language) {
21192
21323
  switch (language?.toLowerCase()) {
21193
21324
  case "go":
@@ -21291,36 +21422,80 @@ Rules:
21291
21422
  });
21292
21423
  if (!testCode) {
21293
21424
  const targetPath = join6(options.workdir, ".nax", "features", options.featureName, resolveAcceptanceTestFile(options.language, options.config?.acceptance?.testPath));
21425
+ const backupPath = `${targetPath}.llm-recovery.bak`;
21294
21426
  let recoveryFailed = false;
21295
- logger.debug("acceptance", "BUG-076 recovery: checking for agent-written file", { targetPath });
21427
+ logger.debug("acceptance", "BUG-076 recovery: checking for agent-written file", {
21428
+ targetPath,
21429
+ backupPath,
21430
+ featureName: options.featureName,
21431
+ workdir: options.workdir
21432
+ });
21296
21433
  try {
21297
21434
  const existing = await Bun.file(targetPath).text();
21298
21435
  const recovered = extractTestCode(existing);
21436
+ const likelyTestContent = hasLikelyTestContent(existing);
21299
21437
  logger.debug("acceptance", "BUG-076 recovery: file check result", {
21300
21438
  fileSize: existing.length,
21301
21439
  extractedCode: recovered !== null,
21440
+ likelyTestContent,
21302
21441
  filePreview: existing.slice(0, 300)
21303
21442
  });
21304
21443
  if (recovered) {
21305
21444
  logger.info("acceptance", "Acceptance test written directly by agent \u2014 using existing file", { targetPath });
21306
21445
  testCode = recovered;
21446
+ } else if (existing.trim().length > 0 && likelyTestContent) {
21447
+ let backupCreated = false;
21448
+ try {
21449
+ await _generatorPRDDeps.backupFile(backupPath, existing);
21450
+ backupCreated = true;
21451
+ } catch (backupError) {
21452
+ logger.warn("acceptance", "BUG-076: failed to create recovery backup; preserving file anyway", {
21453
+ targetPath,
21454
+ backupPath,
21455
+ backupError: backupError instanceof Error ? backupError.message : String(backupError)
21456
+ });
21457
+ }
21458
+ logger.warn("acceptance", "BUG-076: preserving agent-written file with backup (heuristic recovery)", {
21459
+ targetPath,
21460
+ backupPath,
21461
+ backupCreated,
21462
+ reason: "extractTestCode returned null"
21463
+ });
21464
+ testCode = existing;
21307
21465
  } else {
21466
+ if (existing.trim().length > 0) {
21467
+ try {
21468
+ await _generatorPRDDeps.backupFile(backupPath, existing);
21469
+ } catch (backupError) {
21470
+ logger.warn("acceptance", "BUG-076: failed to create fallback backup for unrecognized file", {
21471
+ targetPath,
21472
+ backupPath,
21473
+ backupError: backupError instanceof Error ? backupError.message : String(backupError)
21474
+ });
21475
+ }
21476
+ }
21308
21477
  recoveryFailed = true;
21309
- logger.error("acceptance", "BUG-076: ACP adapter wrote file but no code extractable \u2014 falling back to skeleton", {
21478
+ logger.error("acceptance", "BUG-076: agent-written file not recognized as test code \u2014 falling back to skeleton", {
21310
21479
  targetPath,
21480
+ backupPath,
21481
+ fileSize: existing.length,
21311
21482
  filePreview: existing.slice(0, 300)
21312
21483
  });
21313
21484
  }
21314
- } catch {
21485
+ } catch (error48) {
21315
21486
  recoveryFailed = true;
21316
- logger.debug("acceptance", "BUG-076 recovery: no file written by agent, falling back to skeleton", {
21487
+ logger.debug("acceptance", "BUG-076 recovery: failed to read agent-written file, falling back to skeleton", {
21317
21488
  targetPath,
21489
+ backupPath,
21490
+ error: error48 instanceof Error ? error48.message : String(error48),
21318
21491
  rawOutputPreview: rawOutput.slice(0, 500)
21319
21492
  });
21320
21493
  }
21321
21494
  if (recoveryFailed) {
21322
- logger.error("acceptance", "BUG-076: LLM returned non-code output and no file was written by agent \u2014 falling back to skeleton", {
21323
- rawOutputPreview: rawOutput.slice(0, 500)
21495
+ logger.error("acceptance", "BUG-076: LLM returned non-code output and recovery could not produce runnable tests \u2014 falling back to skeleton", {
21496
+ rawOutputPreview: rawOutput.slice(0, 500),
21497
+ targetPath,
21498
+ backupPath
21324
21499
  });
21325
21500
  }
21326
21501
  }
@@ -21586,6 +21761,9 @@ var init_generator = __esm(() => {
21586
21761
  },
21587
21762
  writeFile: async (path, content) => {
21588
21763
  await Bun.write(path, content);
21764
+ },
21765
+ backupFile: async (path, content) => {
21766
+ await Bun.write(path, content);
21589
21767
  }
21590
21768
  };
21591
21769
  });
@@ -21791,7 +21969,7 @@ function buildRoutingPrompt(story, config2) {
21791
21969
  const { title, description, acceptanceCriteria, tags } = story;
21792
21970
  const criteria = acceptanceCriteria.map((c, i) => `${i + 1}. ${c}`).join(`
21793
21971
  `);
21794
- return `You are a code task router. Classify a user story's complexity and select the cheapest model tier that will succeed.
21972
+ const core2 = `You are a code task router. Classify a user story's complexity and select the cheapest model tier that will succeed.
21795
21973
 
21796
21974
  ## Story
21797
21975
  Title: ${title}
@@ -21817,8 +21995,9 @@ Tags: ${tags.join(", ")}
21817
21995
  - Many files \u2260 complex \u2014 copy-paste refactors across files are simple.
21818
21996
  - Pure refactoring/deletion with no new behavior \u2192 simple.
21819
21997
 
21820
- Respond with ONLY this JSON (no markdown, no explanation):
21998
+ Respond with:
21821
21999
  {"complexity":"simple|medium|complex|expert","modelTier":"fast|balanced|powerful","reasoning":"<one line>"}`;
22000
+ return wrapJsonPrompt(core2);
21822
22001
  }
21823
22002
  function buildBatchRoutingPrompt(stories, config2) {
21824
22003
  const storyBlocks = stories.map((story, idx) => {
@@ -21832,7 +22011,7 @@ ${criteria}
21832
22011
  }).join(`
21833
22012
 
21834
22013
  `);
21835
- return `You are a code task router. Classify each story's complexity and select the cheapest model tier that will succeed.
22014
+ const batchCore = `You are a code task router. Classify each story's complexity and select the cheapest model tier that will succeed.
21836
22015
 
21837
22016
  ## Stories
21838
22017
  ${storyBlocks}
@@ -21854,8 +22033,9 @@ ${storyBlocks}
21854
22033
  - Many files \u2260 complex \u2014 copy-paste refactors across files are simple.
21855
22034
  - Pure refactoring/deletion with no new behavior \u2192 simple.
21856
22035
 
21857
- Respond with ONLY a JSON array (no markdown, no explanation):
22036
+ Respond with a JSON array:
21858
22037
  [{"id":"US-001","complexity":"simple|medium|complex|expert","modelTier":"fast|balanced|powerful","reasoning":"<one line>"}]`;
22038
+ return wrapJsonPrompt(batchCore);
21859
22039
  }
21860
22040
  function validateRoutingDecision(parsed, config2, story) {
21861
22041
  if (!parsed.complexity || !parsed.modelTier || !parsed.reasoning) {
@@ -21880,35 +22060,22 @@ function validateRoutingDecision(parsed, config2, story) {
21880
22060
  };
21881
22061
  }
21882
22062
  function stripCodeFences(text) {
21883
- let result = text.trim();
21884
- if (result.startsWith("```")) {
21885
- const lines = result.split(`
21886
- `);
21887
- result = lines.slice(1, -1).join(`
21888
- `).trim();
21889
- }
21890
- if (result.startsWith("json")) {
21891
- result = result.slice(4).trim();
22063
+ const trimmed = text.trim();
22064
+ const fromFence = extractJsonFromMarkdown(trimmed);
22065
+ if (fromFence !== trimmed)
22066
+ return fromFence;
22067
+ if (trimmed.startsWith("json")) {
22068
+ return trimmed.slice(4).trim();
21892
22069
  }
21893
- return result;
22070
+ return trimmed;
21894
22071
  }
21895
22072
  function parseRoutingResponse(output, story, config2) {
21896
- const jsonText = stripCodeFences(output);
22073
+ const jsonText = extractJsonFromMarkdown(output.trim());
21897
22074
  const parsed = JSON.parse(jsonText);
21898
22075
  return validateRoutingDecision(parsed, config2, story);
21899
22076
  }
21900
22077
  function parseBatchResponse(output, stories, config2) {
21901
- let jsonText = output.trim();
21902
- if (jsonText.startsWith("```")) {
21903
- const lines = jsonText.split(`
21904
- `);
21905
- jsonText = lines.slice(1, -1).join(`
21906
- `).trim();
21907
- }
21908
- if (jsonText.startsWith("json")) {
21909
- jsonText = jsonText.slice(4).trim();
21910
- }
21911
- const parsed = JSON.parse(jsonText);
22078
+ const parsed = JSON.parse(extractJsonFromMarkdown(output.trim()));
21912
22079
  if (!Array.isArray(parsed)) {
21913
22080
  throw new Error("Batch LLM response must be a JSON array");
21914
22081
  }
@@ -22342,7 +22509,7 @@ var package_default;
22342
22509
  var init_package = __esm(() => {
22343
22510
  package_default = {
22344
22511
  name: "@nathapp/nax",
22345
- version: "0.57.2",
22512
+ version: "0.57.4",
22346
22513
  description: "AI Coding Agent Orchestrator \u2014 loops until done",
22347
22514
  type: "module",
22348
22515
  bin: {
@@ -22421,8 +22588,8 @@ var init_version = __esm(() => {
22421
22588
  NAX_VERSION = package_default.version;
22422
22589
  NAX_COMMIT = (() => {
22423
22590
  try {
22424
- if (/^[0-9a-f]{6,10}$/.test("2ffb62ec"))
22425
- return "2ffb62ec";
22591
+ if (/^[0-9a-f]{6,10}$/.test("b3088982"))
22592
+ return "b3088982";
22426
22593
  } catch {}
22427
22594
  try {
22428
22595
  const result = Bun.spawnSync(["git", "rev-parse", "--short", "HEAD"], {
@@ -25043,7 +25210,7 @@ var init_version_detection = __esm(() => {
25043
25210
  // src/precheck/checks-agents.ts
25044
25211
  async function checkMultiAgentHealth() {
25045
25212
  try {
25046
- const versions2 = await getAgentVersions();
25213
+ const versions2 = await _checkAgentsDeps.getAgentVersions();
25047
25214
  const installed = versions2.filter((v) => v.installed);
25048
25215
  const notInstalled = versions2.filter((v) => !v.installed);
25049
25216
  const lines = [];
@@ -25080,8 +25247,12 @@ Available but not installed (${notInstalled.length}):`);
25080
25247
  };
25081
25248
  }
25082
25249
  }
25250
+ var _checkAgentsDeps;
25083
25251
  var init_checks_agents = __esm(() => {
25084
25252
  init_version_detection();
25253
+ _checkAgentsDeps = {
25254
+ getAgentVersions
25255
+ };
25085
25256
  });
25086
25257
 
25087
25258
  // src/precheck/checks.ts
@@ -26252,9 +26423,12 @@ ${stderr}` };
26252
26423
  const agent = (ctx.agentGetFn ?? _acceptanceSetupDeps.getAgent)(ctx.config.autoMode.defaultAgent);
26253
26424
  let allRefinedCriteria;
26254
26425
  if (ctx.config.acceptance.refinement) {
26255
- allRefinedCriteria = [];
26256
- for (const story of nonFixStories) {
26257
- const storyRefined = await _acceptanceSetupDeps.refine(story.acceptanceCriteria, {
26426
+ const maxConcurrency = ctx.config.acceptance.refinementConcurrency ?? 3;
26427
+ const results = new Array(nonFixStories.length);
26428
+ const executing = new Set;
26429
+ for (let i = 0;i < nonFixStories.length; i++) {
26430
+ const story = nonFixStories[i];
26431
+ const task = _acceptanceSetupDeps.refine(story.acceptanceCriteria, {
26258
26432
  storyId: story.id,
26259
26433
  featureName: ctx.prd.feature,
26260
26434
  workdir: ctx.workdir,
@@ -26262,9 +26436,18 @@ ${stderr}` };
26262
26436
  config: ctx.config,
26263
26437
  testStrategy: ctx.config.acceptance.testStrategy,
26264
26438
  testFramework: ctx.config.acceptance.testFramework
26439
+ }).then((refined) => {
26440
+ results[i] = refined;
26441
+ }).finally(() => {
26442
+ executing.delete(task);
26265
26443
  });
26266
- allRefinedCriteria = allRefinedCriteria.concat(storyRefined);
26444
+ executing.add(task);
26445
+ if (executing.size >= maxConcurrency) {
26446
+ await Promise.race(executing);
26447
+ }
26267
26448
  }
26449
+ await Promise.all(executing);
26450
+ allRefinedCriteria = results.flat();
26268
26451
  } else {
26269
26452
  allRefinedCriteria = nonFixStories.flatMap((story) => story.acceptanceCriteria.map((c) => ({
26270
26453
  original: c,
@@ -26593,6 +26776,93 @@ var init_event_bus = __esm(() => {
26593
26776
  pipelineEventBus = new PipelineEventBus;
26594
26777
  });
26595
26778
 
26779
+ // src/pipeline/stages/autofix-prompts.ts
26780
+ function formatCheckErrors(checks3) {
26781
+ return checks3.map((c) => `## ${c.check} errors (exit code ${c.exitCode})
26782
+ \`\`\`
26783
+ ${c.output}
26784
+ \`\`\``).join(`
26785
+
26786
+ `);
26787
+ }
26788
+ function buildSemanticRectificationPrompt(semanticChecks, story, scopeConstraint) {
26789
+ const errors3 = formatCheckErrors(semanticChecks);
26790
+ const acList = story.acceptanceCriteria.map((ac, i) => `${i + 1}. ${ac}`).join(`
26791
+ `);
26792
+ return `You are fixing acceptance criteria compliance issues found during semantic review.
26793
+
26794
+ Story: ${story.title} (${story.id})
26795
+
26796
+ ### Acceptance Criteria
26797
+ ${acList}
26798
+
26799
+ ### Semantic Review Findings
26800
+ ${errors3}
26801
+
26802
+ **Important:** The semantic reviewer only analyzed the git diff and may have flagged false positives (e.g., claiming a key or function is "missing" when it already exists in the codebase). Before making any changes:
26803
+ 1. Read the relevant files to verify each finding is a real issue
26804
+ 2. Only fix findings that are actually valid problems
26805
+ 3. Do NOT add keys, functions, or imports that already exist \u2014 check first
26806
+
26807
+ Do NOT change test files or test behavior.
26808
+ Do NOT add new features \u2014 only fix valid issues.
26809
+ Commit your fixes when done.${scopeConstraint}`;
26810
+ }
26811
+ function buildMechanicalRectificationPrompt(mechanicalChecks, story, scopeConstraint) {
26812
+ const errors3 = formatCheckErrors(mechanicalChecks);
26813
+ return `You are fixing lint/typecheck errors from a code review.
26814
+
26815
+ Story: ${story.title} (${story.id})
26816
+
26817
+ The following quality checks failed after implementation:
26818
+
26819
+ ${errors3}
26820
+
26821
+ Fix ALL errors listed above. Do NOT change test files or test behavior.
26822
+ Do NOT add new features \u2014 only fix the quality check errors.
26823
+ Commit your fixes when done.${scopeConstraint}`;
26824
+ }
26825
+ function buildReviewRectificationPrompt(failedChecks, story) {
26826
+ const scopeConstraint = story.workdir ? `
26827
+
26828
+ IMPORTANT: Only modify files within \`${story.workdir}/\`. Do NOT touch files outside this directory.` : "";
26829
+ const semanticChecks = failedChecks.filter((c) => c.check === "semantic");
26830
+ const mechanicalChecks = failedChecks.filter((c) => c.check !== "semantic");
26831
+ if (semanticChecks.length > 0 && mechanicalChecks.length === 0) {
26832
+ return buildSemanticRectificationPrompt(semanticChecks, story, scopeConstraint);
26833
+ }
26834
+ if (mechanicalChecks.length > 0 && semanticChecks.length === 0) {
26835
+ return buildMechanicalRectificationPrompt(mechanicalChecks, story, scopeConstraint);
26836
+ }
26837
+ const mechanicalSection = formatCheckErrors(mechanicalChecks);
26838
+ const semanticSection = formatCheckErrors(semanticChecks);
26839
+ const acList = story.acceptanceCriteria.map((ac, i) => `${i + 1}. ${ac}`).join(`
26840
+ `);
26841
+ return `You are fixing issues from a code review.
26842
+
26843
+ Story: ${story.title} (${story.id})
26844
+
26845
+ ## Lint/Typecheck Errors
26846
+
26847
+ ${mechanicalSection}
26848
+
26849
+ Fix ALL lint/typecheck errors listed above.
26850
+
26851
+ ## Semantic Review Findings (AC Compliance)
26852
+
26853
+ ### Acceptance Criteria
26854
+ ${acList}
26855
+
26856
+ ### Findings
26857
+ ${semanticSection}
26858
+
26859
+ **Important:** The semantic reviewer may have flagged false positives. Before making changes for semantic findings, read the relevant files to verify each finding is a real issue. Do NOT add keys, functions, or imports that already exist.
26860
+
26861
+ Do NOT change test files or test behavior.
26862
+ Do NOT add new features \u2014 only fix the identified issues.
26863
+ Commit your fixes when done.${scopeConstraint}`;
26864
+ }
26865
+
26596
26866
  // src/utils/git.ts
26597
26867
  async function gitWithTimeout(args, workdir) {
26598
26868
  const proc = _gitDeps.spawn(["git", ...args], {
@@ -26853,7 +27123,7 @@ ${stat}
26853
27123
  return `${statPreamble}${truncated}
26854
27124
  ... (truncated at ${DIFF_CAP_BYTES} bytes, showing ${visibleFiles}/${totalFiles} files)`;
26855
27125
  }
26856
- function buildPrompt(story, semanticConfig, diff) {
27126
+ function buildPrompt(story, semanticConfig, diff, stat) {
26857
27127
  const acList = story.acceptanceCriteria.map((ac, i) => `${i + 1}. ${ac}`).join(`
26858
27128
  `);
26859
27129
  const customRulesSection = semanticConfig.rules.length > 0 ? `
@@ -26861,7 +27131,7 @@ function buildPrompt(story, semanticConfig, diff) {
26861
27131
  ${semanticConfig.rules.map((r, i) => `${i + 1}. ${r}`).join(`
26862
27132
  `)}
26863
27133
  ` : "";
26864
- return `You are a semantic code reviewer. Your job is to verify that the implementation satisfies the story's acceptance criteria (ACs). You are NOT a linter or style checker \u2014 lint, typecheck, and convention checks are handled separately.
27134
+ const core2 = `You are a semantic code reviewer with access to the repository files. Your job is to verify that the implementation satisfies the story's acceptance criteria (ACs). You are NOT a linter or style checker \u2014 lint, typecheck, and convention checks are handled separately.
26865
27135
 
26866
27136
  ## Story: ${story.title}
26867
27137
 
@@ -26878,20 +27148,28 @@ ${diff}\`\`\`
26878
27148
 
26879
27149
  ## Instructions
26880
27150
 
26881
- For each acceptance criterion, verify the diff implements it correctly. Flag issues only when:
26882
- 1. An AC is not implemented or partially implemented
27151
+ For each acceptance criterion, verify the diff implements it correctly.
27152
+
27153
+ **Before reporting any finding as "error", you MUST verify it using your tools:**
27154
+ - If you suspect a key, function, import, or variable is missing, READ the relevant file to confirm before flagging.
27155
+ - If you suspect a code path is not wired in, GREP for its usage to confirm.
27156
+ - Do NOT flag something as missing based solely on its absence from the diff \u2014 it may already exist in the codebase. Check the actual file first.
27157
+ - If you cannot verify a claim even after checking, use "unverifiable" severity instead of "error".
27158
+
27159
+ Flag issues only when you have confirmed:
27160
+ 1. An AC is not implemented or partially implemented (verified by reading the actual files)
26883
27161
  2. The implementation contradicts what the AC specifies
26884
27162
  3. New code has dead paths that will never execute (stubs, noops, unreachable branches)
26885
- 4. New code is not wired into callers/exports (written but never used)
27163
+ 4. New code is not wired into callers/exports (verified by grepping for usage)
26886
27164
 
26887
27165
  Do NOT flag: style issues, naming conventions, import ordering, file length, or anything lint handles.
26888
27166
 
26889
- Respond in JSON format:
27167
+ Respond with JSON only \u2014 no explanation text before or after:
26890
27168
  {
26891
27169
  "passed": boolean,
26892
27170
  "findings": [
26893
27171
  {
26894
- "severity": "error" | "warn" | "info",
27172
+ "severity": "error" | "warn" | "info" | "unverifiable",
26895
27173
  "file": "path/to/file",
26896
27174
  "line": 42,
26897
27175
  "issue": "description of the issue",
@@ -26901,25 +27179,36 @@ Respond in JSON format:
26901
27179
  }
26902
27180
 
26903
27181
  If all ACs are correctly implemented, respond with { "passed": true, "findings": [] }.`;
27182
+ return wrapJsonPrompt(core2);
27183
+ }
27184
+ function validateLLMShape(parsed) {
27185
+ if (typeof parsed !== "object" || parsed === null)
27186
+ return null;
27187
+ const obj = parsed;
27188
+ if (typeof obj.passed !== "boolean")
27189
+ return null;
27190
+ if (!Array.isArray(obj.findings))
27191
+ return null;
27192
+ return { passed: obj.passed, findings: obj.findings };
26904
27193
  }
26905
27194
  function parseLLMResponse(raw) {
27195
+ const text = raw.trim();
26906
27196
  try {
26907
- let cleaned = raw.trim();
26908
- const fenceMatch = cleaned.match(/^```(?:json)?\s*\n([\s\S]*?)\n```/);
26909
- if (fenceMatch)
26910
- cleaned = fenceMatch[1].trim();
26911
- const parsed = JSON.parse(cleaned);
26912
- if (typeof parsed !== "object" || parsed === null)
26913
- return null;
26914
- const obj = parsed;
26915
- if (typeof obj.passed !== "boolean")
26916
- return null;
26917
- if (!Array.isArray(obj.findings))
26918
- return null;
26919
- return { passed: obj.passed, findings: obj.findings };
26920
- } catch {
26921
- return null;
27197
+ return validateLLMShape(JSON.parse(text));
27198
+ } catch {}
27199
+ const fromFence = extractJsonFromMarkdown(text);
27200
+ if (fromFence !== text) {
27201
+ try {
27202
+ return validateLLMShape(JSON.parse(stripTrailingCommas(fromFence)));
27203
+ } catch {}
27204
+ }
27205
+ const bareJson = extractJsonObject(text);
27206
+ if (bareJson) {
27207
+ try {
27208
+ return validateLLMShape(JSON.parse(stripTrailingCommas(bareJson)));
27209
+ } catch {}
26922
27210
  }
27211
+ return null;
26923
27212
  }
26924
27213
  function formatFindings(findings) {
26925
27214
  return findings.map((f) => `[${f.severity}] ${f.file}:${f.line} \u2014 ${f.issue}
@@ -26929,10 +27218,15 @@ function formatFindings(findings) {
26929
27218
  function normalizeSeverity(sev) {
26930
27219
  if (sev === "warn")
26931
27220
  return "warning";
27221
+ if (sev === "unverifiable")
27222
+ return "info";
26932
27223
  if (sev === "critical" || sev === "error" || sev === "warning" || sev === "info" || sev === "low")
26933
27224
  return sev;
26934
27225
  return "info";
26935
27226
  }
27227
+ function isBlockingSeverity(sev) {
27228
+ return sev !== "unverifiable";
27229
+ }
26936
27230
  function toReviewFindings(findings) {
26937
27231
  return findings.map((f) => ({
26938
27232
  ruleId: "semantic",
@@ -26975,10 +27269,11 @@ async function runSemanticReview(workdir, storyGitRef, story, semanticConfig, mo
26975
27269
  modelTier: semanticConfig.modelTier,
26976
27270
  configProvided: !!naxConfig
26977
27271
  });
26978
- const rawDiff = await collectDiff(workdir, effectiveRef, semanticConfig.excludePatterns);
26979
- const needsTruncation = rawDiff.length > DIFF_CAP_BYTES;
26980
- const stat = needsTruncation ? await collectDiffStat(workdir, effectiveRef) : undefined;
26981
- const diff = truncateDiff(rawDiff, stat);
27272
+ const [rawDiff, stat] = await Promise.all([
27273
+ collectDiff(workdir, effectiveRef, semanticConfig.excludePatterns),
27274
+ collectDiffStat(workdir, effectiveRef)
27275
+ ]);
27276
+ const diff = truncateDiff(rawDiff, rawDiff.length > DIFF_CAP_BYTES ? stat : undefined);
26982
27277
  if (!diff) {
26983
27278
  return {
26984
27279
  check: "semantic",
@@ -27003,7 +27298,7 @@ async function runSemanticReview(workdir, storyGitRef, story, semanticConfig, mo
27003
27298
  durationMs: Date.now() - startTime
27004
27299
  };
27005
27300
  }
27006
- const prompt = buildPrompt(story, semanticConfig, diff);
27301
+ const prompt = buildPrompt(story, semanticConfig, diff, stat || undefined);
27007
27302
  const reviewDebateEnabled = naxConfig?.debate?.enabled && naxConfig?.debate?.stages?.review?.enabled;
27008
27303
  if (reviewDebateEnabled) {
27009
27304
  const reviewStageConfig = naxConfig?.debate?.stages.review;
@@ -27042,10 +27337,11 @@ async function runSemanticReview(workdir, storyGitRef, story, semanticConfig, mo
27042
27337
  deduped.push(f);
27043
27338
  }
27044
27339
  }
27340
+ const debateBlocking = deduped.filter((f) => isBlockingSeverity(f.severity));
27045
27341
  const durationMs2 = Date.now() - startTime;
27046
27342
  if (!majorityPassed) {
27047
- if (deduped.length > 0) {
27048
- logger?.warn("review", `Semantic review failed (debate): ${deduped.length} findings`, {
27343
+ if (debateBlocking.length > 0) {
27344
+ logger?.warn("review", `Semantic review failed (debate): ${debateBlocking.length} findings`, {
27049
27345
  storyId: story.id,
27050
27346
  durationMs: durationMs2
27051
27347
  });
@@ -27056,17 +27352,21 @@ async function runSemanticReview(workdir, storyGitRef, story, semanticConfig, mo
27056
27352
  exitCode: 1,
27057
27353
  output: `Semantic review failed:
27058
27354
 
27059
- ${formatFindings(deduped)}`,
27355
+ ${formatFindings(debateBlocking)}`,
27060
27356
  durationMs: durationMs2,
27061
- findings: toReviewFindings(deduped)
27357
+ findings: toReviewFindings(debateBlocking)
27062
27358
  };
27063
27359
  }
27360
+ logger?.info("review", "Semantic review passed (debate, all findings non-blocking)", {
27361
+ storyId: story.id,
27362
+ durationMs: durationMs2
27363
+ });
27064
27364
  return {
27065
27365
  check: "semantic",
27066
- success: false,
27366
+ success: true,
27067
27367
  command: "",
27068
- exitCode: 1,
27069
- output: "Semantic review failed (debate, no findings)",
27368
+ exitCode: 0,
27369
+ output: "Semantic review passed (debate, all findings were unverifiable or informational)",
27070
27370
  durationMs: durationMs2
27071
27371
  };
27072
27372
  }
@@ -27127,15 +27427,23 @@ ${formatFindings(deduped)}`,
27127
27427
  durationMs: Date.now() - startTime
27128
27428
  };
27129
27429
  }
27130
- if (!parsed.passed && parsed.findings.length > 0) {
27430
+ const blockingFindings = parsed.findings.filter((f) => isBlockingSeverity(f.severity));
27431
+ const nonBlockingFindings = parsed.findings.filter((f) => !isBlockingSeverity(f.severity));
27432
+ if (nonBlockingFindings.length > 0) {
27433
+ logger?.debug("review", `Semantic review: ${nonBlockingFindings.length} non-blocking findings (unverifiable/info)`, {
27434
+ storyId: story.id,
27435
+ findings: nonBlockingFindings.map((f) => ({ severity: f.severity, file: f.file, issue: f.issue }))
27436
+ });
27437
+ }
27438
+ if (!parsed.passed && blockingFindings.length > 0) {
27131
27439
  const durationMs2 = Date.now() - startTime;
27132
- logger?.warn("review", `Semantic review failed: ${parsed.findings.length} findings`, {
27440
+ logger?.warn("review", `Semantic review failed: ${blockingFindings.length} findings`, {
27133
27441
  storyId: story.id,
27134
27442
  durationMs: durationMs2
27135
27443
  });
27136
27444
  logger?.debug("review", "Semantic review findings", {
27137
27445
  storyId: story.id,
27138
- findings: parsed.findings.map((f) => ({
27446
+ findings: blockingFindings.map((f) => ({
27139
27447
  severity: f.severity,
27140
27448
  file: f.file,
27141
27449
  line: f.line,
@@ -27145,7 +27453,7 @@ ${formatFindings(deduped)}`,
27145
27453
  });
27146
27454
  const output = `Semantic review failed:
27147
27455
 
27148
- ${formatFindings(parsed.findings)}`;
27456
+ ${formatFindings(blockingFindings)}`;
27149
27457
  return {
27150
27458
  check: "semantic",
27151
27459
  success: false,
@@ -27153,7 +27461,19 @@ ${formatFindings(parsed.findings)}`;
27153
27461
  exitCode: 1,
27154
27462
  output,
27155
27463
  durationMs: durationMs2,
27156
- findings: toReviewFindings(parsed.findings)
27464
+ findings: toReviewFindings(blockingFindings)
27465
+ };
27466
+ }
27467
+ if (!parsed.passed && blockingFindings.length === 0) {
27468
+ const durationMs2 = Date.now() - startTime;
27469
+ logger?.info("review", "Semantic review passed (all findings non-blocking)", { storyId: story.id, durationMs: durationMs2 });
27470
+ return {
27471
+ check: "semantic",
27472
+ success: true,
27473
+ command: "",
27474
+ exitCode: 0,
27475
+ output: "Semantic review passed (all findings were unverifiable or informational)",
27476
+ durationMs: durationMs2
27157
27477
  };
27158
27478
  }
27159
27479
  const durationMs = Date.now() - startTime;
@@ -27560,28 +27880,6 @@ async function recheckReview(ctx) {
27560
27880
  function collectFailedChecks(ctx) {
27561
27881
  return (ctx.reviewResult?.checks ?? []).filter((c) => !c.success);
27562
27882
  }
27563
- function buildReviewRectificationPrompt(failedChecks, story) {
27564
- const errors3 = failedChecks.map((c) => `## ${c.check} errors (exit code ${c.exitCode})
27565
- \`\`\`
27566
- ${c.output}
27567
- \`\`\``).join(`
27568
-
27569
- `);
27570
- const scopeConstraint = story.workdir ? `
27571
-
27572
- IMPORTANT: Only modify files within \`${story.workdir}/\`. Do NOT touch files outside this directory.` : "";
27573
- return `You are fixing lint/typecheck errors from a code review.
27574
-
27575
- Story: ${story.title} (${story.id})
27576
-
27577
- The following quality checks failed after implementation:
27578
-
27579
- ${errors3}
27580
-
27581
- Fix ALL errors listed above. Do NOT change test files or test behavior.
27582
- Do NOT add new features \u2014 only fix the quality check errors.
27583
- Commit your fixes when done.${scopeConstraint}`;
27584
- }
27585
27883
  function buildAutofixEscalationPreamble(attempt, maxAttempts, rethinkAtAttempt, urgencyAtAttempt) {
27586
27884
  return buildProgressivePromptPreamble({
27587
27885
  attempt,
@@ -29164,35 +29462,12 @@ async function isGreenfieldStory(story, workdir, testPattern = "**/*.{test,spec}
29164
29462
  }
29165
29463
  var init_greenfield = () => {};
29166
29464
  // src/verification/executor.ts
29167
- async function drainWithDeadline(proc, deadlineMs) {
29168
- const EMPTY = Symbol("timeout");
29169
- const race = (p) => {
29170
- let timerId;
29171
- const timeoutPromise = new Promise((r) => {
29172
- timerId = setTimeout(() => r(EMPTY), deadlineMs);
29173
- });
29174
- return Promise.race([p, timeoutPromise]).finally(() => clearTimeout(timerId));
29175
- };
29176
- let out = "";
29177
- try {
29178
- const stdout = race(new Response(proc.stdout).text());
29179
- const stderr = race(new Response(proc.stderr).text());
29180
- const [o, e] = await Promise.all([stdout, stderr]);
29181
- if (o !== EMPTY)
29182
- out += o;
29183
- if (e !== EMPTY)
29184
- out += (out ? `
29185
- ` : "") + e;
29186
- } catch (error48) {
29187
- const isExpectedStreamError = error48 instanceof TypeError || error48 instanceof Error && /abort|cancel|close|destroy|locked/i.test(error48.message);
29188
- if (!isExpectedStreamError) {
29189
- const { getSafeLogger: getSafeLogger4 } = await Promise.resolve().then(() => (init_logger2(), exports_logger));
29190
- getSafeLogger4()?.debug("executor", "Unexpected error draining process output", {
29191
- error: errorMessage(error48)
29192
- });
29193
- }
29194
- }
29195
- return out;
29465
+ function raceWithDeadline(p, deadlineMs) {
29466
+ const timer = { id: undefined };
29467
+ const timeoutP = new Promise((r) => {
29468
+ timer.id = setTimeout(() => r(DRAIN_TIMEOUT), deadlineMs);
29469
+ });
29470
+ return Promise.race([p, timeoutP]).finally(() => clearTimeout(timer.id));
29196
29471
  }
29197
29472
  function normalizeEnvironment(env2, stripVars) {
29198
29473
  const normalized = { ...env2 };
@@ -29212,6 +29487,8 @@ async function executeWithTimeout(command, timeoutSeconds, env2, options) {
29212
29487
  env: env2 || normalizeEnvironment(process.env),
29213
29488
  cwd: options?.cwd
29214
29489
  });
29490
+ const stdoutPromise = new Response(proc.stdout).text().catch(() => "");
29491
+ const stderrPromise = new Response(proc.stderr).text().catch(() => "");
29215
29492
  const timeoutMs = timeoutSeconds * 1000;
29216
29493
  let timedOut = false;
29217
29494
  const timer = { id: undefined };
@@ -29229,20 +29506,25 @@ async function executeWithTimeout(command, timeoutSeconds, env2, options) {
29229
29506
  killProcessGroup(pid, "SIGTERM");
29230
29507
  await Bun.sleep(gracePeriodMs);
29231
29508
  killProcessGroup(pid, "SIGKILL");
29232
- const partialOutput = await drainWithDeadline(proc, drainTimeoutMs);
29509
+ const [out, err] = await Promise.all([
29510
+ raceWithDeadline(stdoutPromise, drainTimeoutMs),
29511
+ raceWithDeadline(stderrPromise, drainTimeoutMs)
29512
+ ]);
29513
+ const parts = [out !== DRAIN_TIMEOUT ? out : "", err !== DRAIN_TIMEOUT ? err : ""].filter(Boolean);
29514
+ const partialOutput = parts.join(`
29515
+ `) || undefined;
29233
29516
  return {
29234
29517
  success: false,
29235
29518
  timeout: true,
29236
29519
  killed: true,
29237
29520
  childProcessesKilled: true,
29238
- output: partialOutput || undefined,
29521
+ output: partialOutput,
29239
29522
  error: `EXECUTION_TIMEOUT: Verification process exceeded ${timeoutSeconds}s. Process group (PID ${pid}) killed.`,
29240
29523
  countsTowardEscalation: false
29241
29524
  };
29242
29525
  }
29243
- const exitCode = raceResult;
29244
- const stdout = await new Response(proc.stdout).text();
29245
- const stderr = await new Response(proc.stderr).text();
29526
+ const exitCode = typeof raceResult === "number" ? raceResult : 0;
29527
+ const [stdout, stderr] = await Promise.all([stdoutPromise, stderrPromise]);
29246
29528
  const output = `${stdout}
29247
29529
  ${stderr}`;
29248
29530
  return {
@@ -29288,10 +29570,11 @@ function buildTestCommand(baseCommand, options) {
29288
29570
  }
29289
29571
  return command;
29290
29572
  }
29291
- var _executorDeps, DEFAULT_STRIP_ENV_VARS;
29573
+ var _executorDeps, DRAIN_TIMEOUT, DEFAULT_STRIP_ENV_VARS;
29292
29574
  var init_executor = __esm(() => {
29293
29575
  init_bun_deps();
29294
29576
  _executorDeps = { spawn };
29577
+ DRAIN_TIMEOUT = Symbol("drain-timeout");
29295
29578
  DEFAULT_STRIP_ENV_VARS = ["CLAUDECODE", "REPL_ID", "AGENT"];
29296
29579
  });
29297
29580
 
@@ -34435,12 +34718,12 @@ function createSignalHandler(ctx) {
34435
34718
  hardDeadline.unref();
34436
34719
  const logger = getSafeLogger();
34437
34720
  logger?.error("crash-recovery", `Received ${signal}, shutting down...`, { signal });
34438
- if (ctx.pidRegistry) {
34439
- await ctx.pidRegistry.killAll();
34440
- }
34441
34721
  if (ctx.onShutdown) {
34442
34722
  await ctx.onShutdown().catch(() => {});
34443
34723
  }
34724
+ if (ctx.pidRegistry) {
34725
+ await ctx.pidRegistry.killAll();
34726
+ }
34444
34727
  ctx.emitError?.(signal.toLowerCase());
34445
34728
  await writeFatalLog(ctx.jsonlFilePath, signal);
34446
34729
  await writeRunComplete(ctx, signal.toLowerCase());
@@ -34460,12 +34743,12 @@ ${error48.stack ?? ""}
34460
34743
  error: error48.message,
34461
34744
  stack: error48.stack
34462
34745
  });
34463
- if (ctx.pidRegistry) {
34464
- await ctx.pidRegistry.killAll();
34465
- }
34466
34746
  if (ctx.onShutdown) {
34467
34747
  await ctx.onShutdown().catch(() => {});
34468
34748
  }
34749
+ if (ctx.pidRegistry) {
34750
+ await ctx.pidRegistry.killAll();
34751
+ }
34469
34752
  ctx.emitError?.("uncaughtException");
34470
34753
  await writeFatalLog(ctx.jsonlFilePath, "uncaughtException", error48);
34471
34754
  await updateStatusToCrashed(ctx.statusWriter, ctx.getTotalCost(), ctx.getIterations(), "uncaughtException", ctx.featureDir);
@@ -34484,12 +34767,12 @@ ${error48.stack ?? ""}
34484
34767
  error: error48.message,
34485
34768
  stack: error48.stack
34486
34769
  });
34487
- if (ctx.pidRegistry) {
34488
- await ctx.pidRegistry.killAll();
34489
- }
34490
34770
  if (ctx.onShutdown) {
34491
34771
  await ctx.onShutdown().catch(() => {});
34492
34772
  }
34773
+ if (ctx.pidRegistry) {
34774
+ await ctx.pidRegistry.killAll();
34775
+ }
34493
34776
  ctx.emitError?.("unhandledRejection");
34494
34777
  await writeFatalLog(ctx.jsonlFilePath, "unhandledRejection", error48);
34495
34778
  await updateStatusToCrashed(ctx.statusWriter, ctx.getTotalCost(), ctx.getIterations(), "unhandledRejection", ctx.featureDir);
@@ -38446,7 +38729,7 @@ async function setupRun(options) {
38446
38729
  },
38447
38730
  onShutdown: async () => {
38448
38731
  const { sweepFeatureSessions: sweepFeatureSessions2 } = await Promise.resolve().then(() => (init_adapter(), exports_adapter));
38449
- await sweepFeatureSessions2(workdir, feature).catch(() => {});
38732
+ await sweepFeatureSessions2(workdir, feature, pidRegistry).catch(() => {});
38450
38733
  }
38451
38734
  });
38452
38735
  let prd = await loadPRD(prdPath);
@@ -38469,7 +38752,7 @@ async function setupRun(options) {
38469
38752
  logger?.warn("precheck", "Precheck validations skipped (--skip-precheck)");
38470
38753
  }
38471
38754
  const { sweepStaleFeatureSessions: sweepStaleFeatureSessions2 } = await Promise.resolve().then(() => (init_adapter(), exports_adapter));
38472
- await sweepStaleFeatureSessions2(workdir, feature).catch(() => {});
38755
+ await sweepStaleFeatureSessions2(workdir, feature, undefined, pidRegistry).catch(() => {});
38473
38756
  const lockAcquired = await acquireLock(workdir);
38474
38757
  if (!lockAcquired) {
38475
38758
  logger?.error("execution", "Another nax process is already running in this directory");
@@ -69988,6 +70271,7 @@ async function generateAcceptanceTestsForFeature(specContent, featureName, featu
69988
70271
  }
69989
70272
  // src/cli/plan.ts
69990
70273
  init_registry();
70274
+ init_decompose();
69991
70275
  import { existsSync as existsSync15 } from "fs";
69992
70276
  import { join as join13 } from "path";
69993
70277
  import { createInterface as createInterface2 } from "readline";
@@ -70534,20 +70818,52 @@ init_bridge_builder();
70534
70818
  init_init();
70535
70819
  init_logger2();
70536
70820
 
70821
+ // src/prd/decompose-mapper.ts
70822
+ init_errors();
70823
+ function mapDecomposedStoriesToUserStories(stories, parentStoryId) {
70824
+ return stories.map((story, entryIndex) => {
70825
+ if (!story.id) {
70826
+ throw new NaxError(`Entry at index ${entryIndex} is missing required field: id`, "DECOMPOSE_VALIDATION_FAILED", {
70827
+ stage: "decompose-mapper",
70828
+ entryIndex,
70829
+ parentStoryId
70830
+ });
70831
+ }
70832
+ if (!story.contextFiles || story.contextFiles.length === 0) {
70833
+ throw new NaxError(`Entry ${entryIndex} (${story.id}) has empty contextFiles`, "DECOMPOSE_VALIDATION_FAILED", {
70834
+ stage: "decompose-mapper",
70835
+ entryIndex,
70836
+ storyId: story.id,
70837
+ parentStoryId
70838
+ });
70839
+ }
70840
+ return {
70841
+ id: story.id,
70842
+ title: story.title,
70843
+ description: story.description,
70844
+ acceptanceCriteria: story.acceptanceCriteria,
70845
+ tags: story.tags,
70846
+ dependencies: story.dependencies,
70847
+ contextFiles: story.contextFiles,
70848
+ status: "pending",
70849
+ passes: false,
70850
+ escalations: [],
70851
+ attempts: 0,
70852
+ parentStoryId,
70853
+ routing: {
70854
+ complexity: story.complexity,
70855
+ testStrategy: story.testStrategy ?? "test-after",
70856
+ reasoning: story.reasoning,
70857
+ modelTier: "balanced"
70858
+ }
70859
+ };
70860
+ });
70861
+ }
70862
+
70537
70863
  // src/prd/schema.ts
70538
70864
  init_test_strategy();
70539
70865
  var VALID_COMPLEXITY = ["simple", "medium", "complex", "expert"];
70540
70866
  var STORY_ID_NO_SEPARATOR = /^([A-Za-z]+)(\d+)$/;
70541
- function extractJsonFromMarkdown(text) {
70542
- const match = text.match(/```(?:json)?\s*\n([\s\S]*?)\n?\s*```/);
70543
- if (match) {
70544
- return match[1] ?? text;
70545
- }
70546
- return text;
70547
- }
70548
- function stripTrailingCommas(text) {
70549
- return text.replace(/,\s*([}\]])/g, "$1");
70550
- }
70551
70867
  function normalizeStoryId(id) {
70552
70868
  const match = id.match(STORY_ID_NO_SEPARATOR);
70553
70869
  if (match) {
@@ -70825,26 +71141,45 @@ async function planCommand(workdir, config2, options) {
70825
71141
  timeoutSeconds
70826
71142
  });
70827
71143
  const pidRegistry = new PidRegistry(workdir);
71144
+ let planError = null;
70828
71145
  try {
70829
- await adapter.plan({
70830
- prompt,
70831
- workdir,
70832
- interactive: false,
70833
- timeoutSeconds,
70834
- config: config2,
70835
- modelTier: config2?.plan?.model ?? "balanced",
70836
- dangerouslySkipPermissions: resolvePermissions(config2, "plan").skipPermissions,
70837
- maxInteractionTurns: config2?.agent?.maxInteractionTurns,
70838
- featureName: options.feature,
70839
- pidRegistry,
70840
- sessionRole: "plan"
70841
- });
71146
+ try {
71147
+ await adapter.plan({
71148
+ prompt,
71149
+ workdir,
71150
+ interactive: false,
71151
+ timeoutSeconds,
71152
+ config: config2,
71153
+ modelTier: config2?.plan?.model ?? "balanced",
71154
+ dangerouslySkipPermissions: resolvePermissions(config2, "plan").skipPermissions,
71155
+ maxInteractionTurns: config2?.agent?.maxInteractionTurns,
71156
+ featureName: options.feature,
71157
+ pidRegistry,
71158
+ sessionRole: "plan"
71159
+ });
71160
+ } catch (err) {
71161
+ planError = err instanceof Error ? err : new Error(String(err));
71162
+ logger?.warn("plan", "ACP auto planning did not complete cleanly; checking for written PRD", {
71163
+ error: planError.message,
71164
+ outputPath
71165
+ });
71166
+ }
70842
71167
  } finally {
70843
71168
  await pidRegistry.killAll().catch(() => {});
70844
71169
  }
70845
71170
  if (!_planDeps.existsSync(outputPath)) {
71171
+ if (planError) {
71172
+ throw new Error(`[plan] ACP planning failed and no PRD was written: ${planError.message}`, {
71173
+ cause: planError
71174
+ });
71175
+ }
70846
71176
  throw new Error(`[plan] ACP agent did not write PRD to ${outputPath}. Check agent logs for errors.`);
70847
71177
  }
71178
+ if (planError) {
71179
+ logger?.warn("plan", "Proceeding with PRD written by ACP despite incomplete terminal response", {
71180
+ outputPath
71181
+ });
71182
+ }
70848
71183
  rawResponse = await _planDeps.readFile(outputPath);
70849
71184
  } else {
70850
71185
  const timeoutMs = (config2?.plan?.timeoutSeconds ?? DEFAULT_TIMEOUT_SECONDS2) * 1000;
@@ -70893,20 +71228,30 @@ async function planCommand(workdir, config2, options) {
70893
71228
  timeoutSeconds
70894
71229
  });
70895
71230
  const planStartTime = Date.now();
71231
+ let planError = null;
70896
71232
  try {
70897
- await adapter.plan({
70898
- prompt,
70899
- workdir,
70900
- interactive: true,
70901
- timeoutSeconds,
70902
- interactionBridge,
70903
- config: config2,
70904
- modelTier: resolvedModel,
70905
- dangerouslySkipPermissions: resolvedPerm.skipPermissions,
70906
- maxInteractionTurns: config2?.agent?.maxInteractionTurns,
70907
- featureName: options.feature,
70908
- pidRegistry
70909
- });
71233
+ try {
71234
+ await adapter.plan({
71235
+ prompt,
71236
+ workdir,
71237
+ interactive: true,
71238
+ timeoutSeconds,
71239
+ interactionBridge,
71240
+ config: config2,
71241
+ modelTier: resolvedModel,
71242
+ dangerouslySkipPermissions: resolvedPerm.skipPermissions,
71243
+ maxInteractionTurns: config2?.agent?.maxInteractionTurns,
71244
+ featureName: options.feature,
71245
+ pidRegistry,
71246
+ sessionRole: "plan"
71247
+ });
71248
+ } catch (err) {
71249
+ planError = err instanceof Error ? err : new Error(String(err));
71250
+ logger?.warn("plan", "Interactive planning did not complete cleanly; checking for written PRD", {
71251
+ error: planError.message,
71252
+ outputPath
71253
+ });
71254
+ }
70910
71255
  } finally {
70911
71256
  await pidRegistry.killAll().catch(() => {});
70912
71257
  if (interactionChain)
@@ -70914,8 +71259,16 @@ async function planCommand(workdir, config2, options) {
70914
71259
  logger?.info("plan", "Interactive session ended", { durationMs: Date.now() - planStartTime });
70915
71260
  }
70916
71261
  if (!_planDeps.existsSync(outputPath)) {
71262
+ if (planError) {
71263
+ throw new Error(`[plan] Planning failed and no PRD was written: ${planError.message}`, { cause: planError });
71264
+ }
70917
71265
  throw new Error(`[plan] Agent did not write PRD to ${outputPath}. Check agent logs for errors.`);
70918
71266
  }
71267
+ if (planError) {
71268
+ logger?.warn("plan", "Proceeding with PRD written by agent despite incomplete terminal response", {
71269
+ outputPath
71270
+ });
71271
+ }
70919
71272
  return _planDeps.readFile(outputPath);
70920
71273
  }
70921
71274
  const finalPrd = validatePlanOutput(rawResponse, options.feature, branchName);
@@ -71142,58 +71495,6 @@ Generate a JSON object with this exact structure (no markdown, no explanation \u
71142
71495
  ${outputFilePath ? `Write the PRD JSON directly to this file path: ${outputFilePath}
71143
71496
  Do NOT output the JSON to the conversation. Write the file, then reply with a brief confirmation.` : "Output ONLY the JSON object. Do not wrap in markdown code blocks."}`;
71144
71497
  }
71145
- function buildDecomposePrompt2(targetStory, siblings, codebaseContext) {
71146
- const siblingsSummary = siblings.length > 0 ? `
71147
- ## Sibling Stories
71148
-
71149
- ${siblings.map((s) => `- ${s.id}: ${s.title}`).join(`
71150
- `)}
71151
- ` : "";
71152
- return `You are a senior software architect decomposing a complex user story into smaller, implementable sub-stories.
71153
-
71154
- ## Target Story
71155
-
71156
- ${JSON.stringify(targetStory, null, 2)}${siblingsSummary}
71157
- ## Codebase Context
71158
-
71159
- ${codebaseContext}
71160
-
71161
- ${COMPLEXITY_GUIDE}
71162
-
71163
- ${TEST_STRATEGY_GUIDE}
71164
-
71165
- ${GROUPING_RULES}
71166
-
71167
- ${getAcQualityRules()}
71168
-
71169
- ## Output
71170
-
71171
- Return JSON with this exact structure (no markdown, no explanation \u2014 JSON only):
71172
-
71173
- {
71174
- "subStories": [
71175
- {
71176
- "id": "string \u2014 e.g. ${targetStory.id}-A",
71177
- "title": "string",
71178
- "description": "string",
71179
- "acceptanceCriteria": ["string \u2014 behavioral, testable criteria"],
71180
- "contextFiles": ["string \u2014 required, non-empty list of key source files"],
71181
- "tags": ["string"],
71182
- "dependencies": ["string"],
71183
- "status": "pending",
71184
- "passes": false,
71185
- "routing": {
71186
- "complexity": "simple | medium | complex | expert",
71187
- "testStrategy": "no-test | tdd-simple | three-session-tdd-lite | three-session-tdd | test-after",
71188
- "modelTier": "fast | balanced | powerful",
71189
- "reasoning": "string"
71190
- },
71191
- "escalations": [],
71192
- "attempts": 0
71193
- }
71194
- ]
71195
- }`;
71196
- }
71197
71498
  async function planDecomposeCommand(workdir, config2, options) {
71198
71499
  const prdPath = join13(workdir, ".nax", "features", options.feature, "prd.json");
71199
71500
  if (!_planDeps.existsSync(prdPath)) {
@@ -71220,95 +71521,83 @@ async function planDecomposeCommand(workdir, config2, options) {
71220
71521
  const scan = await _planDeps.scanCodebase(workdir);
71221
71522
  const codebaseContext = buildCodebaseContext2(scan);
71222
71523
  const siblings = prd.userStories.filter((s) => s.id !== options.storyId);
71223
- const prompt = buildDecomposePrompt2(targetStory, siblings, codebaseContext);
71224
71524
  const agentName = config2?.autoMode?.defaultAgent ?? "claude";
71225
71525
  const adapter = _planDeps.getAgent(agentName, config2);
71226
71526
  if (!adapter)
71227
71527
  throw new Error(`[decompose] No agent adapter found for '${agentName}'`);
71228
- let decomposeModel;
71229
- try {
71230
- const planTier = config2?.plan?.model ?? "balanced";
71231
- const { resolveModelForAgent: resolveModelForAgent2 } = await Promise.resolve().then(() => (init_schema(), exports_schema));
71232
- if (config2?.models) {
71233
- const defaultAgent = config2.autoMode?.defaultAgent ?? "claude";
71234
- decomposeModel = resolveModelForAgent2(config2.models, defaultAgent, planTier, defaultAgent).model;
71235
- }
71236
- } catch {}
71237
- const stages = config2?.debate?.stages;
71238
- const debateEnabled = config2?.debate?.enabled && stages?.decompose?.enabled;
71239
- let rawResponse;
71240
71528
  const timeoutSeconds = config2?.plan?.timeoutSeconds ?? DEFAULT_TIMEOUT_SECONDS2;
71241
- const timeoutMs = timeoutSeconds * 1000;
71242
- if (debateEnabled) {
71243
- const stageConfig = stages?.decompose;
71244
- const debateSession = _planDeps.createDebateSession({
71245
- storyId: options.storyId,
71246
- stage: "decompose",
71247
- stageConfig,
71248
- config: config2,
71249
- workdir,
71250
- featureName: options.feature,
71251
- timeoutSeconds
71252
- });
71253
- const debateResult = await debateSession.run(prompt);
71254
- if (debateResult.outcome !== "failed" && debateResult.output) {
71255
- rawResponse = debateResult.output;
71256
- } else {
71257
- const completeResult = await adapter.complete(prompt, {
71258
- model: decomposeModel,
71259
- jsonMode: true,
71529
+ const maxAcCount = config2?.precheck?.storySizeGate?.maxAcCount ?? Number.POSITIVE_INFINITY;
71530
+ const maxReplanAttempts = config2?.precheck?.storySizeGate?.maxReplanAttempts ?? 3;
71531
+ if (typeof adapter.decompose !== "function") {
71532
+ throw new NaxError(`Agent "${agentName}" does not support decompose() required by plan --decompose`, "DECOMPOSE_NOT_SUPPORTED", { stage: "decompose", agent: agentName, storyId: options.storyId });
71533
+ }
71534
+ const debateStages = config2?.debate?.stages;
71535
+ const debateDecompEnabled = config2?.debate?.enabled && debateStages?.decompose?.enabled;
71536
+ let decompStories;
71537
+ let repairHint = "";
71538
+ for (let attempt = 0;attempt < maxReplanAttempts; attempt++) {
71539
+ if (attempt === 0 && debateDecompEnabled) {
71540
+ const decomposeStageConfig = debateStages.decompose;
71541
+ const prompt = buildDecomposePrompt({
71542
+ specContent: "",
71543
+ codebaseContext,
71260
71544
  workdir,
71261
- sessionRole: "decompose",
71545
+ targetStory,
71546
+ siblings,
71262
71547
  featureName: options.feature,
71263
71548
  storyId: options.storyId,
71264
- timeoutMs
71549
+ config: config2
71265
71550
  });
71266
- rawResponse = typeof completeResult === "string" ? completeResult : completeResult.output;
71267
- }
71268
- } else {
71269
- const completeResult = await adapter.complete(prompt, {
71270
- model: decomposeModel,
71271
- jsonMode: true,
71272
- workdir,
71273
- sessionRole: "decompose",
71274
- featureName: options.feature,
71275
- storyId: options.storyId,
71276
- timeoutMs
71277
- });
71278
- rawResponse = typeof completeResult === "string" ? completeResult : completeResult.output;
71279
- }
71280
- const jsonMatch = rawResponse.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
71281
- const cleanedResponse = jsonMatch ? jsonMatch[1] : rawResponse;
71282
- let parsed;
71283
- try {
71284
- parsed = JSON.parse(cleanedResponse.trim());
71285
- } catch (err) {
71286
- throw new NaxError(`Failed to parse decompose response as JSON: ${err.message}
71287
-
71288
- Response (first 500 chars):
71289
- ${rawResponse.slice(0, 500)}`, "DECOMPOSE_PARSE_FAILED", { stage: "decompose", storyId: options.storyId });
71290
- }
71291
- const subStories = parsed.subStories;
71292
- const maxAcCount = config2?.precheck?.storySizeGate?.maxAcCount ?? Number.POSITIVE_INFINITY;
71293
- for (const sub of subStories) {
71294
- if (!sub.contextFiles || sub.contextFiles.length === 0) {
71295
- throw new NaxError(`Sub-story "${sub.id}" has empty contextFiles`, "DECOMPOSE_VALIDATION_FAILED", {
71551
+ const debateSession = _planDeps.createDebateSession({
71552
+ storyId: options.storyId,
71296
71553
  stage: "decompose",
71297
- storyId: sub.id
71554
+ stageConfig: decomposeStageConfig,
71555
+ config: config2,
71556
+ workdir,
71557
+ featureName: options.feature,
71558
+ timeoutSeconds
71298
71559
  });
71560
+ const debateResult = await debateSession.run(prompt);
71561
+ if (debateResult.outcome !== "failed" && debateResult.output) {
71562
+ decompStories = parseDecomposeOutput(debateResult.output);
71563
+ }
71299
71564
  }
71300
- if (!sub.routing?.complexity || !sub.routing?.testStrategy || !sub.routing?.modelTier) {
71301
- throw new NaxError(`Sub-story "${sub.id}" is missing required routing fields`, "DECOMPOSE_VALIDATION_FAILED", {
71302
- stage: "decompose",
71303
- storyId: sub.id
71565
+ if (!decompStories) {
71566
+ const effectiveContext = repairHint ? `${codebaseContext}
71567
+
71568
+ ${repairHint}` : codebaseContext;
71569
+ const result = await adapter.decompose({
71570
+ specContent: "",
71571
+ codebaseContext: effectiveContext,
71572
+ workdir,
71573
+ targetStory,
71574
+ siblings,
71575
+ featureName: options.feature,
71576
+ storyId: options.storyId,
71577
+ config: config2
71304
71578
  });
71579
+ decompStories = result.stories;
71580
+ }
71581
+ for (const sub of decompStories) {
71582
+ if (!sub.complexity || !sub.testStrategy) {
71583
+ throw new NaxError(`Sub-story "${sub.id}" is missing required routing fields`, "DECOMPOSE_VALIDATION_FAILED", {
71584
+ stage: "decompose",
71585
+ storyId: sub.id
71586
+ });
71587
+ }
71305
71588
  }
71306
- if (sub.acceptanceCriteria && sub.acceptanceCriteria.length > maxAcCount) {
71307
- throw new NaxError(`Sub-story "${sub.id}" has ${sub.acceptanceCriteria.length} ACs, exceeds maxAcCount of ${maxAcCount}`, "DECOMPOSE_VALIDATION_FAILED", { stage: "decompose", storyId: sub.id });
71589
+ const violations = decompStories.filter((sub) => sub.acceptanceCriteria && sub.acceptanceCriteria.length > maxAcCount);
71590
+ if (violations.length === 0)
71591
+ break;
71592
+ const violationSummary = violations.map((v) => `"${v.id}" (${v.acceptanceCriteria.length} ACs, max ${maxAcCount})`).join(", ");
71593
+ if (attempt + 1 >= maxReplanAttempts) {
71594
+ throw new NaxError(`Decompose AC repair failed after ${maxReplanAttempts} attempts. Oversized sub-stories: ${violationSummary}`, "DECOMPOSE_VALIDATION_FAILED", { stage: "decompose", storyId: options.storyId });
71308
71595
  }
71596
+ repairHint = `REPAIR REQUIRED (attempt ${attempt + 1}/${maxReplanAttempts}): The following sub-stories exceeded maxAcCount of ${maxAcCount}: ${violationSummary}. Split each offending story further so every sub-story has at most ${maxAcCount} acceptance criteria.`;
71597
+ decompStories = undefined;
71309
71598
  }
71599
+ const subStoriesWithParent = mapDecomposedStoriesToUserStories(decompStories, options.storyId);
71310
71600
  const updatedStories = prd.userStories.map((s) => s.id === options.storyId ? { ...s, status: "decomposed" } : s);
71311
- const subStoriesWithParent = subStories.map((s) => ({ ...s, parentStoryId: options.storyId }));
71312
71601
  const originalIndex = updatedStories.findIndex((s) => s.id === options.storyId);
71313
71602
  const finalStories = [
71314
71603
  ...updatedStories.slice(0, originalIndex + 1),
@@ -72987,6 +73276,8 @@ var FIELD_DESCRIPTIONS = {
72987
73276
  "acceptance.generateTests": "Generate acceptance tests during analyze",
72988
73277
  "acceptance.testPath": "Path to acceptance test file (relative to feature dir)",
72989
73278
  "acceptance.command": "Override command to run acceptance tests. Use {{FILE}} as placeholder for the test file path (default: 'bun test {{FILE}} --timeout=60000')",
73279
+ "acceptance.model": "Model tier for acceptance generation/refinement LLM calls (fast | balanced | powerful). Default: fast.",
73280
+ "acceptance.refinement": "Enable acceptance criteria refinement step before execution (default: true). Disable to skip refinement and use generated criteria as-is.",
72990
73281
  "acceptance.timeoutMs": "Timeout for acceptance test generation in milliseconds (default: 1800000 = 30 min)",
72991
73282
  context: "Context injection configuration",
72992
73283
  "context.fileInjection": "Mode: 'disabled' (default, MCP-aware agents pull context on-demand) | 'keyword' (legacy git-grep injection for non-MCP agents). Set context.fileInjection in config.",