substrate-ai 0.1.24 → 0.1.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli/index.js
CHANGED
|
@@ -9,7 +9,7 @@ import { fileURLToPath } from "url";
|
|
|
9
9
|
import { dirname, extname, isAbsolute, join, relative, resolve } from "path";
|
|
10
10
|
import { access, mkdir, readFile, readdir, stat, writeFile } from "fs/promises";
|
|
11
11
|
import { execFile } from "child_process";
|
|
12
|
-
import { cpSync, existsSync, mkdirSync, readFileSync, readdirSync, realpathSync, renameSync, statSync, unlinkSync, writeFileSync } from "fs";
|
|
12
|
+
import { chmodSync, cpSync, existsSync, mkdirSync, readFileSync, readdirSync, realpathSync, renameSync, statSync, unlinkSync, writeFileSync } from "fs";
|
|
13
13
|
import yaml, { dump, load } from "js-yaml";
|
|
14
14
|
import { z } from "zod";
|
|
15
15
|
import { fileURLToPath as fileURLToPath$1 } from "node:url";
|
|
@@ -389,7 +389,7 @@ function listTemplates() {
|
|
|
389
389
|
|
|
390
390
|
//#endregion
|
|
391
391
|
//#region src/cli/commands/init.ts
|
|
392
|
-
const logger$
|
|
392
|
+
const logger$35 = createLogger("init");
|
|
393
393
|
/**
|
|
394
394
|
* Detect whether the CLI was invoked via `npx substrate`.
|
|
395
395
|
* When true, prefix suggested commands with `npx `.
|
|
@@ -573,7 +573,7 @@ async function runInit(options = {}) {
|
|
|
573
573
|
discoveryReport = await registry.discoverAndRegister();
|
|
574
574
|
} catch (err) {
|
|
575
575
|
const message = err instanceof Error ? err.message : String(err);
|
|
576
|
-
logger$
|
|
576
|
+
logger$35.error({ err }, "Adapter discovery failed");
|
|
577
577
|
process.stderr.write(` Error: adapter discovery failed — ${message}\n`);
|
|
578
578
|
return INIT_EXIT_ERROR;
|
|
579
579
|
}
|
|
@@ -611,7 +611,7 @@ async function runInit(options = {}) {
|
|
|
611
611
|
await writeFile(routingPolicyPath, routingHeader + yaml.dump(routingPolicy), "utf-8");
|
|
612
612
|
} catch (err) {
|
|
613
613
|
const message = err instanceof Error ? err.message : String(err);
|
|
614
|
-
logger$
|
|
614
|
+
logger$35.error({ err }, "Failed to write config files");
|
|
615
615
|
process.stderr.write(` Error: failed to write configuration — ${message}\n`);
|
|
616
616
|
return INIT_EXIT_ERROR;
|
|
617
617
|
}
|
|
@@ -686,7 +686,7 @@ function formatUnsupportedVersionError(formatType, version, supported) {
|
|
|
686
686
|
|
|
687
687
|
//#endregion
|
|
688
688
|
//#region src/modules/config/config-system-impl.ts
|
|
689
|
-
const logger$
|
|
689
|
+
const logger$34 = createLogger("config");
|
|
690
690
|
function deepMerge(base, override) {
|
|
691
691
|
const result = { ...base };
|
|
692
692
|
for (const [key, val] of Object.entries(override)) if (val !== null && val !== void 0 && typeof val === "object" && !Array.isArray(val) && typeof result[key] === "object" && result[key] !== null && !Array.isArray(result[key])) result[key] = deepMerge(result[key], val);
|
|
@@ -731,7 +731,7 @@ function readEnvOverrides() {
|
|
|
731
731
|
}
|
|
732
732
|
const parsed = PartialSubstrateConfigSchema.safeParse(overrides);
|
|
733
733
|
if (!parsed.success) {
|
|
734
|
-
logger$
|
|
734
|
+
logger$34.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
|
|
735
735
|
return {};
|
|
736
736
|
}
|
|
737
737
|
return parsed.data;
|
|
@@ -795,7 +795,7 @@ var ConfigSystemImpl = class {
|
|
|
795
795
|
throw new ConfigError(`Configuration validation failed:\n${issues}`, { issues: result.error.issues });
|
|
796
796
|
}
|
|
797
797
|
this._config = result.data;
|
|
798
|
-
logger$
|
|
798
|
+
logger$34.debug("Configuration loaded successfully");
|
|
799
799
|
}
|
|
800
800
|
getConfig() {
|
|
801
801
|
if (this._config === null) throw new ConfigError("Configuration has not been loaded. Call load() before getConfig().", {});
|
|
@@ -858,7 +858,7 @@ var ConfigSystemImpl = class {
|
|
|
858
858
|
if (version !== void 0 && typeof version === "string" && !isVersionSupported(version, SUPPORTED_CONFIG_FORMAT_VERSIONS)) if (defaultConfigMigrator.canMigrate(version, CURRENT_CONFIG_FORMAT_VERSION)) {
|
|
859
859
|
const migrationOutput = defaultConfigMigrator.migrate(rawObj, version, CURRENT_CONFIG_FORMAT_VERSION, filePath);
|
|
860
860
|
if (migrationOutput.result.success) {
|
|
861
|
-
logger$
|
|
861
|
+
logger$34.info({
|
|
862
862
|
from: version,
|
|
863
863
|
to: CURRENT_CONFIG_FORMAT_VERSION,
|
|
864
864
|
backup: migrationOutput.result.backupPath
|
|
@@ -901,7 +901,7 @@ function createConfigSystem(options = {}) {
|
|
|
901
901
|
|
|
902
902
|
//#endregion
|
|
903
903
|
//#region src/cli/commands/config.ts
|
|
904
|
-
const logger$
|
|
904
|
+
const logger$33 = createLogger("config-cmd");
|
|
905
905
|
const CONFIG_EXIT_SUCCESS = 0;
|
|
906
906
|
const CONFIG_EXIT_ERROR = 1;
|
|
907
907
|
const CONFIG_EXIT_INVALID = 2;
|
|
@@ -927,7 +927,7 @@ async function runConfigShow(opts = {}) {
|
|
|
927
927
|
return CONFIG_EXIT_INVALID;
|
|
928
928
|
}
|
|
929
929
|
const message = err instanceof Error ? err.message : String(err);
|
|
930
|
-
logger$
|
|
930
|
+
logger$33.error({ err }, "Failed to load configuration");
|
|
931
931
|
process.stderr.write(` Error loading configuration: ${message}\n`);
|
|
932
932
|
return CONFIG_EXIT_ERROR;
|
|
933
933
|
}
|
|
@@ -1001,7 +1001,7 @@ async function runConfigExport(opts = {}) {
|
|
|
1001
1001
|
return CONFIG_EXIT_INVALID;
|
|
1002
1002
|
}
|
|
1003
1003
|
const message = err instanceof Error ? err.message : String(err);
|
|
1004
|
-
logger$
|
|
1004
|
+
logger$33.error({ err }, "Failed to load configuration");
|
|
1005
1005
|
process.stderr.write(`Error loading configuration: ${message}\n`);
|
|
1006
1006
|
return CONFIG_EXIT_ERROR;
|
|
1007
1007
|
}
|
|
@@ -1155,7 +1155,7 @@ function registerConfigCommand(program, _version) {
|
|
|
1155
1155
|
|
|
1156
1156
|
//#endregion
|
|
1157
1157
|
//#region src/cli/commands/merge.ts
|
|
1158
|
-
const logger$
|
|
1158
|
+
const logger$32 = createLogger("merge-cmd");
|
|
1159
1159
|
const MERGE_EXIT_SUCCESS = 0;
|
|
1160
1160
|
const MERGE_EXIT_CONFLICT = 1;
|
|
1161
1161
|
const MERGE_EXIT_ERROR = 2;
|
|
@@ -1193,7 +1193,7 @@ async function mergeTask(taskId, targetBranch, projectRoot) {
|
|
|
1193
1193
|
projectRoot
|
|
1194
1194
|
});
|
|
1195
1195
|
try {
|
|
1196
|
-
logger$
|
|
1196
|
+
logger$32.info({
|
|
1197
1197
|
taskId,
|
|
1198
1198
|
targetBranch
|
|
1199
1199
|
}, "Running conflict detection...");
|
|
@@ -1215,7 +1215,7 @@ async function mergeTask(taskId, targetBranch, projectRoot) {
|
|
|
1215
1215
|
} catch (err) {
|
|
1216
1216
|
const message = err instanceof Error ? err.message : String(err);
|
|
1217
1217
|
console.error(`Error merging task "${taskId}": ${message}`);
|
|
1218
|
-
logger$
|
|
1218
|
+
logger$32.error({
|
|
1219
1219
|
taskId,
|
|
1220
1220
|
err
|
|
1221
1221
|
}, "merge --task failed");
|
|
@@ -1269,7 +1269,7 @@ async function mergeAll(targetBranch, projectRoot, taskIds) {
|
|
|
1269
1269
|
error: message
|
|
1270
1270
|
});
|
|
1271
1271
|
console.log(` Error for task "${taskId}": ${message}`);
|
|
1272
|
-
logger$
|
|
1272
|
+
logger$32.error({
|
|
1273
1273
|
taskId,
|
|
1274
1274
|
err
|
|
1275
1275
|
}, "merge --all: task failed");
|
|
@@ -1322,7 +1322,7 @@ function registerMergeCommand(program, projectRoot = process.cwd()) {
|
|
|
1322
1322
|
|
|
1323
1323
|
//#endregion
|
|
1324
1324
|
//#region src/cli/commands/worktrees.ts
|
|
1325
|
-
const logger$
|
|
1325
|
+
const logger$31 = createLogger("worktrees-cmd");
|
|
1326
1326
|
const WORKTREES_EXIT_SUCCESS = 0;
|
|
1327
1327
|
const WORKTREES_EXIT_ERROR = 1;
|
|
1328
1328
|
/** Valid task statuses for filtering */
|
|
@@ -1449,7 +1449,7 @@ async function listWorktreesAction(options) {
|
|
|
1449
1449
|
try {
|
|
1450
1450
|
worktreeInfos = await manager.listWorktrees();
|
|
1451
1451
|
} catch (err) {
|
|
1452
|
-
logger$
|
|
1452
|
+
logger$31.error({ err }, "Failed to list worktrees");
|
|
1453
1453
|
const message = err instanceof Error ? err.message : String(err);
|
|
1454
1454
|
process.stderr.write(`Error listing worktrees: ${message}\n`);
|
|
1455
1455
|
return WORKTREES_EXIT_ERROR;
|
|
@@ -1476,7 +1476,7 @@ async function listWorktreesAction(options) {
|
|
|
1476
1476
|
} catch (err) {
|
|
1477
1477
|
const message = err instanceof Error ? err.message : String(err);
|
|
1478
1478
|
process.stderr.write(`Error: ${message}\n`);
|
|
1479
|
-
logger$
|
|
1479
|
+
logger$31.error({ err }, "listWorktreesAction failed");
|
|
1480
1480
|
return WORKTREES_EXIT_ERROR;
|
|
1481
1481
|
}
|
|
1482
1482
|
}
|
|
@@ -1738,7 +1738,7 @@ function getPlanningCostTotal(db, sessionId) {
|
|
|
1738
1738
|
|
|
1739
1739
|
//#endregion
|
|
1740
1740
|
//#region src/cli/commands/cost.ts
|
|
1741
|
-
const logger$
|
|
1741
|
+
const logger$30 = createLogger("cost-cmd");
|
|
1742
1742
|
const COST_EXIT_SUCCESS = 0;
|
|
1743
1743
|
const COST_EXIT_ERROR = 1;
|
|
1744
1744
|
/**
|
|
@@ -1984,7 +1984,7 @@ async function runCostAction(options) {
|
|
|
1984
1984
|
} catch (err) {
|
|
1985
1985
|
const message = err instanceof Error ? err.message : String(err);
|
|
1986
1986
|
process.stderr.write(`Error: ${message}\n`);
|
|
1987
|
-
logger$
|
|
1987
|
+
logger$30.error({ err }, "runCostAction failed");
|
|
1988
1988
|
return COST_EXIT_ERROR;
|
|
1989
1989
|
} finally {
|
|
1990
1990
|
if (wrapper !== null) try {
|
|
@@ -2046,7 +2046,7 @@ function emitStatusSnapshot(snapshot) {
|
|
|
2046
2046
|
|
|
2047
2047
|
//#endregion
|
|
2048
2048
|
//#region src/recovery/crash-recovery.ts
|
|
2049
|
-
const logger$
|
|
2049
|
+
const logger$29 = createLogger("crash-recovery");
|
|
2050
2050
|
var CrashRecoveryManager = class {
|
|
2051
2051
|
db;
|
|
2052
2052
|
gitWorktreeManager;
|
|
@@ -2099,7 +2099,7 @@ var CrashRecoveryManager = class {
|
|
|
2099
2099
|
});
|
|
2100
2100
|
}
|
|
2101
2101
|
if (this.gitWorktreeManager !== void 0) this.cleanupOrphanedWorktrees().catch((err) => {
|
|
2102
|
-
logger$
|
|
2102
|
+
logger$29.warn({ err }, "Worktree cleanup failed during recovery (non-fatal)");
|
|
2103
2103
|
});
|
|
2104
2104
|
let newlyReady = 0;
|
|
2105
2105
|
if (sessionId !== void 0) {
|
|
@@ -2109,7 +2109,7 @@ var CrashRecoveryManager = class {
|
|
|
2109
2109
|
const row = db.prepare("SELECT COUNT(*) as count FROM ready_tasks").get();
|
|
2110
2110
|
newlyReady = row.count;
|
|
2111
2111
|
}
|
|
2112
|
-
logger$
|
|
2112
|
+
logger$29.info({
|
|
2113
2113
|
event: "recovery:complete",
|
|
2114
2114
|
recovered,
|
|
2115
2115
|
failed,
|
|
@@ -2131,10 +2131,10 @@ var CrashRecoveryManager = class {
|
|
|
2131
2131
|
if (this.gitWorktreeManager === void 0) return 0;
|
|
2132
2132
|
try {
|
|
2133
2133
|
const count = await this.gitWorktreeManager.cleanupAllWorktrees();
|
|
2134
|
-
logger$
|
|
2134
|
+
logger$29.info({ count }, "Cleaned up orphaned worktrees");
|
|
2135
2135
|
return count;
|
|
2136
2136
|
} catch (err) {
|
|
2137
|
-
logger$
|
|
2137
|
+
logger$29.warn({ err }, "Failed to clean up orphaned worktrees — continuing");
|
|
2138
2138
|
return 0;
|
|
2139
2139
|
}
|
|
2140
2140
|
}
|
|
@@ -2217,7 +2217,7 @@ function setupGracefulShutdown(options) {
|
|
|
2217
2217
|
|
|
2218
2218
|
//#endregion
|
|
2219
2219
|
//#region src/cli/commands/start.ts
|
|
2220
|
-
const logger$
|
|
2220
|
+
const logger$28 = createLogger("start-cmd");
|
|
2221
2221
|
const START_EXIT_SUCCESS = 0;
|
|
2222
2222
|
const START_EXIT_ERROR = 1;
|
|
2223
2223
|
const START_EXIT_USAGE_ERROR = 2;
|
|
@@ -2326,7 +2326,7 @@ async function runStartAction(options) {
|
|
|
2326
2326
|
let configWatcher$1 = null;
|
|
2327
2327
|
const configFilePath = join(projectRoot, "substrate.config.yaml");
|
|
2328
2328
|
if (noWatchConfig) {
|
|
2329
|
-
logger$
|
|
2329
|
+
logger$28.info("Config hot-reload disabled (--no-watch-config).");
|
|
2330
2330
|
process.stdout.write("Config hot-reload disabled (--no-watch-config).\n");
|
|
2331
2331
|
} else {
|
|
2332
2332
|
let currentHotConfig = config;
|
|
@@ -2341,7 +2341,7 @@ async function runStartAction(options) {
|
|
|
2341
2341
|
const changedKeys = computeChangedKeys(previousConfig, newConfig);
|
|
2342
2342
|
currentHotConfig = newConfig;
|
|
2343
2343
|
const n = changedKeys.length;
|
|
2344
|
-
logger$
|
|
2344
|
+
logger$28.info({
|
|
2345
2345
|
changedKeys,
|
|
2346
2346
|
configPath: configFilePath
|
|
2347
2347
|
}, `Config reloaded: ${n} setting(s) changed`);
|
|
@@ -2353,7 +2353,7 @@ async function runStartAction(options) {
|
|
|
2353
2353
|
});
|
|
2354
2354
|
},
|
|
2355
2355
|
onError: (err) => {
|
|
2356
|
-
logger$
|
|
2356
|
+
logger$28.error({
|
|
2357
2357
|
err,
|
|
2358
2358
|
configPath: configFilePath
|
|
2359
2359
|
}, `Config reload failed: ${err.message}. Continuing with previous config.`);
|
|
@@ -2366,7 +2366,7 @@ async function runStartAction(options) {
|
|
|
2366
2366
|
let cleanupShutdown = null;
|
|
2367
2367
|
if (resolvedGraphFile === null) if (interruptedSession !== void 0) {
|
|
2368
2368
|
process.stdout.write(`Resuming interrupted session ${interruptedSession.id}\n`);
|
|
2369
|
-
logger$
|
|
2369
|
+
logger$28.info({ sessionId: interruptedSession.id }, "session:resumed");
|
|
2370
2370
|
const recovery = new CrashRecoveryManager({
|
|
2371
2371
|
db: databaseService.db,
|
|
2372
2372
|
gitWorktreeManager
|
|
@@ -2490,7 +2490,7 @@ async function runStartAction(options) {
|
|
|
2490
2490
|
} catch (err) {
|
|
2491
2491
|
const message = err instanceof Error ? err.message : String(err);
|
|
2492
2492
|
process.stderr.write(`Error: ${message}\n`);
|
|
2493
|
-
logger$
|
|
2493
|
+
logger$28.error({ err }, "runStartAction failed");
|
|
2494
2494
|
return START_EXIT_ERROR;
|
|
2495
2495
|
} finally {
|
|
2496
2496
|
try {
|
|
@@ -2648,7 +2648,7 @@ function renderTaskGraph(snapshot, tasks) {
|
|
|
2648
2648
|
|
|
2649
2649
|
//#endregion
|
|
2650
2650
|
//#region src/cli/commands/status.ts
|
|
2651
|
-
const logger$
|
|
2651
|
+
const logger$27 = createLogger("status-cmd");
|
|
2652
2652
|
const STATUS_EXIT_SUCCESS = 0;
|
|
2653
2653
|
const STATUS_EXIT_ERROR = 1;
|
|
2654
2654
|
const STATUS_EXIT_NOT_FOUND = 2;
|
|
@@ -2801,7 +2801,7 @@ async function runStatusAction(options) {
|
|
|
2801
2801
|
} catch (err) {
|
|
2802
2802
|
const message = err instanceof Error ? err.message : String(err);
|
|
2803
2803
|
process.stderr.write(`Error: ${message}\n`);
|
|
2804
|
-
logger$
|
|
2804
|
+
logger$27.error({ err }, "runStatusAction failed");
|
|
2805
2805
|
return STATUS_EXIT_ERROR;
|
|
2806
2806
|
} finally {
|
|
2807
2807
|
if (wrapper !== null) try {
|
|
@@ -2834,7 +2834,7 @@ function registerStatusCommand(program, _version = "0.0.0", projectRoot = proces
|
|
|
2834
2834
|
|
|
2835
2835
|
//#endregion
|
|
2836
2836
|
//#region src/cli/commands/pause.ts
|
|
2837
|
-
const logger$
|
|
2837
|
+
const logger$26 = createLogger("pause-cmd");
|
|
2838
2838
|
const PAUSE_EXIT_SUCCESS = 0;
|
|
2839
2839
|
const PAUSE_EXIT_ERROR = 1;
|
|
2840
2840
|
const PAUSE_EXIT_USAGE_ERROR = 2;
|
|
@@ -2903,7 +2903,7 @@ async function runPauseAction(options) {
|
|
|
2903
2903
|
} catch (err) {
|
|
2904
2904
|
const message = err instanceof Error ? err.message : String(err);
|
|
2905
2905
|
process.stderr.write(`Error: ${message}\n`);
|
|
2906
|
-
logger$
|
|
2906
|
+
logger$26.error({ err }, "runPauseAction failed");
|
|
2907
2907
|
return PAUSE_EXIT_ERROR;
|
|
2908
2908
|
} finally {
|
|
2909
2909
|
if (wrapper !== null) try {
|
|
@@ -2933,7 +2933,7 @@ function registerPauseCommand(program, version = "0.0.0", projectRoot = process.
|
|
|
2933
2933
|
|
|
2934
2934
|
//#endregion
|
|
2935
2935
|
//#region src/cli/commands/resume.ts
|
|
2936
|
-
const logger$
|
|
2936
|
+
const logger$25 = createLogger("resume-cmd");
|
|
2937
2937
|
const RESUME_EXIT_SUCCESS = 0;
|
|
2938
2938
|
const RESUME_EXIT_ERROR = 1;
|
|
2939
2939
|
const RESUME_EXIT_USAGE_ERROR = 2;
|
|
@@ -3018,7 +3018,7 @@ async function runResumeAction(options) {
|
|
|
3018
3018
|
} catch (err) {
|
|
3019
3019
|
const message = err instanceof Error ? err.message : String(err);
|
|
3020
3020
|
process.stderr.write(`Error: ${message}\n`);
|
|
3021
|
-
logger$
|
|
3021
|
+
logger$25.error({ err }, "runResumeAction failed");
|
|
3022
3022
|
return RESUME_EXIT_ERROR;
|
|
3023
3023
|
} finally {
|
|
3024
3024
|
if (wrapper !== null) try {
|
|
@@ -3051,7 +3051,7 @@ function registerResumeCommand(program, version = "0.0.0", projectRoot = process
|
|
|
3051
3051
|
|
|
3052
3052
|
//#endregion
|
|
3053
3053
|
//#region src/cli/commands/cancel.ts
|
|
3054
|
-
const logger$
|
|
3054
|
+
const logger$24 = createLogger("cancel-cmd");
|
|
3055
3055
|
const CANCEL_EXIT_SUCCESS = 0;
|
|
3056
3056
|
const CANCEL_EXIT_ERROR = 1;
|
|
3057
3057
|
const CANCEL_EXIT_USAGE_ERROR = 2;
|
|
@@ -3148,7 +3148,7 @@ async function runCancelAction(options) {
|
|
|
3148
3148
|
} catch (err) {
|
|
3149
3149
|
const message = err instanceof Error ? err.message : String(err);
|
|
3150
3150
|
process.stderr.write(`Error: ${message}\n`);
|
|
3151
|
-
logger$
|
|
3151
|
+
logger$24.error({ err }, "runCancelAction failed");
|
|
3152
3152
|
return CANCEL_EXIT_ERROR;
|
|
3153
3153
|
} finally {
|
|
3154
3154
|
if (wrapper !== null) try {
|
|
@@ -3263,7 +3263,7 @@ function renderFailedTasksJson(tasks) {
|
|
|
3263
3263
|
|
|
3264
3264
|
//#endregion
|
|
3265
3265
|
//#region src/cli/commands/retry.ts
|
|
3266
|
-
const logger$
|
|
3266
|
+
const logger$23 = createLogger("retry-cmd");
|
|
3267
3267
|
const RETRY_EXIT_SUCCESS = 0;
|
|
3268
3268
|
const RETRY_EXIT_PARTIAL_FAILURE = 1;
|
|
3269
3269
|
const RETRY_EXIT_USAGE_ERROR = 2;
|
|
@@ -3368,7 +3368,7 @@ async function runRetryAction(options) {
|
|
|
3368
3368
|
} catch (err) {
|
|
3369
3369
|
const message = err instanceof Error ? err.message : String(err);
|
|
3370
3370
|
process.stderr.write(`Error: ${message}\n`);
|
|
3371
|
-
logger$
|
|
3371
|
+
logger$23.error({ err }, "runRetryAction failed");
|
|
3372
3372
|
return RETRY_EXIT_USAGE_ERROR;
|
|
3373
3373
|
} finally {
|
|
3374
3374
|
if (wrapper !== null) try {
|
|
@@ -3497,11 +3497,11 @@ async function runFollowMode(opts) {
|
|
|
3497
3497
|
});
|
|
3498
3498
|
});
|
|
3499
3499
|
const sigintHandler = () => {
|
|
3500
|
-
logger$
|
|
3500
|
+
logger$23.info("SIGINT received — initiating graceful shutdown");
|
|
3501
3501
|
taskGraphEngine.cancelAll();
|
|
3502
3502
|
};
|
|
3503
3503
|
const sigtermHandler = () => {
|
|
3504
|
-
logger$
|
|
3504
|
+
logger$23.info("SIGTERM received — initiating graceful shutdown");
|
|
3505
3505
|
taskGraphEngine.cancelAll();
|
|
3506
3506
|
};
|
|
3507
3507
|
process.once("SIGINT", sigintHandler);
|
|
@@ -3514,7 +3514,7 @@ async function runFollowMode(opts) {
|
|
|
3514
3514
|
} catch (err) {
|
|
3515
3515
|
const message = err instanceof Error ? err.message : String(err);
|
|
3516
3516
|
process.stderr.write(`Error: ${message}\n`);
|
|
3517
|
-
logger$
|
|
3517
|
+
logger$23.error({ err }, "runFollowMode failed");
|
|
3518
3518
|
return RETRY_EXIT_USAGE_ERROR;
|
|
3519
3519
|
} finally {
|
|
3520
3520
|
try {
|
|
@@ -3974,7 +3974,7 @@ function buildMultiAgentInstructionsSection(agentCount) {
|
|
|
3974
3974
|
|
|
3975
3975
|
//#endregion
|
|
3976
3976
|
//#region src/modules/plan-generator/plan-generator.ts
|
|
3977
|
-
const logger$
|
|
3977
|
+
const logger$22 = createLogger("plan-generator");
|
|
3978
3978
|
/**
|
|
3979
3979
|
* Wrapper around execFile that immediately closes stdin on the child process.
|
|
3980
3980
|
* Some CLI tools (e.g. Claude Code) wait for stdin to close before processing
|
|
@@ -4151,7 +4151,7 @@ var PlanGenerator = class {
|
|
|
4151
4151
|
else {
|
|
4152
4152
|
const slugified = dep.toLowerCase().replace(/[^a-z0-9]+/g, "-").replace(/^-|-$/g, "").slice(0, 64);
|
|
4153
4153
|
if (taskKeys.has(slugified)) resolvedDeps.push(slugified);
|
|
4154
|
-
else logger$
|
|
4154
|
+
else logger$22.warn({
|
|
4155
4155
|
taskKey,
|
|
4156
4156
|
dep
|
|
4157
4157
|
}, `depends_on reference '${dep}' not found in task keys; removing`);
|
|
@@ -4898,7 +4898,7 @@ function getLatestPlanVersion(db, planId) {
|
|
|
4898
4898
|
|
|
4899
4899
|
//#endregion
|
|
4900
4900
|
//#region src/modules/plan-generator/plan-refiner.ts
|
|
4901
|
-
const logger$
|
|
4901
|
+
const logger$21 = createLogger("plan-refiner");
|
|
4902
4902
|
var PlanRefiner = class {
|
|
4903
4903
|
db;
|
|
4904
4904
|
planGenerator;
|
|
@@ -4941,7 +4941,7 @@ var PlanRefiner = class {
|
|
|
4941
4941
|
newFeedback: feedback,
|
|
4942
4942
|
availableAgents: this.availableAgents
|
|
4943
4943
|
});
|
|
4944
|
-
logger$
|
|
4944
|
+
logger$21.info({
|
|
4945
4945
|
planId,
|
|
4946
4946
|
currentVersion,
|
|
4947
4947
|
feedbackRounds: feedbackHistory.length
|
|
@@ -4988,7 +4988,7 @@ var PlanRefiner = class {
|
|
|
4988
4988
|
newVersion,
|
|
4989
4989
|
taskCount
|
|
4990
4990
|
});
|
|
4991
|
-
logger$
|
|
4991
|
+
logger$21.info({
|
|
4992
4992
|
planId,
|
|
4993
4993
|
newVersion,
|
|
4994
4994
|
taskCount
|
|
@@ -5070,7 +5070,7 @@ function normalizeForDiff(value) {
|
|
|
5070
5070
|
|
|
5071
5071
|
//#endregion
|
|
5072
5072
|
//#region src/cli/commands/plan-refine.ts
|
|
5073
|
-
const logger$
|
|
5073
|
+
const logger$20 = createLogger("plan-refine-cmd");
|
|
5074
5074
|
const REFINE_EXIT_SUCCESS = 0;
|
|
5075
5075
|
const REFINE_EXIT_ERROR = 1;
|
|
5076
5076
|
const REFINE_EXIT_USAGE_ERROR = 2;
|
|
@@ -5112,7 +5112,7 @@ async function runPlanRefineAction(options) {
|
|
|
5112
5112
|
let result;
|
|
5113
5113
|
try {
|
|
5114
5114
|
result = await refiner.refine(planId, feedback, (event, payload) => {
|
|
5115
|
-
logger$
|
|
5115
|
+
logger$20.info({
|
|
5116
5116
|
event,
|
|
5117
5117
|
payload
|
|
5118
5118
|
}, "Plan refinement event");
|
|
@@ -5155,7 +5155,7 @@ async function runPlanRefineAction(options) {
|
|
|
5155
5155
|
} catch (err) {
|
|
5156
5156
|
const message = err instanceof Error ? err.message : String(err);
|
|
5157
5157
|
process.stderr.write(`Error: ${message}\n`);
|
|
5158
|
-
logger$
|
|
5158
|
+
logger$20.error({ err }, "runPlanRefineAction failed");
|
|
5159
5159
|
return REFINE_EXIT_ERROR;
|
|
5160
5160
|
} finally {
|
|
5161
5161
|
dbWrapper.close();
|
|
@@ -5180,7 +5180,7 @@ function registerPlanRefineCommand(planCmd, _version = "0.0.0", projectRoot = pr
|
|
|
5180
5180
|
|
|
5181
5181
|
//#endregion
|
|
5182
5182
|
//#region src/cli/commands/plan-diff.ts
|
|
5183
|
-
const logger$
|
|
5183
|
+
const logger$19 = createLogger("plan-diff-cmd");
|
|
5184
5184
|
const DIFF_EXIT_SUCCESS = 0;
|
|
5185
5185
|
const DIFF_EXIT_ERROR = 1;
|
|
5186
5186
|
const DIFF_EXIT_NOT_FOUND = 2;
|
|
@@ -5223,7 +5223,7 @@ async function runPlanDiffAction(options) {
|
|
|
5223
5223
|
} catch (err) {
|
|
5224
5224
|
const message = err instanceof Error ? err.message : String(err);
|
|
5225
5225
|
process.stderr.write(`Error: ${message}\n`);
|
|
5226
|
-
logger$
|
|
5226
|
+
logger$19.error({ err }, "runPlanDiffAction failed");
|
|
5227
5227
|
return DIFF_EXIT_ERROR;
|
|
5228
5228
|
} finally {
|
|
5229
5229
|
dbWrapper.close();
|
|
@@ -5271,7 +5271,7 @@ function registerPlanDiffCommand(planCmd, _version = "0.0.0", projectRoot = proc
|
|
|
5271
5271
|
|
|
5272
5272
|
//#endregion
|
|
5273
5273
|
//#region src/cli/commands/plan-rollback.ts
|
|
5274
|
-
const logger$
|
|
5274
|
+
const logger$18 = createLogger("plan-rollback-cmd");
|
|
5275
5275
|
const ROLLBACK_EXIT_SUCCESS = 0;
|
|
5276
5276
|
const ROLLBACK_EXIT_ERROR = 1;
|
|
5277
5277
|
const ROLLBACK_EXIT_USAGE_ERROR = 2;
|
|
@@ -5319,7 +5319,7 @@ async function runPlanRollbackAction(options, onEvent) {
|
|
|
5319
5319
|
toVersion,
|
|
5320
5320
|
newVersion
|
|
5321
5321
|
});
|
|
5322
|
-
logger$
|
|
5322
|
+
logger$18.info({
|
|
5323
5323
|
planId,
|
|
5324
5324
|
fromVersion,
|
|
5325
5325
|
toVersion,
|
|
@@ -5360,7 +5360,7 @@ async function runPlanRollbackAction(options, onEvent) {
|
|
|
5360
5360
|
} catch (err) {
|
|
5361
5361
|
const message = err instanceof Error ? err.message : String(err);
|
|
5362
5362
|
process.stderr.write(`Error: ${message}\n`);
|
|
5363
|
-
logger$
|
|
5363
|
+
logger$18.error({ err }, "runPlanRollbackAction failed");
|
|
5364
5364
|
return ROLLBACK_EXIT_ERROR;
|
|
5365
5365
|
} finally {
|
|
5366
5366
|
dbWrapper.close();
|
|
@@ -5554,7 +5554,7 @@ function validatePlan(raw, adapterRegistry, options) {
|
|
|
5554
5554
|
|
|
5555
5555
|
//#endregion
|
|
5556
5556
|
//#region src/cli/commands/plan.ts
|
|
5557
|
-
const logger$
|
|
5557
|
+
const logger$17 = createLogger("plan-cmd");
|
|
5558
5558
|
const PLAN_EXIT_SUCCESS = 0;
|
|
5559
5559
|
const PLAN_EXIT_ERROR = 1;
|
|
5560
5560
|
const PLAN_EXIT_USAGE_ERROR = 2;
|
|
@@ -5698,7 +5698,7 @@ async function runPlanReviewAction(options) {
|
|
|
5698
5698
|
}
|
|
5699
5699
|
const message = err instanceof Error ? err.message : String(err);
|
|
5700
5700
|
process.stderr.write(`Error: ${message}\n`);
|
|
5701
|
-
logger$
|
|
5701
|
+
logger$17.error({ err }, "runPlanReviewAction failed");
|
|
5702
5702
|
return PLAN_EXIT_ERROR;
|
|
5703
5703
|
}
|
|
5704
5704
|
if (dryRun) {
|
|
@@ -5724,7 +5724,7 @@ async function runPlanReviewAction(options) {
|
|
|
5724
5724
|
if (ext.endsWith(".yaml") || ext.endsWith(".yml")) taskGraph = load(planYaml);
|
|
5725
5725
|
else taskGraph = JSON.parse(planYaml);
|
|
5726
5726
|
} catch {
|
|
5727
|
-
logger$
|
|
5727
|
+
logger$17.warn("Could not read generated plan file for DB storage");
|
|
5728
5728
|
}
|
|
5729
5729
|
if (outputFormat === "json") {
|
|
5730
5730
|
const envelope = {
|
|
@@ -6786,6 +6786,41 @@ Initialize a methodology pack and decision store.
|
|
|
6786
6786
|
\`\`\`
|
|
6787
6787
|
substrate auto init [--pack bmad] [--project-root .]
|
|
6788
6788
|
\`\`\`
|
|
6789
|
+
|
|
6790
|
+
### substrate auto supervisor
|
|
6791
|
+
Long-running process that monitors pipeline health, kills stalled runs, and auto-restarts.
|
|
6792
|
+
|
|
6793
|
+
\`\`\`
|
|
6794
|
+
substrate auto supervisor [options]
|
|
6795
|
+
\`\`\`
|
|
6796
|
+
|
|
6797
|
+
Options:
|
|
6798
|
+
- \`--poll-interval <seconds>\` — Health check interval (default: 60)
|
|
6799
|
+
- \`--stall-threshold <seconds>\` — Staleness before killing (default: 600)
|
|
6800
|
+
- \`--max-restarts <n>\` — Maximum restart attempts (default: 3)
|
|
6801
|
+
- \`--output-format <format>\` — Output format: human (default) or json
|
|
6802
|
+
|
|
6803
|
+
Exit codes: 0 = all succeeded, 1 = failures/escalations, 2 = max restarts exceeded.
|
|
6804
|
+
|
|
6805
|
+
### substrate auto metrics
|
|
6806
|
+
Show historical pipeline run metrics and cross-run comparison.
|
|
6807
|
+
|
|
6808
|
+
\`\`\`
|
|
6809
|
+
substrate auto metrics [options]
|
|
6810
|
+
\`\`\`
|
|
6811
|
+
|
|
6812
|
+
Options:
|
|
6813
|
+
- \`--limit <n>\` — Number of runs to show (default: 10)
|
|
6814
|
+
- \`--compare <run-id-a,run-id-b>\` — Compare two runs side-by-side (token, time, review cycle deltas)
|
|
6815
|
+
- \`--tag-baseline <run-id>\` — Mark a run as the performance baseline
|
|
6816
|
+
- \`--output-format <format>\` — Output format: human (default) or json
|
|
6817
|
+
|
|
6818
|
+
### substrate auto health
|
|
6819
|
+
Check pipeline health, stall detection, and process status.
|
|
6820
|
+
|
|
6821
|
+
\`\`\`
|
|
6822
|
+
substrate auto health [--output-format json]
|
|
6823
|
+
\`\`\`
|
|
6789
6824
|
`;
|
|
6790
6825
|
}
|
|
6791
6826
|
/**
|
|
@@ -7181,7 +7216,7 @@ function truncateToTokens(text, maxTokens) {
|
|
|
7181
7216
|
|
|
7182
7217
|
//#endregion
|
|
7183
7218
|
//#region src/modules/context-compiler/context-compiler-impl.ts
|
|
7184
|
-
const logger$
|
|
7219
|
+
const logger$16 = createLogger("context-compiler");
|
|
7185
7220
|
/**
|
|
7186
7221
|
* Fraction of the original token budget that must remain (after required +
|
|
7187
7222
|
* important sections) before an optional section is included.
|
|
@@ -7273,7 +7308,7 @@ var ContextCompilerImpl = class {
|
|
|
7273
7308
|
includedParts.push(truncated);
|
|
7274
7309
|
remainingBudget -= truncatedTokens;
|
|
7275
7310
|
anyTruncated = true;
|
|
7276
|
-
logger$
|
|
7311
|
+
logger$16.warn({
|
|
7277
7312
|
section: section.name,
|
|
7278
7313
|
originalTokens: tokens,
|
|
7279
7314
|
budgetTokens: truncatedTokens
|
|
@@ -7287,7 +7322,7 @@ var ContextCompilerImpl = class {
|
|
|
7287
7322
|
});
|
|
7288
7323
|
} else {
|
|
7289
7324
|
anyTruncated = true;
|
|
7290
|
-
logger$
|
|
7325
|
+
logger$16.warn({
|
|
7291
7326
|
section: section.name,
|
|
7292
7327
|
tokens
|
|
7293
7328
|
}, "Context compiler: omitted \"important\" section — no budget remaining");
|
|
@@ -7314,7 +7349,7 @@ var ContextCompilerImpl = class {
|
|
|
7314
7349
|
} else {
|
|
7315
7350
|
if (tokens > 0) {
|
|
7316
7351
|
anyTruncated = true;
|
|
7317
|
-
logger$
|
|
7352
|
+
logger$16.warn({
|
|
7318
7353
|
section: section.name,
|
|
7319
7354
|
tokens,
|
|
7320
7355
|
budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
|
|
@@ -7582,7 +7617,7 @@ function parseYamlResult(yamlText, schema) {
|
|
|
7582
7617
|
|
|
7583
7618
|
//#endregion
|
|
7584
7619
|
//#region src/modules/agent-dispatch/dispatcher-impl.ts
|
|
7585
|
-
const logger$
|
|
7620
|
+
const logger$15 = createLogger("agent-dispatch");
|
|
7586
7621
|
const SHUTDOWN_GRACE_MS = 1e4;
|
|
7587
7622
|
const SHUTDOWN_MAX_WAIT_MS = 3e4;
|
|
7588
7623
|
const CHARS_PER_TOKEN = 4;
|
|
@@ -7651,7 +7686,7 @@ var DispatcherImpl = class {
|
|
|
7651
7686
|
resolve: typedResolve,
|
|
7652
7687
|
reject
|
|
7653
7688
|
});
|
|
7654
|
-
logger$
|
|
7689
|
+
logger$15.debug({
|
|
7655
7690
|
id,
|
|
7656
7691
|
queueLength: this._queue.length
|
|
7657
7692
|
}, "Dispatch queued");
|
|
@@ -7681,7 +7716,7 @@ var DispatcherImpl = class {
|
|
|
7681
7716
|
}
|
|
7682
7717
|
async shutdown() {
|
|
7683
7718
|
this._shuttingDown = true;
|
|
7684
|
-
logger$
|
|
7719
|
+
logger$15.info({
|
|
7685
7720
|
running: this._running.size,
|
|
7686
7721
|
queued: this._queue.length
|
|
7687
7722
|
}, "Dispatcher shutting down");
|
|
@@ -7714,13 +7749,13 @@ var DispatcherImpl = class {
|
|
|
7714
7749
|
}
|
|
7715
7750
|
}, 50);
|
|
7716
7751
|
});
|
|
7717
|
-
logger$
|
|
7752
|
+
logger$15.info("Dispatcher shutdown complete");
|
|
7718
7753
|
}
|
|
7719
7754
|
async _startDispatch(id, request, resolve$2) {
|
|
7720
7755
|
const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns } = request;
|
|
7721
7756
|
const adapter = this._adapterRegistry.get(agent);
|
|
7722
7757
|
if (adapter === void 0) {
|
|
7723
|
-
logger$
|
|
7758
|
+
logger$15.warn({
|
|
7724
7759
|
id,
|
|
7725
7760
|
agent
|
|
7726
7761
|
}, "No adapter found for agent");
|
|
@@ -7764,7 +7799,7 @@ var DispatcherImpl = class {
|
|
|
7764
7799
|
});
|
|
7765
7800
|
const startedAt = Date.now();
|
|
7766
7801
|
proc.on("error", (err) => {
|
|
7767
|
-
logger$
|
|
7802
|
+
logger$15.error({
|
|
7768
7803
|
id,
|
|
7769
7804
|
binary: cmd.binary,
|
|
7770
7805
|
error: err.message
|
|
@@ -7772,7 +7807,7 @@ var DispatcherImpl = class {
|
|
|
7772
7807
|
});
|
|
7773
7808
|
if (proc.stdin !== null) {
|
|
7774
7809
|
proc.stdin.on("error", (err) => {
|
|
7775
|
-
if (err.code !== "EPIPE") logger$
|
|
7810
|
+
if (err.code !== "EPIPE") logger$15.warn({
|
|
7776
7811
|
id,
|
|
7777
7812
|
error: err.message
|
|
7778
7813
|
}, "stdin write error");
|
|
@@ -7814,7 +7849,7 @@ var DispatcherImpl = class {
|
|
|
7814
7849
|
agent,
|
|
7815
7850
|
taskType
|
|
7816
7851
|
});
|
|
7817
|
-
logger$
|
|
7852
|
+
logger$15.debug({
|
|
7818
7853
|
id,
|
|
7819
7854
|
agent,
|
|
7820
7855
|
taskType,
|
|
@@ -7831,7 +7866,7 @@ var DispatcherImpl = class {
|
|
|
7831
7866
|
dispatchId: id,
|
|
7832
7867
|
timeoutMs
|
|
7833
7868
|
});
|
|
7834
|
-
logger$
|
|
7869
|
+
logger$15.warn({
|
|
7835
7870
|
id,
|
|
7836
7871
|
agent,
|
|
7837
7872
|
taskType,
|
|
@@ -7885,7 +7920,7 @@ var DispatcherImpl = class {
|
|
|
7885
7920
|
exitCode: code,
|
|
7886
7921
|
output: stdout
|
|
7887
7922
|
});
|
|
7888
|
-
logger$
|
|
7923
|
+
logger$15.debug({
|
|
7889
7924
|
id,
|
|
7890
7925
|
agent,
|
|
7891
7926
|
taskType,
|
|
@@ -7911,7 +7946,7 @@ var DispatcherImpl = class {
|
|
|
7911
7946
|
error: stderr || `Process exited with code ${String(code)}`,
|
|
7912
7947
|
exitCode: code
|
|
7913
7948
|
});
|
|
7914
|
-
logger$
|
|
7949
|
+
logger$15.debug({
|
|
7915
7950
|
id,
|
|
7916
7951
|
agent,
|
|
7917
7952
|
taskType,
|
|
@@ -7963,7 +7998,7 @@ var DispatcherImpl = class {
|
|
|
7963
7998
|
const next = this._queue.shift();
|
|
7964
7999
|
if (next === void 0) return;
|
|
7965
8000
|
next.handle.status = "running";
|
|
7966
|
-
logger$
|
|
8001
|
+
logger$15.debug({
|
|
7967
8002
|
id: next.id,
|
|
7968
8003
|
queueLength: this._queue.length
|
|
7969
8004
|
}, "Dequeued dispatch");
|
|
@@ -8385,6 +8420,12 @@ function writeStoryMetrics(db, input) {
|
|
|
8385
8420
|
stmt.run(input.run_id, input.story_key, input.result, input.phase_durations_json ?? null, input.started_at ?? null, input.completed_at ?? null, input.wall_clock_seconds ?? 0, input.input_tokens ?? 0, input.output_tokens ?? 0, input.cost_usd ?? 0, input.review_cycles ?? 0, input.dispatches ?? 0);
|
|
8386
8421
|
}
|
|
8387
8422
|
/**
|
|
8423
|
+
* Get all story metrics for a given run.
|
|
8424
|
+
*/
|
|
8425
|
+
function getStoryMetricsForRun(db, runId) {
|
|
8426
|
+
return db.prepare("SELECT * FROM story_metrics WHERE run_id = ? ORDER BY id ASC").all(runId);
|
|
8427
|
+
}
|
|
8428
|
+
/**
|
|
8388
8429
|
* Compare two runs and return percentage deltas for key numeric fields.
|
|
8389
8430
|
* Positive deltas mean run B is larger/longer than run A.
|
|
8390
8431
|
* Returns null if either run does not exist.
|
|
@@ -8432,10 +8473,31 @@ function aggregateTokenUsageForRun(db, runId) {
|
|
|
8432
8473
|
cost: 0
|
|
8433
8474
|
};
|
|
8434
8475
|
}
|
|
8476
|
+
/**
|
|
8477
|
+
* Aggregate token usage for a specific story within a pipeline run.
|
|
8478
|
+
* Matches rows where the metadata JSON contains the given storyKey.
|
|
8479
|
+
*/
|
|
8480
|
+
function aggregateTokenUsageForStory(db, runId, storyKey) {
|
|
8481
|
+
const row = db.prepare(`
|
|
8482
|
+
SELECT
|
|
8483
|
+
COALESCE(SUM(input_tokens), 0) as input,
|
|
8484
|
+
COALESCE(SUM(output_tokens), 0) as output,
|
|
8485
|
+
COALESCE(SUM(cost_usd), 0) as cost
|
|
8486
|
+
FROM token_usage
|
|
8487
|
+
WHERE pipeline_run_id = ?
|
|
8488
|
+
AND metadata IS NOT NULL
|
|
8489
|
+
AND json_extract(metadata, '$.storyKey') = ?
|
|
8490
|
+
`).get(runId, storyKey);
|
|
8491
|
+
return row ?? {
|
|
8492
|
+
input: 0,
|
|
8493
|
+
output: 0,
|
|
8494
|
+
cost: 0
|
|
8495
|
+
};
|
|
8496
|
+
}
|
|
8435
8497
|
|
|
8436
8498
|
//#endregion
|
|
8437
8499
|
//#region src/modules/compiled-workflows/prompt-assembler.ts
|
|
8438
|
-
const logger$
|
|
8500
|
+
const logger$14 = createLogger("compiled-workflows:prompt-assembler");
|
|
8439
8501
|
/**
|
|
8440
8502
|
* Assemble a final prompt from a template and sections map.
|
|
8441
8503
|
*
|
|
@@ -8460,7 +8522,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
8460
8522
|
tokenCount,
|
|
8461
8523
|
truncated: false
|
|
8462
8524
|
};
|
|
8463
|
-
logger$
|
|
8525
|
+
logger$14.warn({
|
|
8464
8526
|
tokenCount,
|
|
8465
8527
|
ceiling: tokenCeiling
|
|
8466
8528
|
}, "Prompt exceeds token ceiling — truncating optional sections");
|
|
@@ -8476,10 +8538,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
8476
8538
|
const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
|
|
8477
8539
|
if (targetSectionTokens === 0) {
|
|
8478
8540
|
contentMap[section.name] = "";
|
|
8479
|
-
logger$
|
|
8541
|
+
logger$14.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
|
|
8480
8542
|
} else {
|
|
8481
8543
|
contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
|
|
8482
|
-
logger$
|
|
8544
|
+
logger$14.warn({
|
|
8483
8545
|
sectionName: section.name,
|
|
8484
8546
|
targetSectionTokens
|
|
8485
8547
|
}, "Section truncated to fit token budget");
|
|
@@ -8490,7 +8552,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
8490
8552
|
}
|
|
8491
8553
|
if (tokenCount <= tokenCeiling) break;
|
|
8492
8554
|
}
|
|
8493
|
-
if (tokenCount > tokenCeiling) logger$
|
|
8555
|
+
if (tokenCount > tokenCeiling) logger$14.warn({
|
|
8494
8556
|
tokenCount,
|
|
8495
8557
|
ceiling: tokenCeiling
|
|
8496
8558
|
}, "Required sections alone exceed token ceiling — returning over-budget prompt");
|
|
@@ -8631,7 +8693,7 @@ const CodeReviewResultSchema = z.object({
|
|
|
8631
8693
|
|
|
8632
8694
|
//#endregion
|
|
8633
8695
|
//#region src/modules/compiled-workflows/create-story.ts
|
|
8634
|
-
const logger$
|
|
8696
|
+
const logger$13 = createLogger("compiled-workflows:create-story");
|
|
8635
8697
|
/**
|
|
8636
8698
|
* Hard ceiling for the assembled create-story prompt.
|
|
8637
8699
|
*/
|
|
@@ -8655,7 +8717,7 @@ const TOKEN_CEILING$2 = 3e3;
|
|
|
8655
8717
|
*/
|
|
8656
8718
|
async function runCreateStory(deps, params) {
|
|
8657
8719
|
const { epicId, storyKey, pipelineRunId } = params;
|
|
8658
|
-
logger$
|
|
8720
|
+
logger$13.debug({
|
|
8659
8721
|
epicId,
|
|
8660
8722
|
storyKey,
|
|
8661
8723
|
pipelineRunId
|
|
@@ -8665,7 +8727,7 @@ async function runCreateStory(deps, params) {
|
|
|
8665
8727
|
template = await deps.pack.getPrompt("create-story");
|
|
8666
8728
|
} catch (err) {
|
|
8667
8729
|
const error = err instanceof Error ? err.message : String(err);
|
|
8668
|
-
logger$
|
|
8730
|
+
logger$13.error({ error }, "Failed to retrieve create-story prompt template");
|
|
8669
8731
|
return {
|
|
8670
8732
|
result: "failed",
|
|
8671
8733
|
error: `Failed to retrieve prompt template: ${error}`,
|
|
@@ -8707,7 +8769,7 @@ async function runCreateStory(deps, params) {
|
|
|
8707
8769
|
priority: "important"
|
|
8708
8770
|
}
|
|
8709
8771
|
], TOKEN_CEILING$2);
|
|
8710
|
-
logger$
|
|
8772
|
+
logger$13.debug({
|
|
8711
8773
|
tokenCount,
|
|
8712
8774
|
truncated,
|
|
8713
8775
|
tokenCeiling: TOKEN_CEILING$2
|
|
@@ -8724,7 +8786,7 @@ async function runCreateStory(deps, params) {
|
|
|
8724
8786
|
dispatchResult = await handle.result;
|
|
8725
8787
|
} catch (err) {
|
|
8726
8788
|
const error = err instanceof Error ? err.message : String(err);
|
|
8727
|
-
logger$
|
|
8789
|
+
logger$13.error({
|
|
8728
8790
|
epicId,
|
|
8729
8791
|
storyKey,
|
|
8730
8792
|
error
|
|
@@ -8745,7 +8807,7 @@ async function runCreateStory(deps, params) {
|
|
|
8745
8807
|
if (dispatchResult.status === "failed") {
|
|
8746
8808
|
const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
|
|
8747
8809
|
const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
|
|
8748
|
-
logger$
|
|
8810
|
+
logger$13.warn({
|
|
8749
8811
|
epicId,
|
|
8750
8812
|
storyKey,
|
|
8751
8813
|
exitCode: dispatchResult.exitCode
|
|
@@ -8757,7 +8819,7 @@ async function runCreateStory(deps, params) {
|
|
|
8757
8819
|
};
|
|
8758
8820
|
}
|
|
8759
8821
|
if (dispatchResult.status === "timeout") {
|
|
8760
|
-
logger$
|
|
8822
|
+
logger$13.warn({
|
|
8761
8823
|
epicId,
|
|
8762
8824
|
storyKey
|
|
8763
8825
|
}, "Create-story dispatch timed out");
|
|
@@ -8770,7 +8832,7 @@ async function runCreateStory(deps, params) {
|
|
|
8770
8832
|
if (dispatchResult.parsed === null) {
|
|
8771
8833
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
8772
8834
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
8773
|
-
logger$
|
|
8835
|
+
logger$13.warn({
|
|
8774
8836
|
epicId,
|
|
8775
8837
|
storyKey,
|
|
8776
8838
|
details,
|
|
@@ -8786,7 +8848,7 @@ async function runCreateStory(deps, params) {
|
|
|
8786
8848
|
const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
|
|
8787
8849
|
if (!parseResult.success) {
|
|
8788
8850
|
const details = parseResult.error.message;
|
|
8789
|
-
logger$
|
|
8851
|
+
logger$13.warn({
|
|
8790
8852
|
epicId,
|
|
8791
8853
|
storyKey,
|
|
8792
8854
|
details
|
|
@@ -8799,7 +8861,7 @@ async function runCreateStory(deps, params) {
|
|
|
8799
8861
|
};
|
|
8800
8862
|
}
|
|
8801
8863
|
const parsed = parseResult.data;
|
|
8802
|
-
logger$
|
|
8864
|
+
logger$13.info({
|
|
8803
8865
|
epicId,
|
|
8804
8866
|
storyKey,
|
|
8805
8867
|
storyFile: parsed.story_file,
|
|
@@ -8821,7 +8883,7 @@ function getImplementationDecisions(deps) {
|
|
|
8821
8883
|
try {
|
|
8822
8884
|
return getDecisionsByPhase(deps.db, "implementation");
|
|
8823
8885
|
} catch (err) {
|
|
8824
|
-
logger$
|
|
8886
|
+
logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
|
|
8825
8887
|
return [];
|
|
8826
8888
|
}
|
|
8827
8889
|
}
|
|
@@ -8837,13 +8899,13 @@ function getEpicShard(decisions, epicId, projectRoot) {
|
|
|
8837
8899
|
if (projectRoot) {
|
|
8838
8900
|
const fallback = readEpicShardFromFile(projectRoot, epicId);
|
|
8839
8901
|
if (fallback) {
|
|
8840
|
-
logger$
|
|
8902
|
+
logger$13.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
|
|
8841
8903
|
return fallback;
|
|
8842
8904
|
}
|
|
8843
8905
|
}
|
|
8844
8906
|
return "";
|
|
8845
8907
|
} catch (err) {
|
|
8846
|
-
logger$
|
|
8908
|
+
logger$13.warn({
|
|
8847
8909
|
epicId,
|
|
8848
8910
|
error: err instanceof Error ? err.message : String(err)
|
|
8849
8911
|
}, "Failed to retrieve epic shard");
|
|
@@ -8860,7 +8922,7 @@ function getPrevDevNotes(decisions, epicId) {
|
|
|
8860
8922
|
if (devNotes.length === 0) return "";
|
|
8861
8923
|
return devNotes[devNotes.length - 1].value;
|
|
8862
8924
|
} catch (err) {
|
|
8863
|
-
logger$
|
|
8925
|
+
logger$13.warn({
|
|
8864
8926
|
epicId,
|
|
8865
8927
|
error: err instanceof Error ? err.message : String(err)
|
|
8866
8928
|
}, "Failed to retrieve prev dev notes");
|
|
@@ -8880,13 +8942,13 @@ function getArchConstraints$1(deps) {
|
|
|
8880
8942
|
if (deps.projectRoot) {
|
|
8881
8943
|
const fallback = readArchConstraintsFromFile(deps.projectRoot);
|
|
8882
8944
|
if (fallback) {
|
|
8883
|
-
logger$
|
|
8945
|
+
logger$13.info("Using file-based fallback for architecture constraints (decisions table empty)");
|
|
8884
8946
|
return fallback;
|
|
8885
8947
|
}
|
|
8886
8948
|
}
|
|
8887
8949
|
return "";
|
|
8888
8950
|
} catch (err) {
|
|
8889
|
-
logger$
|
|
8951
|
+
logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
8890
8952
|
return "";
|
|
8891
8953
|
}
|
|
8892
8954
|
}
|
|
@@ -8906,7 +8968,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
|
|
|
8906
8968
|
const match = pattern.exec(content);
|
|
8907
8969
|
return match ? match[0].trim() : "";
|
|
8908
8970
|
} catch (err) {
|
|
8909
|
-
logger$
|
|
8971
|
+
logger$13.warn({
|
|
8910
8972
|
epicId,
|
|
8911
8973
|
error: err instanceof Error ? err.message : String(err)
|
|
8912
8974
|
}, "File-based epic shard fallback failed");
|
|
@@ -8929,7 +8991,7 @@ function readArchConstraintsFromFile(projectRoot) {
|
|
|
8929
8991
|
const content = readFileSync$1(archPath, "utf-8");
|
|
8930
8992
|
return content.slice(0, 1500);
|
|
8931
8993
|
} catch (err) {
|
|
8932
|
-
logger$
|
|
8994
|
+
logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
|
|
8933
8995
|
return "";
|
|
8934
8996
|
}
|
|
8935
8997
|
}
|
|
@@ -8942,14 +9004,14 @@ async function getStoryTemplate(deps) {
|
|
|
8942
9004
|
try {
|
|
8943
9005
|
return await deps.pack.getTemplate("story");
|
|
8944
9006
|
} catch (err) {
|
|
8945
|
-
logger$
|
|
9007
|
+
logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
|
|
8946
9008
|
return "";
|
|
8947
9009
|
}
|
|
8948
9010
|
}
|
|
8949
9011
|
|
|
8950
9012
|
//#endregion
|
|
8951
9013
|
//#region src/modules/compiled-workflows/git-helpers.ts
|
|
8952
|
-
const logger$
|
|
9014
|
+
const logger$12 = createLogger("compiled-workflows:git-helpers");
|
|
8953
9015
|
/**
|
|
8954
9016
|
* Capture the full git diff for HEAD (working tree vs current commit).
|
|
8955
9017
|
*
|
|
@@ -9073,7 +9135,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
9073
9135
|
stderr += chunk.toString("utf-8");
|
|
9074
9136
|
});
|
|
9075
9137
|
proc.on("error", (err) => {
|
|
9076
|
-
logger$
|
|
9138
|
+
logger$12.warn({
|
|
9077
9139
|
label: logLabel,
|
|
9078
9140
|
cwd,
|
|
9079
9141
|
error: err.message
|
|
@@ -9082,7 +9144,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
9082
9144
|
});
|
|
9083
9145
|
proc.on("close", (code) => {
|
|
9084
9146
|
if (code !== 0) {
|
|
9085
|
-
logger$
|
|
9147
|
+
logger$12.warn({
|
|
9086
9148
|
label: logLabel,
|
|
9087
9149
|
cwd,
|
|
9088
9150
|
code,
|
|
@@ -9098,7 +9160,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
9098
9160
|
|
|
9099
9161
|
//#endregion
|
|
9100
9162
|
//#region src/modules/compiled-workflows/dev-story.ts
|
|
9101
|
-
const logger$
|
|
9163
|
+
const logger$11 = createLogger("compiled-workflows:dev-story");
|
|
9102
9164
|
/** Hard token ceiling for the assembled dev-story prompt */
|
|
9103
9165
|
const TOKEN_CEILING$1 = 24e3;
|
|
9104
9166
|
/** Default timeout for dev-story dispatches in milliseconds (30 min) */
|
|
@@ -9120,7 +9182,7 @@ const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
|
|
|
9120
9182
|
*/
|
|
9121
9183
|
async function runDevStory(deps, params) {
|
|
9122
9184
|
const { storyKey, storyFilePath, taskScope, priorFiles } = params;
|
|
9123
|
-
logger$
|
|
9185
|
+
logger$11.info({
|
|
9124
9186
|
storyKey,
|
|
9125
9187
|
storyFilePath
|
|
9126
9188
|
}, "Starting compiled dev-story workflow");
|
|
@@ -9162,10 +9224,10 @@ async function runDevStory(deps, params) {
|
|
|
9162
9224
|
let template;
|
|
9163
9225
|
try {
|
|
9164
9226
|
template = await deps.pack.getPrompt("dev-story");
|
|
9165
|
-
logger$
|
|
9227
|
+
logger$11.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
|
|
9166
9228
|
} catch (err) {
|
|
9167
9229
|
const error = err instanceof Error ? err.message : String(err);
|
|
9168
|
-
logger$
|
|
9230
|
+
logger$11.error({
|
|
9169
9231
|
storyKey,
|
|
9170
9232
|
error
|
|
9171
9233
|
}, "Failed to retrieve dev-story prompt template");
|
|
@@ -9176,14 +9238,14 @@ async function runDevStory(deps, params) {
|
|
|
9176
9238
|
storyContent = await readFile$2(storyFilePath, "utf-8");
|
|
9177
9239
|
} catch (err) {
|
|
9178
9240
|
if (err.code === "ENOENT") {
|
|
9179
|
-
logger$
|
|
9241
|
+
logger$11.error({
|
|
9180
9242
|
storyKey,
|
|
9181
9243
|
storyFilePath
|
|
9182
9244
|
}, "Story file not found");
|
|
9183
9245
|
return makeFailureResult("story_file_not_found");
|
|
9184
9246
|
}
|
|
9185
9247
|
const error = err instanceof Error ? err.message : String(err);
|
|
9186
|
-
logger$
|
|
9248
|
+
logger$11.error({
|
|
9187
9249
|
storyKey,
|
|
9188
9250
|
storyFilePath,
|
|
9189
9251
|
error
|
|
@@ -9191,7 +9253,7 @@ async function runDevStory(deps, params) {
|
|
|
9191
9253
|
return makeFailureResult(`story_file_read_error: ${error}`);
|
|
9192
9254
|
}
|
|
9193
9255
|
if (storyContent.trim().length === 0) {
|
|
9194
|
-
logger$
|
|
9256
|
+
logger$11.error({
|
|
9195
9257
|
storyKey,
|
|
9196
9258
|
storyFilePath
|
|
9197
9259
|
}, "Story file is empty");
|
|
@@ -9203,17 +9265,17 @@ async function runDevStory(deps, params) {
|
|
|
9203
9265
|
const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
|
|
9204
9266
|
if (testPatternDecisions.length > 0) {
|
|
9205
9267
|
testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
|
|
9206
|
-
logger$
|
|
9268
|
+
logger$11.debug({
|
|
9207
9269
|
storyKey,
|
|
9208
9270
|
count: testPatternDecisions.length
|
|
9209
9271
|
}, "Loaded test patterns from decision store");
|
|
9210
9272
|
} else {
|
|
9211
9273
|
testPatternsContent = DEFAULT_VITEST_PATTERNS;
|
|
9212
|
-
logger$
|
|
9274
|
+
logger$11.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
|
|
9213
9275
|
}
|
|
9214
9276
|
} catch (err) {
|
|
9215
9277
|
const error = err instanceof Error ? err.message : String(err);
|
|
9216
|
-
logger$
|
|
9278
|
+
logger$11.warn({
|
|
9217
9279
|
storyKey,
|
|
9218
9280
|
error
|
|
9219
9281
|
}, "Failed to load test patterns — using defaults");
|
|
@@ -9256,7 +9318,7 @@ async function runDevStory(deps, params) {
|
|
|
9256
9318
|
}
|
|
9257
9319
|
];
|
|
9258
9320
|
const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING$1);
|
|
9259
|
-
logger$
|
|
9321
|
+
logger$11.info({
|
|
9260
9322
|
storyKey,
|
|
9261
9323
|
tokenCount,
|
|
9262
9324
|
ceiling: TOKEN_CEILING$1,
|
|
@@ -9275,7 +9337,7 @@ async function runDevStory(deps, params) {
|
|
|
9275
9337
|
dispatchResult = await handle.result;
|
|
9276
9338
|
} catch (err) {
|
|
9277
9339
|
const error = err instanceof Error ? err.message : String(err);
|
|
9278
|
-
logger$
|
|
9340
|
+
logger$11.error({
|
|
9279
9341
|
storyKey,
|
|
9280
9342
|
error
|
|
9281
9343
|
}, "Dispatch threw an unexpected error");
|
|
@@ -9286,11 +9348,11 @@ async function runDevStory(deps, params) {
|
|
|
9286
9348
|
output: dispatchResult.tokenEstimate.output
|
|
9287
9349
|
};
|
|
9288
9350
|
if (dispatchResult.status === "timeout") {
|
|
9289
|
-
logger$
|
|
9351
|
+
logger$11.error({
|
|
9290
9352
|
storyKey,
|
|
9291
9353
|
durationMs: dispatchResult.durationMs
|
|
9292
9354
|
}, "Dev-story dispatch timed out");
|
|
9293
|
-
if (dispatchResult.output.length > 0) logger$
|
|
9355
|
+
if (dispatchResult.output.length > 0) logger$11.info({
|
|
9294
9356
|
storyKey,
|
|
9295
9357
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
9296
9358
|
}, "Partial output before timeout");
|
|
@@ -9300,12 +9362,12 @@ async function runDevStory(deps, params) {
|
|
|
9300
9362
|
};
|
|
9301
9363
|
}
|
|
9302
9364
|
if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
|
|
9303
|
-
logger$
|
|
9365
|
+
logger$11.error({
|
|
9304
9366
|
storyKey,
|
|
9305
9367
|
exitCode: dispatchResult.exitCode,
|
|
9306
9368
|
status: dispatchResult.status
|
|
9307
9369
|
}, "Dev-story dispatch failed");
|
|
9308
|
-
if (dispatchResult.output.length > 0) logger$
|
|
9370
|
+
if (dispatchResult.output.length > 0) logger$11.info({
|
|
9309
9371
|
storyKey,
|
|
9310
9372
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
9311
9373
|
}, "Partial output from failed dispatch");
|
|
@@ -9317,7 +9379,7 @@ async function runDevStory(deps, params) {
|
|
|
9317
9379
|
if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
|
|
9318
9380
|
const details = dispatchResult.parseError ?? "parsed result was null";
|
|
9319
9381
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
9320
|
-
logger$
|
|
9382
|
+
logger$11.error({
|
|
9321
9383
|
storyKey,
|
|
9322
9384
|
parseError: details,
|
|
9323
9385
|
rawOutputSnippet: rawSnippet
|
|
@@ -9325,12 +9387,12 @@ async function runDevStory(deps, params) {
|
|
|
9325
9387
|
let filesModified = [];
|
|
9326
9388
|
try {
|
|
9327
9389
|
filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
|
|
9328
|
-
if (filesModified.length > 0) logger$
|
|
9390
|
+
if (filesModified.length > 0) logger$11.info({
|
|
9329
9391
|
storyKey,
|
|
9330
9392
|
fileCount: filesModified.length
|
|
9331
9393
|
}, "Recovered files_modified from git status (YAML fallback)");
|
|
9332
9394
|
} catch (err) {
|
|
9333
|
-
logger$
|
|
9395
|
+
logger$11.warn({
|
|
9334
9396
|
storyKey,
|
|
9335
9397
|
error: err instanceof Error ? err.message : String(err)
|
|
9336
9398
|
}, "Failed to recover files_modified from git");
|
|
@@ -9347,7 +9409,7 @@ async function runDevStory(deps, params) {
|
|
|
9347
9409
|
};
|
|
9348
9410
|
}
|
|
9349
9411
|
const parsed = dispatchResult.parsed;
|
|
9350
|
-
logger$
|
|
9412
|
+
logger$11.info({
|
|
9351
9413
|
storyKey,
|
|
9352
9414
|
result: parsed.result,
|
|
9353
9415
|
acMet: parsed.ac_met.length
|
|
@@ -9486,7 +9548,7 @@ function extractFilesInScope(storyContent) {
|
|
|
9486
9548
|
|
|
9487
9549
|
//#endregion
|
|
9488
9550
|
//#region src/modules/compiled-workflows/code-review.ts
|
|
9489
|
-
const logger$
|
|
9551
|
+
const logger$10 = createLogger("compiled-workflows:code-review");
|
|
9490
9552
|
/**
|
|
9491
9553
|
* Hard token ceiling for the assembled code-review prompt (50,000 tokens).
|
|
9492
9554
|
* Quality reviews require seeing actual code diffs, not just file names.
|
|
@@ -9526,7 +9588,7 @@ function defaultFailResult(error, tokenUsage) {
|
|
|
9526
9588
|
async function runCodeReview(deps, params) {
|
|
9527
9589
|
const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
|
|
9528
9590
|
const cwd = workingDirectory ?? process.cwd();
|
|
9529
|
-
logger$
|
|
9591
|
+
logger$10.debug({
|
|
9530
9592
|
storyKey,
|
|
9531
9593
|
storyFilePath,
|
|
9532
9594
|
cwd,
|
|
@@ -9537,7 +9599,7 @@ async function runCodeReview(deps, params) {
|
|
|
9537
9599
|
template = await deps.pack.getPrompt("code-review");
|
|
9538
9600
|
} catch (err) {
|
|
9539
9601
|
const error = err instanceof Error ? err.message : String(err);
|
|
9540
|
-
logger$
|
|
9602
|
+
logger$10.error({ error }, "Failed to retrieve code-review prompt template");
|
|
9541
9603
|
return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
|
|
9542
9604
|
input: 0,
|
|
9543
9605
|
output: 0
|
|
@@ -9548,7 +9610,7 @@ async function runCodeReview(deps, params) {
|
|
|
9548
9610
|
storyContent = await readFile$2(storyFilePath, "utf-8");
|
|
9549
9611
|
} catch (err) {
|
|
9550
9612
|
const error = err instanceof Error ? err.message : String(err);
|
|
9551
|
-
logger$
|
|
9613
|
+
logger$10.error({
|
|
9552
9614
|
storyFilePath,
|
|
9553
9615
|
error
|
|
9554
9616
|
}, "Failed to read story file");
|
|
@@ -9568,12 +9630,12 @@ async function runCodeReview(deps, params) {
|
|
|
9568
9630
|
const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
|
|
9569
9631
|
if (scopedTotal <= TOKEN_CEILING) {
|
|
9570
9632
|
gitDiffContent = scopedDiff;
|
|
9571
|
-
logger$
|
|
9633
|
+
logger$10.debug({
|
|
9572
9634
|
fileCount: filesModified.length,
|
|
9573
9635
|
tokenCount: scopedTotal
|
|
9574
9636
|
}, "Using scoped file diff");
|
|
9575
9637
|
} else {
|
|
9576
|
-
logger$
|
|
9638
|
+
logger$10.warn({
|
|
9577
9639
|
estimatedTotal: scopedTotal,
|
|
9578
9640
|
ceiling: TOKEN_CEILING,
|
|
9579
9641
|
fileCount: filesModified.length
|
|
@@ -9587,7 +9649,7 @@ async function runCodeReview(deps, params) {
|
|
|
9587
9649
|
const fullTotal = nonDiffTokens + countTokens(fullDiff);
|
|
9588
9650
|
if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
|
|
9589
9651
|
else {
|
|
9590
|
-
logger$
|
|
9652
|
+
logger$10.warn({
|
|
9591
9653
|
estimatedTotal: fullTotal,
|
|
9592
9654
|
ceiling: TOKEN_CEILING
|
|
9593
9655
|
}, "Full git diff would exceed token ceiling — using stat-only summary");
|
|
@@ -9625,11 +9687,11 @@ async function runCodeReview(deps, params) {
|
|
|
9625
9687
|
}
|
|
9626
9688
|
];
|
|
9627
9689
|
const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
|
|
9628
|
-
if (assembleResult.truncated) logger$
|
|
9690
|
+
if (assembleResult.truncated) logger$10.warn({
|
|
9629
9691
|
storyKey,
|
|
9630
9692
|
tokenCount: assembleResult.tokenCount
|
|
9631
9693
|
}, "Code-review prompt truncated to fit token ceiling");
|
|
9632
|
-
logger$
|
|
9694
|
+
logger$10.debug({
|
|
9633
9695
|
storyKey,
|
|
9634
9696
|
tokenCount: assembleResult.tokenCount,
|
|
9635
9697
|
truncated: assembleResult.truncated
|
|
@@ -9647,7 +9709,7 @@ async function runCodeReview(deps, params) {
|
|
|
9647
9709
|
dispatchResult = await handle.result;
|
|
9648
9710
|
} catch (err) {
|
|
9649
9711
|
const error = err instanceof Error ? err.message : String(err);
|
|
9650
|
-
logger$
|
|
9712
|
+
logger$10.error({
|
|
9651
9713
|
storyKey,
|
|
9652
9714
|
error
|
|
9653
9715
|
}, "Code-review dispatch threw unexpected error");
|
|
@@ -9663,7 +9725,7 @@ async function runCodeReview(deps, params) {
|
|
|
9663
9725
|
const rawOutput = dispatchResult.output ?? void 0;
|
|
9664
9726
|
if (dispatchResult.status === "failed") {
|
|
9665
9727
|
const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
|
|
9666
|
-
logger$
|
|
9728
|
+
logger$10.warn({
|
|
9667
9729
|
storyKey,
|
|
9668
9730
|
exitCode: dispatchResult.exitCode
|
|
9669
9731
|
}, "Code-review dispatch failed");
|
|
@@ -9673,7 +9735,7 @@ async function runCodeReview(deps, params) {
|
|
|
9673
9735
|
};
|
|
9674
9736
|
}
|
|
9675
9737
|
if (dispatchResult.status === "timeout") {
|
|
9676
|
-
logger$
|
|
9738
|
+
logger$10.warn({ storyKey }, "Code-review dispatch timed out");
|
|
9677
9739
|
return {
|
|
9678
9740
|
...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
|
|
9679
9741
|
rawOutput
|
|
@@ -9681,7 +9743,7 @@ async function runCodeReview(deps, params) {
|
|
|
9681
9743
|
}
|
|
9682
9744
|
if (dispatchResult.parsed === null) {
|
|
9683
9745
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
9684
|
-
logger$
|
|
9746
|
+
logger$10.warn({
|
|
9685
9747
|
storyKey,
|
|
9686
9748
|
details
|
|
9687
9749
|
}, "Code-review output schema validation failed");
|
|
@@ -9698,7 +9760,7 @@ async function runCodeReview(deps, params) {
|
|
|
9698
9760
|
const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
|
|
9699
9761
|
if (!parseResult.success) {
|
|
9700
9762
|
const details = parseResult.error.message;
|
|
9701
|
-
logger$
|
|
9763
|
+
logger$10.warn({
|
|
9702
9764
|
storyKey,
|
|
9703
9765
|
details
|
|
9704
9766
|
}, "Code-review output failed schema validation");
|
|
@@ -9713,13 +9775,13 @@ async function runCodeReview(deps, params) {
|
|
|
9713
9775
|
};
|
|
9714
9776
|
}
|
|
9715
9777
|
const parsed = parseResult.data;
|
|
9716
|
-
if (parsed.agentVerdict !== parsed.verdict) logger$
|
|
9778
|
+
if (parsed.agentVerdict !== parsed.verdict) logger$10.info({
|
|
9717
9779
|
storyKey,
|
|
9718
9780
|
agentVerdict: parsed.agentVerdict,
|
|
9719
9781
|
pipelineVerdict: parsed.verdict,
|
|
9720
9782
|
issues: parsed.issues
|
|
9721
9783
|
}, "Pipeline overrode agent verdict based on issue severities");
|
|
9722
|
-
logger$
|
|
9784
|
+
logger$10.info({
|
|
9723
9785
|
storyKey,
|
|
9724
9786
|
verdict: parsed.verdict,
|
|
9725
9787
|
issues: parsed.issues
|
|
@@ -9744,7 +9806,7 @@ function getArchConstraints(deps) {
|
|
|
9744
9806
|
if (constraints.length === 0) return "";
|
|
9745
9807
|
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
9746
9808
|
} catch (err) {
|
|
9747
|
-
logger$
|
|
9809
|
+
logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
9748
9810
|
return "";
|
|
9749
9811
|
}
|
|
9750
9812
|
}
|
|
@@ -10076,7 +10138,7 @@ function detectConflictGroups(storyKeys, config) {
|
|
|
10076
10138
|
|
|
10077
10139
|
//#endregion
|
|
10078
10140
|
//#region src/modules/implementation-orchestrator/seed-methodology-context.ts
|
|
10079
|
-
const logger$
|
|
10141
|
+
const logger$9 = createLogger("implementation-orchestrator:seed");
|
|
10080
10142
|
/** Max chars for the architecture summary seeded into decisions */
|
|
10081
10143
|
const MAX_ARCH_CHARS = 6e3;
|
|
10082
10144
|
/** Max chars per epic shard */
|
|
@@ -10110,12 +10172,12 @@ function seedMethodologyContext(db, projectRoot) {
|
|
|
10110
10172
|
const testCount = seedTestPatterns(db, projectRoot);
|
|
10111
10173
|
if (testCount === -1) result.skippedCategories.push("test-patterns");
|
|
10112
10174
|
else result.decisionsCreated += testCount;
|
|
10113
|
-
logger$
|
|
10175
|
+
logger$9.info({
|
|
10114
10176
|
decisionsCreated: result.decisionsCreated,
|
|
10115
10177
|
skippedCategories: result.skippedCategories
|
|
10116
10178
|
}, "Methodology context seeding complete");
|
|
10117
10179
|
} catch (err) {
|
|
10118
|
-
logger$
|
|
10180
|
+
logger$9.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
|
|
10119
10181
|
}
|
|
10120
10182
|
return result;
|
|
10121
10183
|
}
|
|
@@ -10159,7 +10221,7 @@ function seedArchitecture(db, projectRoot) {
|
|
|
10159
10221
|
});
|
|
10160
10222
|
count = 1;
|
|
10161
10223
|
}
|
|
10162
|
-
logger$
|
|
10224
|
+
logger$9.debug({ count }, "Seeded architecture decisions");
|
|
10163
10225
|
return count;
|
|
10164
10226
|
}
|
|
10165
10227
|
/**
|
|
@@ -10187,7 +10249,7 @@ function seedEpicShards(db, projectRoot) {
|
|
|
10187
10249
|
});
|
|
10188
10250
|
count++;
|
|
10189
10251
|
}
|
|
10190
|
-
logger$
|
|
10252
|
+
logger$9.debug({ count }, "Seeded epic shard decisions");
|
|
10191
10253
|
return count;
|
|
10192
10254
|
}
|
|
10193
10255
|
/**
|
|
@@ -10208,7 +10270,7 @@ function seedTestPatterns(db, projectRoot) {
|
|
|
10208
10270
|
value: patterns.slice(0, MAX_TEST_PATTERNS_CHARS),
|
|
10209
10271
|
rationale: "Detected from project configuration at orchestrator startup"
|
|
10210
10272
|
});
|
|
10211
|
-
logger$
|
|
10273
|
+
logger$9.debug("Seeded test patterns decision");
|
|
10212
10274
|
return 1;
|
|
10213
10275
|
}
|
|
10214
10276
|
/**
|
|
@@ -10400,7 +10462,7 @@ function createPauseGate() {
|
|
|
10400
10462
|
*/
|
|
10401
10463
|
function createImplementationOrchestrator(deps) {
|
|
10402
10464
|
const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot } = deps;
|
|
10403
|
-
const logger$
|
|
10465
|
+
const logger$36 = createLogger("implementation-orchestrator");
|
|
10404
10466
|
let _state = "IDLE";
|
|
10405
10467
|
let _startedAt;
|
|
10406
10468
|
let _completedAt;
|
|
@@ -10444,6 +10506,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10444
10506
|
const startedAt = storyState?.startedAt;
|
|
10445
10507
|
const completedAt = storyState?.completedAt ?? new Date().toISOString();
|
|
10446
10508
|
const wallClockSeconds = startedAt ? Math.round((new Date(completedAt).getTime() - new Date(startedAt).getTime()) / 1e3) : 0;
|
|
10509
|
+
const tokenAgg = aggregateTokenUsageForStory(db, config.pipelineRunId, storyKey);
|
|
10447
10510
|
writeStoryMetrics(db, {
|
|
10448
10511
|
run_id: config.pipelineRunId,
|
|
10449
10512
|
story_key: storyKey,
|
|
@@ -10452,11 +10515,14 @@ function createImplementationOrchestrator(deps) {
|
|
|
10452
10515
|
started_at: startedAt,
|
|
10453
10516
|
completed_at: completedAt,
|
|
10454
10517
|
wall_clock_seconds: wallClockSeconds,
|
|
10518
|
+
input_tokens: tokenAgg.input,
|
|
10519
|
+
output_tokens: tokenAgg.output,
|
|
10520
|
+
cost_usd: tokenAgg.cost,
|
|
10455
10521
|
review_cycles: reviewCycles,
|
|
10456
10522
|
dispatches: _storyDispatches.get(storyKey) ?? 0
|
|
10457
10523
|
});
|
|
10458
10524
|
} catch (err) {
|
|
10459
|
-
logger$
|
|
10525
|
+
logger$36.warn({
|
|
10460
10526
|
err,
|
|
10461
10527
|
storyKey
|
|
10462
10528
|
}, "Failed to write story metrics (best-effort)");
|
|
@@ -10491,7 +10557,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10491
10557
|
token_usage_json: serialized
|
|
10492
10558
|
});
|
|
10493
10559
|
} catch (err) {
|
|
10494
|
-
logger$
|
|
10560
|
+
logger$36.warn("Failed to persist orchestrator state", { err });
|
|
10495
10561
|
}
|
|
10496
10562
|
}
|
|
10497
10563
|
function recordProgress() {
|
|
@@ -10516,7 +10582,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10516
10582
|
const elapsed = Date.now() - _lastProgressTs;
|
|
10517
10583
|
if (elapsed >= WATCHDOG_TIMEOUT_MS) {
|
|
10518
10584
|
for (const [key, s] of _stories) if (s.phase !== "PENDING" && s.phase !== "COMPLETE" && s.phase !== "ESCALATED") {
|
|
10519
|
-
logger$
|
|
10585
|
+
logger$36.warn({
|
|
10520
10586
|
storyKey: key,
|
|
10521
10587
|
phase: s.phase,
|
|
10522
10588
|
elapsedMs: elapsed
|
|
@@ -10552,7 +10618,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10552
10618
|
* exhausted retries the story is ESCALATED.
|
|
10553
10619
|
*/
|
|
10554
10620
|
async function processStory(storyKey) {
|
|
10555
|
-
logger$
|
|
10621
|
+
logger$36.info("Processing story", { storyKey });
|
|
10556
10622
|
await waitIfPaused();
|
|
10557
10623
|
if (_state !== "RUNNING") return;
|
|
10558
10624
|
startPhase(storyKey, "create-story");
|
|
@@ -10567,7 +10633,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10567
10633
|
const match = files.find((f) => f.startsWith(`${storyKey}-`) && f.endsWith(".md"));
|
|
10568
10634
|
if (match) {
|
|
10569
10635
|
storyFilePath = join$1(artifactsDir, match);
|
|
10570
|
-
logger$
|
|
10636
|
+
logger$36.info({
|
|
10571
10637
|
storyKey,
|
|
10572
10638
|
storyFilePath
|
|
10573
10639
|
}, "Found existing story file — skipping create-story");
|
|
@@ -10668,7 +10734,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10668
10734
|
try {
|
|
10669
10735
|
storyContentForAnalysis = await readFile$2(storyFilePath ?? "", "utf-8");
|
|
10670
10736
|
} catch (err) {
|
|
10671
|
-
logger$
|
|
10737
|
+
logger$36.error({
|
|
10672
10738
|
storyKey,
|
|
10673
10739
|
storyFilePath,
|
|
10674
10740
|
error: err instanceof Error ? err.message : String(err)
|
|
@@ -10676,7 +10742,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10676
10742
|
}
|
|
10677
10743
|
const analysis = analyzeStoryComplexity(storyContentForAnalysis);
|
|
10678
10744
|
const batches = planTaskBatches(analysis);
|
|
10679
|
-
logger$
|
|
10745
|
+
logger$36.info({
|
|
10680
10746
|
storyKey,
|
|
10681
10747
|
estimatedScope: analysis.estimatedScope,
|
|
10682
10748
|
batchCount: batches.length,
|
|
@@ -10694,7 +10760,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10694
10760
|
if (_state !== "RUNNING") break;
|
|
10695
10761
|
const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
|
|
10696
10762
|
const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
|
|
10697
|
-
logger$
|
|
10763
|
+
logger$36.info({
|
|
10698
10764
|
storyKey,
|
|
10699
10765
|
batchIndex: batch.batchIndex,
|
|
10700
10766
|
taskCount: batch.taskIds.length
|
|
@@ -10718,7 +10784,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10718
10784
|
});
|
|
10719
10785
|
} catch (batchErr) {
|
|
10720
10786
|
const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
|
|
10721
|
-
logger$
|
|
10787
|
+
logger$36.warn({
|
|
10722
10788
|
storyKey,
|
|
10723
10789
|
batchIndex: batch.batchIndex,
|
|
10724
10790
|
error: errMsg
|
|
@@ -10738,7 +10804,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10738
10804
|
filesModified: batchFilesModified,
|
|
10739
10805
|
result: batchResult.result === "success" ? "success" : "failed"
|
|
10740
10806
|
};
|
|
10741
|
-
logger$
|
|
10807
|
+
logger$36.info(batchMetrics, "Batch dev-story metrics");
|
|
10742
10808
|
for (const f of batchFilesModified) allFilesModified.add(f);
|
|
10743
10809
|
if (batchFilesModified.length > 0) batchFileGroups.push({
|
|
10744
10810
|
batchIndex: batch.batchIndex,
|
|
@@ -10760,13 +10826,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
10760
10826
|
})
|
|
10761
10827
|
});
|
|
10762
10828
|
} catch (tokenErr) {
|
|
10763
|
-
logger$
|
|
10829
|
+
logger$36.warn({
|
|
10764
10830
|
storyKey,
|
|
10765
10831
|
batchIndex: batch.batchIndex,
|
|
10766
10832
|
err: tokenErr
|
|
10767
10833
|
}, "Failed to record batch token usage");
|
|
10768
10834
|
}
|
|
10769
|
-
if (batchResult.result === "failed") logger$
|
|
10835
|
+
if (batchResult.result === "failed") logger$36.warn({
|
|
10770
10836
|
storyKey,
|
|
10771
10837
|
batchIndex: batch.batchIndex,
|
|
10772
10838
|
error: batchResult.error
|
|
@@ -10799,7 +10865,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10799
10865
|
result: devResult
|
|
10800
10866
|
});
|
|
10801
10867
|
persistState();
|
|
10802
|
-
if (devResult.result === "failed") logger$
|
|
10868
|
+
if (devResult.result === "failed") logger$36.warn("Dev-story reported failure, proceeding to code review", {
|
|
10803
10869
|
storyKey,
|
|
10804
10870
|
error: devResult.error,
|
|
10805
10871
|
filesModified: devFilesModified.length
|
|
@@ -10856,7 +10922,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10856
10922
|
"NEEDS_MAJOR_REWORK": 2
|
|
10857
10923
|
};
|
|
10858
10924
|
for (const group of batchFileGroups) {
|
|
10859
|
-
logger$
|
|
10925
|
+
logger$36.info({
|
|
10860
10926
|
storyKey,
|
|
10861
10927
|
batchIndex: group.batchIndex,
|
|
10862
10928
|
fileCount: group.files.length
|
|
@@ -10893,7 +10959,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10893
10959
|
rawOutput: lastRawOutput,
|
|
10894
10960
|
tokenUsage: aggregateTokens
|
|
10895
10961
|
};
|
|
10896
|
-
logger$
|
|
10962
|
+
logger$36.info({
|
|
10897
10963
|
storyKey,
|
|
10898
10964
|
batchCount: batchFileGroups.length,
|
|
10899
10965
|
verdict: worstVerdict,
|
|
@@ -10919,7 +10985,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10919
10985
|
const isPhantomReview = reviewResult.verdict !== "SHIP_IT" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
|
|
10920
10986
|
if (isPhantomReview && !timeoutRetried) {
|
|
10921
10987
|
timeoutRetried = true;
|
|
10922
|
-
logger$
|
|
10988
|
+
logger$36.warn({
|
|
10923
10989
|
storyKey,
|
|
10924
10990
|
reviewCycles,
|
|
10925
10991
|
error: reviewResult.error
|
|
@@ -10929,7 +10995,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10929
10995
|
verdict = reviewResult.verdict;
|
|
10930
10996
|
issueList = reviewResult.issue_list ?? [];
|
|
10931
10997
|
if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
|
|
10932
|
-
logger$
|
|
10998
|
+
logger$36.info({
|
|
10933
10999
|
storyKey,
|
|
10934
11000
|
originalVerdict: verdict,
|
|
10935
11001
|
issuesBefore: previousIssueList.length,
|
|
@@ -10965,7 +11031,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10965
11031
|
if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
|
|
10966
11032
|
parts.push(`${fileCount} files`);
|
|
10967
11033
|
parts.push(`${totalTokensK} tokens`);
|
|
10968
|
-
logger$
|
|
11034
|
+
logger$36.info({
|
|
10969
11035
|
storyKey,
|
|
10970
11036
|
verdict,
|
|
10971
11037
|
agentVerdict: reviewResult.agentVerdict
|
|
@@ -11023,7 +11089,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11023
11089
|
persistState();
|
|
11024
11090
|
return;
|
|
11025
11091
|
}
|
|
11026
|
-
logger$
|
|
11092
|
+
logger$36.info({
|
|
11027
11093
|
storyKey,
|
|
11028
11094
|
reviewCycles: finalReviewCycles,
|
|
11029
11095
|
issueCount: issueList.length
|
|
@@ -11073,7 +11139,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11073
11139
|
fixPrompt = assembled.prompt;
|
|
11074
11140
|
} catch {
|
|
11075
11141
|
fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
|
|
11076
|
-
logger$
|
|
11142
|
+
logger$36.warn("Failed to assemble auto-approve fix prompt, using fallback", { storyKey });
|
|
11077
11143
|
}
|
|
11078
11144
|
const handle = dispatcher.dispatch({
|
|
11079
11145
|
prompt: fixPrompt,
|
|
@@ -11090,9 +11156,9 @@ function createImplementationOrchestrator(deps) {
|
|
|
11090
11156
|
output: fixResult.tokenEstimate.output
|
|
11091
11157
|
} : void 0 }
|
|
11092
11158
|
});
|
|
11093
|
-
if (fixResult.status === "timeout") logger$
|
|
11159
|
+
if (fixResult.status === "timeout") logger$36.warn("Auto-approve fix timed out — approving anyway (issues were minor)", { storyKey });
|
|
11094
11160
|
} catch (err) {
|
|
11095
|
-
logger$
|
|
11161
|
+
logger$36.warn("Auto-approve fix dispatch failed — approving anyway (issues were minor)", {
|
|
11096
11162
|
storyKey,
|
|
11097
11163
|
err
|
|
11098
11164
|
});
|
|
@@ -11164,7 +11230,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11164
11230
|
fixPrompt = assembled.prompt;
|
|
11165
11231
|
} catch {
|
|
11166
11232
|
fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
|
|
11167
|
-
logger$
|
|
11233
|
+
logger$36.warn("Failed to assemble fix prompt, using fallback", {
|
|
11168
11234
|
storyKey,
|
|
11169
11235
|
taskType
|
|
11170
11236
|
});
|
|
@@ -11187,7 +11253,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11187
11253
|
} : void 0 }
|
|
11188
11254
|
});
|
|
11189
11255
|
if (fixResult.status === "timeout") {
|
|
11190
|
-
logger$
|
|
11256
|
+
logger$36.warn("Fix dispatch timed out — escalating story", {
|
|
11191
11257
|
storyKey,
|
|
11192
11258
|
taskType
|
|
11193
11259
|
});
|
|
@@ -11207,13 +11273,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
11207
11273
|
persistState();
|
|
11208
11274
|
return;
|
|
11209
11275
|
}
|
|
11210
|
-
if (fixResult.status === "failed") logger$
|
|
11276
|
+
if (fixResult.status === "failed") logger$36.warn("Fix dispatch failed", {
|
|
11211
11277
|
storyKey,
|
|
11212
11278
|
taskType,
|
|
11213
11279
|
exitCode: fixResult.exitCode
|
|
11214
11280
|
});
|
|
11215
11281
|
} catch (err) {
|
|
11216
|
-
logger$
|
|
11282
|
+
logger$36.warn("Fix dispatch failed, continuing to next review", {
|
|
11217
11283
|
storyKey,
|
|
11218
11284
|
taskType,
|
|
11219
11285
|
err
|
|
@@ -11266,11 +11332,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
11266
11332
|
}
|
|
11267
11333
|
async function run(storyKeys) {
|
|
11268
11334
|
if (_state === "RUNNING" || _state === "PAUSED") {
|
|
11269
|
-
logger$
|
|
11335
|
+
logger$36.warn("run() called while orchestrator is already running or paused — ignoring", { state: _state });
|
|
11270
11336
|
return getStatus();
|
|
11271
11337
|
}
|
|
11272
11338
|
if (_state === "COMPLETE") {
|
|
11273
|
-
logger$
|
|
11339
|
+
logger$36.warn("run() called on a COMPLETE orchestrator — ignoring", { state: _state });
|
|
11274
11340
|
return getStatus();
|
|
11275
11341
|
}
|
|
11276
11342
|
_state = "RUNNING";
|
|
@@ -11288,13 +11354,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
11288
11354
|
startHeartbeat();
|
|
11289
11355
|
if (projectRoot !== void 0) {
|
|
11290
11356
|
const seedResult = seedMethodologyContext(db, projectRoot);
|
|
11291
|
-
if (seedResult.decisionsCreated > 0) logger$
|
|
11357
|
+
if (seedResult.decisionsCreated > 0) logger$36.info({
|
|
11292
11358
|
decisionsCreated: seedResult.decisionsCreated,
|
|
11293
11359
|
skippedCategories: seedResult.skippedCategories
|
|
11294
11360
|
}, "Methodology context seeded from planning artifacts");
|
|
11295
11361
|
}
|
|
11296
11362
|
const groups = detectConflictGroups(storyKeys);
|
|
11297
|
-
logger$
|
|
11363
|
+
logger$36.info("Orchestrator starting", {
|
|
11298
11364
|
storyCount: storyKeys.length,
|
|
11299
11365
|
groupCount: groups.length,
|
|
11300
11366
|
maxConcurrency: config.maxConcurrency
|
|
@@ -11306,7 +11372,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11306
11372
|
_state = "FAILED";
|
|
11307
11373
|
_completedAt = new Date().toISOString();
|
|
11308
11374
|
persistState();
|
|
11309
|
-
logger$
|
|
11375
|
+
logger$36.error("Orchestrator failed with unhandled error", { err });
|
|
11310
11376
|
return getStatus();
|
|
11311
11377
|
}
|
|
11312
11378
|
stopHeartbeat();
|
|
@@ -11333,7 +11399,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11333
11399
|
_pauseGate = createPauseGate();
|
|
11334
11400
|
_state = "PAUSED";
|
|
11335
11401
|
eventBus.emit("orchestrator:paused", {});
|
|
11336
|
-
logger$
|
|
11402
|
+
logger$36.info("Orchestrator paused");
|
|
11337
11403
|
}
|
|
11338
11404
|
function resume() {
|
|
11339
11405
|
if (_state !== "PAUSED") return;
|
|
@@ -11344,7 +11410,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11344
11410
|
}
|
|
11345
11411
|
_state = "RUNNING";
|
|
11346
11412
|
eventBus.emit("orchestrator:resumed", {});
|
|
11347
|
-
logger$
|
|
11413
|
+
logger$36.info("Orchestrator resumed");
|
|
11348
11414
|
}
|
|
11349
11415
|
return {
|
|
11350
11416
|
run,
|
|
@@ -11982,7 +12048,7 @@ const CritiqueOutputSchema = z.object({
|
|
|
11982
12048
|
|
|
11983
12049
|
//#endregion
|
|
11984
12050
|
//#region src/modules/phase-orchestrator/critique-loop.ts
|
|
11985
|
-
const logger$
|
|
12051
|
+
const logger$8 = createLogger("critique-loop");
|
|
11986
12052
|
/**
|
|
11987
12053
|
* Maps a phase name to the critique prompt template name.
|
|
11988
12054
|
* Falls back to `critique-${phase}` for unknown phases.
|
|
@@ -12035,7 +12101,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
|
|
|
12035
12101
|
critiquePrompt = critiqueTemplate.replace("{{artifact_content}}", currentArtifact).replace("{{project_context}}", projectContext);
|
|
12036
12102
|
} catch (err) {
|
|
12037
12103
|
const message = err instanceof Error ? err.message : String(err);
|
|
12038
|
-
logger$
|
|
12104
|
+
logger$8.warn({
|
|
12039
12105
|
phaseId,
|
|
12040
12106
|
promptName: critiquePromptName,
|
|
12041
12107
|
err: message
|
|
@@ -12063,7 +12129,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
|
|
|
12063
12129
|
critiqueTokens.output += result.tokenEstimate.output;
|
|
12064
12130
|
if (result.status !== "completed" || result.parsed === null) {
|
|
12065
12131
|
const errMsg = result.parseError ?? `Critique dispatch ended with status '${result.status}'`;
|
|
12066
|
-
logger$
|
|
12132
|
+
logger$8.warn({
|
|
12067
12133
|
phaseId,
|
|
12068
12134
|
iteration: i + 1,
|
|
12069
12135
|
err: errMsg
|
|
@@ -12082,7 +12148,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
|
|
|
12082
12148
|
lastCritiqueOutput = critiqueOutput;
|
|
12083
12149
|
} catch (err) {
|
|
12084
12150
|
const message = err instanceof Error ? err.message : String(err);
|
|
12085
|
-
logger$
|
|
12151
|
+
logger$8.warn({
|
|
12086
12152
|
phaseId,
|
|
12087
12153
|
iteration: i + 1,
|
|
12088
12154
|
err: message
|
|
@@ -12122,14 +12188,14 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
|
|
|
12122
12188
|
});
|
|
12123
12189
|
} catch (err) {
|
|
12124
12190
|
const message = err instanceof Error ? err.message : String(err);
|
|
12125
|
-
logger$
|
|
12191
|
+
logger$8.warn({
|
|
12126
12192
|
phaseId,
|
|
12127
12193
|
iteration: i + 1,
|
|
12128
12194
|
err: message
|
|
12129
12195
|
}, "Critique loop: failed to store critique decision — continuing");
|
|
12130
12196
|
}
|
|
12131
12197
|
if (critiqueOutput.verdict === "pass") {
|
|
12132
|
-
logger$
|
|
12198
|
+
logger$8.info({
|
|
12133
12199
|
phaseId,
|
|
12134
12200
|
iteration: i + 1
|
|
12135
12201
|
}, "Critique loop: artifact passed critique — loop complete");
|
|
@@ -12142,7 +12208,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
|
|
|
12142
12208
|
totalMs: Date.now() - startMs
|
|
12143
12209
|
};
|
|
12144
12210
|
}
|
|
12145
|
-
logger$
|
|
12211
|
+
logger$8.info({
|
|
12146
12212
|
phaseId,
|
|
12147
12213
|
iteration: i + 1,
|
|
12148
12214
|
issueCount: critiqueOutput.issue_count
|
|
@@ -12155,7 +12221,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
|
|
|
12155
12221
|
refinePrompt = refineTemplate.replace("{{original_artifact}}", currentArtifact).replace("{{critique_issues}}", issuesText).replace("{{phase_context}}", phaseContext);
|
|
12156
12222
|
} catch (err) {
|
|
12157
12223
|
const message = err instanceof Error ? err.message : String(err);
|
|
12158
|
-
logger$
|
|
12224
|
+
logger$8.warn({
|
|
12159
12225
|
phaseId,
|
|
12160
12226
|
iteration: i + 1,
|
|
12161
12227
|
err: message
|
|
@@ -12176,7 +12242,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
|
|
|
12176
12242
|
const originalLength = currentArtifact.length;
|
|
12177
12243
|
const refinedLength = refineResult.output.length;
|
|
12178
12244
|
const delta = refinedLength - originalLength;
|
|
12179
|
-
logger$
|
|
12245
|
+
logger$8.info({
|
|
12180
12246
|
phaseId,
|
|
12181
12247
|
iteration: i + 1,
|
|
12182
12248
|
originalLength,
|
|
@@ -12185,7 +12251,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
|
|
|
12185
12251
|
}, "Critique loop: refinement complete");
|
|
12186
12252
|
currentArtifact = refineResult.output;
|
|
12187
12253
|
} else {
|
|
12188
|
-
logger$
|
|
12254
|
+
logger$8.warn({
|
|
12189
12255
|
phaseId,
|
|
12190
12256
|
iteration: i + 1,
|
|
12191
12257
|
status: refineResult.status
|
|
@@ -12194,7 +12260,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
|
|
|
12194
12260
|
}
|
|
12195
12261
|
} catch (err) {
|
|
12196
12262
|
const message = err instanceof Error ? err.message : String(err);
|
|
12197
|
-
logger$
|
|
12263
|
+
logger$8.warn({
|
|
12198
12264
|
phaseId,
|
|
12199
12265
|
iteration: i + 1,
|
|
12200
12266
|
err: message
|
|
@@ -12205,12 +12271,12 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
|
|
|
12205
12271
|
}
|
|
12206
12272
|
const remainingIssues = lastCritiqueOutput?.issues ?? [];
|
|
12207
12273
|
if (remainingIssues.length > 0) {
|
|
12208
|
-
logger$
|
|
12274
|
+
logger$8.warn({
|
|
12209
12275
|
phaseId,
|
|
12210
12276
|
maxIterations,
|
|
12211
12277
|
issueCount: remainingIssues.length
|
|
12212
12278
|
}, "Critique loop: max iterations reached with unresolved issues");
|
|
12213
|
-
for (const issue of remainingIssues) logger$
|
|
12279
|
+
for (const issue of remainingIssues) logger$8.warn({
|
|
12214
12280
|
phaseId,
|
|
12215
12281
|
severity: issue.severity,
|
|
12216
12282
|
category: issue.category,
|
|
@@ -12229,7 +12295,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
|
|
|
12229
12295
|
|
|
12230
12296
|
//#endregion
|
|
12231
12297
|
//#region src/modules/phase-orchestrator/elicitation-selector.ts
|
|
12232
|
-
const logger$
|
|
12298
|
+
const logger$7 = createLogger("elicitation-selector");
|
|
12233
12299
|
/**
|
|
12234
12300
|
* Affinity scores (0.0–1.0) for each category per content type.
|
|
12235
12301
|
*
|
|
@@ -12351,10 +12417,10 @@ function loadElicitationMethods() {
|
|
|
12351
12417
|
try {
|
|
12352
12418
|
const content = readFileSync(csvPath, "utf-8");
|
|
12353
12419
|
const methods = parseMethodsCsv(content);
|
|
12354
|
-
logger$
|
|
12420
|
+
logger$7.debug({ count: methods.length }, "Loaded elicitation methods");
|
|
12355
12421
|
return methods;
|
|
12356
12422
|
} catch (err) {
|
|
12357
|
-
logger$
|
|
12423
|
+
logger$7.warn({
|
|
12358
12424
|
csvPath,
|
|
12359
12425
|
err
|
|
12360
12426
|
}, "Failed to load elicitation methods CSV");
|
|
@@ -12649,7 +12715,7 @@ const ElicitationOutputSchema = z.object({
|
|
|
12649
12715
|
|
|
12650
12716
|
//#endregion
|
|
12651
12717
|
//#region src/modules/phase-orchestrator/step-runner.ts
|
|
12652
|
-
const logger$
|
|
12718
|
+
const logger$6 = createLogger("step-runner");
|
|
12653
12719
|
/**
|
|
12654
12720
|
* Format an array of decision records into a markdown section for injection.
|
|
12655
12721
|
*
|
|
@@ -12756,7 +12822,7 @@ async function runSteps(steps, deps, runId, phase, params) {
|
|
|
12756
12822
|
if (estimatedTokens > budgetTokens) {
|
|
12757
12823
|
const decisionRefs = step.context.filter((ref) => ref.source.startsWith("decision:"));
|
|
12758
12824
|
if (decisionRefs.length > 0) {
|
|
12759
|
-
logger$
|
|
12825
|
+
logger$6.warn({
|
|
12760
12826
|
step: step.name,
|
|
12761
12827
|
estimatedTokens,
|
|
12762
12828
|
budgetTokens
|
|
@@ -12783,7 +12849,7 @@ async function runSteps(steps, deps, runId, phase, params) {
|
|
|
12783
12849
|
}
|
|
12784
12850
|
prompt = summarizedPrompt;
|
|
12785
12851
|
estimatedTokens = Math.ceil(prompt.length / 4);
|
|
12786
|
-
if (estimatedTokens <= budgetTokens) logger$
|
|
12852
|
+
if (estimatedTokens <= budgetTokens) logger$6.info({
|
|
12787
12853
|
step: step.name,
|
|
12788
12854
|
estimatedTokens,
|
|
12789
12855
|
budgetTokens
|
|
@@ -12964,7 +13030,7 @@ async function runSteps(steps, deps, runId, phase, params) {
|
|
|
12964
13030
|
const critiqueResult = await runCritiqueLoop(artifactContent, phase, runId, phase, deps);
|
|
12965
13031
|
totalInput += critiqueResult.critiqueTokens.input + critiqueResult.refinementTokens.input;
|
|
12966
13032
|
totalOutput += critiqueResult.critiqueTokens.output + critiqueResult.refinementTokens.output;
|
|
12967
|
-
logger$
|
|
13033
|
+
logger$6.info({
|
|
12968
13034
|
step: step.name,
|
|
12969
13035
|
verdict: critiqueResult.verdict,
|
|
12970
13036
|
iterations: critiqueResult.iterations,
|
|
@@ -12972,7 +13038,7 @@ async function runSteps(steps, deps, runId, phase, params) {
|
|
|
12972
13038
|
}, "Step critique loop complete");
|
|
12973
13039
|
} catch (critiqueErr) {
|
|
12974
13040
|
const critiqueMsg = critiqueErr instanceof Error ? critiqueErr.message : String(critiqueErr);
|
|
12975
|
-
logger$
|
|
13041
|
+
logger$6.warn({
|
|
12976
13042
|
step: step.name,
|
|
12977
13043
|
err: critiqueMsg
|
|
12978
13044
|
}, "Step critique loop threw an error — continuing without critique");
|
|
@@ -12982,7 +13048,7 @@ async function runSteps(steps, deps, runId, phase, params) {
|
|
|
12982
13048
|
const contentType = deriveContentType(phase, step.name);
|
|
12983
13049
|
const selectedMethods = selectMethods({ content_type: contentType }, usedElicitationMethods);
|
|
12984
13050
|
if (selectedMethods.length > 0) {
|
|
12985
|
-
logger$
|
|
13051
|
+
logger$6.info({
|
|
12986
13052
|
step: step.name,
|
|
12987
13053
|
methods: selectedMethods.map((m) => m.name),
|
|
12988
13054
|
contentType
|
|
@@ -13021,13 +13087,13 @@ async function runSteps(steps, deps, runId, phase, params) {
|
|
|
13021
13087
|
key: `${phase}-round-${roundIndex}-insights`,
|
|
13022
13088
|
value: elicitParsed.insights
|
|
13023
13089
|
});
|
|
13024
|
-
logger$
|
|
13090
|
+
logger$6.info({
|
|
13025
13091
|
step: step.name,
|
|
13026
13092
|
method: method.name,
|
|
13027
13093
|
roundIndex
|
|
13028
13094
|
}, "Elicitation insights stored in decision store");
|
|
13029
13095
|
}
|
|
13030
|
-
} else logger$
|
|
13096
|
+
} else logger$6.warn({
|
|
13031
13097
|
step: step.name,
|
|
13032
13098
|
method: method.name,
|
|
13033
13099
|
status: elicitResult.status
|
|
@@ -13043,7 +13109,7 @@ async function runSteps(steps, deps, runId, phase, params) {
|
|
|
13043
13109
|
}
|
|
13044
13110
|
} catch (elicitErr) {
|
|
13045
13111
|
const elicitMsg = elicitErr instanceof Error ? elicitErr.message : String(elicitErr);
|
|
13046
|
-
logger$
|
|
13112
|
+
logger$6.warn({
|
|
13047
13113
|
step: step.name,
|
|
13048
13114
|
err: elicitMsg
|
|
13049
13115
|
}, "Step elicitation threw an error — continuing without elicitation");
|
|
@@ -13123,6 +13189,24 @@ const BRIEF_FIELDS$1 = [
|
|
|
13123
13189
|
"constraints",
|
|
13124
13190
|
"technology_constraints"
|
|
13125
13191
|
];
|
|
13192
|
+
/**
 * Pattern matching cloud platforms, languages, frameworks, and infra tech.
 *
 * `\.NET` is a top-level alternative (outside the `\b`-anchored group) because
 * both a space and "." are non-word characters: `\b\.NET` could never match
 * ".NET" after whitespace (e.g. "use .NET 8"), so the alternative was dead in
 * the original pattern.
 * NOTE(review): `Go\b` also matches the standalone English word "go"
 * case-insensitively — possible false positives; confirm acceptable upstream.
 */
const TECH_CONSTRAINT_PATTERN = /\b(GCP|AWS|Azure|Google Cloud|Cloud Run|GKE|Cloud SQL|Memorystore|Pub\/Sub|BigQuery|EKS|Lambda|S3|Kotlin|JVM|Java|Go\b|Golang|Rust|Node\.js|JavaScript|TypeScript|Python|C#|Spring Boot|Ktor|Micronaut|Quarkus|NestJS|Express|multi-region|active-active|AES-256|TLS\s*1\.[23]|encryption at rest|encryption in transit)|\.NET\b/i;
/**
 * Scan constraints for technology-related items and move them to
 * technology_constraints. Models consistently lump all constraints
 * together despite prompt instructions to separate them.
 *
 * Mutates `brief` in place. No-op when technology_constraints is already
 * populated (the model separated them itself) or when no constraint matches
 * the tech pattern.
 *
 * @param {{constraints: string[], technology_constraints: string[]}} brief
 */
function reclassifyTechnologyConstraints(brief) {
	if (brief.technology_constraints.length > 0) return;
	const techItems = [];
	const nonTechItems = [];
	for (const c of brief.constraints) if (TECH_CONSTRAINT_PATTERN.test(c)) techItems.push(c);
	else nonTechItems.push(c);
	if (techItems.length > 0) {
		brief.constraints = nonTechItems;
		brief.technology_constraints = techItems;
	}
}
|
|
13126
13210
|
/**
|
|
13127
13211
|
* Build step definitions for 2-step analysis decomposition.
|
|
13128
13212
|
*/
|
|
@@ -13220,6 +13304,23 @@ async function runAnalysisMultiStep(deps, params) {
|
|
|
13220
13304
|
constraints: scopeOutput.constraints ?? [],
|
|
13221
13305
|
technology_constraints: scopeOutput.technology_constraints ?? []
|
|
13222
13306
|
};
|
|
13307
|
+
reclassifyTechnologyConstraints(brief);
|
|
13308
|
+
if (brief.technology_constraints.length > 0) {
|
|
13309
|
+
upsertDecision(deps.db, {
|
|
13310
|
+
pipeline_run_id: params.runId,
|
|
13311
|
+
phase: "analysis",
|
|
13312
|
+
category: "product-brief",
|
|
13313
|
+
key: "constraints",
|
|
13314
|
+
value: JSON.stringify(brief.constraints)
|
|
13315
|
+
});
|
|
13316
|
+
upsertDecision(deps.db, {
|
|
13317
|
+
pipeline_run_id: params.runId,
|
|
13318
|
+
phase: "analysis",
|
|
13319
|
+
category: "technology-constraints",
|
|
13320
|
+
key: "technology_constraints",
|
|
13321
|
+
value: JSON.stringify(brief.technology_constraints)
|
|
13322
|
+
});
|
|
13323
|
+
}
|
|
13223
13324
|
const analysisResult = {
|
|
13224
13325
|
result: "success",
|
|
13225
13326
|
product_brief: brief,
|
|
@@ -13350,6 +13451,7 @@ async function runAnalysisPhase(deps, params) {
|
|
|
13350
13451
|
|
|
13351
13452
|
//#endregion
|
|
13352
13453
|
//#region src/modules/phase-orchestrator/phases/planning.ts
|
|
13454
|
+
const logger$5 = createLogger("planning-phase");
|
|
13353
13455
|
/** Maximum total prompt length in tokens (3,500 tokens × 4 chars/token = 14,000 chars) */
|
|
13354
13456
|
const MAX_PROMPT_TOKENS = 3500;
|
|
13355
13457
|
const MAX_PROMPT_CHARS = MAX_PROMPT_TOKENS * 4;
|
|
@@ -13370,6 +13472,26 @@ const BRIEF_FIELDS = [
|
|
|
13370
13472
|
"constraints",
|
|
13371
13473
|
"technology_constraints"
|
|
13372
13474
|
];
|
|
13475
|
+
/** Keywords indicating JavaScript/TypeScript/Node.js ecosystem */
const JS_TS_PATTERN = /\b(TypeScript|JavaScript|Node\.js|NestJS|Express|Fastify|Hapi|Koa|Next\.js.*backend|Next\.js.*API|Deno|Bun)\b/i;
/**
 * Keywords indicating non-JS backend languages that satisfy high-concurrency
 * constraints.
 *
 * `C#` and `\.NET` are kept outside the `\b`-anchored group: "#" and "." are
 * non-word characters, so `(…|C#|…)\b` could never match "C#" before a space
 * or end-of-string, and `\b(…|\.NET|…)` could never match ".NET" after
 * whitespace. Both alternatives were dead in the original pattern.
 */
const COMPLIANT_LANG_PATTERN = /\b(Kotlin|JVM|Java|Go|Golang|Rust|Scala|Erlang|Elixir)\b|\bC#|\.NET\b/i;
/**
 * Check whether the tech stack's language/framework fields violate technology
 * constraints that exclude JavaScript/Node.js from backend services.
 *
 * @param {Record<string, string>} techStack - key/value tech-stack fields produced by the NFRs step
 * @param {{value: string}[]} technologyConstraints - technology-constraint decision rows from the analysis phase
 * @returns {string | null} A violation message if detected, or null if compliant.
 */
function detectTechStackViolation(techStack, technologyConstraints) {
	const constraintsText = technologyConstraints.map((c) => c.value).join(" ");
	// Constraint text signals either an explicit JS/Node exclusion…
	const excludesJS = /\b(excluded|not.*right choice|not.*recommended|avoid|do not use|prohibited)\b/i.test(constraintsText) && /\b(JavaScript|Node\.js|TypeScript)\b/i.test(constraintsText);
	// …or a stated preference/requirement for a compliant non-JS language.
	const prefersNonJS = COMPLIANT_LANG_PATTERN.test(constraintsText) && /\b(prefer|must|required|evaluate|choose)\b/i.test(constraintsText);
	if (!excludesJS && !prefersNonJS) return null;
	const langValue = techStack["language"] ?? "";
	const frameworkValue = techStack["framework"] ?? techStack["backend_framework"] ?? "";
	if (JS_TS_PATTERN.test(langValue) || JS_TS_PATTERN.test(frameworkValue)) return `Tech stack violates technology constraints: language="${langValue}", framework="${frameworkValue}". Constraints specify: ${constraintsText.substring(0, 200)}`;
	return null;
}
|
|
13373
13495
|
/**
|
|
13374
13496
|
* Format product brief decisions from the analysis phase into markdown-like text
|
|
13375
13497
|
* suitable for prompt injection.
|
|
@@ -13468,6 +13590,10 @@ function buildPlanningSteps() {
|
|
|
13468
13590
|
{
|
|
13469
13591
|
placeholder: "technology_constraints",
|
|
13470
13592
|
source: "decision:analysis.technology-constraints"
|
|
13593
|
+
},
|
|
13594
|
+
{
|
|
13595
|
+
placeholder: "concept",
|
|
13596
|
+
source: "param:concept"
|
|
13471
13597
|
}
|
|
13472
13598
|
],
|
|
13473
13599
|
persist: [
|
|
@@ -13524,8 +13650,14 @@ async function runPlanningMultiStep(deps, params) {
|
|
|
13524
13650
|
details: "No product brief decisions found in the analysis phase.",
|
|
13525
13651
|
tokenUsage: zeroTokenUsage
|
|
13526
13652
|
};
|
|
13653
|
+
let concept = "";
|
|
13654
|
+
const run = getPipelineRunById(db, runId);
|
|
13655
|
+
if (run?.config_json) try {
|
|
13656
|
+
const config = JSON.parse(run.config_json);
|
|
13657
|
+
concept = config.concept ?? "";
|
|
13658
|
+
} catch {}
|
|
13527
13659
|
const steps = buildPlanningSteps();
|
|
13528
|
-
const result = await runSteps(steps, deps, params.runId, "planning", {});
|
|
13660
|
+
const result = await runSteps(steps, deps, params.runId, "planning", { concept });
|
|
13529
13661
|
if (!result.success) return {
|
|
13530
13662
|
result: "failed",
|
|
13531
13663
|
error: result.error ?? "multi_step_failed",
|
|
@@ -13533,13 +13665,52 @@ async function runPlanningMultiStep(deps, params) {
|
|
|
13533
13665
|
tokenUsage: result.tokenUsage
|
|
13534
13666
|
};
|
|
13535
13667
|
const frsOutput = result.steps[1]?.parsed;
|
|
13536
|
-
|
|
13668
|
+
let nfrsOutput = result.steps[2]?.parsed;
|
|
13669
|
+
let totalTokenUsage = { ...result.tokenUsage };
|
|
13537
13670
|
if (!frsOutput || !nfrsOutput) return {
|
|
13538
13671
|
result: "failed",
|
|
13539
13672
|
error: "incomplete_steps",
|
|
13540
13673
|
details: "Not all planning steps produced output",
|
|
13541
13674
|
tokenUsage: result.tokenUsage
|
|
13542
13675
|
};
|
|
13676
|
+
const techStack = nfrsOutput.tech_stack;
|
|
13677
|
+
if (techStack) {
|
|
13678
|
+
const techConstraintDecisions = allAnalysisDecisions.filter((d) => d.category === "technology-constraints");
|
|
13679
|
+
const violation = detectTechStackViolation(techStack, techConstraintDecisions);
|
|
13680
|
+
if (violation) {
|
|
13681
|
+
logger$5.warn({ violation }, "Tech stack constraint violation detected — retrying step 3 with correction");
|
|
13682
|
+
const correctionPrefix = `CRITICAL CORRECTION: Your previous output was rejected because it violates the stated technology constraints.\n\nViolation: ${violation}\n\nYou MUST NOT use TypeScript, JavaScript, or Node.js for ANY backend service. Choose from Go, Kotlin/JVM, or Rust as stated in the technology constraints.\n\nRe-generate your output with a compliant tech stack. Everything else (NFRs, domain model, out-of-scope) can remain the same.\n\n---\n\n`;
|
|
13683
|
+
const step3Template = await deps.pack.getPrompt("planning-step-3-nfrs");
|
|
13684
|
+
const stepOutputs = new Map();
|
|
13685
|
+
stepOutputs.set("planning-step-1-classification", result.steps[0]?.parsed ?? {});
|
|
13686
|
+
stepOutputs.set("planning-step-2-frs", frsOutput);
|
|
13687
|
+
let correctedPrompt = step3Template;
|
|
13688
|
+
const step3Def = steps[2];
|
|
13689
|
+
for (const ref of step3Def?.context ?? []) {
|
|
13690
|
+
const value = resolveContext(ref, deps, runId, { concept }, stepOutputs);
|
|
13691
|
+
correctedPrompt = correctedPrompt.replace(`{{${ref.placeholder}}}`, value);
|
|
13692
|
+
}
|
|
13693
|
+
correctedPrompt = correctionPrefix + correctedPrompt;
|
|
13694
|
+
const retryHandle = deps.dispatcher.dispatch({
|
|
13695
|
+
prompt: correctedPrompt,
|
|
13696
|
+
agent: "claude-code",
|
|
13697
|
+
taskType: "planning-nfrs",
|
|
13698
|
+
outputSchema: PlanningNFRsOutputSchema
|
|
13699
|
+
});
|
|
13700
|
+
const retryResult = await retryHandle.result;
|
|
13701
|
+
totalTokenUsage.input += retryResult.tokenEstimate.input;
|
|
13702
|
+
totalTokenUsage.output += retryResult.tokenEstimate.output;
|
|
13703
|
+
if (retryResult.status === "completed" && retryResult.parsed !== null && retryResult.parsed.result !== "failed") {
|
|
13704
|
+
const retryParsed = retryResult.parsed;
|
|
13705
|
+
const retryTechStack = retryParsed.tech_stack;
|
|
13706
|
+
const retryViolation = retryTechStack ? detectTechStackViolation(retryTechStack, techConstraintDecisions) : null;
|
|
13707
|
+
if (!retryViolation) {
|
|
13708
|
+
logger$5.info("Retry produced compliant tech stack — using corrected output");
|
|
13709
|
+
nfrsOutput = retryParsed;
|
|
13710
|
+
} else logger$5.warn({ retryViolation }, "Retry still violates constraints — using original output");
|
|
13711
|
+
} else logger$5.warn("Retry dispatch failed — using original output");
|
|
13712
|
+
}
|
|
13713
|
+
}
|
|
13543
13714
|
const frs = frsOutput.functional_requirements;
|
|
13544
13715
|
const nfrs = nfrsOutput.non_functional_requirements;
|
|
13545
13716
|
const userStories = frsOutput.user_stories;
|
|
@@ -13547,13 +13718,13 @@ async function runPlanningMultiStep(deps, params) {
|
|
|
13547
13718
|
result: "failed",
|
|
13548
13719
|
error: "missing_functional_requirements",
|
|
13549
13720
|
details: "FRs step did not return functional_requirements",
|
|
13550
|
-
tokenUsage:
|
|
13721
|
+
tokenUsage: totalTokenUsage
|
|
13551
13722
|
};
|
|
13552
13723
|
if (!nfrs?.length) return {
|
|
13553
13724
|
result: "failed",
|
|
13554
13725
|
error: "missing_non_functional_requirements",
|
|
13555
13726
|
details: "NFRs step did not return non_functional_requirements",
|
|
13556
|
-
tokenUsage:
|
|
13727
|
+
tokenUsage: totalTokenUsage
|
|
13557
13728
|
};
|
|
13558
13729
|
for (const fr of frs) createRequirement(db, {
|
|
13559
13730
|
pipeline_run_id: params.runId,
|
|
@@ -13575,7 +13746,7 @@ async function runPlanningMultiStep(deps, params) {
|
|
|
13575
13746
|
result: "success",
|
|
13576
13747
|
requirements_count: requirementsCount,
|
|
13577
13748
|
user_stories_count: userStoriesCount,
|
|
13578
|
-
tokenUsage:
|
|
13749
|
+
tokenUsage: totalTokenUsage
|
|
13579
13750
|
};
|
|
13580
13751
|
const artifactId = result.steps[2]?.artifactId;
|
|
13581
13752
|
if (artifactId !== void 0) planningResult.artifact_id = artifactId;
|
|
@@ -15623,6 +15794,19 @@ const BMAD_BASELINE_TOKENS = 23800;
|
|
|
15623
15794
|
/** Story key pattern: <epic>-<story> e.g. "10-1" */
|
|
15624
15795
|
const STORY_KEY_PATTERN = /^\d+-\d+$/;
|
|
15625
15796
|
/**
|
|
15797
|
+
* Top-level keys in .claude/settings.json that substrate owns.
|
|
15798
|
+
* On init, these are set/updated unconditionally.
|
|
15799
|
+
* User-defined keys outside this set are never touched.
|
|
15800
|
+
*/
|
|
15801
|
+
const SUBSTRATE_OWNED_SETTINGS_KEYS = ["statusLine"];
|
|
15802
|
+
function getSubstrateDefaultSettings() {
|
|
15803
|
+
return { statusLine: {
|
|
15804
|
+
type: "command",
|
|
15805
|
+
command: "bash \"$CLAUDE_PROJECT_DIR\"/.claude/statusline.sh",
|
|
15806
|
+
padding: 0
|
|
15807
|
+
} };
|
|
15808
|
+
}
|
|
15809
|
+
/**
|
|
15626
15810
|
* Format output according to the requested format.
|
|
15627
15811
|
*/
|
|
15628
15812
|
function formatOutput(data, format, success = true, errorMessage) {
|
|
@@ -15923,6 +16107,54 @@ async function scaffoldClaudeMd(projectRoot) {
|
|
|
15923
16107
|
await writeFile(claudeMdPath, newContent, "utf8");
|
|
15924
16108
|
logger$3.info({ claudeMdPath }, "Wrote substrate section to CLAUDE.md");
|
|
15925
16109
|
}
|
|
16110
|
+
/**
|
|
16111
|
+
* Scaffold the statusline script from the bundled template.
|
|
16112
|
+
*
|
|
16113
|
+
* Always overwrites — substrate fully owns this file.
|
|
16114
|
+
*/
|
|
16115
|
+
async function scaffoldStatuslineScript(projectRoot) {
|
|
16116
|
+
const pkgRoot = findPackageRoot(__dirname);
|
|
16117
|
+
const templateName = "statusline.sh";
|
|
16118
|
+
let templatePath = join(pkgRoot, "dist", "cli", "templates", templateName);
|
|
16119
|
+
if (!existsSync(templatePath)) templatePath = join(pkgRoot, "src", "cli", "templates", templateName);
|
|
16120
|
+
let content;
|
|
16121
|
+
try {
|
|
16122
|
+
content = await readFile(templatePath, "utf8");
|
|
16123
|
+
} catch {
|
|
16124
|
+
logger$3.warn({ templatePath }, "statusline.sh template not found; skipping");
|
|
16125
|
+
return;
|
|
16126
|
+
}
|
|
16127
|
+
const claudeDir = join(projectRoot, ".claude");
|
|
16128
|
+
const statuslinePath = join(claudeDir, "statusline.sh");
|
|
16129
|
+
mkdirSync(claudeDir, { recursive: true });
|
|
16130
|
+
await writeFile(statuslinePath, content, "utf8");
|
|
16131
|
+
chmodSync(statuslinePath, 493);
|
|
16132
|
+
logger$3.info({ statuslinePath }, "Wrote .claude/statusline.sh");
|
|
16133
|
+
}
|
|
16134
|
+
/**
|
|
16135
|
+
* Scaffold or merge .claude/settings.json with substrate-owned settings.
|
|
16136
|
+
*
|
|
16137
|
+
* Merge strategy:
|
|
16138
|
+
* - Keys in SUBSTRATE_OWNED_SETTINGS_KEYS are set/updated unconditionally.
|
|
16139
|
+
* - All other keys (permissions, hooks, etc.) are preserved as-is.
|
|
16140
|
+
* - $schema is added only if not already present.
|
|
16141
|
+
*/
|
|
16142
|
+
async function scaffoldClaudeSettings(projectRoot) {
|
|
16143
|
+
const claudeDir = join(projectRoot, ".claude");
|
|
16144
|
+
const settingsPath = join(claudeDir, "settings.json");
|
|
16145
|
+
let existing = {};
|
|
16146
|
+
try {
|
|
16147
|
+
const raw = await readFile(settingsPath, "utf8");
|
|
16148
|
+
existing = JSON.parse(raw);
|
|
16149
|
+
} catch {}
|
|
16150
|
+
const defaults = getSubstrateDefaultSettings();
|
|
16151
|
+
const merged = { ...existing };
|
|
16152
|
+
for (const key of SUBSTRATE_OWNED_SETTINGS_KEYS) merged[key] = defaults[key];
|
|
16153
|
+
if (!merged["$schema"]) merged["$schema"] = "https://json.schemastore.org/claude-code-settings.json";
|
|
16154
|
+
mkdirSync(claudeDir, { recursive: true });
|
|
16155
|
+
await writeFile(settingsPath, JSON.stringify(merged, null, 2) + "\n", "utf8");
|
|
16156
|
+
logger$3.info({ settingsPath }, "Wrote substrate settings to .claude/settings.json");
|
|
16157
|
+
}
|
|
15926
16158
|
async function runAutoInit(options) {
|
|
15927
16159
|
const { pack: packName, projectRoot, outputFormat, force = false } = options;
|
|
15928
16160
|
const packPath = join(projectRoot, "packs", packName);
|
|
@@ -15971,6 +16203,8 @@ async function runAutoInit(options) {
|
|
|
15971
16203
|
runMigrations(dbWrapper.db);
|
|
15972
16204
|
dbWrapper.close();
|
|
15973
16205
|
await scaffoldClaudeMd(projectRoot);
|
|
16206
|
+
await scaffoldStatuslineScript(projectRoot);
|
|
16207
|
+
await scaffoldClaudeSettings(projectRoot);
|
|
15974
16208
|
const successMsg = `Pack '${packName}' and database initialized successfully at ${dbPath}`;
|
|
15975
16209
|
if (outputFormat === "json") process.stdout.write(formatOutput({
|
|
15976
16210
|
pack: packName,
|
|
@@ -16155,7 +16389,8 @@ async function runAutoRun(options) {
|
|
|
16155
16389
|
agent: "claude-code",
|
|
16156
16390
|
input_tokens: input,
|
|
16157
16391
|
output_tokens: output,
|
|
16158
|
-
cost_usd: costUsd
|
|
16392
|
+
cost_usd: costUsd,
|
|
16393
|
+
metadata: JSON.stringify({ storyKey: payload.storyKey })
|
|
16159
16394
|
});
|
|
16160
16395
|
}
|
|
16161
16396
|
} catch (err) {
|
|
@@ -16421,10 +16656,13 @@ async function runAutoRun(options) {
|
|
|
16421
16656
|
const runEndMs = Date.now();
|
|
16422
16657
|
const runStartMs = new Date(pipelineRun.created_at).getTime();
|
|
16423
16658
|
const tokenAgg = aggregateTokenUsageForRun(db, pipelineRun.id);
|
|
16659
|
+
const storyMetrics = getStoryMetricsForRun(db, pipelineRun.id);
|
|
16660
|
+
const totalReviewCycles = storyMetrics.reduce((sum, m) => sum + (m.review_cycles ?? 0), 0);
|
|
16661
|
+
const totalDispatches = storyMetrics.reduce((sum, m) => sum + (m.dispatches ?? 0), 0);
|
|
16424
16662
|
writeRunMetrics(db, {
|
|
16425
16663
|
run_id: pipelineRun.id,
|
|
16426
16664
|
methodology: pack.manifest.name,
|
|
16427
|
-
status: failedKeys.length > 0 ? "failed" : "completed",
|
|
16665
|
+
status: failedKeys.length > 0 || escalatedKeys.length > 0 ? "failed" : "completed",
|
|
16428
16666
|
started_at: pipelineRun.created_at,
|
|
16429
16667
|
completed_at: new Date().toISOString(),
|
|
16430
16668
|
wall_clock_seconds: Math.round((runEndMs - runStartMs) / 1e3),
|
|
@@ -16435,6 +16673,8 @@ async function runAutoRun(options) {
|
|
|
16435
16673
|
stories_succeeded: succeededKeys.length,
|
|
16436
16674
|
stories_failed: failedKeys.length,
|
|
16437
16675
|
stories_escalated: escalatedKeys.length,
|
|
16676
|
+
total_review_cycles: totalReviewCycles,
|
|
16677
|
+
total_dispatches: totalDispatches,
|
|
16438
16678
|
concurrency_setting: concurrency
|
|
16439
16679
|
});
|
|
16440
16680
|
} catch (metricsErr) {
|
|
@@ -17468,7 +17708,7 @@ async function runAutoSupervisor(options, deps = {}) {
|
|
|
17468
17708
|
if (health.verdict === "NO_PIPELINE_RUNNING") {
|
|
17469
17709
|
const elapsedSeconds = Math.round((Date.now() - startTime) / 1e3);
|
|
17470
17710
|
const succeeded = Object.entries(health.stories.details).filter(([, s]) => s.phase === "COMPLETE").map(([k]) => k);
|
|
17471
|
-
const failed = Object.entries(health.stories.details).filter(([, s]) => s.phase !== "COMPLETE" && s.phase !== "PENDING").map(([k]) => k);
|
|
17711
|
+
const failed = Object.entries(health.stories.details).filter(([, s]) => s.phase !== "COMPLETE" && s.phase !== "PENDING" && s.phase !== "ESCALATED").map(([k]) => k);
|
|
17472
17712
|
const escalated = Object.entries(health.stories.details).filter(([, s]) => s.phase === "ESCALATED").map(([k]) => k);
|
|
17473
17713
|
emitEvent$1({
|
|
17474
17714
|
type: "supervisor:summary",
|
|
@@ -17480,7 +17720,7 @@ async function runAutoSupervisor(options, deps = {}) {
|
|
|
17480
17720
|
restarts: restartCount
|
|
17481
17721
|
});
|
|
17482
17722
|
log(`\nPipeline reached terminal state. Elapsed: ${elapsedSeconds}s | succeeded: ${succeeded.length} | failed: ${failed.length} | restarts: ${restartCount}`);
|
|
17483
|
-
return failed.length > 0 ? 1 : 0;
|
|
17723
|
+
return failed.length > 0 || escalated.length > 0 ? 1 : 0;
|
|
17484
17724
|
}
|
|
17485
17725
|
if (health.staleness_seconds >= stallThreshold) {
|
|
17486
17726
|
const pids = [...health.process.orchestrator_pid !== null ? [health.process.orchestrator_pid] : [], ...health.process.child_pids];
|
|
@@ -17532,22 +17772,23 @@ async function runAutoSupervisor(options, deps = {}) {
|
|
|
17532
17772
|
attempt: restartCount
|
|
17533
17773
|
});
|
|
17534
17774
|
log(`Supervisor: Restarting pipeline (attempt ${restartCount}/${maxRestarts})`);
|
|
17535
|
-
|
|
17536
|
-
|
|
17537
|
-
|
|
17538
|
-
|
|
17539
|
-
|
|
17540
|
-
|
|
17541
|
-
|
|
17775
|
+
try {
|
|
17776
|
+
await resumePipeline({
|
|
17777
|
+
runId: health.run_id ?? void 0,
|
|
17778
|
+
outputFormat,
|
|
17779
|
+
projectRoot,
|
|
17780
|
+
concurrency: 3,
|
|
17781
|
+
pack
|
|
17782
|
+
});
|
|
17783
|
+
} catch (err) {
|
|
17542
17784
|
const message = err instanceof Error ? err.message : String(err);
|
|
17543
17785
|
log(`Supervisor: Resume error: ${message}`);
|
|
17544
|
-
if (outputFormat === "json")
|
|
17786
|
+
if (outputFormat === "json") emitEvent$1({
|
|
17545
17787
|
type: "supervisor:error",
|
|
17546
17788
|
reason: "resume_failed",
|
|
17547
|
-
message
|
|
17548
|
-
|
|
17549
|
-
|
|
17550
|
-
});
|
|
17789
|
+
message
|
|
17790
|
+
});
|
|
17791
|
+
}
|
|
17551
17792
|
}
|
|
17552
17793
|
await sleep(pollInterval * 1e3);
|
|
17553
17794
|
}
|