substrate-ai 0.2.20 → 0.2.23
- package/dist/cli/index.js +349 -133
- package/dist/{event-bus-BMxhfxfT.js → errors-CswS7Mzg.js} +93 -715
- package/dist/event-bus-CAvDMst7.js +734 -0
- package/dist/{experimenter-prkFLFPw.js → experimenter-bc40oi8p.js} +2 -2
- package/dist/index.js +2 -2
- package/dist/{operational-Dq4IfJzE.js → operational-CnMlvWqc.js} +47 -2
- package/dist/{run-CcWb6Kb-.js → run-BaAws8IQ.js} +950 -200
- package/dist/run-BenC8JuM.js +7 -0
- package/package.json +1 -1
- package/packs/bmad/prompts/analysis-step-1-vision.md +4 -1
- package/packs/bmad/prompts/test-expansion.md +65 -0
- package/packs/bmad/prompts/test-plan.md +41 -0
- package/dist/errors-BPqtzQ4U.js +0 -111
- package/dist/run-C5zfaWYN.js +0 -7
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import { createLogger } from "./logger-D2fS2ccL.js";
|
|
2
|
-
import {
|
|
2
|
+
import { createEventBus, createTuiApp, isTuiCapable, printNonTtyWarning } from "./event-bus-CAvDMst7.js";
|
|
3
3
|
import { addTokenUsage, createDecision, createPipelineRun, createRequirement, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getTokenUsageSummary, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision } from "./decisions-Dq4cAA2L.js";
|
|
4
|
-
import { ESCALATION_DIAGNOSIS, OPERATIONAL_FINDING, STORY_METRICS, STORY_OUTCOME, aggregateTokenUsageForRun, aggregateTokenUsageForStory, getStoryMetricsForRun, writeRunMetrics, writeStoryMetrics } from "./operational-
|
|
4
|
+
import { ESCALATION_DIAGNOSIS, OPERATIONAL_FINDING, STORY_METRICS, STORY_OUTCOME, TEST_EXPANSION_FINDING, TEST_PLAN, aggregateTokenUsageForRun, aggregateTokenUsageForStory, getStoryMetricsForRun, writeRunMetrics, writeStoryMetrics } from "./operational-CnMlvWqc.js";
|
|
5
5
|
import { createRequire } from "module";
|
|
6
6
|
import { dirname, join } from "path";
|
|
7
7
|
import { access, readFile, readdir, stat } from "fs/promises";
|
|
@@ -15,7 +15,7 @@ import BetterSqlite3 from "better-sqlite3";
|
|
|
15
15
|
import { fileURLToPath } from "node:url";
|
|
16
16
|
import { existsSync as existsSync$1, readFileSync as readFileSync$1, readdirSync as readdirSync$1 } from "node:fs";
|
|
17
17
|
import { freemem, platform } from "node:os";
|
|
18
|
-
import { randomUUID } from "node:crypto";
|
|
18
|
+
import { createHash, randomUUID } from "node:crypto";
|
|
19
19
|
import { readFile as readFile$1, stat as stat$1 } from "node:fs/promises";
|
|
20
20
|
|
|
21
21
|
//#region rolldown:runtime
|
|
@@ -539,7 +539,7 @@ const migration010RunMetrics = {
|
|
|
539
539
|
|
|
540
540
|
//#endregion
|
|
541
541
|
//#region src/persistence/migrations/index.ts
|
|
542
|
-
const logger$
|
|
542
|
+
const logger$19 = createLogger("persistence:migrations");
|
|
543
543
|
const MIGRATIONS = [
|
|
544
544
|
initialSchemaMigration,
|
|
545
545
|
costTrackerSchemaMigration,
|
|
@@ -557,7 +557,7 @@ const MIGRATIONS = [
|
|
|
557
557
|
* Safe to call multiple times — already-applied migrations are skipped.
|
|
558
558
|
*/
|
|
559
559
|
function runMigrations(db) {
|
|
560
|
-
logger$
|
|
560
|
+
logger$19.info("Starting migration runner");
|
|
561
561
|
db.exec(`
|
|
562
562
|
CREATE TABLE IF NOT EXISTS schema_migrations (
|
|
563
563
|
version INTEGER PRIMARY KEY,
|
|
@@ -568,12 +568,12 @@ function runMigrations(db) {
|
|
|
568
568
|
const appliedVersions = new Set(db.prepare("SELECT version FROM schema_migrations").all().map((row) => row.version));
|
|
569
569
|
const pending = MIGRATIONS.filter((m) => !appliedVersions.has(m.version)).sort((a, b) => a.version - b.version);
|
|
570
570
|
if (pending.length === 0) {
|
|
571
|
-
logger$
|
|
571
|
+
logger$19.info("No pending migrations");
|
|
572
572
|
return;
|
|
573
573
|
}
|
|
574
574
|
const insertMigration = db.prepare("INSERT INTO schema_migrations (version, name) VALUES (?, ?)");
|
|
575
575
|
for (const migration of pending) {
|
|
576
|
-
logger$
|
|
576
|
+
logger$19.info({
|
|
577
577
|
version: migration.version,
|
|
578
578
|
name: migration.name
|
|
579
579
|
}, "Applying migration");
|
|
@@ -587,14 +587,14 @@ function runMigrations(db) {
|
|
|
587
587
|
});
|
|
588
588
|
applyMigration();
|
|
589
589
|
}
|
|
590
|
-
logger$
|
|
590
|
+
logger$19.info({ version: migration.version }, "Migration applied successfully");
|
|
591
591
|
}
|
|
592
|
-
logger$
|
|
592
|
+
logger$19.info({ count: pending.length }, "All pending migrations applied");
|
|
593
593
|
}
|
|
594
594
|
|
|
595
595
|
//#endregion
|
|
596
596
|
//#region src/persistence/database.ts
|
|
597
|
-
const logger$
|
|
597
|
+
const logger$18 = createLogger("persistence:database");
|
|
598
598
|
/**
|
|
599
599
|
* Thin wrapper that opens a SQLite database, applies required PRAGMAs,
|
|
600
600
|
* and exposes the raw BetterSqlite3 instance.
|
|
@@ -611,14 +611,14 @@ var DatabaseWrapper = class {
|
|
|
611
611
|
*/
|
|
612
612
|
open() {
|
|
613
613
|
if (this._db !== null) return;
|
|
614
|
-
logger$
|
|
614
|
+
logger$18.info({ path: this._path }, "Opening SQLite database");
|
|
615
615
|
this._db = new BetterSqlite3(this._path);
|
|
616
616
|
const walResult = this._db.pragma("journal_mode = WAL");
|
|
617
|
-
if (walResult?.[0]?.journal_mode !== "wal") logger$
|
|
617
|
+
if (walResult?.[0]?.journal_mode !== "wal") logger$18.warn({ result: walResult?.[0]?.journal_mode }, "WAL pragma did not return expected \"wal\" — journal_mode may be \"memory\" or unsupported");
|
|
618
618
|
this._db.pragma("busy_timeout = 5000");
|
|
619
619
|
this._db.pragma("synchronous = NORMAL");
|
|
620
620
|
this._db.pragma("foreign_keys = ON");
|
|
621
|
-
logger$
|
|
621
|
+
logger$18.info({ path: this._path }, "SQLite database opened with WAL mode");
|
|
622
622
|
}
|
|
623
623
|
/**
|
|
624
624
|
* Close the database. Idempotent — calling close() when already closed is a no-op.
|
|
@@ -627,7 +627,7 @@ var DatabaseWrapper = class {
|
|
|
627
627
|
if (this._db === null) return;
|
|
628
628
|
this._db.close();
|
|
629
629
|
this._db = null;
|
|
630
|
-
logger$
|
|
630
|
+
logger$18.info({ path: this._path }, "SQLite database closed");
|
|
631
631
|
}
|
|
632
632
|
/**
|
|
633
633
|
* Return the raw BetterSqlite3 instance.
|
|
@@ -1208,11 +1208,38 @@ function buildPipelineStatusOutput(run, tokenSummary, decisionsCount, storiesCou
|
|
|
1208
1208
|
totalCost += row.total_cost_usd;
|
|
1209
1209
|
}
|
|
1210
1210
|
let activeDispatches = 0;
|
|
1211
|
+
let storiesSummary;
|
|
1211
1212
|
try {
|
|
1212
1213
|
if (run.token_usage_json) {
|
|
1213
1214
|
const state = JSON.parse(run.token_usage_json);
|
|
1214
|
-
if (state.stories) {
|
|
1215
|
-
|
|
1215
|
+
if (state.stories && Object.keys(state.stories).length > 0) {
|
|
1216
|
+
const now = Date.now();
|
|
1217
|
+
let completed = 0;
|
|
1218
|
+
let inProgress = 0;
|
|
1219
|
+
let escalated = 0;
|
|
1220
|
+
let pending = 0;
|
|
1221
|
+
const details = {};
|
|
1222
|
+
for (const [key, s] of Object.entries(state.stories)) {
|
|
1223
|
+
const phase = s.phase ?? "PENDING";
|
|
1224
|
+
if (phase !== "PENDING" && phase !== "COMPLETE" && phase !== "ESCALATED") activeDispatches++;
|
|
1225
|
+
if (phase === "COMPLETE") completed++;
|
|
1226
|
+
else if (phase === "ESCALATED") escalated++;
|
|
1227
|
+
else if (phase === "PENDING") pending++;
|
|
1228
|
+
else inProgress++;
|
|
1229
|
+
const elapsed = s.startedAt != null ? Math.max(0, Math.round((now - new Date(s.startedAt).getTime()) / 1e3)) : 0;
|
|
1230
|
+
details[key] = {
|
|
1231
|
+
phase,
|
|
1232
|
+
review_cycles: s.reviewCycles ?? 0,
|
|
1233
|
+
elapsed_seconds: elapsed
|
|
1234
|
+
};
|
|
1235
|
+
}
|
|
1236
|
+
storiesSummary = {
|
|
1237
|
+
completed,
|
|
1238
|
+
in_progress: inProgress,
|
|
1239
|
+
escalated,
|
|
1240
|
+
pending,
|
|
1241
|
+
details
|
|
1242
|
+
};
|
|
1216
1243
|
}
|
|
1217
1244
|
}
|
|
1218
1245
|
} catch {}
|
|
@@ -1230,7 +1257,8 @@ function buildPipelineStatusOutput(run, tokenSummary, decisionsCount, storiesCou
|
|
|
1230
1257
|
last_activity: run.updated_at,
|
|
1231
1258
|
staleness_seconds: Math.round((Date.now() - parseDbTimestampAsUtc(run.updated_at).getTime()) / 1e3),
|
|
1232
1259
|
last_event_ts: run.updated_at,
|
|
1233
|
-
active_dispatches: activeDispatches
|
|
1260
|
+
active_dispatches: activeDispatches,
|
|
1261
|
+
...storiesSummary !== void 0 ? { stories: storiesSummary } : {}
|
|
1234
1262
|
};
|
|
1235
1263
|
}
|
|
1236
1264
|
/**
|
|
@@ -1259,6 +1287,19 @@ function formatPipelineStatusHuman(status) {
|
|
|
1259
1287
|
lines.push(` Total Cost: $${status.total_tokens.cost_usd.toFixed(4)}`);
|
|
1260
1288
|
lines.push(` Decisions: ${status.decisions_count}`);
|
|
1261
1289
|
lines.push(` Stories: ${status.stories_count}`);
|
|
1290
|
+
if (status.stories !== void 0 && Object.keys(status.stories.details).length > 0) {
|
|
1291
|
+
lines.push("");
|
|
1292
|
+
lines.push(" Sprint Progress:");
|
|
1293
|
+
lines.push(" " + "─".repeat(68));
|
|
1294
|
+
lines.push(` ${"STORY".padEnd(10)} ${"PHASE".padEnd(24)} ${"CYCLES".padEnd(8)} ELAPSED`);
|
|
1295
|
+
lines.push(" " + "─".repeat(68));
|
|
1296
|
+
for (const [key, detail] of Object.entries(status.stories.details)) {
|
|
1297
|
+
const elapsed = detail.elapsed_seconds > 0 ? `${detail.elapsed_seconds}s` : "-";
|
|
1298
|
+
lines.push(` ${key.padEnd(10)} ${detail.phase.padEnd(24)} ${String(detail.review_cycles).padEnd(8)} ${elapsed}`);
|
|
1299
|
+
}
|
|
1300
|
+
lines.push(" " + "─".repeat(68));
|
|
1301
|
+
lines.push(` Completed: ${status.stories.completed} In Progress: ${status.stories.in_progress} Escalated: ${status.stories.escalated} Pending: ${status.stories.pending}`);
|
|
1302
|
+
}
|
|
1262
1303
|
return lines.join("\n");
|
|
1263
1304
|
}
|
|
1264
1305
|
/**
|
|
@@ -2496,7 +2537,7 @@ function truncateToTokens(text, maxTokens) {
|
|
|
2496
2537
|
|
|
2497
2538
|
//#endregion
|
|
2498
2539
|
//#region src/modules/context-compiler/context-compiler-impl.ts
|
|
2499
|
-
const logger$
|
|
2540
|
+
const logger$17 = createLogger("context-compiler");
|
|
2500
2541
|
/**
|
|
2501
2542
|
* Fraction of the original token budget that must remain (after required +
|
|
2502
2543
|
* important sections) before an optional section is included.
|
|
@@ -2588,7 +2629,7 @@ var ContextCompilerImpl = class {
|
|
|
2588
2629
|
includedParts.push(truncated);
|
|
2589
2630
|
remainingBudget -= truncatedTokens;
|
|
2590
2631
|
anyTruncated = true;
|
|
2591
|
-
logger$
|
|
2632
|
+
logger$17.warn({
|
|
2592
2633
|
section: section.name,
|
|
2593
2634
|
originalTokens: tokens,
|
|
2594
2635
|
budgetTokens: truncatedTokens
|
|
@@ -2602,7 +2643,7 @@ var ContextCompilerImpl = class {
|
|
|
2602
2643
|
});
|
|
2603
2644
|
} else {
|
|
2604
2645
|
anyTruncated = true;
|
|
2605
|
-
logger$
|
|
2646
|
+
logger$17.warn({
|
|
2606
2647
|
section: section.name,
|
|
2607
2648
|
tokens
|
|
2608
2649
|
}, "Context compiler: omitted \"important\" section — no budget remaining");
|
|
@@ -2629,7 +2670,7 @@ var ContextCompilerImpl = class {
|
|
|
2629
2670
|
} else {
|
|
2630
2671
|
if (tokens > 0) {
|
|
2631
2672
|
anyTruncated = true;
|
|
2632
|
-
logger$
|
|
2673
|
+
logger$17.warn({
|
|
2633
2674
|
section: section.name,
|
|
2634
2675
|
tokens,
|
|
2635
2676
|
budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
|
|
@@ -2914,7 +2955,7 @@ function parseYamlResult(yamlText, schema) {
|
|
|
2914
2955
|
|
|
2915
2956
|
//#endregion
|
|
2916
2957
|
//#region src/modules/agent-dispatch/dispatcher-impl.ts
|
|
2917
|
-
const logger$
|
|
2958
|
+
const logger$16 = createLogger("agent-dispatch");
|
|
2918
2959
|
const SHUTDOWN_GRACE_MS = 1e4;
|
|
2919
2960
|
const SHUTDOWN_MAX_WAIT_MS = 3e4;
|
|
2920
2961
|
const CHARS_PER_TOKEN = 4;
|
|
@@ -2957,7 +2998,7 @@ function getAvailableMemory() {
|
|
|
2957
2998
|
encoding: "utf-8"
|
|
2958
2999
|
}).trim(), 10);
|
|
2959
3000
|
if (pressureLevel >= 4) {
|
|
2960
|
-
logger$
|
|
3001
|
+
logger$16.warn({ pressureLevel }, "macOS kernel reports critical memory pressure");
|
|
2961
3002
|
return 0;
|
|
2962
3003
|
}
|
|
2963
3004
|
} catch {}
|
|
@@ -2972,7 +3013,7 @@ function getAvailableMemory() {
|
|
|
2972
3013
|
const speculative = parseInt(vmstat.match(/Pages speculative:\s+(\d+)/)?.[1] ?? "0", 10);
|
|
2973
3014
|
const available = (free + purgeable + speculative) * pageSize;
|
|
2974
3015
|
if (pressureLevel >= 2) {
|
|
2975
|
-
logger$
|
|
3016
|
+
logger$16.warn({
|
|
2976
3017
|
pressureLevel,
|
|
2977
3018
|
availableBeforeDiscount: available
|
|
2978
3019
|
}, "macOS kernel reports memory pressure — discounting estimate");
|
|
@@ -3051,7 +3092,7 @@ var DispatcherImpl = class {
|
|
|
3051
3092
|
resolve: typedResolve,
|
|
3052
3093
|
reject
|
|
3053
3094
|
});
|
|
3054
|
-
logger$
|
|
3095
|
+
logger$16.debug({
|
|
3055
3096
|
id,
|
|
3056
3097
|
queueLength: this._queue.length
|
|
3057
3098
|
}, "Dispatch queued");
|
|
@@ -3082,7 +3123,7 @@ var DispatcherImpl = class {
|
|
|
3082
3123
|
async shutdown() {
|
|
3083
3124
|
this._shuttingDown = true;
|
|
3084
3125
|
this._stopMemoryPressureTimer();
|
|
3085
|
-
logger$
|
|
3126
|
+
logger$16.info({
|
|
3086
3127
|
running: this._running.size,
|
|
3087
3128
|
queued: this._queue.length
|
|
3088
3129
|
}, "Dispatcher shutting down");
|
|
@@ -3115,13 +3156,13 @@ var DispatcherImpl = class {
|
|
|
3115
3156
|
}
|
|
3116
3157
|
}, 50);
|
|
3117
3158
|
});
|
|
3118
|
-
logger$
|
|
3159
|
+
logger$16.info("Dispatcher shutdown complete");
|
|
3119
3160
|
}
|
|
3120
3161
|
async _startDispatch(id, request, resolve$2) {
|
|
3121
3162
|
const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns } = request;
|
|
3122
3163
|
const adapter = this._adapterRegistry.get(agent);
|
|
3123
3164
|
if (adapter === void 0) {
|
|
3124
|
-
logger$
|
|
3165
|
+
logger$16.warn({
|
|
3125
3166
|
id,
|
|
3126
3167
|
agent
|
|
3127
3168
|
}, "No adapter found for agent");
|
|
@@ -3167,7 +3208,7 @@ var DispatcherImpl = class {
|
|
|
3167
3208
|
});
|
|
3168
3209
|
const startedAt = Date.now();
|
|
3169
3210
|
proc.on("error", (err) => {
|
|
3170
|
-
logger$
|
|
3211
|
+
logger$16.error({
|
|
3171
3212
|
id,
|
|
3172
3213
|
binary: cmd.binary,
|
|
3173
3214
|
error: err.message
|
|
@@ -3175,7 +3216,7 @@ var DispatcherImpl = class {
|
|
|
3175
3216
|
});
|
|
3176
3217
|
if (proc.stdin !== null) {
|
|
3177
3218
|
proc.stdin.on("error", (err) => {
|
|
3178
|
-
if (err.code !== "EPIPE") logger$
|
|
3219
|
+
if (err.code !== "EPIPE") logger$16.warn({
|
|
3179
3220
|
id,
|
|
3180
3221
|
error: err.message
|
|
3181
3222
|
}, "stdin write error");
|
|
@@ -3217,7 +3258,7 @@ var DispatcherImpl = class {
|
|
|
3217
3258
|
agent,
|
|
3218
3259
|
taskType
|
|
3219
3260
|
});
|
|
3220
|
-
logger$
|
|
3261
|
+
logger$16.debug({
|
|
3221
3262
|
id,
|
|
3222
3263
|
agent,
|
|
3223
3264
|
taskType,
|
|
@@ -3234,7 +3275,7 @@ var DispatcherImpl = class {
|
|
|
3234
3275
|
dispatchId: id,
|
|
3235
3276
|
timeoutMs
|
|
3236
3277
|
});
|
|
3237
|
-
logger$
|
|
3278
|
+
logger$16.warn({
|
|
3238
3279
|
id,
|
|
3239
3280
|
agent,
|
|
3240
3281
|
taskType,
|
|
@@ -3288,7 +3329,7 @@ var DispatcherImpl = class {
|
|
|
3288
3329
|
exitCode: code,
|
|
3289
3330
|
output: stdout
|
|
3290
3331
|
});
|
|
3291
|
-
logger$
|
|
3332
|
+
logger$16.debug({
|
|
3292
3333
|
id,
|
|
3293
3334
|
agent,
|
|
3294
3335
|
taskType,
|
|
@@ -3314,7 +3355,7 @@ var DispatcherImpl = class {
|
|
|
3314
3355
|
error: stderr || `Process exited with code ${String(code)}`,
|
|
3315
3356
|
exitCode: code
|
|
3316
3357
|
});
|
|
3317
|
-
logger$
|
|
3358
|
+
logger$16.debug({
|
|
3318
3359
|
id,
|
|
3319
3360
|
agent,
|
|
3320
3361
|
taskType,
|
|
@@ -3373,7 +3414,7 @@ var DispatcherImpl = class {
|
|
|
3373
3414
|
const next = this._queue.shift();
|
|
3374
3415
|
if (next === void 0) return;
|
|
3375
3416
|
next.handle.status = "running";
|
|
3376
|
-
logger$
|
|
3417
|
+
logger$16.debug({
|
|
3377
3418
|
id: next.id,
|
|
3378
3419
|
queueLength: this._queue.length
|
|
3379
3420
|
}, "Dequeued dispatch");
|
|
@@ -3386,7 +3427,7 @@ var DispatcherImpl = class {
|
|
|
3386
3427
|
_isMemoryPressured() {
|
|
3387
3428
|
const free = getAvailableMemory();
|
|
3388
3429
|
if (free < MIN_FREE_MEMORY_BYTES) {
|
|
3389
|
-
logger$
|
|
3430
|
+
logger$16.warn({
|
|
3390
3431
|
freeMB: Math.round(free / 1024 / 1024),
|
|
3391
3432
|
thresholdMB: Math.round(MIN_FREE_MEMORY_BYTES / 1024 / 1024)
|
|
3392
3433
|
}, "Memory pressure detected — holding dispatch queue");
|
|
@@ -3511,7 +3552,7 @@ function pickRecommendation(distribution, profile, totalIssues, reviewCycles, la
|
|
|
3511
3552
|
|
|
3512
3553
|
//#endregion
|
|
3513
3554
|
//#region src/modules/compiled-workflows/prompt-assembler.ts
|
|
3514
|
-
const logger$
|
|
3555
|
+
const logger$15 = createLogger("compiled-workflows:prompt-assembler");
|
|
3515
3556
|
/**
|
|
3516
3557
|
* Assemble a final prompt from a template and sections map.
|
|
3517
3558
|
*
|
|
@@ -3536,7 +3577,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
3536
3577
|
tokenCount,
|
|
3537
3578
|
truncated: false
|
|
3538
3579
|
};
|
|
3539
|
-
logger$
|
|
3580
|
+
logger$15.warn({
|
|
3540
3581
|
tokenCount,
|
|
3541
3582
|
ceiling: tokenCeiling
|
|
3542
3583
|
}, "Prompt exceeds token ceiling — truncating optional sections");
|
|
@@ -3552,10 +3593,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
3552
3593
|
const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
|
|
3553
3594
|
if (targetSectionTokens === 0) {
|
|
3554
3595
|
contentMap[section.name] = "";
|
|
3555
|
-
logger$
|
|
3596
|
+
logger$15.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
|
|
3556
3597
|
} else {
|
|
3557
3598
|
contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
|
|
3558
|
-
logger$
|
|
3599
|
+
logger$15.warn({
|
|
3559
3600
|
sectionName: section.name,
|
|
3560
3601
|
targetSectionTokens
|
|
3561
3602
|
}, "Section truncated to fit token budget");
|
|
@@ -3566,7 +3607,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
3566
3607
|
}
|
|
3567
3608
|
if (tokenCount <= tokenCeiling) break;
|
|
3568
3609
|
}
|
|
3569
|
-
if (tokenCount > tokenCeiling) logger$
|
|
3610
|
+
if (tokenCount > tokenCeiling) logger$15.warn({
|
|
3570
3611
|
tokenCount,
|
|
3571
3612
|
ceiling: tokenCeiling
|
|
3572
3613
|
}, "Required sections alone exceed token ceiling — returning over-budget prompt");
|
|
@@ -3704,14 +3745,89 @@ const CodeReviewResultSchema = z.object({
|
|
|
3704
3745
|
agentVerdict: data.verdict,
|
|
3705
3746
|
verdict: computeVerdict(data.issue_list)
|
|
3706
3747
|
}));
|
|
3748
|
+
/**
|
|
3749
|
+
* Schema for the YAML output contract of the test-plan sub-agent.
|
|
3750
|
+
*
|
|
3751
|
+
* The agent must emit YAML with result, test_files, test_categories, and coverage_notes.
|
|
3752
|
+
* Example:
|
|
3753
|
+
* result: success
|
|
3754
|
+
* test_files:
|
|
3755
|
+
* - src/modules/foo/__tests__/foo.test.ts
|
|
3756
|
+
* test_categories:
|
|
3757
|
+
* - unit
|
|
3758
|
+
* - integration
|
|
3759
|
+
* coverage_notes: "AC1 covered by foo.test.ts"
|
|
3760
|
+
*/
|
|
3761
|
+
const TestPlanResultSchema = z.object({
|
|
3762
|
+
result: z.preprocess((val) => val === "failure" ? "failed" : val, z.enum(["success", "failed"])),
|
|
3763
|
+
test_files: z.array(z.string()).default([]),
|
|
3764
|
+
test_categories: z.array(z.string()).default([]),
|
|
3765
|
+
coverage_notes: z.string().default("")
|
|
3766
|
+
});
|
|
3767
|
+
/**
|
|
3768
|
+
* Schema for a single coverage gap identified during test expansion analysis.
|
|
3769
|
+
*/
|
|
3770
|
+
const CoverageGapSchema = z.object({
|
|
3771
|
+
ac_ref: z.string(),
|
|
3772
|
+
description: z.string(),
|
|
3773
|
+
gap_type: z.enum([
|
|
3774
|
+
"missing-e2e",
|
|
3775
|
+
"missing-integration",
|
|
3776
|
+
"unit-only"
|
|
3777
|
+
])
|
|
3778
|
+
});
|
|
3779
|
+
/**
|
|
3780
|
+
* Schema for a single suggested test generated during test expansion analysis.
|
|
3781
|
+
*/
|
|
3782
|
+
const SuggestedTestSchema = z.object({
|
|
3783
|
+
test_name: z.string(),
|
|
3784
|
+
test_type: z.enum([
|
|
3785
|
+
"e2e",
|
|
3786
|
+
"integration",
|
|
3787
|
+
"unit"
|
|
3788
|
+
]),
|
|
3789
|
+
description: z.string(),
|
|
3790
|
+
target_ac: z.string().optional()
|
|
3791
|
+
});
|
|
3792
|
+
/**
|
|
3793
|
+
* Schema for the YAML output contract of the test-expansion sub-agent.
|
|
3794
|
+
*
|
|
3795
|
+
* The agent must emit YAML with expansion_priority, coverage_gaps, and suggested_tests.
|
|
3796
|
+
* Example:
|
|
3797
|
+
* expansion_priority: medium
|
|
3798
|
+
* coverage_gaps:
|
|
3799
|
+
* - ac_ref: AC1
|
|
3800
|
+
* description: "Happy path not exercised at module boundary"
|
|
3801
|
+
* gap_type: missing-integration
|
|
3802
|
+
* suggested_tests:
|
|
3803
|
+
* - test_name: "runFoo integration happy path"
|
|
3804
|
+
* test_type: integration
|
|
3805
|
+
* description: "Test runFoo with real DB to verify AC1 end-to-end"
|
|
3806
|
+
* target_ac: AC1
|
|
3807
|
+
* notes: "Unit coverage is solid but integration layer is untested."
|
|
3808
|
+
*/
|
|
3809
|
+
const TestExpansionResultSchema = z.object({
|
|
3810
|
+
expansion_priority: z.preprocess((val) => [
|
|
3811
|
+
"low",
|
|
3812
|
+
"medium",
|
|
3813
|
+
"high"
|
|
3814
|
+
].includes(val) ? val : "low", z.enum([
|
|
3815
|
+
"low",
|
|
3816
|
+
"medium",
|
|
3817
|
+
"high"
|
|
3818
|
+
])),
|
|
3819
|
+
coverage_gaps: z.array(CoverageGapSchema).default([]),
|
|
3820
|
+
suggested_tests: z.array(SuggestedTestSchema).default([]),
|
|
3821
|
+
notes: z.string().optional()
|
|
3822
|
+
});
|
|
3707
3823
|
|
|
3708
3824
|
//#endregion
|
|
3709
3825
|
//#region src/modules/compiled-workflows/create-story.ts
|
|
3710
|
-
const logger$
|
|
3826
|
+
const logger$14 = createLogger("compiled-workflows:create-story");
|
|
3711
3827
|
/**
|
|
3712
3828
|
* Hard ceiling for the assembled create-story prompt.
|
|
3713
3829
|
*/
|
|
3714
|
-
const TOKEN_CEILING$
|
|
3830
|
+
const TOKEN_CEILING$4 = 3e3;
|
|
3715
3831
|
/**
|
|
3716
3832
|
* Execute the compiled create-story workflow.
|
|
3717
3833
|
*
|
|
@@ -3731,7 +3847,7 @@ const TOKEN_CEILING$2 = 3e3;
|
|
|
3731
3847
|
*/
|
|
3732
3848
|
async function runCreateStory(deps, params) {
|
|
3733
3849
|
const { epicId, storyKey, pipelineRunId } = params;
|
|
3734
|
-
logger$
|
|
3850
|
+
logger$14.debug({
|
|
3735
3851
|
epicId,
|
|
3736
3852
|
storyKey,
|
|
3737
3853
|
pipelineRunId
|
|
@@ -3741,7 +3857,7 @@ async function runCreateStory(deps, params) {
|
|
|
3741
3857
|
template = await deps.pack.getPrompt("create-story");
|
|
3742
3858
|
} catch (err) {
|
|
3743
3859
|
const error = err instanceof Error ? err.message : String(err);
|
|
3744
|
-
logger$
|
|
3860
|
+
logger$14.error({ error }, "Failed to retrieve create-story prompt template");
|
|
3745
3861
|
return {
|
|
3746
3862
|
result: "failed",
|
|
3747
3863
|
error: `Failed to retrieve prompt template: ${error}`,
|
|
@@ -3752,9 +3868,9 @@ async function runCreateStory(deps, params) {
|
|
|
3752
3868
|
};
|
|
3753
3869
|
}
|
|
3754
3870
|
const implementationDecisions = getImplementationDecisions(deps);
|
|
3755
|
-
const epicShardContent = getEpicShard(implementationDecisions, epicId, deps.projectRoot);
|
|
3871
|
+
const epicShardContent = getEpicShard(implementationDecisions, epicId, deps.projectRoot, storyKey);
|
|
3756
3872
|
const prevDevNotesContent = getPrevDevNotes(implementationDecisions, epicId);
|
|
3757
|
-
const archConstraintsContent = getArchConstraints$
|
|
3873
|
+
const archConstraintsContent = getArchConstraints$2(deps);
|
|
3758
3874
|
const storyTemplateContent = await getStoryTemplate(deps);
|
|
3759
3875
|
const { prompt, tokenCount, truncated } = assemblePrompt(template, [
|
|
3760
3876
|
{
|
|
@@ -3782,11 +3898,11 @@ async function runCreateStory(deps, params) {
|
|
|
3782
3898
|
content: storyTemplateContent,
|
|
3783
3899
|
priority: "important"
|
|
3784
3900
|
}
|
|
3785
|
-
], TOKEN_CEILING$
|
|
3786
|
-
logger$
|
|
3901
|
+
], TOKEN_CEILING$4);
|
|
3902
|
+
logger$14.debug({
|
|
3787
3903
|
tokenCount,
|
|
3788
3904
|
truncated,
|
|
3789
|
-
tokenCeiling: TOKEN_CEILING$
|
|
3905
|
+
tokenCeiling: TOKEN_CEILING$4
|
|
3790
3906
|
}, "Prompt assembled for create-story");
|
|
3791
3907
|
const handle = deps.dispatcher.dispatch({
|
|
3792
3908
|
prompt,
|
|
@@ -3800,7 +3916,7 @@ async function runCreateStory(deps, params) {
|
|
|
3800
3916
|
dispatchResult = await handle.result;
|
|
3801
3917
|
} catch (err) {
|
|
3802
3918
|
const error = err instanceof Error ? err.message : String(err);
|
|
3803
|
-
logger$
|
|
3919
|
+
logger$14.error({
|
|
3804
3920
|
epicId,
|
|
3805
3921
|
storyKey,
|
|
3806
3922
|
error
|
|
@@ -3821,7 +3937,7 @@ async function runCreateStory(deps, params) {
|
|
|
3821
3937
|
if (dispatchResult.status === "failed") {
|
|
3822
3938
|
const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
|
|
3823
3939
|
const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
|
|
3824
|
-
logger$
|
|
3940
|
+
logger$14.warn({
|
|
3825
3941
|
epicId,
|
|
3826
3942
|
storyKey,
|
|
3827
3943
|
exitCode: dispatchResult.exitCode
|
|
@@ -3833,7 +3949,7 @@ async function runCreateStory(deps, params) {
|
|
|
3833
3949
|
};
|
|
3834
3950
|
}
|
|
3835
3951
|
if (dispatchResult.status === "timeout") {
|
|
3836
|
-
logger$
|
|
3952
|
+
logger$14.warn({
|
|
3837
3953
|
epicId,
|
|
3838
3954
|
storyKey
|
|
3839
3955
|
}, "Create-story dispatch timed out");
|
|
@@ -3846,7 +3962,7 @@ async function runCreateStory(deps, params) {
|
|
|
3846
3962
|
if (dispatchResult.parsed === null) {
|
|
3847
3963
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
3848
3964
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
3849
|
-
logger$
|
|
3965
|
+
logger$14.warn({
|
|
3850
3966
|
epicId,
|
|
3851
3967
|
storyKey,
|
|
3852
3968
|
details,
|
|
@@ -3862,7 +3978,7 @@ async function runCreateStory(deps, params) {
|
|
|
3862
3978
|
const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
|
|
3863
3979
|
if (!parseResult.success) {
|
|
3864
3980
|
const details = parseResult.error.message;
|
|
3865
|
-
logger$
|
|
3981
|
+
logger$14.warn({
|
|
3866
3982
|
epicId,
|
|
3867
3983
|
storyKey,
|
|
3868
3984
|
details
|
|
@@ -3875,7 +3991,7 @@ async function runCreateStory(deps, params) {
|
|
|
3875
3991
|
};
|
|
3876
3992
|
}
|
|
3877
3993
|
const parsed = parseResult.data;
|
|
3878
|
-
logger$
|
|
3994
|
+
logger$14.info({
|
|
3879
3995
|
epicId,
|
|
3880
3996
|
storyKey,
|
|
3881
3997
|
storyFile: parsed.story_file,
|
|
@@ -3897,29 +4013,82 @@ function getImplementationDecisions(deps) {
|
|
|
3897
4013
|
try {
|
|
3898
4014
|
return getDecisionsByPhase(deps.db, "implementation");
|
|
3899
4015
|
} catch (err) {
|
|
3900
|
-
logger$
|
|
4016
|
+
logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
|
|
3901
4017
|
return [];
|
|
3902
4018
|
}
|
|
3903
4019
|
}
|
|
3904
4020
|
/**
|
|
4021
|
+
* Extract the section for a specific story key from a full epic shard.
|
|
4022
|
+
*
|
|
4023
|
+
* Matches patterns like:
|
|
4024
|
+
* - "Story 23-1:" / "### Story 23-1" / "#### Story 23-1"
|
|
4025
|
+
* - "23-1:" / "**23-1**"
|
|
4026
|
+
*
|
|
4027
|
+
* Returns the matched section content (from heading to next story heading or end),
|
|
4028
|
+
* or null if no matching section is found (caller falls back to full shard).
|
|
4029
|
+
*/
|
|
4030
|
+
function extractStorySection(shardContent, storyKey) {
|
|
4031
|
+
if (!shardContent || !storyKey) return null;
|
|
4032
|
+
const escaped = storyKey.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
|
|
4033
|
+
const headingPattern = new RegExp(`(?:^#{2,4}\\s+Story\\s+${escaped}\\b|^Story\\s+${escaped}:|^\\*\\*${escaped}\\*\\*|^${escaped}:)`, "mi");
|
|
4034
|
+
const match = headingPattern.exec(shardContent);
|
|
4035
|
+
if (!match) return null;
|
|
4036
|
+
const startIdx = match.index;
|
|
4037
|
+
const rest = shardContent.slice(startIdx + match[0].length);
|
|
4038
|
+
const nextStoryPattern = new RegExp(`(?:^#{2,4}\\s+Story\\s+[\\d]|^Story\\s+[\\d][\\d-]*:|^\\*\\*[\\d][\\d-]*\\*\\*|^[\\d][\\d-]*:)`, "mi");
|
|
4039
|
+
const nextMatch = nextStoryPattern.exec(rest);
|
|
4040
|
+
const endIdx = nextMatch !== null ? startIdx + match[0].length + nextMatch.index : shardContent.length;
|
|
4041
|
+
const section = shardContent.slice(startIdx, endIdx).trim();
|
|
4042
|
+
return section.length > 0 ? section : null;
|
|
4043
|
+
}
|
|
4044
|
+
/**
|
|
3905
4045
|
* Retrieve the epic shard from the pre-fetched implementation decisions.
|
|
3906
4046
|
* Looks for decisions with category='epic-shard', key=epicId.
|
|
3907
4047
|
* Falls back to reading _bmad-output/epics.md on disk if decisions are empty.
|
|
4048
|
+
*
|
|
4049
|
+
* When storyKey is provided, extracts only the section for that story (AC3).
|
|
3908
4050
|
*/
|
|
3909
|
-
function getEpicShard(decisions, epicId, projectRoot) {
|
|
4051
|
+
function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
3910
4052
|
try {
|
|
3911
4053
|
const epicShard = decisions.find((d) => d.category === "epic-shard" && d.key === epicId);
|
|
3912
|
-
|
|
4054
|
+
const shardContent = epicShard?.value;
|
|
4055
|
+
if (shardContent) {
|
|
4056
|
+
if (storyKey) {
|
|
4057
|
+
const storySection = extractStorySection(shardContent, storyKey);
|
|
4058
|
+
if (storySection) {
|
|
4059
|
+
logger$14.debug({
|
|
4060
|
+
epicId,
|
|
4061
|
+
storyKey
|
|
4062
|
+
}, "Extracted per-story section from epic shard");
|
|
4063
|
+
return storySection;
|
|
4064
|
+
}
|
|
4065
|
+
logger$14.debug({
|
|
4066
|
+
epicId,
|
|
4067
|
+
storyKey
|
|
4068
|
+
}, "No matching story section found — using full epic shard");
|
|
4069
|
+
}
|
|
4070
|
+
return shardContent;
|
|
4071
|
+
}
|
|
3913
4072
|
if (projectRoot) {
|
|
3914
4073
|
const fallback = readEpicShardFromFile(projectRoot, epicId);
|
|
3915
4074
|
if (fallback) {
|
|
3916
|
-
logger$
|
|
4075
|
+
logger$14.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
|
|
4076
|
+
if (storyKey) {
|
|
4077
|
+
const storySection = extractStorySection(fallback, storyKey);
|
|
4078
|
+
if (storySection) {
|
|
4079
|
+
logger$14.debug({
|
|
4080
|
+
epicId,
|
|
4081
|
+
storyKey
|
|
4082
|
+
}, "Extracted per-story section from file-based epic shard");
|
|
4083
|
+
return storySection;
|
|
4084
|
+
}
|
|
4085
|
+
}
|
|
3917
4086
|
return fallback;
|
|
3918
4087
|
}
|
|
3919
4088
|
}
|
|
3920
4089
|
return "";
|
|
3921
4090
|
} catch (err) {
|
|
3922
|
-
logger$
|
|
4091
|
+
logger$14.warn({
|
|
3923
4092
|
epicId,
|
|
3924
4093
|
error: err instanceof Error ? err.message : String(err)
|
|
3925
4094
|
}, "Failed to retrieve epic shard");
|
|
@@ -3936,7 +4105,7 @@ function getPrevDevNotes(decisions, epicId) {
|
|
|
3936
4105
|
if (devNotes.length === 0) return "";
|
|
3937
4106
|
return devNotes[devNotes.length - 1].value;
|
|
3938
4107
|
} catch (err) {
|
|
3939
|
-
logger$
|
|
4108
|
+
logger$14.warn({
|
|
3940
4109
|
epicId,
|
|
3941
4110
|
error: err instanceof Error ? err.message : String(err)
|
|
3942
4111
|
}, "Failed to retrieve prev dev notes");
|
|
@@ -3948,7 +4117,7 @@ function getPrevDevNotes(decisions, epicId) {
|
|
|
3948
4117
|
* Looks for decisions with phase='solutioning', category='architecture'.
|
|
3949
4118
|
* Falls back to reading _bmad-output/architecture/architecture.md on disk if decisions are empty.
|
|
3950
4119
|
*/
|
|
3951
|
-
function getArchConstraints$
|
|
4120
|
+
function getArchConstraints$2(deps) {
|
|
3952
4121
|
try {
|
|
3953
4122
|
const decisions = getDecisionsByPhase(deps.db, "solutioning");
|
|
3954
4123
|
const constraints = decisions.filter((d) => d.category === "architecture");
|
|
@@ -3956,13 +4125,13 @@ function getArchConstraints$1(deps) {
|
|
|
3956
4125
|
if (deps.projectRoot) {
|
|
3957
4126
|
const fallback = readArchConstraintsFromFile(deps.projectRoot);
|
|
3958
4127
|
if (fallback) {
|
|
3959
|
-
logger$
|
|
4128
|
+
logger$14.info("Using file-based fallback for architecture constraints (decisions table empty)");
|
|
3960
4129
|
return fallback;
|
|
3961
4130
|
}
|
|
3962
4131
|
}
|
|
3963
4132
|
return "";
|
|
3964
4133
|
} catch (err) {
|
|
3965
|
-
logger$
|
|
4134
|
+
logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
3966
4135
|
return "";
|
|
3967
4136
|
}
|
|
3968
4137
|
}
|
|
@@ -3978,11 +4147,11 @@ function readEpicShardFromFile(projectRoot, epicId) {
|
|
|
3978
4147
|
if (!epicsPath) return "";
|
|
3979
4148
|
const content = readFileSync$1(epicsPath, "utf-8");
|
|
3980
4149
|
const epicNum = epicId.replace(/^epic-/i, "");
|
|
3981
|
-
const pattern = new RegExp(
|
|
4150
|
+
const pattern = new RegExp(`^#{2,4}\\s+(?:Epic\\s+)?${epicNum}[.:\\s].*?(?=\\n#{2,4}\\s|$)`, "ms");
|
|
3982
4151
|
const match = pattern.exec(content);
|
|
3983
4152
|
return match ? match[0].trim() : "";
|
|
3984
4153
|
} catch (err) {
|
|
3985
|
-
logger$
|
|
4154
|
+
logger$14.warn({
|
|
3986
4155
|
epicId,
|
|
3987
4156
|
error: err instanceof Error ? err.message : String(err)
|
|
3988
4157
|
}, "File-based epic shard fallback failed");
|
|
@@ -4005,7 +4174,7 @@ function readArchConstraintsFromFile(projectRoot) {
|
|
|
4005
4174
|
const content = readFileSync$1(archPath, "utf-8");
|
|
4006
4175
|
return content.slice(0, 1500);
|
|
4007
4176
|
} catch (err) {
|
|
4008
|
-
logger$
|
|
4177
|
+
logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
|
|
4009
4178
|
return "";
|
|
4010
4179
|
}
|
|
4011
4180
|
}
|
|
@@ -4018,14 +4187,44 @@ async function getStoryTemplate(deps) {
|
|
|
4018
4187
|
try {
|
|
4019
4188
|
return await deps.pack.getTemplate("story");
|
|
4020
4189
|
} catch (err) {
|
|
4021
|
-
logger$
|
|
4190
|
+
logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
|
|
4022
4191
|
return "";
|
|
4023
4192
|
}
|
|
4024
4193
|
}
|
|
4194
|
+
/**
|
|
4195
|
+
* Validate that an existing story file is non-empty and structurally valid.
|
|
4196
|
+
*
|
|
4197
|
+
* A valid story file must:
|
|
4198
|
+
* 1. Be non-empty (> 0 bytes after trim)
|
|
4199
|
+
* 2. Contain at least one heading (`#`) AND either "Acceptance Criteria" or "AC1"
|
|
4200
|
+
*
|
|
4201
|
+
* @returns `{ valid: true }` or `{ valid: false, reason: 'empty' | 'missing_structure' }`
|
|
4202
|
+
*/
|
|
4203
|
+
async function isValidStoryFile(filePath) {
|
|
4204
|
+
try {
|
|
4205
|
+
const content = await readFile$1(filePath, "utf-8");
|
|
4206
|
+
if (content.trim().length === 0) return {
|
|
4207
|
+
valid: false,
|
|
4208
|
+
reason: "empty"
|
|
4209
|
+
};
|
|
4210
|
+
const hasHeading = content.includes("#");
|
|
4211
|
+
const hasAC = /acceptance criteria|AC1/i.test(content);
|
|
4212
|
+
if (!hasHeading || !hasAC) return {
|
|
4213
|
+
valid: false,
|
|
4214
|
+
reason: "missing_structure"
|
|
4215
|
+
};
|
|
4216
|
+
return { valid: true };
|
|
4217
|
+
} catch {
|
|
4218
|
+
return {
|
|
4219
|
+
valid: false,
|
|
4220
|
+
reason: "empty"
|
|
4221
|
+
};
|
|
4222
|
+
}
|
|
4223
|
+
}
|
|
4025
4224
|
|
|
4026
4225
|
//#endregion
|
|
4027
4226
|
//#region src/modules/compiled-workflows/git-helpers.ts
|
|
4028
|
-
const logger$
|
|
4227
|
+
const logger$13 = createLogger("compiled-workflows:git-helpers");
|
|
4029
4228
|
/**
|
|
4030
4229
|
* Capture the full git diff for HEAD (working tree vs current commit).
|
|
4031
4230
|
*
|
|
@@ -4119,11 +4318,17 @@ async function getGitChangedFiles(workingDirectory = process.cwd()) {
|
|
|
4119
4318
|
*/
|
|
4120
4319
|
async function stageIntentToAdd(files, workingDirectory) {
|
|
4121
4320
|
if (files.length === 0) return;
|
|
4321
|
+
const existing = files.filter((f) => {
|
|
4322
|
+
const exists = existsSync$1(f);
|
|
4323
|
+
if (!exists) logger$13.debug({ file: f }, "Skipping nonexistent file in stageIntentToAdd");
|
|
4324
|
+
return exists;
|
|
4325
|
+
});
|
|
4326
|
+
if (existing.length === 0) return;
|
|
4122
4327
|
await runGitCommand([
|
|
4123
4328
|
"add",
|
|
4124
4329
|
"-N",
|
|
4125
4330
|
"--",
|
|
4126
|
-
...
|
|
4331
|
+
...existing
|
|
4127
4332
|
], workingDirectory, "git-add-intent");
|
|
4128
4333
|
}
|
|
4129
4334
|
/**
|
|
@@ -4149,7 +4354,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
4149
4354
|
stderr += chunk.toString("utf-8");
|
|
4150
4355
|
});
|
|
4151
4356
|
proc.on("error", (err) => {
|
|
4152
|
-
logger$
|
|
4357
|
+
logger$13.warn({
|
|
4153
4358
|
label: logLabel,
|
|
4154
4359
|
cwd,
|
|
4155
4360
|
error: err.message
|
|
@@ -4158,7 +4363,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
4158
4363
|
});
|
|
4159
4364
|
proc.on("close", (code) => {
|
|
4160
4365
|
if (code !== 0) {
|
|
4161
|
-
logger$
|
|
4366
|
+
logger$13.warn({
|
|
4162
4367
|
label: logLabel,
|
|
4163
4368
|
cwd,
|
|
4164
4369
|
code,
|
|
@@ -4174,7 +4379,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
4174
4379
|
|
|
4175
4380
|
//#endregion
|
|
4176
4381
|
//#region src/modules/implementation-orchestrator/project-findings.ts
|
|
4177
|
-
const logger$
|
|
4382
|
+
const logger$12 = createLogger("project-findings");
|
|
4178
4383
|
/** Maximum character length for the findings summary */
|
|
4179
4384
|
const MAX_CHARS = 2e3;
|
|
4180
4385
|
/**
|
|
@@ -4229,7 +4434,7 @@ function getProjectFindings(db) {
|
|
|
4229
4434
|
if (summary.length > MAX_CHARS) summary = summary.slice(0, MAX_CHARS - 3) + "...";
|
|
4230
4435
|
return summary;
|
|
4231
4436
|
} catch (err) {
|
|
4232
|
-
logger$
|
|
4437
|
+
logger$12.warn({ err }, "Failed to query project findings (graceful fallback)");
|
|
4233
4438
|
return "";
|
|
4234
4439
|
}
|
|
4235
4440
|
}
|
|
@@ -4252,11 +4457,11 @@ function extractRecurringPatterns(outcomes) {
|
|
|
4252
4457
|
|
|
4253
4458
|
//#endregion
|
|
4254
4459
|
//#region src/modules/compiled-workflows/dev-story.ts
|
|
4255
|
-
const logger$
|
|
4460
|
+
const logger$11 = createLogger("compiled-workflows:dev-story");
|
|
4256
4461
|
/** Hard token ceiling for the assembled dev-story prompt */
|
|
4257
|
-
const TOKEN_CEILING$
|
|
4462
|
+
const TOKEN_CEILING$3 = 24e3;
|
|
4258
4463
|
/** Default timeout for dev-story dispatches in milliseconds (30 min) */
|
|
4259
|
-
const DEFAULT_TIMEOUT_MS = 18e5;
|
|
4464
|
+
const DEFAULT_TIMEOUT_MS$1 = 18e5;
|
|
4260
4465
|
/** Default Vitest test patterns injected when no test-pattern decisions exist */
|
|
4261
4466
|
const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
|
|
4262
4467
|
- Framework: Vitest (NOT jest — --testPathPattern flag does not work, use -- "pattern")
|
|
@@ -4277,7 +4482,7 @@ const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
|
|
|
4277
4482
|
*/
|
|
4278
4483
|
async function runDevStory(deps, params) {
|
|
4279
4484
|
const { storyKey, storyFilePath, taskScope, priorFiles } = params;
|
|
4280
|
-
logger$
|
|
4485
|
+
logger$11.info({
|
|
4281
4486
|
storyKey,
|
|
4282
4487
|
storyFilePath
|
|
4283
4488
|
}, "Starting compiled dev-story workflow");
|
|
@@ -4319,10 +4524,10 @@ async function runDevStory(deps, params) {
|
|
|
4319
4524
|
let template;
|
|
4320
4525
|
try {
|
|
4321
4526
|
template = await deps.pack.getPrompt("dev-story");
|
|
4322
|
-
logger$
|
|
4527
|
+
logger$11.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
|
|
4323
4528
|
} catch (err) {
|
|
4324
4529
|
const error = err instanceof Error ? err.message : String(err);
|
|
4325
|
-
logger$
|
|
4530
|
+
logger$11.error({
|
|
4326
4531
|
storyKey,
|
|
4327
4532
|
error
|
|
4328
4533
|
}, "Failed to retrieve dev-story prompt template");
|
|
@@ -4333,14 +4538,14 @@ async function runDevStory(deps, params) {
|
|
|
4333
4538
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
4334
4539
|
} catch (err) {
|
|
4335
4540
|
if (err.code === "ENOENT") {
|
|
4336
|
-
logger$
|
|
4541
|
+
logger$11.error({
|
|
4337
4542
|
storyKey,
|
|
4338
4543
|
storyFilePath
|
|
4339
4544
|
}, "Story file not found");
|
|
4340
4545
|
return makeFailureResult("story_file_not_found");
|
|
4341
4546
|
}
|
|
4342
4547
|
const error = err instanceof Error ? err.message : String(err);
|
|
4343
|
-
logger$
|
|
4548
|
+
logger$11.error({
|
|
4344
4549
|
storyKey,
|
|
4345
4550
|
storyFilePath,
|
|
4346
4551
|
error
|
|
@@ -4348,7 +4553,7 @@ async function runDevStory(deps, params) {
|
|
|
4348
4553
|
return makeFailureResult(`story_file_read_error: ${error}`);
|
|
4349
4554
|
}
|
|
4350
4555
|
if (storyContent.trim().length === 0) {
|
|
4351
|
-
logger$
|
|
4556
|
+
logger$11.error({
|
|
4352
4557
|
storyKey,
|
|
4353
4558
|
storyFilePath
|
|
4354
4559
|
}, "Story file is empty");
|
|
@@ -4360,17 +4565,17 @@ async function runDevStory(deps, params) {
|
|
|
4360
4565
|
const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
|
|
4361
4566
|
if (testPatternDecisions.length > 0) {
|
|
4362
4567
|
testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
|
|
4363
|
-
logger$
|
|
4568
|
+
logger$11.debug({
|
|
4364
4569
|
storyKey,
|
|
4365
4570
|
count: testPatternDecisions.length
|
|
4366
4571
|
}, "Loaded test patterns from decision store");
|
|
4367
4572
|
} else {
|
|
4368
4573
|
testPatternsContent = DEFAULT_VITEST_PATTERNS;
|
|
4369
|
-
logger$
|
|
4574
|
+
logger$11.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
|
|
4370
4575
|
}
|
|
4371
4576
|
} catch (err) {
|
|
4372
4577
|
const error = err instanceof Error ? err.message : String(err);
|
|
4373
|
-
logger$
|
|
4578
|
+
logger$11.warn({
|
|
4374
4579
|
storyKey,
|
|
4375
4580
|
error
|
|
4376
4581
|
}, "Failed to load test patterns — using defaults");
|
|
@@ -4385,12 +4590,29 @@ async function runDevStory(deps, params) {
|
|
|
4385
4590
|
const findings = getProjectFindings(deps.db);
|
|
4386
4591
|
if (findings.length > 0) {
|
|
4387
4592
|
priorFindingsContent = "Previous pipeline runs encountered these issues — avoid repeating them:\n\n" + findings;
|
|
4388
|
-
logger$
|
|
4593
|
+
logger$11.debug({
|
|
4389
4594
|
storyKey,
|
|
4390
4595
|
findingsLen: findings.length
|
|
4391
4596
|
}, "Injecting prior findings into dev-story prompt");
|
|
4392
4597
|
}
|
|
4393
4598
|
} catch {}
|
|
4599
|
+
let testPlanContent = "";
|
|
4600
|
+
try {
|
|
4601
|
+
const testPlanDecisions = getDecisionsByCategory(deps.db, "test-plan");
|
|
4602
|
+
const matchingPlan = testPlanDecisions.find((d) => d.key === storyKey);
|
|
4603
|
+
if (matchingPlan) {
|
|
4604
|
+
const plan = JSON.parse(matchingPlan.value);
|
|
4605
|
+
const parts = ["## Test Plan"];
|
|
4606
|
+
if (plan.test_files && plan.test_files.length > 0) {
|
|
4607
|
+
parts.push("\n### Test Files");
|
|
4608
|
+
for (const f of plan.test_files) parts.push(`- ${f}`);
|
|
4609
|
+
}
|
|
4610
|
+
if (plan.test_categories && plan.test_categories.length > 0) parts.push(`\n### Categories: ${plan.test_categories.join(", ")}`);
|
|
4611
|
+
if (plan.coverage_notes) parts.push(`\n### Coverage Notes\n${plan.coverage_notes}`);
|
|
4612
|
+
testPlanContent = parts.join("\n");
|
|
4613
|
+
logger$11.debug({ storyKey }, "Injecting test plan into dev-story prompt");
|
|
4614
|
+
}
|
|
4615
|
+
} catch {}
|
|
4394
4616
|
const sections = [
|
|
4395
4617
|
{
|
|
4396
4618
|
name: "story_content",
|
|
@@ -4422,17 +4644,22 @@ async function runDevStory(deps, params) {
|
|
|
4422
4644
|
content: testPatternsContent,
|
|
4423
4645
|
priority: "optional"
|
|
4424
4646
|
},
|
|
4647
|
+
{
|
|
4648
|
+
name: "test_plan",
|
|
4649
|
+
content: testPlanContent,
|
|
4650
|
+
priority: "optional"
|
|
4651
|
+
},
|
|
4425
4652
|
{
|
|
4426
4653
|
name: "prior_findings",
|
|
4427
4654
|
content: priorFindingsContent,
|
|
4428
4655
|
priority: "optional"
|
|
4429
4656
|
}
|
|
4430
4657
|
];
|
|
4431
|
-
const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING$
|
|
4432
|
-
logger$
|
|
4658
|
+
const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING$3);
|
|
4659
|
+
logger$11.info({
|
|
4433
4660
|
storyKey,
|
|
4434
4661
|
tokenCount,
|
|
4435
|
-
ceiling: TOKEN_CEILING$
|
|
4662
|
+
ceiling: TOKEN_CEILING$3,
|
|
4436
4663
|
truncated
|
|
4437
4664
|
}, "Assembled dev-story prompt");
|
|
4438
4665
|
let dispatchResult;
|
|
@@ -4441,14 +4668,14 @@ async function runDevStory(deps, params) {
|
|
|
4441
4668
|
prompt,
|
|
4442
4669
|
agent: "claude-code",
|
|
4443
4670
|
taskType: "dev-story",
|
|
4444
|
-
timeout: DEFAULT_TIMEOUT_MS,
|
|
4671
|
+
timeout: DEFAULT_TIMEOUT_MS$1,
|
|
4445
4672
|
outputSchema: DevStoryResultSchema,
|
|
4446
4673
|
...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {}
|
|
4447
4674
|
});
|
|
4448
4675
|
dispatchResult = await handle.result;
|
|
4449
4676
|
} catch (err) {
|
|
4450
4677
|
const error = err instanceof Error ? err.message : String(err);
|
|
4451
|
-
logger$
|
|
4678
|
+
logger$11.error({
|
|
4452
4679
|
storyKey,
|
|
4453
4680
|
error
|
|
4454
4681
|
}, "Dispatch threw an unexpected error");
|
|
@@ -4459,11 +4686,11 @@ async function runDevStory(deps, params) {
|
|
|
4459
4686
|
output: dispatchResult.tokenEstimate.output
|
|
4460
4687
|
};
|
|
4461
4688
|
if (dispatchResult.status === "timeout") {
|
|
4462
|
-
logger$
|
|
4689
|
+
logger$11.error({
|
|
4463
4690
|
storyKey,
|
|
4464
4691
|
durationMs: dispatchResult.durationMs
|
|
4465
4692
|
}, "Dev-story dispatch timed out");
|
|
4466
|
-
if (dispatchResult.output.length > 0) logger$
|
|
4693
|
+
if (dispatchResult.output.length > 0) logger$11.info({
|
|
4467
4694
|
storyKey,
|
|
4468
4695
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
4469
4696
|
}, "Partial output before timeout");
|
|
@@ -4473,12 +4700,12 @@ async function runDevStory(deps, params) {
|
|
|
4473
4700
|
};
|
|
4474
4701
|
}
|
|
4475
4702
|
if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
|
|
4476
|
-
logger$
|
|
4703
|
+
logger$11.error({
|
|
4477
4704
|
storyKey,
|
|
4478
4705
|
exitCode: dispatchResult.exitCode,
|
|
4479
4706
|
status: dispatchResult.status
|
|
4480
4707
|
}, "Dev-story dispatch failed");
|
|
4481
|
-
if (dispatchResult.output.length > 0) logger$
|
|
4708
|
+
if (dispatchResult.output.length > 0) logger$11.info({
|
|
4482
4709
|
storyKey,
|
|
4483
4710
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
4484
4711
|
}, "Partial output from failed dispatch");
|
|
@@ -4490,7 +4717,7 @@ async function runDevStory(deps, params) {
|
|
|
4490
4717
|
if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
|
|
4491
4718
|
const details = dispatchResult.parseError ?? "parsed result was null";
|
|
4492
4719
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
4493
|
-
logger$
|
|
4720
|
+
logger$11.error({
|
|
4494
4721
|
storyKey,
|
|
4495
4722
|
parseError: details,
|
|
4496
4723
|
rawOutputSnippet: rawSnippet
|
|
@@ -4498,12 +4725,12 @@ async function runDevStory(deps, params) {
|
|
|
4498
4725
|
let filesModified = [];
|
|
4499
4726
|
try {
|
|
4500
4727
|
filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
|
|
4501
|
-
if (filesModified.length > 0) logger$
|
|
4728
|
+
if (filesModified.length > 0) logger$11.info({
|
|
4502
4729
|
storyKey,
|
|
4503
4730
|
fileCount: filesModified.length
|
|
4504
4731
|
}, "Recovered files_modified from git status (YAML fallback)");
|
|
4505
4732
|
} catch (err) {
|
|
4506
|
-
logger$
|
|
4733
|
+
logger$11.warn({
|
|
4507
4734
|
storyKey,
|
|
4508
4735
|
error: err instanceof Error ? err.message : String(err)
|
|
4509
4736
|
}, "Failed to recover files_modified from git");
|
|
@@ -4520,7 +4747,7 @@ async function runDevStory(deps, params) {
|
|
|
4520
4747
|
};
|
|
4521
4748
|
}
|
|
4522
4749
|
const parsed = dispatchResult.parsed;
|
|
4523
|
-
logger$
|
|
4750
|
+
logger$11.info({
|
|
4524
4751
|
storyKey,
|
|
4525
4752
|
result: parsed.result,
|
|
4526
4753
|
acMet: parsed.ac_met.length
|
|
@@ -4659,13 +4886,13 @@ function extractFilesInScope(storyContent) {
|
|
|
4659
4886
|
|
|
4660
4887
|
//#endregion
|
|
4661
4888
|
//#region src/modules/compiled-workflows/code-review.ts
|
|
4662
|
-
const logger$
|
|
4889
|
+
const logger$10 = createLogger("compiled-workflows:code-review");
|
|
4663
4890
|
/**
|
|
4664
4891
|
* Hard token ceiling for the assembled code-review prompt (50,000 tokens).
|
|
4665
4892
|
* Quality reviews require seeing actual code diffs, not just file names.
|
|
4666
4893
|
* // TODO: consider externalizing to pack config when multiple packs exist
|
|
4667
4894
|
*/
|
|
4668
|
-
const TOKEN_CEILING = 1e5;
|
|
4895
|
+
const TOKEN_CEILING$2 = 1e5;
|
|
4669
4896
|
/**
|
|
4670
4897
|
* Default fallback result when dispatch fails or times out.
|
|
4671
4898
|
* Uses NEEDS_MINOR_FIXES (not NEEDS_MAJOR_REWORK) so a parse/schema failure
|
|
@@ -4678,6 +4905,7 @@ function defaultFailResult(error, tokenUsage) {
|
|
|
4678
4905
|
issues: 0,
|
|
4679
4906
|
issue_list: [],
|
|
4680
4907
|
error,
|
|
4908
|
+
dispatchFailed: true,
|
|
4681
4909
|
tokenUsage
|
|
4682
4910
|
};
|
|
4683
4911
|
}
|
|
@@ -4702,7 +4930,7 @@ function defaultFailResult(error, tokenUsage) {
|
|
|
4702
4930
|
async function runCodeReview(deps, params) {
|
|
4703
4931
|
const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
|
|
4704
4932
|
const cwd = workingDirectory ?? process.cwd();
|
|
4705
|
-
logger$
|
|
4933
|
+
logger$10.debug({
|
|
4706
4934
|
storyKey,
|
|
4707
4935
|
storyFilePath,
|
|
4708
4936
|
cwd,
|
|
@@ -4713,7 +4941,7 @@ async function runCodeReview(deps, params) {
|
|
|
4713
4941
|
template = await deps.pack.getPrompt("code-review");
|
|
4714
4942
|
} catch (err) {
|
|
4715
4943
|
const error = err instanceof Error ? err.message : String(err);
|
|
4716
|
-
logger$
|
|
4944
|
+
logger$10.error({ error }, "Failed to retrieve code-review prompt template");
|
|
4717
4945
|
return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
|
|
4718
4946
|
input: 0,
|
|
4719
4947
|
output: 0
|
|
@@ -4724,7 +4952,7 @@ async function runCodeReview(deps, params) {
|
|
|
4724
4952
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
4725
4953
|
} catch (err) {
|
|
4726
4954
|
const error = err instanceof Error ? err.message : String(err);
|
|
4727
|
-
logger$
|
|
4955
|
+
logger$10.error({
|
|
4728
4956
|
storyFilePath,
|
|
4729
4957
|
error
|
|
4730
4958
|
}, "Failed to read story file");
|
|
@@ -4733,7 +4961,7 @@ async function runCodeReview(deps, params) {
|
|
|
4733
4961
|
output: 0
|
|
4734
4962
|
});
|
|
4735
4963
|
}
|
|
4736
|
-
const archConstraintsContent = getArchConstraints(deps);
|
|
4964
|
+
const archConstraintsContent = getArchConstraints$1(deps);
|
|
4737
4965
|
const templateTokens = countTokens(template);
|
|
4738
4966
|
const storyTokens = countTokens(storyContent);
|
|
4739
4967
|
const constraintTokens = countTokens(archConstraintsContent);
|
|
@@ -4742,16 +4970,16 @@ async function runCodeReview(deps, params) {
|
|
|
4742
4970
|
if (filesModified && filesModified.length > 0) {
|
|
4743
4971
|
const scopedDiff = await getGitDiffForFiles(filesModified, cwd);
|
|
4744
4972
|
const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
|
|
4745
|
-
if (scopedTotal <= TOKEN_CEILING) {
|
|
4973
|
+
if (scopedTotal <= TOKEN_CEILING$2) {
|
|
4746
4974
|
gitDiffContent = scopedDiff;
|
|
4747
|
-
logger$
|
|
4975
|
+
logger$10.debug({
|
|
4748
4976
|
fileCount: filesModified.length,
|
|
4749
4977
|
tokenCount: scopedTotal
|
|
4750
4978
|
}, "Using scoped file diff");
|
|
4751
4979
|
} else {
|
|
4752
|
-
logger$
|
|
4980
|
+
logger$10.warn({
|
|
4753
4981
|
estimatedTotal: scopedTotal,
|
|
4754
|
-
ceiling: TOKEN_CEILING,
|
|
4982
|
+
ceiling: TOKEN_CEILING$2,
|
|
4755
4983
|
fileCount: filesModified.length
|
|
4756
4984
|
}, "Scoped diff exceeds token ceiling — falling back to stat-only summary");
|
|
4757
4985
|
gitDiffContent = await getGitDiffStatSummary(cwd);
|
|
@@ -4761,15 +4989,28 @@ async function runCodeReview(deps, params) {
|
|
|
4761
4989
|
await stageIntentToAdd(changedFiles, cwd);
|
|
4762
4990
|
const fullDiff = await getGitDiffSummary(cwd);
|
|
4763
4991
|
const fullTotal = nonDiffTokens + countTokens(fullDiff);
|
|
4764
|
-
if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
|
|
4992
|
+
if (fullTotal <= TOKEN_CEILING$2) gitDiffContent = fullDiff;
|
|
4765
4993
|
else {
|
|
4766
|
-
logger$
|
|
4994
|
+
logger$10.warn({
|
|
4767
4995
|
estimatedTotal: fullTotal,
|
|
4768
|
-
ceiling: TOKEN_CEILING
|
|
4996
|
+
ceiling: TOKEN_CEILING$2
|
|
4769
4997
|
}, "Full git diff would exceed token ceiling — using stat-only summary");
|
|
4770
4998
|
gitDiffContent = await getGitDiffStatSummary(cwd);
|
|
4771
4999
|
}
|
|
4772
5000
|
}
|
|
5001
|
+
if (gitDiffContent.trim().length === 0) {
|
|
5002
|
+
logger$10.info({ storyKey }, "Empty git diff — skipping review with SHIP_IT");
|
|
5003
|
+
return {
|
|
5004
|
+
verdict: "SHIP_IT",
|
|
5005
|
+
issues: 0,
|
|
5006
|
+
issue_list: [],
|
|
5007
|
+
notes: "no_changes_to_review",
|
|
5008
|
+
tokenUsage: {
|
|
5009
|
+
input: 0,
|
|
5010
|
+
output: 0
|
|
5011
|
+
}
|
|
5012
|
+
};
|
|
5013
|
+
}
|
|
4773
5014
|
let previousFindingsContent = "";
|
|
4774
5015
|
if (previousIssues !== void 0 && previousIssues.length > 0) previousFindingsContent = [
|
|
4775
5016
|
"The previous code review found these issues. A fix agent has attempted to resolve them.",
|
|
@@ -4783,7 +5024,7 @@ async function runCodeReview(deps, params) {
 const findings = getProjectFindings(deps.db);
 if (findings.length > 0) {
 priorFindingsContent = "Previous reviews found these recurring patterns — pay special attention:\n\n" + findings;
-logger$
+logger$10.debug({
 storyKey,
 findingsLen: findings.length
 }, "Injecting prior findings into code-review prompt");
@@ -4816,12 +5057,12 @@ async function runCodeReview(deps, params) {
 priority: "optional"
 }
 ];
-const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
-if (assembleResult.truncated) logger$
+const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING$2);
+if (assembleResult.truncated) logger$10.warn({
 storyKey,
 tokenCount: assembleResult.tokenCount
 }, "Code-review prompt truncated to fit token ceiling");
-logger$
+logger$10.debug({
 storyKey,
 tokenCount: assembleResult.tokenCount,
 truncated: assembleResult.truncated
@@ -4839,7 +5080,7 @@ async function runCodeReview(deps, params) {
 dispatchResult = await handle.result;
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$10.error({
 storyKey,
 error
 }, "Code-review dispatch threw unexpected error");
@@ -4855,7 +5096,7 @@ async function runCodeReview(deps, params) {
 const rawOutput = dispatchResult.output ?? void 0;
 if (dispatchResult.status === "failed") {
 const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
-logger$
+logger$10.warn({
 storyKey,
 exitCode: dispatchResult.exitCode
 }, "Code-review dispatch failed");
@@ -4865,7 +5106,7 @@ async function runCodeReview(deps, params) {
 };
 }
 if (dispatchResult.status === "timeout") {
-logger$
+logger$10.warn({ storyKey }, "Code-review dispatch timed out");
 return {
 ...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
 rawOutput
@@ -4873,7 +5114,7 @@ async function runCodeReview(deps, params) {
 }
 if (dispatchResult.parsed === null) {
 const details = dispatchResult.parseError ?? "No YAML block found in output";
-logger$
+logger$10.warn({
 storyKey,
 details
 }, "Code-review output schema validation failed");
@@ -4890,7 +5131,7 @@ async function runCodeReview(deps, params) {
 const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
 if (!parseResult.success) {
 const details = parseResult.error.message;
-logger$
+logger$10.warn({
 storyKey,
 details
 }, "Code-review output failed schema validation");
@@ -4905,13 +5146,13 @@ async function runCodeReview(deps, params) {
 };
 }
 const parsed = parseResult.data;
-if (parsed.agentVerdict !== parsed.verdict) logger$
+if (parsed.agentVerdict !== parsed.verdict) logger$10.info({
 storyKey,
 agentVerdict: parsed.agentVerdict,
 pipelineVerdict: parsed.verdict,
 issues: parsed.issues
 }, "Pipeline overrode agent verdict based on issue severities");
-logger$
+logger$10.info({
 storyKey,
 verdict: parsed.verdict,
 issues: parsed.issues
@@ -4929,6 +5170,384 @@ async function runCodeReview(deps, params) {
 * Retrieve architecture constraints from the decision store.
 * Looks for decisions with phase='solutioning', category='architecture'.
 */
+function getArchConstraints$1(deps) {
+try {
+const decisions = getDecisionsByPhase(deps.db, "solutioning");
+const constraints = decisions.filter((d) => d.category === "architecture");
+if (constraints.length === 0) return "";
+return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
+} catch (err) {
+logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
+return "";
+}
+}
+
+//#endregion
+//#region src/modules/compiled-workflows/test-plan.ts
+const logger$9 = createLogger("compiled-workflows:test-plan");
+/** Hard token ceiling for the assembled test-plan prompt */
+const TOKEN_CEILING$1 = 8e3;
+/** Default timeout for test-plan dispatches in milliseconds (5 min — lightweight call) */
+const DEFAULT_TIMEOUT_MS = 3e5;
+/**
+* Execute the compiled test-plan workflow.
+*
+* @param deps - Injected dependencies (db, pack, contextCompiler, dispatcher)
+* @param params - Parameters (storyKey, storyFilePath, pipelineRunId)
+* @returns TestPlanResult with result, test_files, test_categories, coverage_notes, tokenUsage
+*/
+async function runTestPlan(deps, params) {
+const { storyKey, storyFilePath, pipelineRunId } = params;
+logger$9.info({
+storyKey,
+storyFilePath
+}, "Starting compiled test-plan workflow");
+let template;
+try {
+template = await deps.pack.getPrompt("test-plan");
+logger$9.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
+} catch (err) {
+const error = err instanceof Error ? err.message : String(err);
+logger$9.warn({
+storyKey,
+error
+}, "Failed to retrieve test-plan prompt template");
+return makeTestPlanFailureResult(`template_load_failed: ${error}`);
+}
+let storyContent;
+try {
+storyContent = await readFile$1(storyFilePath, "utf-8");
+} catch (err) {
+if (err.code === "ENOENT") {
+logger$9.warn({
+storyKey,
+storyFilePath
+}, "Story file not found for test planning");
+return makeTestPlanFailureResult("story_file_not_found");
+}
+const error = err instanceof Error ? err.message : String(err);
+logger$9.warn({
+storyKey,
+storyFilePath,
+error
+}, "Failed to read story file for test planning");
+return makeTestPlanFailureResult(`story_file_read_error: ${error}`);
+}
+const { prompt, tokenCount, truncated } = assemblePrompt(template, [{
+name: "story_content",
+content: storyContent,
+priority: "required"
+}], TOKEN_CEILING$1);
+logger$9.info({
+storyKey,
+tokenCount,
+ceiling: TOKEN_CEILING$1,
+truncated
+}, "Assembled test-plan prompt");
+let dispatchResult;
+try {
+const handle = deps.dispatcher.dispatch({
+prompt,
+agent: "claude-code",
+taskType: "test-plan",
+timeout: DEFAULT_TIMEOUT_MS,
+outputSchema: TestPlanResultSchema,
+...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {}
+});
+dispatchResult = await handle.result;
+} catch (err) {
+const error = err instanceof Error ? err.message : String(err);
+logger$9.warn({
+storyKey,
+error
+}, "Test-plan dispatch threw an unexpected error");
+return makeTestPlanFailureResult(`dispatch_error: ${error}`);
+}
+const tokenUsage = {
+input: dispatchResult.tokenEstimate.input,
+output: dispatchResult.tokenEstimate.output
+};
+if (dispatchResult.status === "timeout") {
+logger$9.warn({
+storyKey,
+durationMs: dispatchResult.durationMs
+}, "Test-plan dispatch timed out");
+return {
+...makeTestPlanFailureResult(`dispatch_timeout after ${dispatchResult.durationMs}ms`),
+tokenUsage
+};
+}
+if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
+logger$9.warn({
+storyKey,
+exitCode: dispatchResult.exitCode,
+status: dispatchResult.status
+}, "Test-plan dispatch failed");
+return {
+...makeTestPlanFailureResult(`dispatch_failed with exit_code=${dispatchResult.exitCode}`),
+tokenUsage
+};
+}
+if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
+const details = dispatchResult.parseError ?? "parsed result was null";
+logger$9.warn({
+storyKey,
+parseError: details
+}, "Test-plan YAML schema validation failed");
+return {
+...makeTestPlanFailureResult(`schema_validation_failed: ${details}`),
+tokenUsage
+};
+}
+const parsed = dispatchResult.parsed;
+try {
+createDecision(deps.db, {
+pipeline_run_id: pipelineRunId,
+phase: "implementation",
+category: TEST_PLAN,
+key: storyKey,
+value: JSON.stringify({
+test_files: parsed.test_files,
+test_categories: parsed.test_categories,
+coverage_notes: parsed.coverage_notes
+}),
+rationale: `Test plan for ${storyKey}: ${parsed.test_files.length} test files, categories: ${parsed.test_categories.join(", ")}`
+});
+logger$9.info({
+storyKey,
+fileCount: parsed.test_files.length,
+categories: parsed.test_categories
+}, "Test plan stored in decision store");
+} catch (err) {
+const error = err instanceof Error ? err.message : String(err);
+logger$9.warn({
+storyKey,
+error
+}, "Failed to store test plan in decision store — proceeding anyway");
+}
+logger$9.info({
+storyKey,
+result: parsed.result
+}, "Test-plan workflow completed");
+return {
+result: parsed.result,
+test_files: parsed.test_files,
+test_categories: parsed.test_categories,
+coverage_notes: parsed.coverage_notes,
+tokenUsage
+};
+}
+/**
+* Build a failure result with sensible defaults.
+*/
+function makeTestPlanFailureResult(error) {
+return {
+result: "failed",
+test_files: [],
+test_categories: [],
+coverage_notes: "",
+error,
+tokenUsage: {
+input: 0,
+output: 0
+}
+};
+}
+
+//#endregion
+//#region src/modules/compiled-workflows/test-expansion.ts
+const logger$8 = createLogger("compiled-workflows:test-expansion");
+/**
+* Hard token ceiling for the assembled test-expansion prompt (20,000 tokens).
+*/
+const TOKEN_CEILING = 2e4;
+function defaultFallbackResult(error, tokenUsage) {
+return {
+expansion_priority: "low",
+coverage_gaps: [],
+suggested_tests: [],
+error,
+tokenUsage
+};
+}
+/**
+* Execute the compiled test-expansion workflow.
+*
+* Steps:
+* 1. Retrieve compiled prompt template via pack.getPrompt('test-expansion')
+* 2. Read story file contents from storyFilePath
+* 3. Query decision store for architecture constraints (solutioning, architecture)
+* 4. Capture scoped git diff for filesModified, with stat-only fallback if oversized
+* 5. Assemble prompt with 20,000-token ceiling
+* 6. Dispatch via dispatcher with taskType='test-expansion'
+* 7. Validate YAML output against TestExpansionResultSchema
+* 8. Return typed TestExpansionResult (never throws — all errors return graceful fallback)
+*
+* @param deps - Injected dependencies (db, pack, contextCompiler, dispatcher)
+* @param params - Story key, story file path, files modified, working directory, pipeline run ID
+* @returns Promise resolving to TestExpansionResult (never rejects)
+*/
+async function runTestExpansion(deps, params) {
+const { storyKey, storyFilePath, pipelineRunId, filesModified, workingDirectory } = params;
+const cwd = workingDirectory ?? process.cwd();
+logger$8.debug({
+storyKey,
+storyFilePath,
+cwd,
+pipelineRunId
+}, "Starting test-expansion workflow");
+let template;
+try {
+template = await deps.pack.getPrompt("test-expansion");
+} catch (err) {
+const error = err instanceof Error ? err.message : String(err);
+logger$8.warn({ error }, "Failed to retrieve test-expansion prompt template");
+return defaultFallbackResult(`Failed to retrieve prompt template: ${error}`, {
+input: 0,
+output: 0
+});
+}
+let storyContent;
+try {
+storyContent = await readFile$1(storyFilePath, "utf-8");
+} catch (err) {
+const error = err instanceof Error ? err.message : String(err);
+logger$8.warn({
+storyFilePath,
+error
+}, "Failed to read story file");
+return defaultFallbackResult(`Failed to read story file: ${error}`, {
+input: 0,
+output: 0
+});
+}
+const archConstraintsContent = getArchConstraints(deps);
+let gitDiffContent = "";
+if (filesModified && filesModified.length > 0) try {
+const templateTokens = countTokens(template);
+const storyTokens = countTokens(storyContent);
+const constraintTokens = countTokens(archConstraintsContent);
+const nonDiffTokens = templateTokens + storyTokens + constraintTokens;
+const scopedDiff = await getGitDiffForFiles(filesModified, cwd);
+const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
+if (scopedTotal <= TOKEN_CEILING) {
+gitDiffContent = scopedDiff;
+logger$8.debug({
+fileCount: filesModified.length,
+tokenCount: scopedTotal
+}, "Using scoped file diff");
+} else {
+logger$8.warn({
+estimatedTotal: scopedTotal,
+ceiling: TOKEN_CEILING,
+fileCount: filesModified.length
+}, "Scoped diff exceeds token ceiling — falling back to stat-only summary");
+gitDiffContent = await getGitDiffStatSummary(cwd);
+}
+} catch (err) {
+logger$8.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
+}
+const sections = [
+{
+name: "story_content",
+content: storyContent,
+priority: "required"
+},
+{
+name: "git_diff",
+content: gitDiffContent,
+priority: "important"
+},
+{
+name: "arch_constraints",
+content: archConstraintsContent,
+priority: "optional"
+}
+];
+const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
+if (assembleResult.truncated) logger$8.warn({
+storyKey,
+tokenCount: assembleResult.tokenCount
+}, "Test-expansion prompt truncated to fit token ceiling");
+logger$8.debug({
+storyKey,
+tokenCount: assembleResult.tokenCount,
+truncated: assembleResult.truncated
+}, "Prompt assembled for test-expansion");
+const { prompt } = assembleResult;
+const handle = deps.dispatcher.dispatch({
+prompt,
+agent: "claude-code",
+taskType: "test-expansion",
+outputSchema: TestExpansionResultSchema,
+workingDirectory: deps.projectRoot
+});
+let dispatchResult;
+try {
+dispatchResult = await handle.result;
+} catch (err) {
+const error = err instanceof Error ? err.message : String(err);
+logger$8.warn({
+storyKey,
+error
+}, "Test-expansion dispatch threw unexpected error");
+return defaultFallbackResult(`Dispatch error: ${error}`, {
+input: Math.ceil(prompt.length / 4),
+output: 0
+});
+}
+const tokenUsage = {
+input: dispatchResult.tokenEstimate.input,
+output: dispatchResult.tokenEstimate.output
+};
+if (dispatchResult.status === "failed") {
+const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""}`.trim();
+logger$8.warn({
+storyKey,
+exitCode: dispatchResult.exitCode
+}, "Test-expansion dispatch failed");
+return defaultFallbackResult(errorMsg, tokenUsage);
+}
+if (dispatchResult.status === "timeout") {
+logger$8.warn({ storyKey }, "Test-expansion dispatch timed out");
+return defaultFallbackResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage);
+}
+if (dispatchResult.parsed === null) {
+const details = dispatchResult.parseError ?? "No YAML block found in output";
+logger$8.warn({
+storyKey,
+details
+}, "Test-expansion output has no parseable YAML");
+return defaultFallbackResult(`schema_validation_failed: ${details}`, tokenUsage);
+}
+const parseResult = TestExpansionResultSchema.safeParse(dispatchResult.parsed);
+if (!parseResult.success) {
+const details = parseResult.error.message;
+logger$8.warn({
+storyKey,
+details
+}, "Test-expansion output failed schema validation");
+return defaultFallbackResult(`schema_validation_failed: ${details}`, tokenUsage);
+}
+const parsed = parseResult.data;
+logger$8.info({
+storyKey,
+expansion_priority: parsed.expansion_priority,
+coverage_gaps: parsed.coverage_gaps.length,
+suggested_tests: parsed.suggested_tests.length
+}, "Test-expansion workflow completed successfully");
+return {
+expansion_priority: parsed.expansion_priority,
+coverage_gaps: parsed.coverage_gaps,
+suggested_tests: parsed.suggested_tests,
+notes: parsed.notes,
+tokenUsage
+};
+}
+/**
+* Retrieve architecture constraints from the decision store.
+* Looks for decisions with phase='solutioning', category='architecture'.
+*/
 function getArchConstraints(deps) {
 try {
 const decisions = getDecisionsByPhase(deps.db, "solutioning");
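
The hunk above adds two complete compiled workflows, runTestPlan and runTestExpansion, both dependency-injected and designed never to reject. A hedged usage sketch (the deps values, run ID, and story file path are placeholders inferred from the orchestrator calls later in this file, not an official example):

// Sketch only: invoking runTestPlan the way the orchestrator below does.
const deps = { db, pack, contextCompiler, dispatcher, projectRoot }; // assumed in scope
const plan = await runTestPlan(deps, {
  storyKey: "10-1", // story key format taken from the CLI --stories help text
  storyFilePath: "_bmad-output/stories/10-1-example.md", // hypothetical path
  pipelineRunId // assumed in scope
});
if (plan.result === "success") {
  // The plan is also persisted as a TEST_PLAN decision keyed by storyKey,
  // so later phases can read it back from the decision store.
  console.log(plan.test_files, plan.test_categories, plan.coverage_notes);
}
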
@@ -5271,8 +5890,8 @@ function detectConflictGroups(storyKeys, config) {
 const logger$7 = createLogger("implementation-orchestrator:seed");
 /** Max chars for the architecture summary seeded into decisions */
 const MAX_ARCH_CHARS = 6e3;
-/** Max chars per epic shard */
-const MAX_EPIC_SHARD_CHARS =
+/** Max chars per epic shard (fallback when per-story extraction returns null) */
+const MAX_EPIC_SHARD_CHARS = 12e3;
 /** Max chars for test patterns */
 const MAX_TEST_PATTERNS_CHARS = 2e3;
 /**
@@ -5356,16 +5975,35 @@ function seedArchitecture(db, projectRoot) {
 }
 /**
 * Seed epic shards from epics.md.
-* Parses each
-*
+* Parses each epic section and creates an implementation/epic-shard decision.
+*
+* Uses content-hash comparison (AC1, AC2, AC6):
+* - Computes SHA-256 of the epics file and compares to the stored `epic-shard-hash` decision.
+* - If hashes match: skip re-seeding (unchanged file).
+* - If hash differs or no hash stored: delete existing epic-shard decisions and re-seed.
+*
+* Returns number of decisions created, or -1 if skipped (hash unchanged).
 */
 function seedEpicShards(db, projectRoot) {
-const existing = getDecisionsByPhase(db, "implementation");
-if (existing.some((d) => d.category === "epic-shard")) return -1;
 const epicsPath = findArtifact(projectRoot, ["_bmad-output/planning-artifacts/epics.md", "_bmad-output/epics.md"]);
 if (epicsPath === void 0) return 0;
 const content = readFileSync$1(epicsPath, "utf-8");
 if (content.length === 0) return 0;
+const currentHash = createHash("sha256").update(content).digest("hex");
+const implementationDecisions = getDecisionsByPhase(db, "implementation");
+const storedHashDecision = implementationDecisions.find((d) => d.category === "epic-shard-hash" && d.key === "epics-file");
+const storedHash = storedHashDecision?.value;
+if (storedHash === currentHash) {
+logger$7.debug({ hash: currentHash }, "Epic shards up-to-date (hash unchanged) — skipping re-seed");
+return -1;
+}
+if (implementationDecisions.some((d) => d.category === "epic-shard")) {
+logger$7.debug({
+storedHash,
+currentHash
+}, "Epics file changed — deleting stale epic-shard decisions");
+db.prepare("DELETE FROM decisions WHERE phase = 'implementation' AND category = 'epic-shard'").run();
+}
 const shards = parseEpicShards(content);
 let count = 0;
 for (const shard of shards) {
@@ -5379,7 +6017,19 @@ function seedEpicShards(db, projectRoot) {
 });
 count++;
 }
-
+db.prepare("DELETE FROM decisions WHERE phase = 'implementation' AND category = 'epic-shard-hash' AND key = 'epics-file'").run();
+createDecision(db, {
+pipeline_run_id: null,
+phase: "implementation",
+category: "epic-shard-hash",
+key: "epics-file",
+value: currentHash,
+rationale: "SHA-256 hash of epics file content for change detection"
+});
+logger$7.debug({
+count,
+hash: currentHash
+}, "Seeded epic shard decisions");
 return count;
 }
 /**
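
seedEpicShards above now treats a stored content hash as the source of truth for staleness, rather than the mere presence of epic-shard decisions. A minimal sketch of the change-detection primitive, using the same node:crypto call as the bundled code (the helper name is illustrative):

import { createHash } from "node:crypto";

// Returns whether the epics file content differs from what was last seeded,
// plus the current hash so the caller can store it after re-seeding.
function epicsFileChanged(content, storedHash) {
  const currentHash = createHash("sha256").update(content).digest("hex");
  return { changed: storedHash !== currentHash, currentHash };
}
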
@@ -5446,11 +6096,11 @@ function extractSection(content, headingPattern) {
 }
 /**
 * Parse epics.md into individual epic shards.
-* Matches "## Epic N"
+* Matches "## Epic N", "### Epic N", "#### Epic N", or depth-2 to depth-4 numeric headings.
 */
 function parseEpicShards(content) {
 const shards = [];
-const epicPattern =
+const epicPattern = /^#{2,4}\s+(?:Epic\s+)?(\d+)[.:\s]/gm;
 let match;
 const matches = [];
 while ((match = epicPattern.exec(content)) !== null) {
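
The epic heading matcher was broadened from plain "## Epic N" headings. A quick demonstration of what the new pattern accepts (the sample headings are hypothetical):

// The capture group is the epic number; [.:\s] tolerates ".", ":" or whitespace after it.
const epicPattern = /^#{2,4}\s+(?:Epic\s+)?(\d+)[.:\s]/gm;
for (const heading of ["## Epic 3: Checkout", "### Epic 12. Auth", "#### 7: Reporting"]) {
  epicPattern.lastIndex = 0; // reset because the regex is stateful under the g flag
  console.log(heading, "->", epicPattern.exec(heading)?.[1]); // 3, 12, 7
}
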
@@ -5592,7 +6242,7 @@ function createPauseGate() {
 */
 function createImplementationOrchestrator(deps) {
 const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot } = deps;
-const logger$
+const logger$20 = createLogger("implementation-orchestrator");
 let _state = "IDLE";
 let _startedAt;
 let _completedAt;
@@ -5629,7 +6279,7 @@ function createImplementationOrchestrator(deps) {
 const nowMs = Date.now();
 for (const [phase, startMs] of starts) {
 const endMs = ends?.get(phase);
-if (endMs === void 0) logger$
+if (endMs === void 0) logger$20.warn({
 storyKey,
 phase
 }, "Phase has no end time — story may have errored mid-phase. Duration capped to now() and may be inflated.");
@@ -5676,13 +6326,13 @@ function createImplementationOrchestrator(deps) {
 rationale: `Story ${storyKey} completed with result=${result} in ${wallClockSeconds}s. Tokens: ${tokenAgg.input}+${tokenAgg.output}. Review cycles: ${reviewCycles}.`
 });
 } catch (decisionErr) {
-logger$
+logger$20.warn({
 err: decisionErr,
 storyKey
 }, "Failed to write story-metrics decision (best-effort)");
 }
 } catch (err) {
-logger$
+logger$20.warn({
 err,
 storyKey
 }, "Failed to write story metrics (best-effort)");
@@ -5711,7 +6361,7 @@ function createImplementationOrchestrator(deps) {
 rationale: `Story ${storyKey} ${outcome} after ${reviewCycles} review cycle(s).`
 });
 } catch (err) {
-logger$
+logger$20.warn({
 err,
 storyKey
 }, "Failed to write story-outcome decision (best-effort)");
@@ -5737,7 +6387,7 @@ function createImplementationOrchestrator(deps) {
 rationale: `Escalation diagnosis for ${payload.storyKey}: ${diagnosis.recommendedAction} — ${diagnosis.rationale}`
 });
 } catch (err) {
-logger$
+logger$20.warn({
 err,
 storyKey: payload.storyKey
 }, "Failed to persist escalation diagnosis (best-effort)");
@@ -5787,7 +6437,7 @@ function createImplementationOrchestrator(deps) {
 token_usage_json: serialized
 });
 } catch (err) {
-logger$
+logger$20.warn("Failed to persist orchestrator state", { err });
 }
 }
 function recordProgress() {
@@ -5817,7 +6467,7 @@ function createImplementationOrchestrator(deps) {
 if (_stalledStories.has(key)) continue;
 _stalledStories.add(key);
 _storiesWithStall.add(key);
-logger$
+logger$20.warn({
 storyKey: key,
 phase: s.phase,
 elapsedMs: elapsed
@@ -5854,7 +6504,7 @@ function createImplementationOrchestrator(deps) {
 * exhausted retries the story is ESCALATED.
 */
 async function processStory(storyKey) {
-logger$
+logger$20.info("Processing story", { storyKey });
 await waitIfPaused();
 if (_state !== "RUNNING") return;
 startPhase(storyKey, "create-story");
@@ -5868,22 +6518,31 @@ function createImplementationOrchestrator(deps) {
 const files = readdirSync$1(artifactsDir);
 const match = files.find((f) => f.startsWith(`${storyKey}-`) && f.endsWith(".md"));
 if (match) {
-
-
-
-storyFilePath
-}, "Found existing story file — skipping create-story");
-endPhase(storyKey, "create-story");
-eventBus.emit("orchestrator:story-phase-complete", {
+const candidatePath = join$1(artifactsDir, match);
+const validation = await isValidStoryFile(candidatePath);
+if (!validation.valid) logger$20.warn({
 storyKey,
-
-
-
-
-
-
-
-
+storyFilePath: candidatePath,
+reason: validation.reason
+}, `Existing story file for ${storyKey} is invalid (${validation.reason}) — re-creating`);
+else {
+storyFilePath = candidatePath;
+logger$20.info({
+storyKey,
+storyFilePath
+}, "Found existing story file — skipping create-story");
+endPhase(storyKey, "create-story");
+eventBus.emit("orchestrator:story-phase-complete", {
+storyKey,
+phase: "IN_STORY_CREATION",
+result: {
+result: "success",
+story_file: storyFilePath,
+story_key: storyKey
+}
+});
+persistState();
+}
 }
 } catch {}
 if (storyFilePath === void 0) try {
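
Reusing an existing story file is now gated on isValidStoryFile, which is defined elsewhere in this bundle; only its return shape ({ valid, reason? }) is visible here. A plausible sketch of a validator satisfying that contract (the specific checks are assumptions, not the package's actual rules):

import { readFile } from "node:fs/promises";

// Assumed contract only: resolves to { valid: true } or { valid: false, reason }.
async function isValidStoryFileSketch(path) {
  try {
    const content = await readFile(path, "utf-8");
    if (content.trim().length === 0) return { valid: false, reason: "empty_file" };
    return { valid: true };
  } catch (err) {
    return { valid: false, reason: err instanceof Error ? err.message : String(err) };
  }
}
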
@@ -5961,6 +6620,39 @@ function createImplementationOrchestrator(deps) {
 }
 await waitIfPaused();
 if (_state !== "RUNNING") return;
+startPhase(storyKey, "test-plan");
+updateStory(storyKey, { phase: "IN_TEST_PLANNING" });
+persistState();
+let testPlanPhaseResult = "failed";
+try {
+const testPlanResult = await runTestPlan({
+db,
+pack,
+contextCompiler,
+dispatcher,
+projectRoot
+}, {
+storyKey,
+storyFilePath: storyFilePath ?? "",
+pipelineRunId: config.pipelineRunId
+});
+testPlanPhaseResult = testPlanResult.result;
+if (testPlanResult.result === "success") logger$20.info({ storyKey }, "Test plan generated successfully");
+else logger$20.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
+} catch (err) {
+logger$20.warn({
+storyKey,
+err
+}, "Test planning failed — proceeding to dev-story without test plan");
+}
+endPhase(storyKey, "test-plan");
+eventBus.emit("orchestrator:story-phase-complete", {
+storyKey,
+phase: "IN_TEST_PLANNING",
+result: { result: testPlanPhaseResult }
+});
+await waitIfPaused();
+if (_state !== "RUNNING") return;
 startPhase(storyKey, "dev-story");
 updateStory(storyKey, { phase: "IN_DEV" });
 persistState();
@@ -5971,7 +6663,7 @@ function createImplementationOrchestrator(deps) {
 try {
 storyContentForAnalysis = await readFile$1(storyFilePath ?? "", "utf-8");
 } catch (err) {
-logger$
+logger$20.error({
 storyKey,
 storyFilePath,
 error: err instanceof Error ? err.message : String(err)
@@ -5979,7 +6671,7 @@ function createImplementationOrchestrator(deps) {
 }
 const analysis = analyzeStoryComplexity(storyContentForAnalysis);
 const batches = planTaskBatches(analysis);
-logger$
+logger$20.info({
 storyKey,
 estimatedScope: analysis.estimatedScope,
 batchCount: batches.length,
@@ -5997,7 +6689,7 @@ function createImplementationOrchestrator(deps) {
 if (_state !== "RUNNING") break;
 const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
 const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
-logger$
+logger$20.info({
 storyKey,
 batchIndex: batch.batchIndex,
 taskCount: batch.taskIds.length
@@ -6021,7 +6713,7 @@ function createImplementationOrchestrator(deps) {
 });
 } catch (batchErr) {
 const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
-logger$
+logger$20.warn({
 storyKey,
 batchIndex: batch.batchIndex,
 error: errMsg
@@ -6041,7 +6733,7 @@ function createImplementationOrchestrator(deps) {
 filesModified: batchFilesModified,
 result: batchResult.result === "success" ? "success" : "failed"
 };
-logger$
+logger$20.info(batchMetrics, "Batch dev-story metrics");
 for (const f of batchFilesModified) allFilesModified.add(f);
 if (batchFilesModified.length > 0) batchFileGroups.push({
 batchIndex: batch.batchIndex,
@@ -6063,13 +6755,13 @@ function createImplementationOrchestrator(deps) {
 })
 });
 } catch (tokenErr) {
-logger$
+logger$20.warn({
 storyKey,
 batchIndex: batch.batchIndex,
 err: tokenErr
 }, "Failed to record batch token usage");
 }
-if (batchResult.result === "failed") logger$
+if (batchResult.result === "failed") logger$20.warn({
 storyKey,
 batchIndex: batch.batchIndex,
 error: batchResult.error
@@ -6102,7 +6794,7 @@ function createImplementationOrchestrator(deps) {
 result: devResult
 });
 persistState();
-if (devResult.result === "failed") logger$
+if (devResult.result === "failed") logger$20.warn("Dev-story reported failure, proceeding to code review", {
 storyKey,
 error: devResult.error,
 filesModified: devFilesModified.length
@@ -6159,7 +6851,7 @@ function createImplementationOrchestrator(deps) {
 "NEEDS_MAJOR_REWORK": 2
 };
 for (const group of batchFileGroups) {
-logger$
+logger$20.info({
 storyKey,
 batchIndex: group.batchIndex,
 fileCount: group.files.length
@@ -6196,7 +6888,7 @@ function createImplementationOrchestrator(deps) {
 rawOutput: lastRawOutput,
 tokenUsage: aggregateTokens
 };
-logger$
+logger$20.info({
 storyKey,
 batchCount: batchFileGroups.length,
 verdict: worstVerdict,
@@ -6219,10 +6911,10 @@ function createImplementationOrchestrator(deps) {
 ...previousIssueList.length > 0 ? { previousIssues: previousIssueList } : {}
 });
 }
-const isPhantomReview = reviewResult.verdict !== "SHIP_IT" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
+const isPhantomReview = reviewResult.dispatchFailed === true || reviewResult.verdict !== "SHIP_IT" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
 if (isPhantomReview && !timeoutRetried) {
 timeoutRetried = true;
-logger$
+logger$20.warn({
 storyKey,
 reviewCycles,
 error: reviewResult.error
@@ -6232,7 +6924,7 @@ function createImplementationOrchestrator(deps) {
 verdict = reviewResult.verdict;
 issueList = reviewResult.issue_list ?? [];
 if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
-logger$
+logger$20.info({
 storyKey,
 originalVerdict: verdict,
 issuesBefore: previousIssueList.length,
@@ -6268,7 +6960,7 @@ function createImplementationOrchestrator(deps) {
 if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
 parts.push(`${fileCount} files`);
 parts.push(`${totalTokensK} tokens`);
-logger$
+logger$20.info({
 storyKey,
 verdict,
 agentVerdict: reviewResult.agentVerdict
@@ -6305,6 +6997,38 @@ function createImplementationOrchestrator(deps) {
 reviewCycles
 });
 persistState();
+try {
+const expansionResult = await runTestExpansion({
+db,
+pack,
+contextCompiler,
+dispatcher,
+projectRoot
+}, {
+storyKey,
+storyFilePath: storyFilePath ?? "",
+pipelineRunId: config.pipelineRunId,
+filesModified: devFilesModified,
+workingDirectory: projectRoot
+});
+logger$20.debug({
+storyKey,
+expansion_priority: expansionResult.expansion_priority,
+coverage_gaps: expansionResult.coverage_gaps.length
+}, "Test expansion analysis complete");
+createDecision(db, {
+pipeline_run_id: config.pipelineRunId ?? "unknown",
+phase: "implementation",
+category: TEST_EXPANSION_FINDING,
+key: `${storyKey}:${config.pipelineRunId ?? "unknown"}`,
+value: JSON.stringify(expansionResult)
+});
+} catch (expansionErr) {
+logger$20.warn({
+storyKey,
+error: expansionErr instanceof Error ? expansionErr.message : String(expansionErr)
+}, "Test expansion failed — story verdict unchanged");
+}
 keepReviewing = false;
 return;
 }
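
Each approved story now also leaves a TEST_EXPANSION_FINDING decision in the store, keyed by story and run. A hedged sketch of reading those findings back with getDecisionsByCategory, which is imported at the top of this file (the argument order and row shape are assumptions inferred from the createDecision call above):

// Sketch: the value column holds the JSON-serialized TestExpansionResult.
const rows = getDecisionsByCategory(db, TEST_EXPANSION_FINDING); // assumed (db, category) signature
for (const row of rows) {
  const finding = JSON.parse(row.value);
  console.log(row.key, finding.expansion_priority, finding.coverage_gaps.length);
}
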
@@ -6327,7 +7051,7 @@ function createImplementationOrchestrator(deps) {
 persistState();
 return;
 }
-logger$
+logger$20.info({
 storyKey,
 reviewCycles: finalReviewCycles,
 issueCount: issueList.length
@@ -6377,7 +7101,7 @@ function createImplementationOrchestrator(deps) {
 fixPrompt = assembled.prompt;
 } catch {
 fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
-logger$
+logger$20.warn("Failed to assemble auto-approve fix prompt, using fallback", { storyKey });
 }
 const handle = dispatcher.dispatch({
 prompt: fixPrompt,
@@ -6394,9 +7118,9 @@ function createImplementationOrchestrator(deps) {
 output: fixResult.tokenEstimate.output
 } : void 0 }
 });
-if (fixResult.status === "timeout") logger$
+if (fixResult.status === "timeout") logger$20.warn("Auto-approve fix timed out — approving anyway (issues were minor)", { storyKey });
 } catch (err) {
-logger$
+logger$20.warn("Auto-approve fix dispatch failed — approving anyway (issues were minor)", {
 storyKey,
 err
 });
@@ -6469,7 +7193,7 @@ function createImplementationOrchestrator(deps) {
 fixPrompt = assembled.prompt;
 } catch {
 fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
-logger$
+logger$20.warn("Failed to assemble fix prompt, using fallback", {
 storyKey,
 taskType
 });
@@ -6492,7 +7216,7 @@ function createImplementationOrchestrator(deps) {
 } : void 0 }
 });
 if (fixResult.status === "timeout") {
-logger$
+logger$20.warn("Fix dispatch timed out — escalating story", {
 storyKey,
 taskType
 });
@@ -6512,13 +7236,13 @@ function createImplementationOrchestrator(deps) {
 persistState();
 return;
 }
-if (fixResult.status === "failed") logger$
+if (fixResult.status === "failed") logger$20.warn("Fix dispatch failed", {
 storyKey,
 taskType,
 exitCode: fixResult.exitCode
 });
 } catch (err) {
-logger$
+logger$20.warn("Fix dispatch failed, continuing to next review", {
 storyKey,
 taskType,
 err
@@ -6572,11 +7296,11 @@ function createImplementationOrchestrator(deps) {
 }
 async function run(storyKeys) {
 if (_state === "RUNNING" || _state === "PAUSED") {
-logger$
+logger$20.warn("run() called while orchestrator is already running or paused — ignoring", { state: _state });
 return getStatus();
 }
 if (_state === "COMPLETE") {
-logger$
+logger$20.warn("run() called on a COMPLETE orchestrator — ignoring", { state: _state });
 return getStatus();
 }
 _state = "RUNNING";
@@ -6594,13 +7318,13 @@ function createImplementationOrchestrator(deps) {
 if (config.enableHeartbeat) startHeartbeat();
 if (projectRoot !== void 0) {
 const seedResult = seedMethodologyContext(db, projectRoot);
-if (seedResult.decisionsCreated > 0) logger$
+if (seedResult.decisionsCreated > 0) logger$20.info({
 decisionsCreated: seedResult.decisionsCreated,
 skippedCategories: seedResult.skippedCategories
 }, "Methodology context seeded from planning artifacts");
 }
 const groups = detectConflictGroups(storyKeys);
-logger$
+logger$20.info("Orchestrator starting", {
 storyCount: storyKeys.length,
 groupCount: groups.length,
 maxConcurrency: config.maxConcurrency
@@ -6612,7 +7336,7 @@ function createImplementationOrchestrator(deps) {
 _state = "FAILED";
 _completedAt = new Date().toISOString();
 persistState();
-logger$
+logger$20.error("Orchestrator failed with unhandled error", { err });
 return getStatus();
 }
 stopHeartbeat();
@@ -6639,7 +7363,7 @@ function createImplementationOrchestrator(deps) {
 _pauseGate = createPauseGate();
 _state = "PAUSED";
 eventBus.emit("orchestrator:paused", {});
-logger$
+logger$20.info("Orchestrator paused");
 }
 function resume() {
 if (_state !== "PAUSED") return;
@@ -6650,7 +7374,7 @@ function createImplementationOrchestrator(deps) {
 }
 _state = "RUNNING";
 eventBus.emit("orchestrator:resumed", {});
-logger$
+logger$20.info("Orchestrator resumed");
 }
 return {
 run,
@@ -8481,6 +9205,10 @@ const AMENDMENT_CONTEXT_HEADER$2 = "\n\n--- AMENDMENT CONTEXT (Parent Run Decisi
 const AMENDMENT_CONTEXT_FOOTER$2 = "\n--- END AMENDMENT CONTEXT ---\n";
 /** Marker appended when amendment context is truncated to fit token budget */
 const TRUNCATED_MARKER$2 = "\n[TRUNCATED]";
+/** Prior run findings framing block prefix */
+const PRIOR_FINDINGS_HEADER = "\n\n--- PRIOR RUN FINDINGS ---\n";
+/** Prior run findings framing block suffix */
+const PRIOR_FINDINGS_FOOTER = "\n--- END PRIOR RUN FINDINGS ---\n";
 /** Concept placeholder in the prompt template */
 const CONCEPT_PLACEHOLDER = "{{concept}}";
 /** Product brief fields to persist as decisions */
@@ -8521,6 +9249,9 @@ function buildAnalysisSteps() {
 context: [{
 placeholder: "concept",
 source: "param:concept"
+}, {
+placeholder: "prior_findings",
+source: "param:prior_findings"
 }],
 persist: [{
 field: "problem_statement",
@@ -8584,7 +9315,14 @@ async function runAnalysisMultiStep(deps, params) {
 };
 try {
 const steps = buildAnalysisSteps();
-
+let priorFindings = "";
+try {
+priorFindings = getProjectFindings(deps.db);
+} catch {}
+const result = await runSteps(steps, deps, params.runId, "analysis", {
+concept: params.concept,
+prior_findings: priorFindings
+});
 if (!result.success) return {
 result: "failed",
 error: result.error ?? "multi_step_failed",
@@ -8665,6 +9403,18 @@ async function runAnalysisPhase(deps, params) {
 let effectiveConcept = concept;
 if (concept.length > MAX_CONCEPT_CHARS) effectiveConcept = concept.slice(0, MAX_CONCEPT_CHARS) + "...";
 let prompt = template.replace(CONCEPT_PLACEHOLDER, effectiveConcept);
+try {
+const priorFindings = getProjectFindings(db);
+if (priorFindings !== "") {
+const maxPromptChars = MAX_PROMPT_TOKENS$1 * 4;
+const framingLen = PRIOR_FINDINGS_HEADER.length + PRIOR_FINDINGS_FOOTER.length;
+const availableForFindings = maxPromptChars - prompt.length - framingLen - TRUNCATED_MARKER$2.length;
+if (availableForFindings > 0) {
+const findingsToInject = priorFindings.length > availableForFindings ? priorFindings.slice(0, availableForFindings) + TRUNCATED_MARKER$2 : priorFindings;
+prompt += PRIOR_FINDINGS_HEADER + findingsToInject + PRIOR_FINDINGS_FOOTER;
+}
+}
+} catch {}
 if (amendmentContext !== void 0 && amendmentContext !== "") {
 const maxPromptChars = MAX_PROMPT_TOKENS$1 * 4;
 const basePromptLen = prompt.length;
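
The findings-injection budget above reuses the same arithmetic as the amendment-context block that follows it: a character budget of MAX_PROMPT_TOKENS$1 * 4, the common rough heuristic of about 4 characters per token. A worked sketch with an assumed token limit:

// Assumes MAX_PROMPT_TOKENS$1 = 8e3 for illustration; the real constant is defined
// elsewhere in this bundle and is not shown in this diff.
function availableForFindings(promptLength, maxPromptTokens = 8e3) {
  const maxPromptChars = maxPromptTokens * 4; // ~4 chars per token
  const framingLen = "\n\n--- PRIOR RUN FINDINGS ---\n".length + "\n--- END PRIOR RUN FINDINGS ---\n".length;
  // Findings only get whatever is left after the base prompt, the framing,
  // and a reserved "\n[TRUNCATED]" marker; oversized findings are sliced.
  return maxPromptChars - promptLength - framingLen - "\n[TRUNCATED]".length;
}
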
@@ -10919,7 +11669,7 @@ function mapInternalPhaseToEventPhase(internalPhase) {
 }
 }
 async function runRunAction(options) {
-const { pack: packName, from: startPhase, stopAfter, concept: conceptArg, conceptFile, stories: storiesArg, concurrency, outputFormat, projectRoot, events: eventsFlag, verbose: verboseFlag, tui: tuiFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag } = options;
+const { pack: packName, from: startPhase, stopAfter, concept: conceptArg, conceptFile, stories: storiesArg, concurrency, outputFormat, projectRoot, events: eventsFlag, verbose: verboseFlag, tui: tuiFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, registry: injectedRegistry } = options;
 if (startPhase !== void 0 && !VALID_PHASES.includes(startPhase)) {
 const errorMsg = `Invalid phase '${startPhase}'. Valid phases: ${VALID_PHASES.join(", ")}`;
 if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
@@ -10977,7 +11727,8 @@ async function runRunAction(options) {
 ...eventsFlag === true ? { events: true } : {},
 ...skipUx === true ? { skipUx: true } : {},
 ...researchFlag === true ? { research: true } : {},
-...skipResearchFlag === true ? { skipResearch: true } : {}
+...skipResearchFlag === true ? { skipResearch: true } : {},
+...injectedRegistry !== void 0 ? { registry: injectedRegistry } : {}
 });
 let storyKeys = [];
 if (storiesArg !== void 0 && storiesArg !== "") {
@@ -11066,11 +11817,10 @@ async function runRunAction(options) {
 });
 const eventBus = createEventBus();
 const contextCompiler = createContextCompiler({ db });
-
-await adapterRegistry.discoverAndRegister();
+if (!injectedRegistry) throw new Error("AdapterRegistry is required — must be initialized at CLI startup");
 const dispatcher = createDispatcher({
 eventBus,
-adapterRegistry
+adapterRegistry: injectedRegistry
 });
 eventBus.on("orchestrator:story-phase-complete", (payload) => {
 try {
@@ -11430,7 +12180,7 @@ async function runRunAction(options) {
 }
 }
 async function runFullPipeline(options) {
-const { packName, packPath, dbDir, dbPath, startPhase, stopAfter, concept, concurrency, outputFormat, projectRoot, events: eventsFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag } = options;
+const { packName, packPath, dbDir, dbPath, startPhase, stopAfter, concept, concurrency, outputFormat, projectRoot, events: eventsFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, registry: injectedRegistry } = options;
 if (!existsSync(dbDir)) mkdirSync(dbDir, { recursive: true });
 const dbWrapper = new DatabaseWrapper(dbPath);
 try {
@@ -11458,11 +12208,10 @@ async function runFullPipeline(options) {
 }
 const eventBus = createEventBus();
 const contextCompiler = createContextCompiler({ db });
-
-await adapterRegistry.discoverAndRegister();
+if (!injectedRegistry) throw new Error("AdapterRegistry is required — must be initialized at CLI startup");
 const dispatcher = createDispatcher({
 eventBus,
-adapterRegistry
+adapterRegistry: injectedRegistry
 });
 const phaseDeps = {
 db,
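
Both pipeline entry points now require an adapter registry injected from CLI startup instead of each calling adapterRegistry.discoverAndRegister() themselves. A hedged wiring sketch (discoverAndRegister appears in the removed code; the registry factory name is an assumption):

// Sketch: discover adapters once at startup, then thread the registry through.
const registry = createAdapterRegistry(); // hypothetical factory name
await registry.discoverAndRegister();
registerRunCommand(program, version, process.cwd(), registry);
// runRunAction / runFullPipeline now throw if options.registry is missing.
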
@@ -11734,7 +12483,7 @@ async function runFullPipeline(options) {
 } catch {}
 }
 }
-function registerRunCommand(program, _version = "0.0.0", projectRoot = process.cwd()) {
+function registerRunCommand(program, _version = "0.0.0", projectRoot = process.cwd(), registry) {
 program.command("run").description("Run the autonomous pipeline (use --from to start from a specific phase)").option("--pack <name>", "Methodology pack name", "bmad").option("--from <phase>", "Start from this phase: analysis, planning, solutioning, implementation").option("--stop-after <phase>", "Stop pipeline after this phase completes").option("--concept <text>", "Inline concept text (required when --from analysis)").option("--concept-file <path>", "Path to a file containing the concept text").option("--stories <keys>", "Comma-separated story keys (e.g., 10-1,10-2)").option("--concurrency <n>", "Maximum parallel conflict groups", (v) => parseInt(v, 10), 3).option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").option("--events", "Emit structured NDJSON events on stdout for programmatic consumption").option("--verbose", "Show detailed pino log output").option("--help-agent", "Print a machine-optimized prompt fragment for AI agents and exit").option("--tui", "Show TUI dashboard").option("--skip-ux", "Skip the UX design phase even if enabled in the pack manifest").option("--research", "Enable the research phase even if not set in the pack manifest").option("--skip-research", "Skip the research phase even if enabled in the pack manifest").action(async (opts) => {
 if (opts.helpAgent) {
 process.exitCode = await runHelpAgent();
@@ -11767,7 +12516,8 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
 tui: opts.tui,
 skipUx: opts.skipUx,
 research: opts.research,
-skipResearch: opts.skipResearch
+skipResearch: opts.skipResearch,
+registry
 });
 process.exitCode = exitCode;
 });
@@ -11775,4 +12525,4 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
 
 //#endregion
 export { DatabaseWrapper, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, buildPipelineStatusOutput, createContextCompiler, createDispatcher, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, runAnalysisPhase, runMigrations, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
-//# sourceMappingURL=run-
+//# sourceMappingURL=run-BaAws8IQ.js.map