substrate-ai 0.2.19 → 0.2.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/index.js +343 -123
- package/dist/{experimenter-CHRVkV3d.js → experimenter-bc40oi8p.js} +2 -2
- package/dist/index.d.ts +13 -0
- package/dist/{operational-CobuCGbM.js → operational-CnMlvWqc.js} +86 -2
- package/dist/{run-mFmS2pw6.js → run-BLIgARum.js} +1046 -170
- package/dist/run-gmS6DsGT.js +7 -0
- package/package.json +1 -1
- package/packs/bmad/prompts/analysis-step-1-vision.md +4 -1
- package/packs/bmad/prompts/code-review.md +5 -1
- package/packs/bmad/prompts/dev-story.md +3 -0
- package/packs/bmad/prompts/test-expansion.md +65 -0
- package/packs/bmad/prompts/test-plan.md +41 -0
- package/dist/run-CUGB4FQx.js +0 -7
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import { createLogger } from "./logger-D2fS2ccL.js";
|
|
2
2
|
import { AdapterRegistry, createEventBus, createTuiApp, isTuiCapable, printNonTtyWarning } from "./event-bus-BMxhfxfT.js";
|
|
3
|
-
import { addTokenUsage, createDecision, createPipelineRun, createRequirement, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getTokenUsageSummary, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision } from "./decisions-Dq4cAA2L.js";
|
|
4
|
-
import { STORY_METRICS, aggregateTokenUsageForRun, aggregateTokenUsageForStory, getStoryMetricsForRun, writeRunMetrics, writeStoryMetrics } from "./operational-
|
|
3
|
+
import { addTokenUsage, createDecision, createPipelineRun, createRequirement, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getTokenUsageSummary, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision } from "./decisions-Dq4cAA2L.js";
|
|
4
|
+
import { ESCALATION_DIAGNOSIS, OPERATIONAL_FINDING, STORY_METRICS, STORY_OUTCOME, TEST_EXPANSION_FINDING, TEST_PLAN, aggregateTokenUsageForRun, aggregateTokenUsageForStory, getStoryMetricsForRun, writeRunMetrics, writeStoryMetrics } from "./operational-CnMlvWqc.js";
|
|
5
5
|
import { createRequire } from "module";
|
|
6
6
|
import { dirname, join } from "path";
|
|
7
7
|
import { access, readFile, readdir, stat } from "fs/promises";
|
|
@@ -539,7 +539,7 @@ const migration010RunMetrics = {
|
|
|
539
539
|
|
|
540
540
|
//#endregion
|
|
541
541
|
//#region src/persistence/migrations/index.ts
|
|
542
|
-
const logger$
|
|
542
|
+
const logger$19 = createLogger("persistence:migrations");
|
|
543
543
|
const MIGRATIONS = [
|
|
544
544
|
initialSchemaMigration,
|
|
545
545
|
costTrackerSchemaMigration,
|
|
@@ -557,7 +557,7 @@ const MIGRATIONS = [
|
|
|
557
557
|
* Safe to call multiple times — already-applied migrations are skipped.
|
|
558
558
|
*/
|
|
559
559
|
function runMigrations(db) {
|
|
560
|
-
logger$
|
|
560
|
+
logger$19.info("Starting migration runner");
|
|
561
561
|
db.exec(`
|
|
562
562
|
CREATE TABLE IF NOT EXISTS schema_migrations (
|
|
563
563
|
version INTEGER PRIMARY KEY,
|
|
@@ -568,12 +568,12 @@ function runMigrations(db) {
|
|
|
568
568
|
const appliedVersions = new Set(db.prepare("SELECT version FROM schema_migrations").all().map((row) => row.version));
|
|
569
569
|
const pending = MIGRATIONS.filter((m) => !appliedVersions.has(m.version)).sort((a, b) => a.version - b.version);
|
|
570
570
|
if (pending.length === 0) {
|
|
571
|
-
logger$
|
|
571
|
+
logger$19.info("No pending migrations");
|
|
572
572
|
return;
|
|
573
573
|
}
|
|
574
574
|
const insertMigration = db.prepare("INSERT INTO schema_migrations (version, name) VALUES (?, ?)");
|
|
575
575
|
for (const migration of pending) {
|
|
576
|
-
logger$
|
|
576
|
+
logger$19.info({
|
|
577
577
|
version: migration.version,
|
|
578
578
|
name: migration.name
|
|
579
579
|
}, "Applying migration");
|
|
@@ -587,14 +587,14 @@ function runMigrations(db) {
|
|
|
587
587
|
});
|
|
588
588
|
applyMigration();
|
|
589
589
|
}
|
|
590
|
-
logger$
|
|
590
|
+
logger$19.info({ version: migration.version }, "Migration applied successfully");
|
|
591
591
|
}
|
|
592
|
-
logger$
|
|
592
|
+
logger$19.info({ count: pending.length }, "All pending migrations applied");
|
|
593
593
|
}
|
|
594
594
|
|
|
595
595
|
//#endregion
|
|
596
596
|
//#region src/persistence/database.ts
|
|
597
|
-
const logger$
|
|
597
|
+
const logger$18 = createLogger("persistence:database");
|
|
598
598
|
/**
|
|
599
599
|
* Thin wrapper that opens a SQLite database, applies required PRAGMAs,
|
|
600
600
|
* and exposes the raw BetterSqlite3 instance.
|
|
@@ -611,14 +611,14 @@ var DatabaseWrapper = class {
|
|
|
611
611
|
*/
|
|
612
612
|
open() {
|
|
613
613
|
if (this._db !== null) return;
|
|
614
|
-
logger$
|
|
614
|
+
logger$18.info({ path: this._path }, "Opening SQLite database");
|
|
615
615
|
this._db = new BetterSqlite3(this._path);
|
|
616
616
|
const walResult = this._db.pragma("journal_mode = WAL");
|
|
617
|
-
if (walResult?.[0]?.journal_mode !== "wal") logger$
|
|
617
|
+
if (walResult?.[0]?.journal_mode !== "wal") logger$18.warn({ result: walResult?.[0]?.journal_mode }, "WAL pragma did not return expected \"wal\" — journal_mode may be \"memory\" or unsupported");
|
|
618
618
|
this._db.pragma("busy_timeout = 5000");
|
|
619
619
|
this._db.pragma("synchronous = NORMAL");
|
|
620
620
|
this._db.pragma("foreign_keys = ON");
|
|
621
|
-
logger$
|
|
621
|
+
logger$18.info({ path: this._path }, "SQLite database opened with WAL mode");
|
|
622
622
|
}
|
|
623
623
|
/**
|
|
624
624
|
* Close the database. Idempotent — calling close() when already closed is a no-op.
|
|
@@ -627,7 +627,7 @@ var DatabaseWrapper = class {
|
|
|
627
627
|
if (this._db === null) return;
|
|
628
628
|
this._db.close();
|
|
629
629
|
this._db = null;
|
|
630
|
-
logger$
|
|
630
|
+
logger$18.info({ path: this._path }, "SQLite database closed");
|
|
631
631
|
}
|
|
632
632
|
/**
|
|
633
633
|
* Return the raw BetterSqlite3 instance.
|
|
@@ -1208,11 +1208,38 @@ function buildPipelineStatusOutput(run, tokenSummary, decisionsCount, storiesCou
|
|
|
1208
1208
|
totalCost += row.total_cost_usd;
|
|
1209
1209
|
}
|
|
1210
1210
|
let activeDispatches = 0;
|
|
1211
|
+
let storiesSummary;
|
|
1211
1212
|
try {
|
|
1212
1213
|
if (run.token_usage_json) {
|
|
1213
1214
|
const state = JSON.parse(run.token_usage_json);
|
|
1214
|
-
if (state.stories) {
|
|
1215
|
-
|
|
1215
|
+
if (state.stories && Object.keys(state.stories).length > 0) {
|
|
1216
|
+
const now = Date.now();
|
|
1217
|
+
let completed = 0;
|
|
1218
|
+
let inProgress = 0;
|
|
1219
|
+
let escalated = 0;
|
|
1220
|
+
let pending = 0;
|
|
1221
|
+
const details = {};
|
|
1222
|
+
for (const [key, s] of Object.entries(state.stories)) {
|
|
1223
|
+
const phase = s.phase ?? "PENDING";
|
|
1224
|
+
if (phase !== "PENDING" && phase !== "COMPLETE" && phase !== "ESCALATED") activeDispatches++;
|
|
1225
|
+
if (phase === "COMPLETE") completed++;
|
|
1226
|
+
else if (phase === "ESCALATED") escalated++;
|
|
1227
|
+
else if (phase === "PENDING") pending++;
|
|
1228
|
+
else inProgress++;
|
|
1229
|
+
const elapsed = s.startedAt != null ? Math.max(0, Math.round((now - new Date(s.startedAt).getTime()) / 1e3)) : 0;
|
|
1230
|
+
details[key] = {
|
|
1231
|
+
phase,
|
|
1232
|
+
review_cycles: s.reviewCycles ?? 0,
|
|
1233
|
+
elapsed_seconds: elapsed
|
|
1234
|
+
};
|
|
1235
|
+
}
|
|
1236
|
+
storiesSummary = {
|
|
1237
|
+
completed,
|
|
1238
|
+
in_progress: inProgress,
|
|
1239
|
+
escalated,
|
|
1240
|
+
pending,
|
|
1241
|
+
details
|
|
1242
|
+
};
|
|
1216
1243
|
}
|
|
1217
1244
|
}
|
|
1218
1245
|
} catch {}
|
|
@@ -1230,7 +1257,8 @@ function buildPipelineStatusOutput(run, tokenSummary, decisionsCount, storiesCou
|
|
|
1230
1257
|
last_activity: run.updated_at,
|
|
1231
1258
|
staleness_seconds: Math.round((Date.now() - parseDbTimestampAsUtc(run.updated_at).getTime()) / 1e3),
|
|
1232
1259
|
last_event_ts: run.updated_at,
|
|
1233
|
-
active_dispatches: activeDispatches
|
|
1260
|
+
active_dispatches: activeDispatches,
|
|
1261
|
+
...storiesSummary !== void 0 ? { stories: storiesSummary } : {}
|
|
1234
1262
|
};
|
|
1235
1263
|
}
|
|
1236
1264
|
/**
|
|
@@ -1259,6 +1287,19 @@ function formatPipelineStatusHuman(status) {
|
|
|
1259
1287
|
lines.push(` Total Cost: $${status.total_tokens.cost_usd.toFixed(4)}`);
|
|
1260
1288
|
lines.push(` Decisions: ${status.decisions_count}`);
|
|
1261
1289
|
lines.push(` Stories: ${status.stories_count}`);
|
|
1290
|
+
if (status.stories !== void 0 && Object.keys(status.stories.details).length > 0) {
|
|
1291
|
+
lines.push("");
|
|
1292
|
+
lines.push(" Sprint Progress:");
|
|
1293
|
+
lines.push(" " + "─".repeat(68));
|
|
1294
|
+
lines.push(` ${"STORY".padEnd(10)} ${"PHASE".padEnd(24)} ${"CYCLES".padEnd(8)} ELAPSED`);
|
|
1295
|
+
lines.push(" " + "─".repeat(68));
|
|
1296
|
+
for (const [key, detail] of Object.entries(status.stories.details)) {
|
|
1297
|
+
const elapsed = detail.elapsed_seconds > 0 ? `${detail.elapsed_seconds}s` : "-";
|
|
1298
|
+
lines.push(` ${key.padEnd(10)} ${detail.phase.padEnd(24)} ${String(detail.review_cycles).padEnd(8)} ${elapsed}`);
|
|
1299
|
+
}
|
|
1300
|
+
lines.push(" " + "─".repeat(68));
|
|
1301
|
+
lines.push(` Completed: ${status.stories.completed} In Progress: ${status.stories.in_progress} Escalated: ${status.stories.escalated} Pending: ${status.stories.pending}`);
|
|
1302
|
+
}
|
|
1262
1303
|
return lines.join("\n");
|
|
1263
1304
|
}
|
|
1264
1305
|
/**
|
|
@@ -2496,7 +2537,7 @@ function truncateToTokens(text, maxTokens) {
|
|
|
2496
2537
|
|
|
2497
2538
|
//#endregion
|
|
2498
2539
|
//#region src/modules/context-compiler/context-compiler-impl.ts
|
|
2499
|
-
const logger$
|
|
2540
|
+
const logger$17 = createLogger("context-compiler");
|
|
2500
2541
|
/**
|
|
2501
2542
|
* Fraction of the original token budget that must remain (after required +
|
|
2502
2543
|
* important sections) before an optional section is included.
|
|
@@ -2588,7 +2629,7 @@ var ContextCompilerImpl = class {
|
|
|
2588
2629
|
includedParts.push(truncated);
|
|
2589
2630
|
remainingBudget -= truncatedTokens;
|
|
2590
2631
|
anyTruncated = true;
|
|
2591
|
-
logger$
|
|
2632
|
+
logger$17.warn({
|
|
2592
2633
|
section: section.name,
|
|
2593
2634
|
originalTokens: tokens,
|
|
2594
2635
|
budgetTokens: truncatedTokens
|
|
@@ -2602,7 +2643,7 @@ var ContextCompilerImpl = class {
|
|
|
2602
2643
|
});
|
|
2603
2644
|
} else {
|
|
2604
2645
|
anyTruncated = true;
|
|
2605
|
-
logger$
|
|
2646
|
+
logger$17.warn({
|
|
2606
2647
|
section: section.name,
|
|
2607
2648
|
tokens
|
|
2608
2649
|
}, "Context compiler: omitted \"important\" section — no budget remaining");
|
|
@@ -2629,7 +2670,7 @@ var ContextCompilerImpl = class {
|
|
|
2629
2670
|
} else {
|
|
2630
2671
|
if (tokens > 0) {
|
|
2631
2672
|
anyTruncated = true;
|
|
2632
|
-
logger$
|
|
2673
|
+
logger$17.warn({
|
|
2633
2674
|
section: section.name,
|
|
2634
2675
|
tokens,
|
|
2635
2676
|
budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
|
|
@@ -2914,7 +2955,7 @@ function parseYamlResult(yamlText, schema) {
|
|
|
2914
2955
|
|
|
2915
2956
|
//#endregion
|
|
2916
2957
|
//#region src/modules/agent-dispatch/dispatcher-impl.ts
|
|
2917
|
-
const logger$
|
|
2958
|
+
const logger$16 = createLogger("agent-dispatch");
|
|
2918
2959
|
const SHUTDOWN_GRACE_MS = 1e4;
|
|
2919
2960
|
const SHUTDOWN_MAX_WAIT_MS = 3e4;
|
|
2920
2961
|
const CHARS_PER_TOKEN = 4;
|
|
@@ -2957,7 +2998,7 @@ function getAvailableMemory() {
|
|
|
2957
2998
|
encoding: "utf-8"
|
|
2958
2999
|
}).trim(), 10);
|
|
2959
3000
|
if (pressureLevel >= 4) {
|
|
2960
|
-
logger$
|
|
3001
|
+
logger$16.warn({ pressureLevel }, "macOS kernel reports critical memory pressure");
|
|
2961
3002
|
return 0;
|
|
2962
3003
|
}
|
|
2963
3004
|
} catch {}
|
|
@@ -2972,7 +3013,7 @@ function getAvailableMemory() {
|
|
|
2972
3013
|
const speculative = parseInt(vmstat.match(/Pages speculative:\s+(\d+)/)?.[1] ?? "0", 10);
|
|
2973
3014
|
const available = (free + purgeable + speculative) * pageSize;
|
|
2974
3015
|
if (pressureLevel >= 2) {
|
|
2975
|
-
logger$
|
|
3016
|
+
logger$16.warn({
|
|
2976
3017
|
pressureLevel,
|
|
2977
3018
|
availableBeforeDiscount: available
|
|
2978
3019
|
}, "macOS kernel reports memory pressure — discounting estimate");
|
|
@@ -3051,7 +3092,7 @@ var DispatcherImpl = class {
|
|
|
3051
3092
|
resolve: typedResolve,
|
|
3052
3093
|
reject
|
|
3053
3094
|
});
|
|
3054
|
-
logger$
|
|
3095
|
+
logger$16.debug({
|
|
3055
3096
|
id,
|
|
3056
3097
|
queueLength: this._queue.length
|
|
3057
3098
|
}, "Dispatch queued");
|
|
@@ -3082,7 +3123,7 @@ var DispatcherImpl = class {
|
|
|
3082
3123
|
async shutdown() {
|
|
3083
3124
|
this._shuttingDown = true;
|
|
3084
3125
|
this._stopMemoryPressureTimer();
|
|
3085
|
-
logger$
|
|
3126
|
+
logger$16.info({
|
|
3086
3127
|
running: this._running.size,
|
|
3087
3128
|
queued: this._queue.length
|
|
3088
3129
|
}, "Dispatcher shutting down");
|
|
@@ -3115,13 +3156,13 @@ var DispatcherImpl = class {
|
|
|
3115
3156
|
}
|
|
3116
3157
|
}, 50);
|
|
3117
3158
|
});
|
|
3118
|
-
logger$
|
|
3159
|
+
logger$16.info("Dispatcher shutdown complete");
|
|
3119
3160
|
}
|
|
3120
3161
|
async _startDispatch(id, request, resolve$2) {
|
|
3121
3162
|
const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns } = request;
|
|
3122
3163
|
const adapter = this._adapterRegistry.get(agent);
|
|
3123
3164
|
if (adapter === void 0) {
|
|
3124
|
-
logger$
|
|
3165
|
+
logger$16.warn({
|
|
3125
3166
|
id,
|
|
3126
3167
|
agent
|
|
3127
3168
|
}, "No adapter found for agent");
|
|
@@ -3167,7 +3208,7 @@ var DispatcherImpl = class {
|
|
|
3167
3208
|
});
|
|
3168
3209
|
const startedAt = Date.now();
|
|
3169
3210
|
proc.on("error", (err) => {
|
|
3170
|
-
logger$
|
|
3211
|
+
logger$16.error({
|
|
3171
3212
|
id,
|
|
3172
3213
|
binary: cmd.binary,
|
|
3173
3214
|
error: err.message
|
|
@@ -3175,7 +3216,7 @@ var DispatcherImpl = class {
|
|
|
3175
3216
|
});
|
|
3176
3217
|
if (proc.stdin !== null) {
|
|
3177
3218
|
proc.stdin.on("error", (err) => {
|
|
3178
|
-
if (err.code !== "EPIPE") logger$
|
|
3219
|
+
if (err.code !== "EPIPE") logger$16.warn({
|
|
3179
3220
|
id,
|
|
3180
3221
|
error: err.message
|
|
3181
3222
|
}, "stdin write error");
|
|
@@ -3217,7 +3258,7 @@ var DispatcherImpl = class {
|
|
|
3217
3258
|
agent,
|
|
3218
3259
|
taskType
|
|
3219
3260
|
});
|
|
3220
|
-
logger$
|
|
3261
|
+
logger$16.debug({
|
|
3221
3262
|
id,
|
|
3222
3263
|
agent,
|
|
3223
3264
|
taskType,
|
|
@@ -3234,7 +3275,7 @@ var DispatcherImpl = class {
|
|
|
3234
3275
|
dispatchId: id,
|
|
3235
3276
|
timeoutMs
|
|
3236
3277
|
});
|
|
3237
|
-
logger$
|
|
3278
|
+
logger$16.warn({
|
|
3238
3279
|
id,
|
|
3239
3280
|
agent,
|
|
3240
3281
|
taskType,
|
|
@@ -3288,7 +3329,7 @@ var DispatcherImpl = class {
|
|
|
3288
3329
|
exitCode: code,
|
|
3289
3330
|
output: stdout
|
|
3290
3331
|
});
|
|
3291
|
-
logger$
|
|
3332
|
+
logger$16.debug({
|
|
3292
3333
|
id,
|
|
3293
3334
|
agent,
|
|
3294
3335
|
taskType,
|
|
@@ -3314,7 +3355,7 @@ var DispatcherImpl = class {
|
|
|
3314
3355
|
error: stderr || `Process exited with code ${String(code)}`,
|
|
3315
3356
|
exitCode: code
|
|
3316
3357
|
});
|
|
3317
|
-
logger$
|
|
3358
|
+
logger$16.debug({
|
|
3318
3359
|
id,
|
|
3319
3360
|
agent,
|
|
3320
3361
|
taskType,
|
|
@@ -3373,7 +3414,7 @@ var DispatcherImpl = class {
|
|
|
3373
3414
|
const next = this._queue.shift();
|
|
3374
3415
|
if (next === void 0) return;
|
|
3375
3416
|
next.handle.status = "running";
|
|
3376
|
-
logger$
|
|
3417
|
+
logger$16.debug({
|
|
3377
3418
|
id: next.id,
|
|
3378
3419
|
queueLength: this._queue.length
|
|
3379
3420
|
}, "Dequeued dispatch");
|
|
@@ -3386,7 +3427,7 @@ var DispatcherImpl = class {
|
|
|
3386
3427
|
_isMemoryPressured() {
|
|
3387
3428
|
const free = getAvailableMemory();
|
|
3388
3429
|
if (free < MIN_FREE_MEMORY_BYTES) {
|
|
3389
|
-
logger$
|
|
3430
|
+
logger$16.warn({
|
|
3390
3431
|
freeMB: Math.round(free / 1024 / 1024),
|
|
3391
3432
|
thresholdMB: Math.round(MIN_FREE_MEMORY_BYTES / 1024 / 1024)
|
|
3392
3433
|
}, "Memory pressure detected — holding dispatch queue");
|
|
@@ -3424,9 +3465,94 @@ function createDispatcher(options) {
|
|
|
3424
3465
|
return new DispatcherImpl(options.eventBus, options.adapterRegistry, config);
|
|
3425
3466
|
}
|
|
3426
3467
|
|
|
3468
|
+
//#endregion
|
|
3469
|
+
//#region src/modules/implementation-orchestrator/escalation-diagnosis.ts
|
|
3470
|
+
/**
|
|
3471
|
+
* Generate a structured diagnosis from escalation data.
|
|
3472
|
+
*
|
|
3473
|
+
* Handles both structured issue lists (from code-review) and plain string
|
|
3474
|
+
* arrays (from create-story/dev-story failures).
|
|
3475
|
+
*/
|
|
3476
|
+
function generateEscalationDiagnosis(issues, reviewCycles, lastVerdict) {
|
|
3477
|
+
const structured = issues.map((issue) => {
|
|
3478
|
+
if (typeof issue === "string") return {
|
|
3479
|
+
severity: "major",
|
|
3480
|
+
description: issue
|
|
3481
|
+
};
|
|
3482
|
+
const iss = issue;
|
|
3483
|
+
return {
|
|
3484
|
+
severity: iss.severity ?? "unknown",
|
|
3485
|
+
description: iss.description ?? "",
|
|
3486
|
+
file: iss.file,
|
|
3487
|
+
line: iss.line
|
|
3488
|
+
};
|
|
3489
|
+
});
|
|
3490
|
+
const blockerCount = structured.filter((i) => i.severity === "blocker").length;
|
|
3491
|
+
const majorCount = structured.filter((i) => i.severity === "major").length;
|
|
3492
|
+
const minorCount = structured.filter((i) => i.severity === "minor").length;
|
|
3493
|
+
const totalIssues = structured.length;
|
|
3494
|
+
const fileCounts = new Map();
|
|
3495
|
+
for (const issue of structured) if (issue.file) fileCounts.set(issue.file, (fileCounts.get(issue.file) ?? 0) + 1);
|
|
3496
|
+
const sortedFiles = [...fileCounts.entries()].sort((a, b) => b[1] - a[1]).map(([file]) => file);
|
|
3497
|
+
const issuesWithFiles = structured.filter((i) => i.file).length;
|
|
3498
|
+
let issueDistribution = "widespread";
|
|
3499
|
+
if (issuesWithFiles > 0 && sortedFiles.length > 0) {
|
|
3500
|
+
const topTwoCount = sortedFiles.slice(0, 2).reduce((sum, file) => sum + (fileCounts.get(file) ?? 0), 0);
|
|
3501
|
+
if (topTwoCount > issuesWithFiles * .5) issueDistribution = "concentrated";
|
|
3502
|
+
}
|
|
3503
|
+
let severityProfile;
|
|
3504
|
+
if (totalIssues === 0) severityProfile = "no-structured-issues";
|
|
3505
|
+
else if (blockerCount > 0) severityProfile = "blocker-present";
|
|
3506
|
+
else if (majorCount > 0) severityProfile = "major-only";
|
|
3507
|
+
else severityProfile = "minor-only";
|
|
3508
|
+
const { action, rationale } = pickRecommendation(issueDistribution, severityProfile, totalIssues, reviewCycles, lastVerdict);
|
|
3509
|
+
return {
|
|
3510
|
+
issueDistribution,
|
|
3511
|
+
severityProfile,
|
|
3512
|
+
totalIssues,
|
|
3513
|
+
blockerCount,
|
|
3514
|
+
majorCount,
|
|
3515
|
+
minorCount,
|
|
3516
|
+
affectedFiles: sortedFiles.slice(0, 5),
|
|
3517
|
+
reviewCycles,
|
|
3518
|
+
recommendedAction: action,
|
|
3519
|
+
rationale
|
|
3520
|
+
};
|
|
3521
|
+
}
|
|
3522
|
+
function pickRecommendation(distribution, profile, totalIssues, reviewCycles, lastVerdict) {
|
|
3523
|
+
if (lastVerdict.startsWith("create-story") || lastVerdict.startsWith("dev-story")) return {
|
|
3524
|
+
action: "human-intervention",
|
|
3525
|
+
rationale: `Pipeline failed during ${lastVerdict.replace(/-/g, " ")} before code review. Manual investigation needed.`
|
|
3526
|
+
};
|
|
3527
|
+
if (lastVerdict === "fix-dispatch-timeout") return {
|
|
3528
|
+
action: "retry-targeted",
|
|
3529
|
+
rationale: "Fix dispatch timed out. Retry with a targeted prompt focusing on the remaining issues."
|
|
3530
|
+
};
|
|
3531
|
+
if (profile === "no-structured-issues") return {
|
|
3532
|
+
action: "retry-targeted",
|
|
3533
|
+
rationale: "Review produced no structured issues — likely a schema parse failure. Retry may resolve."
|
|
3534
|
+
};
|
|
3535
|
+
if (profile === "blocker-present") return {
|
|
3536
|
+
action: "human-intervention",
|
|
3537
|
+
rationale: `${totalIssues} issues including blockers after ${reviewCycles} review cycles. Fundamental problems remain — human review recommended.`
|
|
3538
|
+
};
|
|
3539
|
+
if (distribution === "concentrated" && profile === "major-only" && totalIssues <= 5) return {
|
|
3540
|
+
action: "retry-targeted",
|
|
3541
|
+
rationale: `${totalIssues} major issues concentrated in few files. A targeted retry prompt could resolve them.`
|
|
3542
|
+
};
|
|
3543
|
+
if (distribution === "widespread" && totalIssues > 3) return {
|
|
3544
|
+
action: "split-story",
|
|
3545
|
+
rationale: `${totalIssues} issues spread across many files after ${reviewCycles} cycles. Consider splitting into smaller stories.`
|
|
3546
|
+
};
|
|
3547
|
+
return {
|
|
3548
|
+
action: "retry-targeted",
|
|
3549
|
+
rationale: `${totalIssues} issues (${profile}) after ${reviewCycles} cycles. Retry with focused prompt.`
|
|
3550
|
+
};
|
|
3551
|
+
}
|
|
3552
|
+
|
|
3427
3553
|
//#endregion
|
|
3428
3554
|
//#region src/modules/compiled-workflows/prompt-assembler.ts
|
|
3429
|
-
const logger$
|
|
3555
|
+
const logger$15 = createLogger("compiled-workflows:prompt-assembler");
|
|
3430
3556
|
/**
|
|
3431
3557
|
* Assemble a final prompt from a template and sections map.
|
|
3432
3558
|
*
|
|
@@ -3451,7 +3577,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
3451
3577
|
tokenCount,
|
|
3452
3578
|
truncated: false
|
|
3453
3579
|
};
|
|
3454
|
-
logger$
|
|
3580
|
+
logger$15.warn({
|
|
3455
3581
|
tokenCount,
|
|
3456
3582
|
ceiling: tokenCeiling
|
|
3457
3583
|
}, "Prompt exceeds token ceiling — truncating optional sections");
|
|
@@ -3467,10 +3593,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
3467
3593
|
const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
|
|
3468
3594
|
if (targetSectionTokens === 0) {
|
|
3469
3595
|
contentMap[section.name] = "";
|
|
3470
|
-
logger$
|
|
3596
|
+
logger$15.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
|
|
3471
3597
|
} else {
|
|
3472
3598
|
contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
|
|
3473
|
-
logger$
|
|
3599
|
+
logger$15.warn({
|
|
3474
3600
|
sectionName: section.name,
|
|
3475
3601
|
targetSectionTokens
|
|
3476
3602
|
}, "Section truncated to fit token budget");
|
|
@@ -3481,7 +3607,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
3481
3607
|
}
|
|
3482
3608
|
if (tokenCount <= tokenCeiling) break;
|
|
3483
3609
|
}
|
|
3484
|
-
if (tokenCount > tokenCeiling) logger$
|
|
3610
|
+
if (tokenCount > tokenCeiling) logger$15.warn({
|
|
3485
3611
|
tokenCount,
|
|
3486
3612
|
ceiling: tokenCeiling
|
|
3487
3613
|
}, "Required sections alone exceed token ceiling — returning over-budget prompt");
|
|
@@ -3619,14 +3745,89 @@ const CodeReviewResultSchema = z.object({
|
|
|
3619
3745
|
agentVerdict: data.verdict,
|
|
3620
3746
|
verdict: computeVerdict(data.issue_list)
|
|
3621
3747
|
}));
|
|
3748
|
+
/**
|
|
3749
|
+
* Schema for the YAML output contract of the test-plan sub-agent.
|
|
3750
|
+
*
|
|
3751
|
+
* The agent must emit YAML with result, test_files, test_categories, and coverage_notes.
|
|
3752
|
+
* Example:
|
|
3753
|
+
* result: success
|
|
3754
|
+
* test_files:
|
|
3755
|
+
* - src/modules/foo/__tests__/foo.test.ts
|
|
3756
|
+
* test_categories:
|
|
3757
|
+
* - unit
|
|
3758
|
+
* - integration
|
|
3759
|
+
* coverage_notes: "AC1 covered by foo.test.ts"
|
|
3760
|
+
*/
|
|
3761
|
+
const TestPlanResultSchema = z.object({
|
|
3762
|
+
result: z.preprocess((val) => val === "failure" ? "failed" : val, z.enum(["success", "failed"])),
|
|
3763
|
+
test_files: z.array(z.string()).default([]),
|
|
3764
|
+
test_categories: z.array(z.string()).default([]),
|
|
3765
|
+
coverage_notes: z.string().default("")
|
|
3766
|
+
});
|
|
3767
|
+
/**
|
|
3768
|
+
* Schema for a single coverage gap identified during test expansion analysis.
|
|
3769
|
+
*/
|
|
3770
|
+
const CoverageGapSchema = z.object({
|
|
3771
|
+
ac_ref: z.string(),
|
|
3772
|
+
description: z.string(),
|
|
3773
|
+
gap_type: z.enum([
|
|
3774
|
+
"missing-e2e",
|
|
3775
|
+
"missing-integration",
|
|
3776
|
+
"unit-only"
|
|
3777
|
+
])
|
|
3778
|
+
});
|
|
3779
|
+
/**
|
|
3780
|
+
* Schema for a single suggested test generated during test expansion analysis.
|
|
3781
|
+
*/
|
|
3782
|
+
const SuggestedTestSchema = z.object({
|
|
3783
|
+
test_name: z.string(),
|
|
3784
|
+
test_type: z.enum([
|
|
3785
|
+
"e2e",
|
|
3786
|
+
"integration",
|
|
3787
|
+
"unit"
|
|
3788
|
+
]),
|
|
3789
|
+
description: z.string(),
|
|
3790
|
+
target_ac: z.string().optional()
|
|
3791
|
+
});
|
|
3792
|
+
/**
|
|
3793
|
+
* Schema for the YAML output contract of the test-expansion sub-agent.
|
|
3794
|
+
*
|
|
3795
|
+
* The agent must emit YAML with expansion_priority, coverage_gaps, and suggested_tests.
|
|
3796
|
+
* Example:
|
|
3797
|
+
* expansion_priority: medium
|
|
3798
|
+
* coverage_gaps:
|
|
3799
|
+
* - ac_ref: AC1
|
|
3800
|
+
* description: "Happy path not exercised at module boundary"
|
|
3801
|
+
* gap_type: missing-integration
|
|
3802
|
+
* suggested_tests:
|
|
3803
|
+
* - test_name: "runFoo integration happy path"
|
|
3804
|
+
* test_type: integration
|
|
3805
|
+
* description: "Test runFoo with real DB to verify AC1 end-to-end"
|
|
3806
|
+
* target_ac: AC1
|
|
3807
|
+
* notes: "Unit coverage is solid but integration layer is untested."
|
|
3808
|
+
*/
|
|
3809
|
+
const TestExpansionResultSchema = z.object({
|
|
3810
|
+
expansion_priority: z.preprocess((val) => [
|
|
3811
|
+
"low",
|
|
3812
|
+
"medium",
|
|
3813
|
+
"high"
|
|
3814
|
+
].includes(val) ? val : "low", z.enum([
|
|
3815
|
+
"low",
|
|
3816
|
+
"medium",
|
|
3817
|
+
"high"
|
|
3818
|
+
])),
|
|
3819
|
+
coverage_gaps: z.array(CoverageGapSchema).default([]),
|
|
3820
|
+
suggested_tests: z.array(SuggestedTestSchema).default([]),
|
|
3821
|
+
notes: z.string().optional()
|
|
3822
|
+
});
|
|
3622
3823
|
|
|
3623
3824
|
//#endregion
|
|
3624
3825
|
//#region src/modules/compiled-workflows/create-story.ts
|
|
3625
|
-
const logger$
|
|
3826
|
+
const logger$14 = createLogger("compiled-workflows:create-story");
|
|
3626
3827
|
/**
|
|
3627
3828
|
* Hard ceiling for the assembled create-story prompt.
|
|
3628
3829
|
*/
|
|
3629
|
-
const TOKEN_CEILING$
|
|
3830
|
+
const TOKEN_CEILING$4 = 3e3;
|
|
3630
3831
|
/**
|
|
3631
3832
|
* Execute the compiled create-story workflow.
|
|
3632
3833
|
*
|
|
@@ -3646,7 +3847,7 @@ const TOKEN_CEILING$2 = 3e3;
|
|
|
3646
3847
|
*/
|
|
3647
3848
|
async function runCreateStory(deps, params) {
|
|
3648
3849
|
const { epicId, storyKey, pipelineRunId } = params;
|
|
3649
|
-
logger$
|
|
3850
|
+
logger$14.debug({
|
|
3650
3851
|
epicId,
|
|
3651
3852
|
storyKey,
|
|
3652
3853
|
pipelineRunId
|
|
@@ -3656,7 +3857,7 @@ async function runCreateStory(deps, params) {
|
|
|
3656
3857
|
template = await deps.pack.getPrompt("create-story");
|
|
3657
3858
|
} catch (err) {
|
|
3658
3859
|
const error = err instanceof Error ? err.message : String(err);
|
|
3659
|
-
logger$
|
|
3860
|
+
logger$14.error({ error }, "Failed to retrieve create-story prompt template");
|
|
3660
3861
|
return {
|
|
3661
3862
|
result: "failed",
|
|
3662
3863
|
error: `Failed to retrieve prompt template: ${error}`,
|
|
@@ -3669,7 +3870,7 @@ async function runCreateStory(deps, params) {
|
|
|
3669
3870
|
const implementationDecisions = getImplementationDecisions(deps);
|
|
3670
3871
|
const epicShardContent = getEpicShard(implementationDecisions, epicId, deps.projectRoot);
|
|
3671
3872
|
const prevDevNotesContent = getPrevDevNotes(implementationDecisions, epicId);
|
|
3672
|
-
const archConstraintsContent = getArchConstraints$
|
|
3873
|
+
const archConstraintsContent = getArchConstraints$2(deps);
|
|
3673
3874
|
const storyTemplateContent = await getStoryTemplate(deps);
|
|
3674
3875
|
const { prompt, tokenCount, truncated } = assemblePrompt(template, [
|
|
3675
3876
|
{
|
|
@@ -3697,11 +3898,11 @@ async function runCreateStory(deps, params) {
|
|
|
3697
3898
|
content: storyTemplateContent,
|
|
3698
3899
|
priority: "important"
|
|
3699
3900
|
}
|
|
3700
|
-
], TOKEN_CEILING$
|
|
3701
|
-
logger$
|
|
3901
|
+
], TOKEN_CEILING$4);
|
|
3902
|
+
logger$14.debug({
|
|
3702
3903
|
tokenCount,
|
|
3703
3904
|
truncated,
|
|
3704
|
-
tokenCeiling: TOKEN_CEILING$
|
|
3905
|
+
tokenCeiling: TOKEN_CEILING$4
|
|
3705
3906
|
}, "Prompt assembled for create-story");
|
|
3706
3907
|
const handle = deps.dispatcher.dispatch({
|
|
3707
3908
|
prompt,
|
|
@@ -3715,7 +3916,7 @@ async function runCreateStory(deps, params) {
|
|
|
3715
3916
|
dispatchResult = await handle.result;
|
|
3716
3917
|
} catch (err) {
|
|
3717
3918
|
const error = err instanceof Error ? err.message : String(err);
|
|
3718
|
-
logger$
|
|
3919
|
+
logger$14.error({
|
|
3719
3920
|
epicId,
|
|
3720
3921
|
storyKey,
|
|
3721
3922
|
error
|
|
@@ -3736,7 +3937,7 @@ async function runCreateStory(deps, params) {
|
|
|
3736
3937
|
if (dispatchResult.status === "failed") {
|
|
3737
3938
|
const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
|
|
3738
3939
|
const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
|
|
3739
|
-
logger$
|
|
3940
|
+
logger$14.warn({
|
|
3740
3941
|
epicId,
|
|
3741
3942
|
storyKey,
|
|
3742
3943
|
exitCode: dispatchResult.exitCode
|
|
@@ -3748,7 +3949,7 @@ async function runCreateStory(deps, params) {
|
|
|
3748
3949
|
};
|
|
3749
3950
|
}
|
|
3750
3951
|
if (dispatchResult.status === "timeout") {
|
|
3751
|
-
logger$
|
|
3952
|
+
logger$14.warn({
|
|
3752
3953
|
epicId,
|
|
3753
3954
|
storyKey
|
|
3754
3955
|
}, "Create-story dispatch timed out");
|
|
@@ -3761,7 +3962,7 @@ async function runCreateStory(deps, params) {
|
|
|
3761
3962
|
if (dispatchResult.parsed === null) {
|
|
3762
3963
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
3763
3964
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
3764
|
-
logger$
|
|
3965
|
+
logger$14.warn({
|
|
3765
3966
|
epicId,
|
|
3766
3967
|
storyKey,
|
|
3767
3968
|
details,
|
|
@@ -3777,7 +3978,7 @@ async function runCreateStory(deps, params) {
|
|
|
3777
3978
|
const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
|
|
3778
3979
|
if (!parseResult.success) {
|
|
3779
3980
|
const details = parseResult.error.message;
|
|
3780
|
-
logger$
|
|
3981
|
+
logger$14.warn({
|
|
3781
3982
|
epicId,
|
|
3782
3983
|
storyKey,
|
|
3783
3984
|
details
|
|
@@ -3790,7 +3991,7 @@ async function runCreateStory(deps, params) {
|
|
|
3790
3991
|
};
|
|
3791
3992
|
}
|
|
3792
3993
|
const parsed = parseResult.data;
|
|
3793
|
-
logger$
|
|
3994
|
+
logger$14.info({
|
|
3794
3995
|
epicId,
|
|
3795
3996
|
storyKey,
|
|
3796
3997
|
storyFile: parsed.story_file,
|
|
@@ -3812,7 +4013,7 @@ function getImplementationDecisions(deps) {
|
|
|
3812
4013
|
try {
|
|
3813
4014
|
return getDecisionsByPhase(deps.db, "implementation");
|
|
3814
4015
|
} catch (err) {
|
|
3815
|
-
logger$
|
|
4016
|
+
logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
|
|
3816
4017
|
return [];
|
|
3817
4018
|
}
|
|
3818
4019
|
}
|
|
@@ -3828,13 +4029,13 @@ function getEpicShard(decisions, epicId, projectRoot) {
|
|
|
3828
4029
|
if (projectRoot) {
|
|
3829
4030
|
const fallback = readEpicShardFromFile(projectRoot, epicId);
|
|
3830
4031
|
if (fallback) {
|
|
3831
|
-
logger$
|
|
4032
|
+
logger$14.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
|
|
3832
4033
|
return fallback;
|
|
3833
4034
|
}
|
|
3834
4035
|
}
|
|
3835
4036
|
return "";
|
|
3836
4037
|
} catch (err) {
|
|
3837
|
-
logger$
|
|
4038
|
+
logger$14.warn({
|
|
3838
4039
|
epicId,
|
|
3839
4040
|
error: err instanceof Error ? err.message : String(err)
|
|
3840
4041
|
}, "Failed to retrieve epic shard");
|
|
@@ -3851,7 +4052,7 @@ function getPrevDevNotes(decisions, epicId) {
|
|
|
3851
4052
|
if (devNotes.length === 0) return "";
|
|
3852
4053
|
return devNotes[devNotes.length - 1].value;
|
|
3853
4054
|
} catch (err) {
|
|
3854
|
-
logger$
|
|
4055
|
+
logger$14.warn({
|
|
3855
4056
|
epicId,
|
|
3856
4057
|
error: err instanceof Error ? err.message : String(err)
|
|
3857
4058
|
}, "Failed to retrieve prev dev notes");
|
|
@@ -3863,7 +4064,7 @@ function getPrevDevNotes(decisions, epicId) {
|
|
|
3863
4064
|
* Looks for decisions with phase='solutioning', category='architecture'.
|
|
3864
4065
|
* Falls back to reading _bmad-output/architecture/architecture.md on disk if decisions are empty.
|
|
3865
4066
|
*/
|
|
3866
|
-
function getArchConstraints$
|
|
4067
|
+
function getArchConstraints$2(deps) {
|
|
3867
4068
|
try {
|
|
3868
4069
|
const decisions = getDecisionsByPhase(deps.db, "solutioning");
|
|
3869
4070
|
const constraints = decisions.filter((d) => d.category === "architecture");
|
|
@@ -3871,13 +4072,13 @@ function getArchConstraints$1(deps) {
|
|
|
3871
4072
|
if (deps.projectRoot) {
|
|
3872
4073
|
const fallback = readArchConstraintsFromFile(deps.projectRoot);
|
|
3873
4074
|
if (fallback) {
|
|
3874
|
-
logger$
|
|
4075
|
+
logger$14.info("Using file-based fallback for architecture constraints (decisions table empty)");
|
|
3875
4076
|
return fallback;
|
|
3876
4077
|
}
|
|
3877
4078
|
}
|
|
3878
4079
|
return "";
|
|
3879
4080
|
} catch (err) {
|
|
3880
|
-
logger$
|
|
4081
|
+
logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
3881
4082
|
return "";
|
|
3882
4083
|
}
|
|
3883
4084
|
}
|
|
@@ -3897,7 +4098,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
|
|
|
3897
4098
|
const match = pattern.exec(content);
|
|
3898
4099
|
return match ? match[0].trim() : "";
|
|
3899
4100
|
} catch (err) {
|
|
3900
|
-
logger$
|
|
4101
|
+
logger$14.warn({
|
|
3901
4102
|
epicId,
|
|
3902
4103
|
error: err instanceof Error ? err.message : String(err)
|
|
3903
4104
|
}, "File-based epic shard fallback failed");
|
|
@@ -3920,7 +4121,7 @@ function readArchConstraintsFromFile(projectRoot) {
|
|
|
3920
4121
|
const content = readFileSync$1(archPath, "utf-8");
|
|
3921
4122
|
return content.slice(0, 1500);
|
|
3922
4123
|
} catch (err) {
|
|
3923
|
-
logger$
|
|
4124
|
+
logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
|
|
3924
4125
|
return "";
|
|
3925
4126
|
}
|
|
3926
4127
|
}
|
|
@@ -3933,14 +4134,14 @@ async function getStoryTemplate(deps) {
|
|
|
3933
4134
|
try {
|
|
3934
4135
|
return await deps.pack.getTemplate("story");
|
|
3935
4136
|
} catch (err) {
|
|
3936
|
-
logger$
|
|
4137
|
+
logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
|
|
3937
4138
|
return "";
|
|
3938
4139
|
}
|
|
3939
4140
|
}
|
|
3940
4141
|
|
|
3941
4142
|
//#endregion
|
|
3942
4143
|
//#region src/modules/compiled-workflows/git-helpers.ts
|
|
3943
|
-
const logger$
|
|
4144
|
+
const logger$13 = createLogger("compiled-workflows:git-helpers");
|
|
3944
4145
|
/**
|
|
3945
4146
|
* Capture the full git diff for HEAD (working tree vs current commit).
|
|
3946
4147
|
*
|
|
@@ -4064,7 +4265,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
4064
4265
|
stderr += chunk.toString("utf-8");
|
|
4065
4266
|
});
|
|
4066
4267
|
proc.on("error", (err) => {
|
|
4067
|
-
logger$
|
|
4268
|
+
logger$13.warn({
|
|
4068
4269
|
label: logLabel,
|
|
4069
4270
|
cwd,
|
|
4070
4271
|
error: err.message
|
|
@@ -4073,7 +4274,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
4073
4274
|
});
|
|
4074
4275
|
proc.on("close", (code) => {
|
|
4075
4276
|
if (code !== 0) {
|
|
4076
|
-
logger$
|
|
4277
|
+
logger$13.warn({
|
|
4077
4278
|
label: logLabel,
|
|
4078
4279
|
cwd,
|
|
4079
4280
|
code,
|
|
@@ -4087,13 +4288,91 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
4087
4288
|
});
|
|
4088
4289
|
}
|
|
4089
4290
|
|
|
4291
|
+
//#endregion
|
|
4292
|
+
//#region src/modules/implementation-orchestrator/project-findings.ts
|
|
4293
|
+
const logger$12 = createLogger("project-findings");
|
|
4294
|
+
/** Maximum character length for the findings summary */
|
|
4295
|
+
const MAX_CHARS = 2e3;
|
|
4296
|
+
/**
|
|
4297
|
+
* Query the decision store for prior project findings and return a formatted
|
|
4298
|
+
* markdown summary suitable for prompt injection.
|
|
4299
|
+
*
|
|
4300
|
+
* Returns an empty string if no findings exist (AC5: graceful fallback).
|
|
4301
|
+
*/
|
|
4302
|
+
function getProjectFindings(db) {
|
|
4303
|
+
try {
|
|
4304
|
+
const outcomes = getDecisionsByCategory(db, STORY_OUTCOME);
|
|
4305
|
+
const operational = getDecisionsByCategory(db, OPERATIONAL_FINDING);
|
|
4306
|
+
const metrics = getDecisionsByCategory(db, STORY_METRICS);
|
|
4307
|
+
const diagnoses = getDecisionsByCategory(db, ESCALATION_DIAGNOSIS);
|
|
4308
|
+
if (outcomes.length === 0 && operational.length === 0 && metrics.length === 0 && diagnoses.length === 0) return "";
|
|
4309
|
+
const sections = [];
|
|
4310
|
+
if (outcomes.length > 0) {
|
|
4311
|
+
const patterns = extractRecurringPatterns(outcomes);
|
|
4312
|
+
if (patterns.length > 0) {
|
|
4313
|
+
sections.push("**Recurring patterns from prior runs:**");
|
|
4314
|
+
for (const p of patterns) sections.push(`- ${p}`);
|
|
4315
|
+
}
|
|
4316
|
+
}
|
|
4317
|
+
if (diagnoses.length > 0) {
|
|
4318
|
+
sections.push("**Prior escalations:**");
|
|
4319
|
+
for (const d of diagnoses.slice(-3)) try {
|
|
4320
|
+
const val = JSON.parse(d.value);
|
|
4321
|
+
sections.push(`- ${d.key.split(":")[0]}: ${val.recommendedAction} — ${val.rationale}`);
|
|
4322
|
+
} catch {
|
|
4323
|
+
sections.push(`- ${d.key}: escalated`);
|
|
4324
|
+
}
|
|
4325
|
+
}
|
|
4326
|
+
const highCycleStories = metrics.filter((m) => {
|
|
4327
|
+
try {
|
|
4328
|
+
const val = JSON.parse(m.value);
|
|
4329
|
+
return val.review_cycles >= 2;
|
|
4330
|
+
} catch {
|
|
4331
|
+
return false;
|
|
4332
|
+
}
|
|
4333
|
+
}).slice(-5);
|
|
4334
|
+
if (highCycleStories.length > 0) {
|
|
4335
|
+
sections.push("**Stories with high review cycles:**");
|
|
4336
|
+
for (const m of highCycleStories) try {
|
|
4337
|
+
const val = JSON.parse(m.value);
|
|
4338
|
+
sections.push(`- ${m.key.split(":")[0]}: ${val.review_cycles} cycles`);
|
|
4339
|
+
} catch {}
|
|
4340
|
+
}
|
|
4341
|
+
const stalls = operational.filter((o) => o.key.startsWith("stall:"));
|
|
4342
|
+
if (stalls.length > 0) sections.push(`**Prior stalls:** ${stalls.length} stall event(s) recorded`);
|
|
4343
|
+
if (sections.length === 0) return "";
|
|
4344
|
+
let summary = sections.join("\n");
|
|
4345
|
+
if (summary.length > MAX_CHARS) summary = summary.slice(0, MAX_CHARS - 3) + "...";
|
|
4346
|
+
return summary;
|
|
4347
|
+
} catch (err) {
|
|
4348
|
+
logger$12.warn({ err }, "Failed to query project findings (graceful fallback)");
|
|
4349
|
+
return "";
|
|
4350
|
+
}
|
|
4351
|
+
}
|
|
4352
|
+
/**
|
|
4353
|
+
* Extract recurring patterns from story-outcome decisions.
|
|
4354
|
+
*
|
|
4355
|
+
* Looks for patterns that appear across multiple story outcomes
|
|
4356
|
+
* (e.g., "missing error handling" flagged in 3/5 stories).
|
|
4357
|
+
*/
|
|
4358
|
+
function extractRecurringPatterns(outcomes) {
|
|
4359
|
+
const patternCounts = new Map();
|
|
4360
|
+
for (const o of outcomes) try {
|
|
4361
|
+
const val = JSON.parse(o.value);
|
|
4362
|
+
if (Array.isArray(val.recurringPatterns)) {
|
|
4363
|
+
for (const pattern of val.recurringPatterns) if (typeof pattern === "string") patternCounts.set(pattern, (patternCounts.get(pattern) ?? 0) + 1);
|
|
4364
|
+
}
|
|
4365
|
+
} catch {}
|
|
4366
|
+
return [...patternCounts.entries()].filter(([, count]) => count >= 2).sort((a, b) => b[1] - a[1]).slice(0, 5).map(([pattern, count]) => `${pattern} (${count} occurrences)`);
|
|
4367
|
+
}
|
|
4368
|
+
|
|
4090
4369
|
//#endregion
|
|
4091
4370
|
//#region src/modules/compiled-workflows/dev-story.ts
|
|
4092
|
-
const logger$
|
|
4371
|
+
const logger$11 = createLogger("compiled-workflows:dev-story");
|
|
4093
4372
|
/** Hard token ceiling for the assembled dev-story prompt */
|
|
4094
|
-
const TOKEN_CEILING$
|
|
4373
|
+
const TOKEN_CEILING$3 = 24e3;
|
|
4095
4374
|
/** Default timeout for dev-story dispatches in milliseconds (30 min) */
|
|
4096
|
-
const DEFAULT_TIMEOUT_MS = 18e5;
|
|
4375
|
+
const DEFAULT_TIMEOUT_MS$1 = 18e5;
|
|
4097
4376
|
/** Default Vitest test patterns injected when no test-pattern decisions exist */
|
|
4098
4377
|
const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
|
|
4099
4378
|
- Framework: Vitest (NOT jest — --testPathPattern flag does not work, use -- "pattern")
|
|
@@ -4114,7 +4393,7 @@ const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
|
|
|
4114
4393
|
*/
|
|
4115
4394
|
async function runDevStory(deps, params) {
|
|
4116
4395
|
const { storyKey, storyFilePath, taskScope, priorFiles } = params;
|
|
4117
|
-
logger$
|
|
4396
|
+
logger$11.info({
|
|
4118
4397
|
storyKey,
|
|
4119
4398
|
storyFilePath
|
|
4120
4399
|
}, "Starting compiled dev-story workflow");
|
|
@@ -4156,10 +4435,10 @@ async function runDevStory(deps, params) {
|
|
|
4156
4435
|
let template;
|
|
4157
4436
|
try {
|
|
4158
4437
|
template = await deps.pack.getPrompt("dev-story");
|
|
4159
|
-
logger$
|
|
4438
|
+
logger$11.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
|
|
4160
4439
|
} catch (err) {
|
|
4161
4440
|
const error = err instanceof Error ? err.message : String(err);
|
|
4162
|
-
logger$
|
|
4441
|
+
logger$11.error({
|
|
4163
4442
|
storyKey,
|
|
4164
4443
|
error
|
|
4165
4444
|
}, "Failed to retrieve dev-story prompt template");
|
|
@@ -4170,14 +4449,14 @@ async function runDevStory(deps, params) {
|
|
|
4170
4449
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
4171
4450
|
} catch (err) {
|
|
4172
4451
|
if (err.code === "ENOENT") {
|
|
4173
|
-
logger$
|
|
4452
|
+
logger$11.error({
|
|
4174
4453
|
storyKey,
|
|
4175
4454
|
storyFilePath
|
|
4176
4455
|
}, "Story file not found");
|
|
4177
4456
|
return makeFailureResult("story_file_not_found");
|
|
4178
4457
|
}
|
|
4179
4458
|
const error = err instanceof Error ? err.message : String(err);
|
|
4180
|
-
logger$
|
|
4459
|
+
logger$11.error({
|
|
4181
4460
|
storyKey,
|
|
4182
4461
|
storyFilePath,
|
|
4183
4462
|
error
|
|
@@ -4185,7 +4464,7 @@ async function runDevStory(deps, params) {
|
|
|
4185
4464
|
return makeFailureResult(`story_file_read_error: ${error}`);
|
|
4186
4465
|
}
|
|
4187
4466
|
if (storyContent.trim().length === 0) {
|
|
4188
|
-
logger$
|
|
4467
|
+
logger$11.error({
|
|
4189
4468
|
storyKey,
|
|
4190
4469
|
storyFilePath
|
|
4191
4470
|
}, "Story file is empty");
|
|
@@ -4197,17 +4476,17 @@ async function runDevStory(deps, params) {
|
|
|
4197
4476
|
const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
|
|
4198
4477
|
if (testPatternDecisions.length > 0) {
|
|
4199
4478
|
testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
|
|
4200
|
-
logger$
|
|
4479
|
+
logger$11.debug({
|
|
4201
4480
|
storyKey,
|
|
4202
4481
|
count: testPatternDecisions.length
|
|
4203
4482
|
}, "Loaded test patterns from decision store");
|
|
4204
4483
|
} else {
|
|
4205
4484
|
testPatternsContent = DEFAULT_VITEST_PATTERNS;
|
|
4206
|
-
logger$
|
|
4485
|
+
logger$11.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
|
|
4207
4486
|
}
|
|
4208
4487
|
} catch (err) {
|
|
4209
4488
|
const error = err instanceof Error ? err.message : String(err);
|
|
4210
|
-
logger$
|
|
4489
|
+
logger$11.warn({
|
|
4211
4490
|
storyKey,
|
|
4212
4491
|
error
|
|
4213
4492
|
}, "Failed to load test patterns — using defaults");
|
|
@@ -4217,6 +4496,34 @@ async function runDevStory(deps, params) {
|
|
|
4217
4496
|
const priorFilesContent = priorFiles !== void 0 && priorFiles.length > 0 ? `## Files Modified by Previous Batches\n\nThe following files were created or modified by prior batch dispatches. Review them for context before implementing:\n\n${priorFiles.map((f) => `- ${f}`).join("\n")}` : "";
|
|
4218
4497
|
const filesInScopeContent = extractFilesInScope(storyContent);
|
|
4219
4498
|
const projectContextContent = deps.projectRoot ? await buildProjectContext(storyContent, deps.projectRoot) : "";
|
|
4499
|
+
let priorFindingsContent = "";
|
|
4500
|
+
try {
|
|
4501
|
+
const findings = getProjectFindings(deps.db);
|
|
4502
|
+
if (findings.length > 0) {
|
|
4503
|
+
priorFindingsContent = "Previous pipeline runs encountered these issues — avoid repeating them:\n\n" + findings;
|
|
4504
|
+
logger$11.debug({
|
|
4505
|
+
storyKey,
|
|
4506
|
+
findingsLen: findings.length
|
|
4507
|
+
}, "Injecting prior findings into dev-story prompt");
|
|
4508
|
+
}
|
|
4509
|
+
} catch {}
|
|
4510
|
+
let testPlanContent = "";
|
|
4511
|
+
try {
|
|
4512
|
+
const testPlanDecisions = getDecisionsByCategory(deps.db, "test-plan");
|
|
4513
|
+
const matchingPlan = testPlanDecisions.find((d) => d.key === storyKey);
|
|
4514
|
+
if (matchingPlan) {
|
|
4515
|
+
const plan = JSON.parse(matchingPlan.value);
|
|
4516
|
+
const parts = ["## Test Plan"];
|
|
4517
|
+
if (plan.test_files && plan.test_files.length > 0) {
|
|
4518
|
+
parts.push("\n### Test Files");
|
|
4519
|
+
for (const f of plan.test_files) parts.push(`- ${f}`);
|
|
4520
|
+
}
|
|
4521
|
+
if (plan.test_categories && plan.test_categories.length > 0) parts.push(`\n### Categories: ${plan.test_categories.join(", ")}`);
|
|
4522
|
+
if (plan.coverage_notes) parts.push(`\n### Coverage Notes\n${plan.coverage_notes}`);
|
|
4523
|
+
testPlanContent = parts.join("\n");
|
|
4524
|
+
logger$11.debug({ storyKey }, "Injecting test plan into dev-story prompt");
|
|
4525
|
+
}
|
|
4526
|
+
} catch {}
|
|
4220
4527
|
const sections = [
|
|
4221
4528
|
{
|
|
4222
4529
|
name: "story_content",
|
|
@@ -4247,13 +4554,23 @@ async function runDevStory(deps, params) {
|
|
|
4247
4554
|
name: "test_patterns",
|
|
4248
4555
|
content: testPatternsContent,
|
|
4249
4556
|
priority: "optional"
|
|
4557
|
+
},
|
|
4558
|
+
{
|
|
4559
|
+
name: "test_plan",
|
|
4560
|
+
content: testPlanContent,
|
|
4561
|
+
priority: "optional"
|
|
4562
|
+
},
|
|
4563
|
+
{
|
|
4564
|
+
name: "prior_findings",
|
|
4565
|
+
content: priorFindingsContent,
|
|
4566
|
+
priority: "optional"
|
|
4250
4567
|
}
|
|
4251
4568
|
];
|
|
4252
|
-
const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING$
|
|
4253
|
-
logger$
|
|
4569
|
+
const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING$3);
|
|
4570
|
+
logger$11.info({
|
|
4254
4571
|
storyKey,
|
|
4255
4572
|
tokenCount,
|
|
4256
|
-
ceiling: TOKEN_CEILING$
|
|
4573
|
+
ceiling: TOKEN_CEILING$3,
|
|
4257
4574
|
truncated
|
|
4258
4575
|
}, "Assembled dev-story prompt");
|
|
4259
4576
|
let dispatchResult;
|
|
@@ -4262,14 +4579,14 @@ async function runDevStory(deps, params) {
|
|
|
4262
4579
|
prompt,
|
|
4263
4580
|
agent: "claude-code",
|
|
4264
4581
|
taskType: "dev-story",
|
|
4265
|
-
timeout: DEFAULT_TIMEOUT_MS,
|
|
4582
|
+
timeout: DEFAULT_TIMEOUT_MS$1,
|
|
4266
4583
|
outputSchema: DevStoryResultSchema,
|
|
4267
4584
|
...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {}
|
|
4268
4585
|
});
|
|
4269
4586
|
dispatchResult = await handle.result;
|
|
4270
4587
|
} catch (err) {
|
|
4271
4588
|
const error = err instanceof Error ? err.message : String(err);
|
|
4272
|
-
logger$
|
|
4589
|
+
logger$11.error({
|
|
4273
4590
|
storyKey,
|
|
4274
4591
|
error
|
|
4275
4592
|
}, "Dispatch threw an unexpected error");
|
|
@@ -4280,11 +4597,11 @@ async function runDevStory(deps, params) {
|
|
|
4280
4597
|
output: dispatchResult.tokenEstimate.output
|
|
4281
4598
|
};
|
|
4282
4599
|
if (dispatchResult.status === "timeout") {
|
|
4283
|
-
logger$
|
|
4600
|
+
logger$11.error({
|
|
4284
4601
|
storyKey,
|
|
4285
4602
|
durationMs: dispatchResult.durationMs
|
|
4286
4603
|
}, "Dev-story dispatch timed out");
|
|
4287
|
-
if (dispatchResult.output.length > 0) logger$
|
|
4604
|
+
if (dispatchResult.output.length > 0) logger$11.info({
|
|
4288
4605
|
storyKey,
|
|
4289
4606
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
4290
4607
|
}, "Partial output before timeout");
|
|
@@ -4294,12 +4611,12 @@ async function runDevStory(deps, params) {
|
|
|
4294
4611
|
};
|
|
4295
4612
|
}
|
|
4296
4613
|
if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
|
|
4297
|
-
logger$
|
|
4614
|
+
logger$11.error({
|
|
4298
4615
|
storyKey,
|
|
4299
4616
|
exitCode: dispatchResult.exitCode,
|
|
4300
4617
|
status: dispatchResult.status
|
|
4301
4618
|
}, "Dev-story dispatch failed");
|
|
4302
|
-
if (dispatchResult.output.length > 0) logger$
|
|
4619
|
+
if (dispatchResult.output.length > 0) logger$11.info({
|
|
4303
4620
|
storyKey,
|
|
4304
4621
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
4305
4622
|
}, "Partial output from failed dispatch");
|
|
@@ -4311,7 +4628,7 @@ async function runDevStory(deps, params) {
|
|
|
4311
4628
|
if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
|
|
4312
4629
|
const details = dispatchResult.parseError ?? "parsed result was null";
|
|
4313
4630
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
4314
|
-
logger$
|
|
4631
|
+
logger$11.error({
|
|
4315
4632
|
storyKey,
|
|
4316
4633
|
parseError: details,
|
|
4317
4634
|
rawOutputSnippet: rawSnippet
|
|
@@ -4319,12 +4636,12 @@ async function runDevStory(deps, params) {
|
|
|
4319
4636
|
let filesModified = [];
|
|
4320
4637
|
try {
|
|
4321
4638
|
filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
|
|
4322
|
-
if (filesModified.length > 0) logger$
|
|
4639
|
+
if (filesModified.length > 0) logger$11.info({
|
|
4323
4640
|
storyKey,
|
|
4324
4641
|
fileCount: filesModified.length
|
|
4325
4642
|
}, "Recovered files_modified from git status (YAML fallback)");
|
|
4326
4643
|
} catch (err) {
|
|
4327
|
-
logger$
|
|
4644
|
+
logger$11.warn({
|
|
4328
4645
|
storyKey,
|
|
4329
4646
|
error: err instanceof Error ? err.message : String(err)
|
|
4330
4647
|
}, "Failed to recover files_modified from git");
|
|
@@ -4341,7 +4658,7 @@ async function runDevStory(deps, params) {
|
|
|
4341
4658
|
};
|
|
4342
4659
|
}
|
|
4343
4660
|
const parsed = dispatchResult.parsed;
|
|
4344
|
-
logger$
|
|
4661
|
+
logger$11.info({
|
|
4345
4662
|
storyKey,
|
|
4346
4663
|
result: parsed.result,
|
|
4347
4664
|
acMet: parsed.ac_met.length
|
|
@@ -4480,13 +4797,13 @@ function extractFilesInScope(storyContent) {
|
|
|
4480
4797
|
|
|
4481
4798
|
//#endregion
|
|
4482
4799
|
//#region src/modules/compiled-workflows/code-review.ts
|
|
4483
|
-
const logger$
|
|
4800
|
+
const logger$10 = createLogger("compiled-workflows:code-review");
|
|
4484
4801
|
/**
|
|
4485
4802
|
* Hard token ceiling for the assembled code-review prompt (50,000 tokens).
|
|
4486
4803
|
* Quality reviews require seeing actual code diffs, not just file names.
|
|
4487
4804
|
* // TODO: consider externalizing to pack config when multiple packs exist
|
|
4488
4805
|
*/
|
|
4489
|
-
const TOKEN_CEILING = 1e5;
|
|
4806
|
+
const TOKEN_CEILING$2 = 1e5;
|
|
4490
4807
|
/**
|
|
4491
4808
|
* Default fallback result when dispatch fails or times out.
|
|
4492
4809
|
* Uses NEEDS_MINOR_FIXES (not NEEDS_MAJOR_REWORK) so a parse/schema failure
|
|
@@ -4523,7 +4840,7 @@ function defaultFailResult(error, tokenUsage) {
|
|
|
4523
4840
|
async function runCodeReview(deps, params) {
|
|
4524
4841
|
const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
|
|
4525
4842
|
const cwd = workingDirectory ?? process.cwd();
|
|
4526
|
-
logger$
|
|
4843
|
+
logger$10.debug({
|
|
4527
4844
|
storyKey,
|
|
4528
4845
|
storyFilePath,
|
|
4529
4846
|
cwd,
|
|
@@ -4534,7 +4851,7 @@ async function runCodeReview(deps, params) {
|
|
|
4534
4851
|
template = await deps.pack.getPrompt("code-review");
|
|
4535
4852
|
} catch (err) {
|
|
4536
4853
|
const error = err instanceof Error ? err.message : String(err);
|
|
4537
|
-
logger$
|
|
4854
|
+
logger$10.error({ error }, "Failed to retrieve code-review prompt template");
|
|
4538
4855
|
return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
|
|
4539
4856
|
input: 0,
|
|
4540
4857
|
output: 0
|
|
@@ -4545,7 +4862,7 @@ async function runCodeReview(deps, params) {
|
|
|
4545
4862
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
4546
4863
|
} catch (err) {
|
|
4547
4864
|
const error = err instanceof Error ? err.message : String(err);
|
|
4548
|
-
logger$
|
|
4865
|
+
logger$10.error({
|
|
4549
4866
|
storyFilePath,
|
|
4550
4867
|
error
|
|
4551
4868
|
}, "Failed to read story file");
|
|
@@ -4554,7 +4871,7 @@ async function runCodeReview(deps, params) {
|
|
|
4554
4871
|
output: 0
|
|
4555
4872
|
});
|
|
4556
4873
|
}
|
|
4557
|
-
const archConstraintsContent = getArchConstraints(deps);
|
|
4874
|
+
const archConstraintsContent = getArchConstraints$1(deps);
|
|
4558
4875
|
const templateTokens = countTokens(template);
|
|
4559
4876
|
const storyTokens = countTokens(storyContent);
|
|
4560
4877
|
const constraintTokens = countTokens(archConstraintsContent);
|
|
@@ -4563,16 +4880,16 @@ async function runCodeReview(deps, params) {
|
|
|
4563
4880
|
if (filesModified && filesModified.length > 0) {
|
|
4564
4881
|
const scopedDiff = await getGitDiffForFiles(filesModified, cwd);
|
|
4565
4882
|
const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
|
|
4566
|
-
if (scopedTotal <= TOKEN_CEILING) {
|
|
4883
|
+
if (scopedTotal <= TOKEN_CEILING$2) {
|
|
4567
4884
|
gitDiffContent = scopedDiff;
|
|
4568
|
-
logger$
|
|
4885
|
+
logger$10.debug({
|
|
4569
4886
|
fileCount: filesModified.length,
|
|
4570
4887
|
tokenCount: scopedTotal
|
|
4571
4888
|
}, "Using scoped file diff");
|
|
4572
4889
|
} else {
|
|
4573
|
-
logger$
|
|
4890
|
+
logger$10.warn({
|
|
4574
4891
|
estimatedTotal: scopedTotal,
|
|
4575
|
-
ceiling: TOKEN_CEILING,
|
|
4892
|
+
ceiling: TOKEN_CEILING$2,
|
|
4576
4893
|
fileCount: filesModified.length
|
|
4577
4894
|
}, "Scoped diff exceeds token ceiling — falling back to stat-only summary");
|
|
4578
4895
|
gitDiffContent = await getGitDiffStatSummary(cwd);
|
|
@@ -4582,11 +4899,11 @@ async function runCodeReview(deps, params) {
|
|
|
4582
4899
|
await stageIntentToAdd(changedFiles, cwd);
|
|
4583
4900
|
const fullDiff = await getGitDiffSummary(cwd);
|
|
4584
4901
|
const fullTotal = nonDiffTokens + countTokens(fullDiff);
|
|
4585
|
-
if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
|
|
4902
|
+
if (fullTotal <= TOKEN_CEILING$2) gitDiffContent = fullDiff;
|
|
4586
4903
|
else {
|
|
4587
|
-
logger$
|
|
4904
|
+
logger$10.warn({
|
|
4588
4905
|
estimatedTotal: fullTotal,
|
|
4589
|
-
ceiling: TOKEN_CEILING
|
|
4906
|
+
ceiling: TOKEN_CEILING$2
|
|
4590
4907
|
}, "Full git diff would exceed token ceiling — using stat-only summary");
|
|
4591
4908
|
gitDiffContent = await getGitDiffStatSummary(cwd);
|
|
4592
4909
|
}
|
|
@@ -4599,6 +4916,17 @@ async function runCodeReview(deps, params) {
|
|
|
4599
4916
|
"",
|
|
4600
4917
|
...previousIssues.map((iss, i) => ` ${i + 1}. [${iss.severity ?? "unknown"}] ${iss.description ?? "no description"}${iss.file ? ` (${iss.file}${iss.line ? `:${iss.line}` : ""})` : ""}`)
|
|
4601
4918
|
].join("\n");
|
|
4919
|
+
let priorFindingsContent = "";
|
|
4920
|
+
try {
|
|
4921
|
+
const findings = getProjectFindings(deps.db);
|
|
4922
|
+
if (findings.length > 0) {
|
|
4923
|
+
priorFindingsContent = "Previous reviews found these recurring patterns — pay special attention:\n\n" + findings;
|
|
4924
|
+
logger$10.debug({
|
|
4925
|
+
storyKey,
|
|
4926
|
+
findingsLen: findings.length
|
|
4927
|
+
}, "Injecting prior findings into code-review prompt");
|
|
4928
|
+
}
|
|
4929
|
+
} catch {}
|
|
4602
4930
|
const sections = [
|
|
4603
4931
|
{
|
|
4604
4932
|
name: "story_content",
|
|
@@ -4619,14 +4947,19 @@ async function runCodeReview(deps, params) {
|
|
|
4619
4947
|
name: "arch_constraints",
|
|
4620
4948
|
content: archConstraintsContent,
|
|
4621
4949
|
priority: "optional"
|
|
4950
|
+
},
|
|
4951
|
+
{
|
|
4952
|
+
name: "prior_findings",
|
|
4953
|
+
content: priorFindingsContent,
|
|
4954
|
+
priority: "optional"
|
|
4622
4955
|
}
|
|
4623
4956
|
];
|
|
4624
|
-
const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
|
|
4625
|
-
if (assembleResult.truncated) logger$
|
|
4957
|
+
const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING$2);
|
|
4958
|
+
if (assembleResult.truncated) logger$10.warn({
|
|
4626
4959
|
storyKey,
|
|
4627
4960
|
tokenCount: assembleResult.tokenCount
|
|
4628
4961
|
}, "Code-review prompt truncated to fit token ceiling");
|
|
4629
|
-
logger$
|
|
4962
|
+
logger$10.debug({
|
|
4630
4963
|
storyKey,
|
|
4631
4964
|
tokenCount: assembleResult.tokenCount,
|
|
4632
4965
|
truncated: assembleResult.truncated
|
|
@@ -4644,7 +4977,7 @@ async function runCodeReview(deps, params) {
|
|
|
4644
4977
|
dispatchResult = await handle.result;
|
|
4645
4978
|
} catch (err) {
|
|
4646
4979
|
const error = err instanceof Error ? err.message : String(err);
|
|
4647
|
-
logger$
|
|
4980
|
+
logger$10.error({
|
|
4648
4981
|
storyKey,
|
|
4649
4982
|
error
|
|
4650
4983
|
}, "Code-review dispatch threw unexpected error");
|
|
@@ -4660,7 +4993,7 @@ async function runCodeReview(deps, params) {
|
|
|
4660
4993
|
const rawOutput = dispatchResult.output ?? void 0;
|
|
4661
4994
|
if (dispatchResult.status === "failed") {
|
|
4662
4995
|
const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
|
|
4663
|
-
logger$
|
|
4996
|
+
logger$10.warn({
|
|
4664
4997
|
storyKey,
|
|
4665
4998
|
exitCode: dispatchResult.exitCode
|
|
4666
4999
|
}, "Code-review dispatch failed");
|
|
@@ -4670,7 +5003,7 @@ async function runCodeReview(deps, params) {
|
|
|
4670
5003
|
};
|
|
4671
5004
|
}
|
|
4672
5005
|
if (dispatchResult.status === "timeout") {
|
|
4673
|
-
logger$
|
|
5006
|
+
logger$10.warn({ storyKey }, "Code-review dispatch timed out");
|
|
4674
5007
|
return {
|
|
4675
5008
|
...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
|
|
4676
5009
|
rawOutput
|
|
@@ -4678,7 +5011,7 @@ async function runCodeReview(deps, params) {
|
|
|
4678
5011
|
}
|
|
4679
5012
|
if (dispatchResult.parsed === null) {
|
|
4680
5013
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
4681
|
-
logger$
|
|
5014
|
+
logger$10.warn({
|
|
4682
5015
|
storyKey,
|
|
4683
5016
|
details
|
|
4684
5017
|
}, "Code-review output schema validation failed");
|
|
@@ -4695,7 +5028,7 @@ async function runCodeReview(deps, params) {
|
|
|
4695
5028
|
const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
|
|
4696
5029
|
if (!parseResult.success) {
|
|
4697
5030
|
const details = parseResult.error.message;
|
|
4698
|
-
logger$
|
|
5031
|
+
logger$10.warn({
|
|
4699
5032
|
storyKey,
|
|
4700
5033
|
details
|
|
4701
5034
|
}, "Code-review output failed schema validation");
|
|
@@ -4710,13 +5043,13 @@ async function runCodeReview(deps, params) {
|
|
|
4710
5043
|
};
|
|
4711
5044
|
}
|
|
4712
5045
|
const parsed = parseResult.data;
|
|
4713
|
-
if (parsed.agentVerdict !== parsed.verdict) logger$
|
|
5046
|
+
if (parsed.agentVerdict !== parsed.verdict) logger$10.info({
|
|
4714
5047
|
storyKey,
|
|
4715
5048
|
agentVerdict: parsed.agentVerdict,
|
|
4716
5049
|
pipelineVerdict: parsed.verdict,
|
|
4717
5050
|
issues: parsed.issues
|
|
4718
5051
|
}, "Pipeline overrode agent verdict based on issue severities");
|
|
4719
|
-
logger$
|
|
5052
|
+
logger$10.info({
|
|
4720
5053
|
storyKey,
|
|
4721
5054
|
verdict: parsed.verdict,
|
|
4722
5055
|
issues: parsed.issues
|
|
@@ -4734,6 +5067,384 @@ async function runCodeReview(deps, params) {
|
|
|
4734
5067
|
* Retrieve architecture constraints from the decision store.
|
|
4735
5068
|
* Looks for decisions with phase='solutioning', category='architecture'.
|
|
4736
5069
|
*/
|
|
5070
|
+
function getArchConstraints$1(deps) {
|
|
5071
|
+
try {
|
|
5072
|
+
const decisions = getDecisionsByPhase(deps.db, "solutioning");
|
|
5073
|
+
const constraints = decisions.filter((d) => d.category === "architecture");
|
|
5074
|
+
if (constraints.length === 0) return "";
|
|
5075
|
+
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
5076
|
+
} catch (err) {
|
|
5077
|
+
logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
5078
|
+
return "";
|
|
5079
|
+
}
|
|
5080
|
+
}
|
|
5081
|
+
|
|
5082
|
+
//#endregion
|
|
5083
|
+
//#region src/modules/compiled-workflows/test-plan.ts
|
|
5084
|
+
const logger$9 = createLogger("compiled-workflows:test-plan");
|
|
5085
|
+
/** Hard token ceiling for the assembled test-plan prompt */
|
|
5086
|
+
const TOKEN_CEILING$1 = 8e3;
|
|
5087
|
+
/** Default timeout for test-plan dispatches in milliseconds (5 min — lightweight call) */
|
|
5088
|
+
const DEFAULT_TIMEOUT_MS = 3e5;
/**
* Execute the compiled test-plan workflow: load the prompt template, read the
* story file, assemble the prompt under the token ceiling, dispatch the agent,
* validate the parsed output and persist the plan to the decision store.
*
* Never rejects — every failure path resolves to a failed TestPlanResult.
*
* @param deps - Injected dependencies (db, pack, contextCompiler, dispatcher)
* @param params - Parameters (storyKey, storyFilePath, pipelineRunId)
* @returns TestPlanResult with result, test_files, test_categories, coverage_notes, tokenUsage
*/
async function runTestPlan(deps, params) {
	const { storyKey, storyFilePath, pipelineRunId } = params;
	const describeError = (e) => e instanceof Error ? e.message : String(e);
	logger$9.info({ storyKey, storyFilePath }, "Starting compiled test-plan workflow");
	// Step 1: fetch the compiled prompt template from the pack.
	let template;
	try {
		template = await deps.pack.getPrompt("test-plan");
		logger$9.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
	} catch (err) {
		const error = describeError(err);
		logger$9.warn({ storyKey, error }, "Failed to retrieve test-plan prompt template");
		return makeTestPlanFailureResult(`template_load_failed: ${error}`);
	}
	// Step 2: read the story file; a missing file is a distinct failure mode.
	let storyContent;
	try {
		storyContent = await readFile$1(storyFilePath, "utf-8");
	} catch (err) {
		if (err.code === "ENOENT") {
			logger$9.warn({ storyKey, storyFilePath }, "Story file not found for test planning");
			return makeTestPlanFailureResult("story_file_not_found");
		}
		const error = describeError(err);
		logger$9.warn({ storyKey, storyFilePath, error }, "Failed to read story file for test planning");
		return makeTestPlanFailureResult(`story_file_read_error: ${error}`);
	}
	// Step 3: assemble the prompt with the story as the sole required section.
	const sections = [{
		name: "story_content",
		content: storyContent,
		priority: "required"
	}];
	const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING$1);
	logger$9.info({ storyKey, tokenCount, ceiling: TOKEN_CEILING$1, truncated }, "Assembled test-plan prompt");
	// Step 4: dispatch the agent; both synchronous throws and rejections are caught.
	let dispatchResult;
	try {
		const request = {
			prompt,
			agent: "claude-code",
			taskType: "test-plan",
			timeout: DEFAULT_TIMEOUT_MS,
			outputSchema: TestPlanResultSchema
		};
		if (deps.projectRoot !== void 0) request.workingDirectory = deps.projectRoot;
		dispatchResult = await deps.dispatcher.dispatch(request).result;
	} catch (err) {
		const error = describeError(err);
		logger$9.warn({ storyKey, error }, "Test-plan dispatch threw an unexpected error");
		return makeTestPlanFailureResult(`dispatch_error: ${error}`);
	}
	const tokenUsage = {
		input: dispatchResult.tokenEstimate.input,
		output: dispatchResult.tokenEstimate.output
	};
	// Step 5: map dispatch outcomes to failure results, preserving token usage.
	if (dispatchResult.status === "timeout") {
		logger$9.warn({ storyKey, durationMs: dispatchResult.durationMs }, "Test-plan dispatch timed out");
		return {
			...makeTestPlanFailureResult(`dispatch_timeout after ${dispatchResult.durationMs}ms`),
			tokenUsage
		};
	}
	if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
		logger$9.warn({ storyKey, exitCode: dispatchResult.exitCode, status: dispatchResult.status }, "Test-plan dispatch failed");
		return {
			...makeTestPlanFailureResult(`dispatch_failed with exit_code=${dispatchResult.exitCode}`),
			tokenUsage
		};
	}
	if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
		const details = dispatchResult.parseError ?? "parsed result was null";
		logger$9.warn({ storyKey, parseError: details }, "Test-plan YAML schema validation failed");
		return {
			...makeTestPlanFailureResult(`schema_validation_failed: ${details}`),
			tokenUsage
		};
	}
	const parsed = dispatchResult.parsed;
	// Step 6: persist the plan as a decision (best-effort — failures are logged only).
	try {
		createDecision(deps.db, {
			pipeline_run_id: pipelineRunId,
			phase: "implementation",
			category: TEST_PLAN,
			key: storyKey,
			value: JSON.stringify({
				test_files: parsed.test_files,
				test_categories: parsed.test_categories,
				coverage_notes: parsed.coverage_notes
			}),
			rationale: `Test plan for ${storyKey}: ${parsed.test_files.length} test files, categories: ${parsed.test_categories.join(", ")}`
		});
		logger$9.info({ storyKey, fileCount: parsed.test_files.length, categories: parsed.test_categories }, "Test plan stored in decision store");
	} catch (err) {
		logger$9.warn({ storyKey, error: describeError(err) }, "Failed to store test plan in decision store — proceeding anyway");
	}
	logger$9.info({ storyKey, result: parsed.result }, "Test-plan workflow completed");
	return {
		result: parsed.result,
		test_files: parsed.test_files,
		test_categories: parsed.test_categories,
		coverage_notes: parsed.coverage_notes,
		tokenUsage
	};
}
|
|
5237
|
+
/**
* Build a failure result with sensible defaults.
*
* @param error - Human-readable failure reason (e.g. "template_load_failed: ...")
* @returns A failed TestPlanResult with empty plan fields and zeroed token usage
*/
function makeTestPlanFailureResult(error) {
	const emptyUsage = {
		input: 0,
		output: 0
	};
	return {
		result: "failed",
		test_files: [],
		test_categories: [],
		coverage_notes: "",
		error,
		tokenUsage: emptyUsage
	};
}
|
|
5253
|
+
|
|
5254
|
+
//#endregion
|
|
5255
|
+
//#region src/modules/compiled-workflows/test-expansion.ts
|
|
5256
|
+
// Logger scoped to the compiled test-expansion workflow module.
const logger$8 = createLogger("compiled-workflows:test-expansion");
/**
* Hard token ceiling for the assembled test-expansion prompt (20,000 tokens).
*/
const TOKEN_CEILING = 2e4;
|
|
5261
|
+
/**
* Build the graceful fallback TestExpansionResult used on any failure path.
*
* @param error - Human-readable failure reason
* @param tokenUsage - Token usage accumulated before the failure ({ input, output })
* @returns A low-priority result with empty gaps/suggestions and the given error
*/
function defaultFallbackResult(error, tokenUsage) {
	const fallback = {
		expansion_priority: "low",
		coverage_gaps: [],
		suggested_tests: [],
		error,
		tokenUsage
	};
	return fallback;
}
|
|
5270
|
+
/**
* Execute the compiled test-expansion workflow.
*
* Steps:
* 1. Retrieve compiled prompt template via pack.getPrompt('test-expansion')
* 2. Read story file contents from storyFilePath
* 3. Query decision store for architecture constraints (solutioning, architecture)
* 4. Capture scoped git diff for filesModified, with stat-only fallback if oversized
* 5. Assemble prompt with 20,000-token ceiling
* 6. Dispatch via dispatcher with taskType='test-expansion'
* 7. Validate YAML output against TestExpansionResultSchema
* 8. Return typed TestExpansionResult (never throws — all errors return graceful fallback)
*
* @param deps - Injected dependencies (db, pack, contextCompiler, dispatcher)
* @param params - Story key, story file path, files modified, working directory, pipeline run ID
* @returns Promise resolving to TestExpansionResult (never rejects)
*/
async function runTestExpansion(deps, params) {
	const { storyKey, storyFilePath, pipelineRunId, filesModified, workingDirectory } = params;
	const cwd = workingDirectory ?? process.cwd();
	logger$8.debug({
		storyKey,
		storyFilePath,
		cwd,
		pipelineRunId
	}, "Starting test-expansion workflow");
	let template;
	try {
		template = await deps.pack.getPrompt("test-expansion");
	} catch (err) {
		const error = err instanceof Error ? err.message : String(err);
		logger$8.warn({ error }, "Failed to retrieve test-expansion prompt template");
		return defaultFallbackResult(`Failed to retrieve prompt template: ${error}`, {
			input: 0,
			output: 0
		});
	}
	let storyContent;
	try {
		storyContent = await readFile$1(storyFilePath, "utf-8");
	} catch (err) {
		const error = err instanceof Error ? err.message : String(err);
		logger$8.warn({
			storyFilePath,
			error
		}, "Failed to read story file");
		return defaultFallbackResult(`Failed to read story file: ${error}`, {
			input: 0,
			output: 0
		});
	}
	const archConstraintsContent = getArchConstraints(deps);
	// Best-effort git diff: scoped diff if it fits the ceiling, stat summary otherwise,
	// empty string if git fails entirely.
	let gitDiffContent = "";
	if (filesModified && filesModified.length > 0) try {
		const templateTokens = countTokens(template);
		const storyTokens = countTokens(storyContent);
		const constraintTokens = countTokens(archConstraintsContent);
		const nonDiffTokens = templateTokens + storyTokens + constraintTokens;
		const scopedDiff = await getGitDiffForFiles(filesModified, cwd);
		const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
		if (scopedTotal <= TOKEN_CEILING) {
			gitDiffContent = scopedDiff;
			logger$8.debug({
				fileCount: filesModified.length,
				tokenCount: scopedTotal
			}, "Using scoped file diff");
		} else {
			logger$8.warn({
				estimatedTotal: scopedTotal,
				ceiling: TOKEN_CEILING,
				fileCount: filesModified.length
			}, "Scoped diff exceeds token ceiling — falling back to stat-only summary");
			gitDiffContent = await getGitDiffStatSummary(cwd);
		}
	} catch (err) {
		logger$8.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
	}
	const sections = [
		{
			name: "story_content",
			content: storyContent,
			priority: "required"
		},
		{
			name: "git_diff",
			content: gitDiffContent,
			priority: "important"
		},
		{
			name: "arch_constraints",
			content: archConstraintsContent,
			priority: "optional"
		}
	];
	const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
	if (assembleResult.truncated) logger$8.warn({
		storyKey,
		tokenCount: assembleResult.tokenCount
	}, "Test-expansion prompt truncated to fit token ceiling");
	logger$8.debug({
		storyKey,
		tokenCount: assembleResult.tokenCount,
		truncated: assembleResult.truncated
	}, "Prompt assembled for test-expansion");
	const { prompt } = assembleResult;
	let dispatchResult;
	try {
		// BUGFIX: create the dispatch handle INSIDE the try block. Previously
		// dispatch() was called outside, so a synchronous throw escaped this
		// function and violated the documented "never rejects" contract
		// (runTestPlan already wraps dispatch the same way).
		const handle = deps.dispatcher.dispatch({
			prompt,
			agent: "claude-code",
			taskType: "test-expansion",
			outputSchema: TestExpansionResultSchema,
			workingDirectory: deps.projectRoot
		});
		dispatchResult = await handle.result;
	} catch (err) {
		const error = err instanceof Error ? err.message : String(err);
		logger$8.warn({
			storyKey,
			error
		}, "Test-expansion dispatch threw unexpected error");
		return defaultFallbackResult(`Dispatch error: ${error}`, {
			input: Math.ceil(prompt.length / 4),
			output: 0
		});
	}
	const tokenUsage = {
		input: dispatchResult.tokenEstimate.input,
		output: dispatchResult.tokenEstimate.output
	};
	if (dispatchResult.status === "failed") {
		const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""}`.trim();
		logger$8.warn({
			storyKey,
			exitCode: dispatchResult.exitCode
		}, "Test-expansion dispatch failed");
		return defaultFallbackResult(errorMsg, tokenUsage);
	}
	if (dispatchResult.status === "timeout") {
		logger$8.warn({ storyKey }, "Test-expansion dispatch timed out");
		return defaultFallbackResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage);
	}
	if (dispatchResult.parsed === null) {
		const details = dispatchResult.parseError ?? "No YAML block found in output";
		logger$8.warn({
			storyKey,
			details
		}, "Test-expansion output has no parseable YAML");
		return defaultFallbackResult(`schema_validation_failed: ${details}`, tokenUsage);
	}
	// Re-validate the parsed YAML against the schema before trusting its shape.
	const parseResult = TestExpansionResultSchema.safeParse(dispatchResult.parsed);
	if (!parseResult.success) {
		const details = parseResult.error.message;
		logger$8.warn({
			storyKey,
			details
		}, "Test-expansion output failed schema validation");
		return defaultFallbackResult(`schema_validation_failed: ${details}`, tokenUsage);
	}
	const parsed = parseResult.data;
	logger$8.info({
		storyKey,
		expansion_priority: parsed.expansion_priority,
		coverage_gaps: parsed.coverage_gaps.length,
		suggested_tests: parsed.suggested_tests.length
	}, "Test-expansion workflow completed successfully");
	return {
		expansion_priority: parsed.expansion_priority,
		coverage_gaps: parsed.coverage_gaps,
		suggested_tests: parsed.suggested_tests,
		notes: parsed.notes,
		tokenUsage
	};
}
|
|
5444
|
+
/**
|
|
5445
|
+
* Retrieve architecture constraints from the decision store.
|
|
5446
|
+
* Looks for decisions with phase='solutioning', category='architecture'.
|
|
5447
|
+
*/
|
|
4737
5448
|
function getArchConstraints(deps) {
|
|
4738
5449
|
try {
|
|
4739
5450
|
const decisions = getDecisionsByPhase(deps.db, "solutioning");
|
|
@@ -5397,7 +6108,7 @@ function createPauseGate() {
|
|
|
5397
6108
|
*/
|
|
5398
6109
|
function createImplementationOrchestrator(deps) {
|
|
5399
6110
|
const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot } = deps;
|
|
5400
|
-
const logger$
|
|
6111
|
+
const logger$20 = createLogger("implementation-orchestrator");
|
|
5401
6112
|
let _state = "IDLE";
|
|
5402
6113
|
let _startedAt;
|
|
5403
6114
|
let _completedAt;
|
|
@@ -5434,7 +6145,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5434
6145
|
const nowMs = Date.now();
|
|
5435
6146
|
for (const [phase, startMs] of starts) {
|
|
5436
6147
|
const endMs = ends?.get(phase);
|
|
5437
|
-
if (endMs === void 0) logger$
|
|
6148
|
+
if (endMs === void 0) logger$20.warn({
|
|
5438
6149
|
storyKey,
|
|
5439
6150
|
phase
|
|
5440
6151
|
}, "Phase has no end time — story may have errored mid-phase. Duration capped to now() and may be inflated.");
|
|
@@ -5481,18 +6192,87 @@ function createImplementationOrchestrator(deps) {
|
|
|
5481
6192
|
rationale: `Story ${storyKey} completed with result=${result} in ${wallClockSeconds}s. Tokens: ${tokenAgg.input}+${tokenAgg.output}. Review cycles: ${reviewCycles}.`
|
|
5482
6193
|
});
|
|
5483
6194
|
} catch (decisionErr) {
|
|
5484
|
-
logger$
|
|
6195
|
+
logger$20.warn({
|
|
5485
6196
|
err: decisionErr,
|
|
5486
6197
|
storyKey
|
|
5487
6198
|
}, "Failed to write story-metrics decision (best-effort)");
|
|
5488
6199
|
}
|
|
5489
6200
|
} catch (err) {
|
|
5490
|
-
logger$
|
|
6201
|
+
logger$20.warn({
|
|
5491
6202
|
err,
|
|
5492
6203
|
storyKey
|
|
5493
6204
|
}, "Failed to write story metrics (best-effort)");
|
|
5494
6205
|
}
|
|
5495
6206
|
}
|
|
6207
|
+
/**
* Persist a story outcome finding to the decision store (Story 22-1, AC4).
*
* Records outcome, review cycles, and any recurring issue patterns for
* future prompt injection via the learning loop. Best-effort: it is a no-op
* when no pipeline run id is configured, and persistence errors are logged
* rather than thrown.
*/
function writeStoryOutcomeBestEffort(storyKey, outcome, reviewCycles, issuePatterns) {
	const runId = config.pipelineRunId;
	if (runId === void 0) return;
	const finding = {
		storyKey,
		outcome,
		reviewCycles,
		recurringPatterns: issuePatterns ?? []
	};
	try {
		createDecision(db, {
			pipeline_run_id: runId,
			phase: "implementation",
			category: STORY_OUTCOME,
			key: `${storyKey}:${runId}`,
			value: JSON.stringify(finding),
			rationale: `Story ${storyKey} ${outcome} after ${reviewCycles} review cycle(s).`
		});
	} catch (err) {
		logger$20.warn({
			err,
			storyKey
		}, "Failed to write story-outcome decision (best-effort)");
	}
}
|
|
6236
|
+
/**
* Emit an escalation event with structured diagnosis and persist the
* diagnosis to the decision store (Story 22-3). Persistence is best-effort;
* the event is always emitted and the story outcome is always recorded.
*/
function emitEscalation(payload) {
	const { storyKey, issues, reviewCycles, lastVerdict } = payload;
	const diagnosis = generateEscalationDiagnosis(issues, reviewCycles, lastVerdict);
	eventBus.emit("orchestrator:story-escalated", {
		...payload,
		diagnosis
	});
	const runId = config.pipelineRunId;
	if (runId !== void 0) {
		try {
			createDecision(db, {
				pipeline_run_id: runId,
				phase: "implementation",
				category: ESCALATION_DIAGNOSIS,
				key: `${storyKey}:${runId}`,
				value: JSON.stringify(diagnosis),
				rationale: `Escalation diagnosis for ${storyKey}: ${diagnosis.recommendedAction} — ${diagnosis.rationale}`
			});
		} catch (err) {
			logger$20.warn({
				err,
				storyKey
			}, "Failed to persist escalation diagnosis (best-effort)");
		}
	}
	writeStoryOutcomeBestEffort(storyKey, "escalated", reviewCycles, extractIssuePatterns(issues));
}
|
|
6264
|
+
/**
* Extract short pattern descriptions from an issue list for recurring pattern tracking.
*
* String issues are taken verbatim; object issues contribute their description
* only when severity is "blocker" or "major". Each pattern is capped at 100
* characters and at most 10 patterns are returned.
*/
function extractIssuePatterns(issues) {
	const collected = [];
	for (const entry of issues) {
		if (typeof entry === "string") {
			collected.push(entry.slice(0, 100));
			continue;
		}
		const { description, severity } = entry;
		if (description && (severity === "blocker" || severity === "major")) {
			collected.push(description.slice(0, 100));
		}
	}
	return collected.slice(0, 10);
}
|
|
5496
6276
|
function getStatus() {
|
|
5497
6277
|
const stories = {};
|
|
5498
6278
|
for (const [key, s] of _stories) stories[key] = { ...s };
|
|
@@ -5523,7 +6303,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5523
6303
|
token_usage_json: serialized
|
|
5524
6304
|
});
|
|
5525
6305
|
} catch (err) {
|
|
5526
|
-
logger$
|
|
6306
|
+
logger$20.warn("Failed to persist orchestrator state", { err });
|
|
5527
6307
|
}
|
|
5528
6308
|
}
|
|
5529
6309
|
function recordProgress() {
|
|
@@ -5553,7 +6333,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5553
6333
|
if (_stalledStories.has(key)) continue;
|
|
5554
6334
|
_stalledStories.add(key);
|
|
5555
6335
|
_storiesWithStall.add(key);
|
|
5556
|
-
logger$
|
|
6336
|
+
logger$20.warn({
|
|
5557
6337
|
storyKey: key,
|
|
5558
6338
|
phase: s.phase,
|
|
5559
6339
|
elapsedMs: elapsed
|
|
@@ -5590,7 +6370,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5590
6370
|
* exhausted retries the story is ESCALATED.
|
|
5591
6371
|
*/
|
|
5592
6372
|
async function processStory(storyKey) {
|
|
5593
|
-
logger$
|
|
6373
|
+
logger$20.info("Processing story", { storyKey });
|
|
5594
6374
|
await waitIfPaused();
|
|
5595
6375
|
if (_state !== "RUNNING") return;
|
|
5596
6376
|
startPhase(storyKey, "create-story");
|
|
@@ -5605,7 +6385,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5605
6385
|
const match = files.find((f) => f.startsWith(`${storyKey}-`) && f.endsWith(".md"));
|
|
5606
6386
|
if (match) {
|
|
5607
6387
|
storyFilePath = join$1(artifactsDir, match);
|
|
5608
|
-
logger$
|
|
6388
|
+
logger$20.info({
|
|
5609
6389
|
storyKey,
|
|
5610
6390
|
storyFilePath
|
|
5611
6391
|
}, "Found existing story file — skipping create-story");
|
|
@@ -5650,7 +6430,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5650
6430
|
completedAt: new Date().toISOString()
|
|
5651
6431
|
});
|
|
5652
6432
|
writeStoryMetricsBestEffort(storyKey, "failed", 0);
|
|
5653
|
-
|
|
6433
|
+
emitEscalation({
|
|
5654
6434
|
storyKey,
|
|
5655
6435
|
lastVerdict: "create-story-failed",
|
|
5656
6436
|
reviewCycles: 0,
|
|
@@ -5667,7 +6447,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5667
6447
|
completedAt: new Date().toISOString()
|
|
5668
6448
|
});
|
|
5669
6449
|
writeStoryMetricsBestEffort(storyKey, "failed", 0);
|
|
5670
|
-
|
|
6450
|
+
emitEscalation({
|
|
5671
6451
|
storyKey,
|
|
5672
6452
|
lastVerdict: "create-story-no-file",
|
|
5673
6453
|
reviewCycles: 0,
|
|
@@ -5686,7 +6466,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5686
6466
|
completedAt: new Date().toISOString()
|
|
5687
6467
|
});
|
|
5688
6468
|
writeStoryMetricsBestEffort(storyKey, "failed", 0);
|
|
5689
|
-
|
|
6469
|
+
emitEscalation({
|
|
5690
6470
|
storyKey,
|
|
5691
6471
|
lastVerdict: "create-story-exception",
|
|
5692
6472
|
reviewCycles: 0,
|
|
@@ -5697,6 +6477,39 @@ function createImplementationOrchestrator(deps) {
|
|
|
5697
6477
|
}
|
|
5698
6478
|
await waitIfPaused();
|
|
5699
6479
|
if (_state !== "RUNNING") return;
|
|
6480
|
+
startPhase(storyKey, "test-plan");
|
|
6481
|
+
updateStory(storyKey, { phase: "IN_TEST_PLANNING" });
|
|
6482
|
+
persistState();
|
|
6483
|
+
let testPlanPhaseResult = "failed";
|
|
6484
|
+
try {
|
|
6485
|
+
const testPlanResult = await runTestPlan({
|
|
6486
|
+
db,
|
|
6487
|
+
pack,
|
|
6488
|
+
contextCompiler,
|
|
6489
|
+
dispatcher,
|
|
6490
|
+
projectRoot
|
|
6491
|
+
}, {
|
|
6492
|
+
storyKey,
|
|
6493
|
+
storyFilePath: storyFilePath ?? "",
|
|
6494
|
+
pipelineRunId: config.pipelineRunId
|
|
6495
|
+
});
|
|
6496
|
+
testPlanPhaseResult = testPlanResult.result;
|
|
6497
|
+
if (testPlanResult.result === "success") logger$20.info({ storyKey }, "Test plan generated successfully");
|
|
6498
|
+
else logger$20.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
|
|
6499
|
+
} catch (err) {
|
|
6500
|
+
logger$20.warn({
|
|
6501
|
+
storyKey,
|
|
6502
|
+
err
|
|
6503
|
+
}, "Test planning failed — proceeding to dev-story without test plan");
|
|
6504
|
+
}
|
|
6505
|
+
endPhase(storyKey, "test-plan");
|
|
6506
|
+
eventBus.emit("orchestrator:story-phase-complete", {
|
|
6507
|
+
storyKey,
|
|
6508
|
+
phase: "IN_TEST_PLANNING",
|
|
6509
|
+
result: { result: testPlanPhaseResult }
|
|
6510
|
+
});
|
|
6511
|
+
await waitIfPaused();
|
|
6512
|
+
if (_state !== "RUNNING") return;
|
|
5700
6513
|
startPhase(storyKey, "dev-story");
|
|
5701
6514
|
updateStory(storyKey, { phase: "IN_DEV" });
|
|
5702
6515
|
persistState();
|
|
@@ -5707,7 +6520,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5707
6520
|
try {
|
|
5708
6521
|
storyContentForAnalysis = await readFile$1(storyFilePath ?? "", "utf-8");
|
|
5709
6522
|
} catch (err) {
|
|
5710
|
-
logger$
|
|
6523
|
+
logger$20.error({
|
|
5711
6524
|
storyKey,
|
|
5712
6525
|
storyFilePath,
|
|
5713
6526
|
error: err instanceof Error ? err.message : String(err)
|
|
@@ -5715,7 +6528,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5715
6528
|
}
|
|
5716
6529
|
const analysis = analyzeStoryComplexity(storyContentForAnalysis);
|
|
5717
6530
|
const batches = planTaskBatches(analysis);
|
|
5718
|
-
logger$
|
|
6531
|
+
logger$20.info({
|
|
5719
6532
|
storyKey,
|
|
5720
6533
|
estimatedScope: analysis.estimatedScope,
|
|
5721
6534
|
batchCount: batches.length,
|
|
@@ -5733,7 +6546,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5733
6546
|
if (_state !== "RUNNING") break;
|
|
5734
6547
|
const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
|
|
5735
6548
|
const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
|
|
5736
|
-
logger$
|
|
6549
|
+
logger$20.info({
|
|
5737
6550
|
storyKey,
|
|
5738
6551
|
batchIndex: batch.batchIndex,
|
|
5739
6552
|
taskCount: batch.taskIds.length
|
|
@@ -5757,7 +6570,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5757
6570
|
});
|
|
5758
6571
|
} catch (batchErr) {
|
|
5759
6572
|
const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
|
|
5760
|
-
logger$
|
|
6573
|
+
logger$20.warn({
|
|
5761
6574
|
storyKey,
|
|
5762
6575
|
batchIndex: batch.batchIndex,
|
|
5763
6576
|
error: errMsg
|
|
@@ -5777,7 +6590,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5777
6590
|
filesModified: batchFilesModified,
|
|
5778
6591
|
result: batchResult.result === "success" ? "success" : "failed"
|
|
5779
6592
|
};
|
|
5780
|
-
logger$
|
|
6593
|
+
logger$20.info(batchMetrics, "Batch dev-story metrics");
|
|
5781
6594
|
for (const f of batchFilesModified) allFilesModified.add(f);
|
|
5782
6595
|
if (batchFilesModified.length > 0) batchFileGroups.push({
|
|
5783
6596
|
batchIndex: batch.batchIndex,
|
|
@@ -5799,13 +6612,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
5799
6612
|
})
|
|
5800
6613
|
});
|
|
5801
6614
|
} catch (tokenErr) {
|
|
5802
|
-
logger$
|
|
6615
|
+
logger$20.warn({
|
|
5803
6616
|
storyKey,
|
|
5804
6617
|
batchIndex: batch.batchIndex,
|
|
5805
6618
|
err: tokenErr
|
|
5806
6619
|
}, "Failed to record batch token usage");
|
|
5807
6620
|
}
|
|
5808
|
-
if (batchResult.result === "failed") logger$
|
|
6621
|
+
if (batchResult.result === "failed") logger$20.warn({
|
|
5809
6622
|
storyKey,
|
|
5810
6623
|
batchIndex: batch.batchIndex,
|
|
5811
6624
|
error: batchResult.error
|
|
@@ -5838,7 +6651,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5838
6651
|
result: devResult
|
|
5839
6652
|
});
|
|
5840
6653
|
persistState();
|
|
5841
|
-
if (devResult.result === "failed") logger$
|
|
6654
|
+
if (devResult.result === "failed") logger$20.warn("Dev-story reported failure, proceeding to code review", {
|
|
5842
6655
|
storyKey,
|
|
5843
6656
|
error: devResult.error,
|
|
5844
6657
|
filesModified: devFilesModified.length
|
|
@@ -5853,7 +6666,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5853
6666
|
completedAt: new Date().toISOString()
|
|
5854
6667
|
});
|
|
5855
6668
|
writeStoryMetricsBestEffort(storyKey, "failed", 0);
|
|
5856
|
-
|
|
6669
|
+
emitEscalation({
|
|
5857
6670
|
storyKey,
|
|
5858
6671
|
lastVerdict: "dev-story-exception",
|
|
5859
6672
|
reviewCycles: 0,
|
|
@@ -5895,7 +6708,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5895
6708
|
"NEEDS_MAJOR_REWORK": 2
|
|
5896
6709
|
};
|
|
5897
6710
|
for (const group of batchFileGroups) {
|
|
5898
|
-
logger$
|
|
6711
|
+
logger$20.info({
|
|
5899
6712
|
storyKey,
|
|
5900
6713
|
batchIndex: group.batchIndex,
|
|
5901
6714
|
fileCount: group.files.length
|
|
@@ -5932,7 +6745,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5932
6745
|
rawOutput: lastRawOutput,
|
|
5933
6746
|
tokenUsage: aggregateTokens
|
|
5934
6747
|
};
|
|
5935
|
-
logger$
|
|
6748
|
+
logger$20.info({
|
|
5936
6749
|
storyKey,
|
|
5937
6750
|
batchCount: batchFileGroups.length,
|
|
5938
6751
|
verdict: worstVerdict,
|
|
@@ -5958,7 +6771,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5958
6771
|
const isPhantomReview = reviewResult.verdict !== "SHIP_IT" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
|
|
5959
6772
|
if (isPhantomReview && !timeoutRetried) {
|
|
5960
6773
|
timeoutRetried = true;
|
|
5961
|
-
logger$
|
|
6774
|
+
logger$20.warn({
|
|
5962
6775
|
storyKey,
|
|
5963
6776
|
reviewCycles,
|
|
5964
6777
|
error: reviewResult.error
|
|
@@ -5968,7 +6781,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
5968
6781
|
verdict = reviewResult.verdict;
|
|
5969
6782
|
issueList = reviewResult.issue_list ?? [];
|
|
5970
6783
|
if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
|
|
5971
|
-
logger$
|
|
6784
|
+
logger$20.info({
|
|
5972
6785
|
storyKey,
|
|
5973
6786
|
originalVerdict: verdict,
|
|
5974
6787
|
issuesBefore: previousIssueList.length,
|
|
@@ -6004,7 +6817,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6004
6817
|
if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
|
|
6005
6818
|
parts.push(`${fileCount} files`);
|
|
6006
6819
|
parts.push(`${totalTokensK} tokens`);
|
|
6007
|
-
logger$
|
|
6820
|
+
logger$20.info({
|
|
6008
6821
|
storyKey,
|
|
6009
6822
|
verdict,
|
|
6010
6823
|
agentVerdict: reviewResult.agentVerdict
|
|
@@ -6019,7 +6832,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6019
6832
|
completedAt: new Date().toISOString()
|
|
6020
6833
|
});
|
|
6021
6834
|
writeStoryMetricsBestEffort(storyKey, "failed", reviewCycles);
|
|
6022
|
-
|
|
6835
|
+
emitEscalation({
|
|
6023
6836
|
storyKey,
|
|
6024
6837
|
lastVerdict: "code-review-exception",
|
|
6025
6838
|
reviewCycles,
|
|
@@ -6035,11 +6848,44 @@ function createImplementationOrchestrator(deps) {
|
|
|
6035
6848
|
completedAt: new Date().toISOString()
|
|
6036
6849
|
});
|
|
6037
6850
|
writeStoryMetricsBestEffort(storyKey, "success", reviewCycles + 1);
|
|
6851
|
+
writeStoryOutcomeBestEffort(storyKey, "complete", reviewCycles + 1);
|
|
6038
6852
|
eventBus.emit("orchestrator:story-complete", {
|
|
6039
6853
|
storyKey,
|
|
6040
6854
|
reviewCycles
|
|
6041
6855
|
});
|
|
6042
6856
|
persistState();
|
|
6857
|
+
try {
|
|
6858
|
+
const expansionResult = await runTestExpansion({
|
|
6859
|
+
db,
|
|
6860
|
+
pack,
|
|
6861
|
+
contextCompiler,
|
|
6862
|
+
dispatcher,
|
|
6863
|
+
projectRoot
|
|
6864
|
+
}, {
|
|
6865
|
+
storyKey,
|
|
6866
|
+
storyFilePath: storyFilePath ?? "",
|
|
6867
|
+
pipelineRunId: config.pipelineRunId,
|
|
6868
|
+
filesModified: devFilesModified,
|
|
6869
|
+
workingDirectory: projectRoot
|
|
6870
|
+
});
|
|
6871
|
+
logger$20.debug({
|
|
6872
|
+
storyKey,
|
|
6873
|
+
expansion_priority: expansionResult.expansion_priority,
|
|
6874
|
+
coverage_gaps: expansionResult.coverage_gaps.length
|
|
6875
|
+
}, "Test expansion analysis complete");
|
|
6876
|
+
createDecision(db, {
|
|
6877
|
+
pipeline_run_id: config.pipelineRunId ?? "unknown",
|
|
6878
|
+
phase: "implementation",
|
|
6879
|
+
category: TEST_EXPANSION_FINDING,
|
|
6880
|
+
key: `${storyKey}:${config.pipelineRunId ?? "unknown"}`,
|
|
6881
|
+
value: JSON.stringify(expansionResult)
|
|
6882
|
+
});
|
|
6883
|
+
} catch (expansionErr) {
|
|
6884
|
+
logger$20.warn({
|
|
6885
|
+
storyKey,
|
|
6886
|
+
error: expansionErr instanceof Error ? expansionErr.message : String(expansionErr)
|
|
6887
|
+
}, "Test expansion failed — story verdict unchanged");
|
|
6888
|
+
}
|
|
6043
6889
|
keepReviewing = false;
|
|
6044
6890
|
return;
|
|
6045
6891
|
}
|
|
@@ -6053,7 +6899,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6053
6899
|
completedAt: new Date().toISOString()
|
|
6054
6900
|
});
|
|
6055
6901
|
writeStoryMetricsBestEffort(storyKey, "escalated", finalReviewCycles);
|
|
6056
|
-
|
|
6902
|
+
emitEscalation({
|
|
6057
6903
|
storyKey,
|
|
6058
6904
|
lastVerdict: verdict,
|
|
6059
6905
|
reviewCycles: finalReviewCycles,
|
|
@@ -6062,7 +6908,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6062
6908
|
persistState();
|
|
6063
6909
|
return;
|
|
6064
6910
|
}
|
|
6065
|
-
logger$
|
|
6911
|
+
logger$20.info({
|
|
6066
6912
|
storyKey,
|
|
6067
6913
|
reviewCycles: finalReviewCycles,
|
|
6068
6914
|
issueCount: issueList.length
|
|
@@ -6112,7 +6958,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6112
6958
|
fixPrompt = assembled.prompt;
|
|
6113
6959
|
} catch {
|
|
6114
6960
|
fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
|
|
6115
|
-
logger$
|
|
6961
|
+
logger$20.warn("Failed to assemble auto-approve fix prompt, using fallback", { storyKey });
|
|
6116
6962
|
}
|
|
6117
6963
|
const handle = dispatcher.dispatch({
|
|
6118
6964
|
prompt: fixPrompt,
|
|
@@ -6129,9 +6975,9 @@ function createImplementationOrchestrator(deps) {
|
|
|
6129
6975
|
output: fixResult.tokenEstimate.output
|
|
6130
6976
|
} : void 0 }
|
|
6131
6977
|
});
|
|
6132
|
-
if (fixResult.status === "timeout") logger$
|
|
6978
|
+
if (fixResult.status === "timeout") logger$20.warn("Auto-approve fix timed out — approving anyway (issues were minor)", { storyKey });
|
|
6133
6979
|
} catch (err) {
|
|
6134
|
-
logger$
|
|
6980
|
+
logger$20.warn("Auto-approve fix dispatch failed — approving anyway (issues were minor)", {
|
|
6135
6981
|
storyKey,
|
|
6136
6982
|
err
|
|
6137
6983
|
});
|
|
@@ -6143,6 +6989,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6143
6989
|
completedAt: new Date().toISOString()
|
|
6144
6990
|
});
|
|
6145
6991
|
writeStoryMetricsBestEffort(storyKey, "success", finalReviewCycles);
|
|
6992
|
+
writeStoryOutcomeBestEffort(storyKey, "complete", finalReviewCycles);
|
|
6146
6993
|
eventBus.emit("orchestrator:story-complete", {
|
|
6147
6994
|
storyKey,
|
|
6148
6995
|
reviewCycles: finalReviewCycles
|
|
@@ -6203,7 +7050,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6203
7050
|
fixPrompt = assembled.prompt;
|
|
6204
7051
|
} catch {
|
|
6205
7052
|
fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
|
|
6206
|
-
logger$
|
|
7053
|
+
logger$20.warn("Failed to assemble fix prompt, using fallback", {
|
|
6207
7054
|
storyKey,
|
|
6208
7055
|
taskType
|
|
6209
7056
|
});
|
|
@@ -6226,7 +7073,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6226
7073
|
} : void 0 }
|
|
6227
7074
|
});
|
|
6228
7075
|
if (fixResult.status === "timeout") {
|
|
6229
|
-
logger$
|
|
7076
|
+
logger$20.warn("Fix dispatch timed out — escalating story", {
|
|
6230
7077
|
storyKey,
|
|
6231
7078
|
taskType
|
|
6232
7079
|
});
|
|
@@ -6237,7 +7084,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6237
7084
|
completedAt: new Date().toISOString()
|
|
6238
7085
|
});
|
|
6239
7086
|
writeStoryMetricsBestEffort(storyKey, "escalated", reviewCycles + 1);
|
|
6240
|
-
|
|
7087
|
+
emitEscalation({
|
|
6241
7088
|
storyKey,
|
|
6242
7089
|
lastVerdict: verdict,
|
|
6243
7090
|
reviewCycles: reviewCycles + 1,
|
|
@@ -6246,13 +7093,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
6246
7093
|
persistState();
|
|
6247
7094
|
return;
|
|
6248
7095
|
}
|
|
6249
|
-
if (fixResult.status === "failed") logger$
|
|
7096
|
+
if (fixResult.status === "failed") logger$20.warn("Fix dispatch failed", {
|
|
6250
7097
|
storyKey,
|
|
6251
7098
|
taskType,
|
|
6252
7099
|
exitCode: fixResult.exitCode
|
|
6253
7100
|
});
|
|
6254
7101
|
} catch (err) {
|
|
6255
|
-
logger$
|
|
7102
|
+
logger$20.warn("Fix dispatch failed, continuing to next review", {
|
|
6256
7103
|
storyKey,
|
|
6257
7104
|
taskType,
|
|
6258
7105
|
err
|
|
@@ -6306,11 +7153,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
6306
7153
|
}
|
|
6307
7154
|
async function run(storyKeys) {
|
|
6308
7155
|
if (_state === "RUNNING" || _state === "PAUSED") {
|
|
6309
|
-
logger$
|
|
7156
|
+
logger$20.warn("run() called while orchestrator is already running or paused — ignoring", { state: _state });
|
|
6310
7157
|
return getStatus();
|
|
6311
7158
|
}
|
|
6312
7159
|
if (_state === "COMPLETE") {
|
|
6313
|
-
logger$
|
|
7160
|
+
logger$20.warn("run() called on a COMPLETE orchestrator — ignoring", { state: _state });
|
|
6314
7161
|
return getStatus();
|
|
6315
7162
|
}
|
|
6316
7163
|
_state = "RUNNING";
|
|
@@ -6328,13 +7175,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
6328
7175
|
if (config.enableHeartbeat) startHeartbeat();
|
|
6329
7176
|
if (projectRoot !== void 0) {
|
|
6330
7177
|
const seedResult = seedMethodologyContext(db, projectRoot);
|
|
6331
|
-
if (seedResult.decisionsCreated > 0) logger$
|
|
7178
|
+
if (seedResult.decisionsCreated > 0) logger$20.info({
|
|
6332
7179
|
decisionsCreated: seedResult.decisionsCreated,
|
|
6333
7180
|
skippedCategories: seedResult.skippedCategories
|
|
6334
7181
|
}, "Methodology context seeded from planning artifacts");
|
|
6335
7182
|
}
|
|
6336
7183
|
const groups = detectConflictGroups(storyKeys);
|
|
6337
|
-
logger$
|
|
7184
|
+
logger$20.info("Orchestrator starting", {
|
|
6338
7185
|
storyCount: storyKeys.length,
|
|
6339
7186
|
groupCount: groups.length,
|
|
6340
7187
|
maxConcurrency: config.maxConcurrency
|
|
@@ -6346,7 +7193,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6346
7193
|
_state = "FAILED";
|
|
6347
7194
|
_completedAt = new Date().toISOString();
|
|
6348
7195
|
persistState();
|
|
6349
|
-
logger$
|
|
7196
|
+
logger$20.error("Orchestrator failed with unhandled error", { err });
|
|
6350
7197
|
return getStatus();
|
|
6351
7198
|
}
|
|
6352
7199
|
stopHeartbeat();
|
|
@@ -6373,7 +7220,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6373
7220
|
_pauseGate = createPauseGate();
|
|
6374
7221
|
_state = "PAUSED";
|
|
6375
7222
|
eventBus.emit("orchestrator:paused", {});
|
|
6376
|
-
logger$
|
|
7223
|
+
logger$20.info("Orchestrator paused");
|
|
6377
7224
|
}
|
|
6378
7225
|
function resume() {
|
|
6379
7226
|
if (_state !== "PAUSED") return;
|
|
@@ -6384,7 +7231,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
6384
7231
|
}
|
|
6385
7232
|
_state = "RUNNING";
|
|
6386
7233
|
eventBus.emit("orchestrator:resumed", {});
|
|
6387
|
-
logger$
|
|
7234
|
+
logger$20.info("Orchestrator resumed");
|
|
6388
7235
|
}
|
|
6389
7236
|
return {
|
|
6390
7237
|
run,
|
|
@@ -8215,6 +9062,10 @@ const AMENDMENT_CONTEXT_HEADER$2 = "\n\n--- AMENDMENT CONTEXT (Parent Run Decisi
|
|
|
8215
9062
|
const AMENDMENT_CONTEXT_FOOTER$2 = "\n--- END AMENDMENT CONTEXT ---\n";
|
|
8216
9063
|
/** Marker appended when amendment context is truncated to fit token budget */
|
|
8217
9064
|
const TRUNCATED_MARKER$2 = "\n[TRUNCATED]";
|
|
9065
|
+
/** Prior run findings framing block prefix */
|
|
9066
|
+
const PRIOR_FINDINGS_HEADER = "\n\n--- PRIOR RUN FINDINGS ---\n";
|
|
9067
|
+
/** Prior run findings framing block suffix */
|
|
9068
|
+
const PRIOR_FINDINGS_FOOTER = "\n--- END PRIOR RUN FINDINGS ---\n";
|
|
8218
9069
|
/** Concept placeholder in the prompt template */
|
|
8219
9070
|
const CONCEPT_PLACEHOLDER = "{{concept}}";
|
|
8220
9071
|
/** Product brief fields to persist as decisions */
|
|
@@ -8255,6 +9106,9 @@ function buildAnalysisSteps() {
|
|
|
8255
9106
|
context: [{
|
|
8256
9107
|
placeholder: "concept",
|
|
8257
9108
|
source: "param:concept"
|
|
9109
|
+
}, {
|
|
9110
|
+
placeholder: "prior_findings",
|
|
9111
|
+
source: "param:prior_findings"
|
|
8258
9112
|
}],
|
|
8259
9113
|
persist: [{
|
|
8260
9114
|
field: "problem_statement",
|
|
@@ -8318,7 +9172,14 @@ async function runAnalysisMultiStep(deps, params) {
|
|
|
8318
9172
|
};
|
|
8319
9173
|
try {
|
|
8320
9174
|
const steps = buildAnalysisSteps();
|
|
8321
|
-
|
|
9175
|
+
let priorFindings = "";
|
|
9176
|
+
try {
|
|
9177
|
+
priorFindings = getProjectFindings(deps.db);
|
|
9178
|
+
} catch {}
|
|
9179
|
+
const result = await runSteps(steps, deps, params.runId, "analysis", {
|
|
9180
|
+
concept: params.concept,
|
|
9181
|
+
prior_findings: priorFindings
|
|
9182
|
+
});
|
|
8322
9183
|
if (!result.success) return {
|
|
8323
9184
|
result: "failed",
|
|
8324
9185
|
error: result.error ?? "multi_step_failed",
|
|
@@ -8399,6 +9260,18 @@ async function runAnalysisPhase(deps, params) {
|
|
|
8399
9260
|
let effectiveConcept = concept;
|
|
8400
9261
|
if (concept.length > MAX_CONCEPT_CHARS) effectiveConcept = concept.slice(0, MAX_CONCEPT_CHARS) + "...";
|
|
8401
9262
|
let prompt = template.replace(CONCEPT_PLACEHOLDER, effectiveConcept);
|
|
9263
|
+
try {
|
|
9264
|
+
const priorFindings = getProjectFindings(db);
|
|
9265
|
+
if (priorFindings !== "") {
|
|
9266
|
+
const maxPromptChars = MAX_PROMPT_TOKENS$1 * 4;
|
|
9267
|
+
const framingLen = PRIOR_FINDINGS_HEADER.length + PRIOR_FINDINGS_FOOTER.length;
|
|
9268
|
+
const availableForFindings = maxPromptChars - prompt.length - framingLen - TRUNCATED_MARKER$2.length;
|
|
9269
|
+
if (availableForFindings > 0) {
|
|
9270
|
+
const findingsToInject = priorFindings.length > availableForFindings ? priorFindings.slice(0, availableForFindings) + TRUNCATED_MARKER$2 : priorFindings;
|
|
9271
|
+
prompt += PRIOR_FINDINGS_HEADER + findingsToInject + PRIOR_FINDINGS_FOOTER;
|
|
9272
|
+
}
|
|
9273
|
+
}
|
|
9274
|
+
} catch {}
|
|
8402
9275
|
if (amendmentContext !== void 0 && amendmentContext !== "") {
|
|
8403
9276
|
const maxPromptChars = MAX_PROMPT_TOKENS$1 * 4;
|
|
8404
9277
|
const basePromptLen = prompt.length;
|
|
@@ -10653,7 +11526,7 @@ function mapInternalPhaseToEventPhase(internalPhase) {
|
|
|
10653
11526
|
}
|
|
10654
11527
|
}
|
|
10655
11528
|
async function runRunAction(options) {
|
|
10656
|
-
const { pack: packName, from: startPhase, stopAfter, concept: conceptArg, conceptFile, stories: storiesArg, concurrency, outputFormat, projectRoot, events: eventsFlag, verbose: verboseFlag, tui: tuiFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag } = options;
|
|
11529
|
+
const { pack: packName, from: startPhase, stopAfter, concept: conceptArg, conceptFile, stories: storiesArg, concurrency, outputFormat, projectRoot, events: eventsFlag, verbose: verboseFlag, tui: tuiFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, registry: injectedRegistry } = options;
|
|
10657
11530
|
if (startPhase !== void 0 && !VALID_PHASES.includes(startPhase)) {
|
|
10658
11531
|
const errorMsg = `Invalid phase '${startPhase}'. Valid phases: ${VALID_PHASES.join(", ")}`;
|
|
10659
11532
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
|
|
@@ -10711,7 +11584,8 @@ async function runRunAction(options) {
|
|
|
10711
11584
|
...eventsFlag === true ? { events: true } : {},
|
|
10712
11585
|
...skipUx === true ? { skipUx: true } : {},
|
|
10713
11586
|
...researchFlag === true ? { research: true } : {},
|
|
10714
|
-
...skipResearchFlag === true ? { skipResearch: true } : {}
|
|
11587
|
+
...skipResearchFlag === true ? { skipResearch: true } : {},
|
|
11588
|
+
...injectedRegistry !== void 0 ? { registry: injectedRegistry } : {}
|
|
10715
11589
|
});
|
|
10716
11590
|
let storyKeys = [];
|
|
10717
11591
|
if (storiesArg !== void 0 && storiesArg !== "") {
|
|
@@ -10800,8 +11674,8 @@ async function runRunAction(options) {
|
|
|
10800
11674
|
});
|
|
10801
11675
|
const eventBus = createEventBus();
|
|
10802
11676
|
const contextCompiler = createContextCompiler({ db });
|
|
10803
|
-
const adapterRegistry = new AdapterRegistry();
|
|
10804
|
-
await adapterRegistry.discoverAndRegister();
|
|
11677
|
+
const adapterRegistry = injectedRegistry ?? new AdapterRegistry();
|
|
11678
|
+
if (injectedRegistry === void 0) await adapterRegistry.discoverAndRegister();
|
|
10805
11679
|
const dispatcher = createDispatcher({
|
|
10806
11680
|
eventBus,
|
|
10807
11681
|
adapterRegistry
|
|
@@ -11022,7 +11896,8 @@ async function runRunAction(options) {
|
|
|
11022
11896
|
key: payload.storyKey,
|
|
11023
11897
|
reason: payload.lastVerdict ?? "escalated",
|
|
11024
11898
|
cycles: payload.reviewCycles ?? 0,
|
|
11025
|
-
issues
|
|
11899
|
+
issues,
|
|
11900
|
+
...payload.diagnosis !== void 0 ? { diagnosis: payload.diagnosis } : {}
|
|
11026
11901
|
});
|
|
11027
11902
|
});
|
|
11028
11903
|
eventBus.on("orchestrator:story-warn", (payload) => {
|
|
@@ -11163,7 +12038,7 @@ async function runRunAction(options) {
|
|
|
11163
12038
|
}
|
|
11164
12039
|
}
|
|
11165
12040
|
async function runFullPipeline(options) {
|
|
11166
|
-
const { packName, packPath, dbDir, dbPath, startPhase, stopAfter, concept, concurrency, outputFormat, projectRoot, events: eventsFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag } = options;
|
|
12041
|
+
const { packName, packPath, dbDir, dbPath, startPhase, stopAfter, concept, concurrency, outputFormat, projectRoot, events: eventsFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, registry: injectedRegistry } = options;
|
|
11167
12042
|
if (!existsSync(dbDir)) mkdirSync(dbDir, { recursive: true });
|
|
11168
12043
|
const dbWrapper = new DatabaseWrapper(dbPath);
|
|
11169
12044
|
try {
|
|
@@ -11191,8 +12066,8 @@ async function runFullPipeline(options) {
|
|
|
11191
12066
|
}
|
|
11192
12067
|
const eventBus = createEventBus();
|
|
11193
12068
|
const contextCompiler = createContextCompiler({ db });
|
|
11194
|
-
const adapterRegistry = new AdapterRegistry();
|
|
11195
|
-
await adapterRegistry.discoverAndRegister();
|
|
12069
|
+
const adapterRegistry = injectedRegistry ?? new AdapterRegistry();
|
|
12070
|
+
if (injectedRegistry === void 0) await adapterRegistry.discoverAndRegister();
|
|
11196
12071
|
const dispatcher = createDispatcher({
|
|
11197
12072
|
eventBus,
|
|
11198
12073
|
adapterRegistry
|
|
@@ -11467,7 +12342,7 @@ async function runFullPipeline(options) {
|
|
|
11467
12342
|
} catch {}
|
|
11468
12343
|
}
|
|
11469
12344
|
}
|
|
11470
|
-
function registerRunCommand(program, _version = "0.0.0", projectRoot = process.cwd()) {
|
|
12345
|
+
function registerRunCommand(program, _version = "0.0.0", projectRoot = process.cwd(), registry) {
|
|
11471
12346
|
program.command("run").description("Run the autonomous pipeline (use --from to start from a specific phase)").option("--pack <name>", "Methodology pack name", "bmad").option("--from <phase>", "Start from this phase: analysis, planning, solutioning, implementation").option("--stop-after <phase>", "Stop pipeline after this phase completes").option("--concept <text>", "Inline concept text (required when --from analysis)").option("--concept-file <path>", "Path to a file containing the concept text").option("--stories <keys>", "Comma-separated story keys (e.g., 10-1,10-2)").option("--concurrency <n>", "Maximum parallel conflict groups", (v) => parseInt(v, 10), 3).option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").option("--events", "Emit structured NDJSON events on stdout for programmatic consumption").option("--verbose", "Show detailed pino log output").option("--help-agent", "Print a machine-optimized prompt fragment for AI agents and exit").option("--tui", "Show TUI dashboard").option("--skip-ux", "Skip the UX design phase even if enabled in the pack manifest").option("--research", "Enable the research phase even if not set in the pack manifest").option("--skip-research", "Skip the research phase even if enabled in the pack manifest").action(async (opts) => {
|
|
11472
12347
|
if (opts.helpAgent) {
|
|
11473
12348
|
process.exitCode = await runHelpAgent();
|
|
@@ -11500,7 +12375,8 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
|
|
|
11500
12375
|
tui: opts.tui,
|
|
11501
12376
|
skipUx: opts.skipUx,
|
|
11502
12377
|
research: opts.research,
|
|
11503
|
-
skipResearch: opts.skipResearch
|
|
12378
|
+
skipResearch: opts.skipResearch,
|
|
12379
|
+
registry
|
|
11504
12380
|
});
|
|
11505
12381
|
process.exitCode = exitCode;
|
|
11506
12382
|
});
|
|
@@ -11508,4 +12384,4 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
|
|
|
11508
12384
|
|
|
11509
12385
|
//#endregion
|
|
11510
12386
|
export { DatabaseWrapper, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, buildPipelineStatusOutput, createContextCompiler, createDispatcher, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, runAnalysisPhase, runMigrations, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
|
|
11511
|
-
//# sourceMappingURL=run-
|
|
12387
|
+
//# sourceMappingURL=run-BLIgARum.js.map
|