substrate-ai 0.19.52 → 0.19.54
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli/index.js
CHANGED
|
@@ -4,7 +4,7 @@ import { createLogger } from "../logger-KeHncl-f.js";
|
|
|
4
4
|
import { createEventBus } from "../helpers-CElYrONe.js";
|
|
5
5
|
import { AdapterRegistry, BudgetConfigSchema, CURRENT_CONFIG_FORMAT_VERSION, CURRENT_TASK_GRAPH_VERSION, ConfigError, CostTrackerConfigSchema, DEFAULT_CONFIG, DoltClient, DoltNotInstalled, GlobalSettingsSchema, IngestionServer, MonitorDatabaseImpl, OPERATIONAL_FINDING, PartialGlobalSettingsSchema, PartialProviderConfigSchema, ProvidersSchema, RoutingRecommender, STORY_METRICS, TelemetryConfigSchema, addTokenUsage, aggregateTokenUsageForRun, checkDoltInstalled, compareRunMetrics, createAmendmentRun, createConfigSystem, createDecision, createDoltClient, createPipelineRun, getActiveDecisions, getAllCostEntriesFiltered, getBaselineRunMetrics, getDecisionsByCategory, getDecisionsByPhaseForRun, getLatestCompletedRun, getLatestRun, getPipelineRunById, getPlanningCostTotal, getRetryableEscalations, getRunMetrics, getRunningPipelineRuns, getSessionCostSummary, getSessionCostSummaryFiltered, getStoryMetricsForRun, getTokenUsageSummary, incrementRunRestarts, initSchema, initializeDolt, listRunMetrics, loadParentRunDecisions, supersedeDecision, tagRunAsBaseline, updatePipelineRun } from "../dist-sNh9XQ6V.js";
|
|
6
6
|
import "../adapter-registry-DXLMTmfD.js";
|
|
7
|
-
import { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, registerExportCommand, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runSolutioningPhase, validateStopAfterFromConflict } from "../run-
|
|
7
|
+
import { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, registerExportCommand, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runSolutioningPhase, validateStopAfterFromConflict } from "../run-dCGeboq9.js";
|
|
8
8
|
import "../errors-RupuC-ES.js";
|
|
9
9
|
import "../routing-CcBOCuC9.js";
|
|
10
10
|
import "../decisions-C0pz9Clx.js";
|
|
@@ -2284,6 +2284,164 @@ async function scaffoldClaudeCommands(projectRoot, outputFormat) {
|
|
|
2284
2284
|
logger$15.warn({ err }, "scaffoldClaudeCommands failed; init continues");
|
|
2285
2285
|
}
|
|
2286
2286
|
}
|
|
2287
|
+
/**
 * Mirror every `.md` file from a source commands directory into a flat
 * prompts directory. Codex CLI loads `<root>/.codex/prompts/*.md` as slash
 * commands, so the already-generated Claude command files map 1:1.
 *
 * On re-runs, any `.md` file in the target whose name matches one of
 * `ownershipPrefixes` but has no counterpart in the source is deleted, so
 * stale generated prompts don't accumulate. `.md` files without a matching
 * prefix are never touched (protects user-authored content, e.g. in
 * `~/.codex/`).
 *
 * @param {string} commandsDir source directory holding command `.md` files
 * @param {string} promptsDir target prompts directory (created if absent)
 * @param {string[]} ownershipPrefixes file-name prefixes this tool owns
 * @param {string} namePrefix optional prefix prepended to copied file names
 * @returns {number} the number of prompt files written
 */
function syncCommandsAsPrompts(commandsDir, promptsDir, ownershipPrefixes, namePrefix) {
	if (!existsSync$1(commandsDir)) return 0;
	mkdirSync$1(promptsDir, { recursive: true });
	// Apply the namespace prefix unless the name already carries it.
	const toDestName = (name) => namePrefix && !name.startsWith(namePrefix) ? `${namePrefix}${name}` : name;
	const markdownSources = readdirSync$1(commandsDir, { withFileTypes: true }).filter((d) => d.isFile() && d.name.endsWith(".md"));
	const expected = new Set(markdownSources.map((d) => toDestName(d.name)));
	// Best-effort prune of stale owned prompts; a failure here must not block copying.
	try {
		for (const existing of readdirSync$1(promptsDir, { withFileTypes: true })) {
			if (!existing.isFile() || !existing.name.endsWith(".md")) continue;
			if (!ownershipPrefixes.some((prefix) => existing.name.startsWith(prefix))) continue;
			if (expected.has(existing.name)) continue;
			unlinkSync$1(join(promptsDir, existing.name));
		}
	} catch (err) {
		logger$15.debug({
			err,
			promptsDir
		}, "Failed to prune stale prompts");
	}
	let written = 0;
	for (const source of markdownSources) {
		cpSync(join(commandsDir, source.name), join(promptsDir, toDestName(source.name)));
		written++;
	}
	return written;
}
|
|
2326
|
+
/**
 * Sync skill directories from a source skills root into a target skills
 * root, optionally namespaced under `namePrefix`.
 *
 * Every direct child directory of `srcSkillsDir` is treated as a skill
 * bundle (no SKILL.md check — the source is always `.claude/skills/`, which
 * has already been sanitized by `prepareSkillsDir`). On re-runs, any skill
 * directory in the target whose name starts with one of `ownershipPrefixes`
 * but is no longer present in the source is removed.
 *
 * @param {string} srcSkillsDir source skills root
 * @param {string} destSkillsDir target skills root (created if absent)
 * @param {string[]} ownershipPrefixes directory-name prefixes this tool owns
 * @param {string} namePrefix optional prefix prepended to copied dir names
 * @returns {number} the number of skill directories copied
 */
function syncSkillsToTarget(srcSkillsDir, destSkillsDir, ownershipPrefixes, namePrefix) {
	if (!existsSync$1(srcSkillsDir)) return 0;
	mkdirSync$1(destSkillsDir, { recursive: true });
	// Apply the namespace prefix unless the name already carries it.
	const toDestName = (name) => namePrefix && !name.startsWith(namePrefix) ? `${namePrefix}${name}` : name;
	const skillDirs = readdirSync$1(srcSkillsDir, { withFileTypes: true }).filter((d) => d.isDirectory());
	const expected = new Set(skillDirs.map((d) => toDestName(d.name)));
	// Best-effort prune of stale owned skill bundles; failure must not block copying.
	try {
		for (const existing of readdirSync$1(destSkillsDir, { withFileTypes: true })) {
			if (!existing.isDirectory()) continue;
			if (!ownershipPrefixes.some((prefix) => existing.name.startsWith(prefix))) continue;
			if (expected.has(existing.name)) continue;
			rmSync$1(join(destSkillsDir, existing.name), {
				recursive: true,
				force: true
			});
		}
	} catch (err) {
		logger$15.debug({
			err,
			destSkillsDir
		}, "Failed to prune stale skills");
	}
	let copied = 0;
	for (const dir of skillDirs) {
		const target = join(destSkillsDir, toDestName(dir.name));
		// Replace wholesale so files removed from the source bundle don't linger.
		rmSync$1(target, {
			recursive: true,
			force: true
		});
		cpSync(join(srcSkillsDir, dir.name), target, { recursive: true });
		copied++;
	}
	return copied;
}
|
|
2373
|
+
const PROJECT_OWNERSHIP_PREFIXES = ["bmad-", "substrate-"];
|
|
2374
|
+
/**
 * Scaffold project-scoped Codex content from the already-generated
 * `.claude/commands/` and `.claude/skills/`. Must run AFTER
 * `scaffoldClaudeCommands`.
 *
 * Writes:
 * - <projectRoot>/.codex/prompts/*.md (slash commands)
 * - <projectRoot>/.codex/skills/<skill>/ (skill bundles)
 *
 * Stale substrate-owned entries (bmad-*, substrate-*) from previous runs are
 * pruned before new content is written. Non-owned files (e.g., `ship.md`
 * from a plugin) are left alone. Any failure is reported as a warning and
 * never aborts init.
 */
function scaffoldCodexProject(projectRoot, outputFormat) {
	const commandsSource = join(projectRoot, ".claude", "commands");
	const skillsSource = join(projectRoot, ".claude", "skills");
	const codexDir = join(projectRoot, ".codex");
	try {
		const promptCount = syncCommandsAsPrompts(commandsSource, join(codexDir, "prompts"), PROJECT_OWNERSHIP_PREFIXES, "");
		const skillCount = syncSkillsToTarget(skillsSource, join(codexDir, "skills"), PROJECT_OWNERSHIP_PREFIXES, "");
		const total = promptCount + skillCount;
		if (outputFormat !== "json" && total > 0) process.stdout.write(`Generated ${String(total)} Codex artifacts (${String(promptCount)} prompts, ${String(skillCount)} skills)\n`);
		if (total > 0) logger$15.info({
			promptCount,
			skillCount,
			codexDir
		}, "Generated .codex/");
		else logger$15.debug({ codexDir }, "No Codex artifacts generated; source Claude content not found");
	} catch (err) {
		const msg = err instanceof Error ? err.message : String(err);
		if (outputFormat !== "json") process.stderr.write(`Warning: .codex/ generation failed: ${msg}\n`);
		logger$15.warn({ err }, "scaffoldCodexProject failed; init continues");
	}
}
|
|
2410
|
+
/**
 * Install Codex content user-wide at `~/.codex/`, namespaced under
 * `substrate-` to avoid colliding with user-installed content.
 *
 * Writes:
 * - ~/.codex/prompts/substrate-<slug>.md (slash commands)
 * - ~/.codex/skills/substrate-<name>/ (skill bundles)
 *
 * Stale `substrate-*` entries from previous runs are pruned before new
 * content is written. Files/directories without the `substrate-` prefix are
 * never touched. Any failure is reported as a warning and never aborts init.
 */
function scaffoldCodexUser(projectRoot, homeDir, outputFormat) {
	const commandsSource = join(projectRoot, ".claude", "commands");
	const skillsSource = join(projectRoot, ".claude", "skills");
	const userCodexDir = join(homeDir, ".codex");
	try {
		const promptCount = syncCommandsAsPrompts(commandsSource, join(userCodexDir, "prompts"), ["substrate-"], "substrate-");
		const skillCount = syncSkillsToTarget(skillsSource, join(userCodexDir, "skills"), ["substrate-"], "substrate-");
		const total = promptCount + skillCount;
		if (outputFormat !== "json" && total > 0) process.stdout.write(`Installed ${String(total)} Codex artifacts to ${userCodexDir} (${String(promptCount)} prompts, ${String(skillCount)} skills)\n`);
		if (total > 0) logger$15.info({
			promptCount,
			skillCount,
			userCodexDir
		}, "Installed user-scope Codex content");
		else logger$15.debug({ userCodexDir }, "No user-scope Codex content installed; source Claude content not found");
	} catch (err) {
		const msg = err instanceof Error ? err.message : String(err);
		if (outputFormat !== "json") process.stderr.write(`Warning: user-scope Codex install failed: ${msg}\n`);
		logger$15.warn({ err }, "scaffoldCodexUser failed; init continues");
	}
}
|
|
2287
2445
|
const PROVIDER_DEFAULTS = DEFAULT_CONFIG.providers;
|
|
2288
2446
|
const ADAPTER_TO_PROVIDER = {
|
|
2289
2447
|
"claude-code": "claude",
|
|
@@ -2512,6 +2670,12 @@ async function runInitAction(options) {
|
|
|
2512
2670
|
await scaffoldStatuslineScript(projectRoot);
|
|
2513
2671
|
await scaffoldClaudeSettings(projectRoot);
|
|
2514
2672
|
await scaffoldClaudeCommands(projectRoot, outputFormat);
|
|
2673
|
+
scaffoldCodexProject(projectRoot, outputFormat);
|
|
2674
|
+
if (options.installUserScope) {
|
|
2675
|
+
const homeDir = process.env["HOME"] ?? process.env["USERPROFILE"];
|
|
2676
|
+
if (homeDir) scaffoldCodexUser(projectRoot, homeDir, outputFormat);
|
|
2677
|
+
else if (outputFormat !== "json") process.stderr.write("Warning: --install-user-scope requested but HOME/USERPROFILE is not set\n");
|
|
2678
|
+
}
|
|
2515
2679
|
const gitignorePath = join(projectRoot, ".gitignore");
|
|
2516
2680
|
const runtimeEntries = [
|
|
2517
2681
|
".substrate/orchestrator.pid",
|
|
@@ -2519,7 +2683,9 @@ async function runInitAction(options) {
|
|
|
2519
2683
|
".substrate/scenarios/",
|
|
2520
2684
|
".substrate/state/",
|
|
2521
2685
|
".substrate/substrate.db",
|
|
2522
|
-
".substrate/substrate.db-journal"
|
|
2686
|
+
".substrate/substrate.db-journal",
|
|
2687
|
+
".codex/prompts/",
|
|
2688
|
+
".codex/skills/"
|
|
2523
2689
|
];
|
|
2524
2690
|
try {
|
|
2525
2691
|
const existing = existsSync$1(gitignorePath) ? readFileSync$1(gitignorePath, "utf-8") : "";
|
|
@@ -2583,6 +2749,8 @@ async function runInitAction(options) {
|
|
|
2583
2749
|
process.stdout.write(` AGENTS.md pipeline instructions for Codex CLI\n`);
|
|
2584
2750
|
process.stdout.write(` GEMINI.md pipeline instructions for Gemini CLI\n`);
|
|
2585
2751
|
process.stdout.write(` .claude/commands/ /substrate-run, /substrate-supervisor, /substrate-metrics\n`);
|
|
2752
|
+
process.stdout.write(` .codex/prompts/ Codex slash commands (mirror of .claude/commands/)\n`);
|
|
2753
|
+
process.stdout.write(` .codex/skills/ Codex skill bundles (mirror of .claude/skills/)\n`);
|
|
2586
2754
|
process.stdout.write(` .substrate/ config, database, routing policy\n`);
|
|
2587
2755
|
if (doltInitialized) process.stdout.write(`✓ Dolt state store initialized at .substrate/state/\n`);
|
|
2588
2756
|
else if (doltMode !== "skip") process.stdout.write(`ℹ Dolt not detected — install Dolt for versioned state, \`substrate diff\`, and observability persistence. See: https://docs.dolthub.com/introduction/installation\n`);
|
|
@@ -2598,7 +2766,7 @@ async function runInitAction(options) {
|
|
|
2598
2766
|
}
|
|
2599
2767
|
}
|
|
2600
2768
|
function registerInitCommand(program, _version, registry) {
|
|
2601
|
-
program.command("init").description("Initialize Substrate — creates config, scaffolds methodology pack, and sets up database").option("--pack <name>", "Methodology pack name", "bmad").option("--project-root <path>", "Project root directory", process.cwd()).option("-y, --yes", "Skip all interactive prompts and use defaults", false).option("--force", "Overwrite existing files and packs", false).option("--output-format <format>", "Output format: human (default) or json", "human").option("--dolt", "Initialize Dolt state database as part of init (forces Dolt bootstrapping)", false).option("--no-dolt", "Skip Dolt state store initialization even if Dolt is installed").action(async (opts) => {
|
|
2769
|
+
program.command("init").description("Initialize Substrate — creates config, scaffolds methodology pack, and sets up database").option("--pack <name>", "Methodology pack name", "bmad").option("--project-root <path>", "Project root directory", process.cwd()).option("-y, --yes", "Skip all interactive prompts and use defaults", false).option("--force", "Overwrite existing files and packs", false).option("--output-format <format>", "Output format: human (default) or json", "human").option("--dolt", "Initialize Dolt state database as part of init (forces Dolt bootstrapping)", false).option("--no-dolt", "Skip Dolt state store initialization even if Dolt is installed").option("--install-user-scope", "Also install Codex prompts/skills into ~/.codex/ (namespaced substrate-*)", false).action(async (opts) => {
|
|
2602
2770
|
const outputFormat = opts.outputFormat === "json" ? "json" : "human";
|
|
2603
2771
|
const doltMode = opts.noDolt ? "skip" : opts.dolt ? "force" : "auto";
|
|
2604
2772
|
const exitCode = await runInitAction({
|
|
@@ -2608,6 +2776,7 @@ function registerInitCommand(program, _version, registry) {
|
|
|
2608
2776
|
force: opts.force,
|
|
2609
2777
|
yes: opts.yes,
|
|
2610
2778
|
doltMode,
|
|
2779
|
+
installUserScope: opts.installUserScope,
|
|
2611
2780
|
...registry !== void 0 && { registry }
|
|
2612
2781
|
});
|
|
2613
2782
|
process.exitCode = exitCode;
|
|
@@ -5022,7 +5191,7 @@ async function runSupervisorAction(options, deps = {}) {
|
|
|
5022
5191
|
await initSchema(expAdapter);
|
|
5023
5192
|
const { runRunAction: runPipeline } = await import(
|
|
5024
5193
|
/* @vite-ignore */
|
|
5025
|
-
"../run-
|
|
5194
|
+
"../run-D41_ttBq.js"
|
|
5026
5195
|
);
|
|
5027
5196
|
const runStoryFn = async (opts) => {
|
|
5028
5197
|
const exitCode = await runPipeline({
|
|
@@ -2,7 +2,7 @@ import "./health-DrZiqv4h.js";
|
|
|
2
2
|
import "./logger-KeHncl-f.js";
|
|
3
3
|
import "./helpers-CElYrONe.js";
|
|
4
4
|
import "./dist-sNh9XQ6V.js";
|
|
5
|
-
import { normalizeGraphSummaryToStatus, registerRunCommand, resolveMaxReviewCycles, runRunAction, wireNdjsonEmitter } from "./run-
|
|
5
|
+
import { normalizeGraphSummaryToStatus, registerRunCommand, resolveMaxReviewCycles, runRunAction, wireNdjsonEmitter } from "./run-dCGeboq9.js";
|
|
6
6
|
import "./routing-CcBOCuC9.js";
|
|
7
7
|
import "./decisions-C0pz9Clx.js";
|
|
8
8
|
|
|
@@ -6501,6 +6501,142 @@ var TrivialOutputCheck = class {
|
|
|
6501
6501
|
}
|
|
6502
6502
|
};
|
|
6503
6503
|
|
|
6504
|
+
//#endregion
|
|
6505
|
+
//#region packages/sdlc/dist/verification/checks/acceptance-criteria-evidence-check.js
|
|
6506
|
+
/**
|
|
6507
|
+
* AcceptanceCriteriaEvidenceCheck.
|
|
6508
|
+
*
|
|
6509
|
+
* Tier A verification check that compares a story's declared acceptance
|
|
6510
|
+
* criteria against structured dev-story output. The check is intentionally
|
|
6511
|
+
* deterministic: no LLM calls, no shell commands, no repository inspection.
|
|
6512
|
+
*/
|
|
6513
|
+
// Matches explicit acceptance-criterion references such as "AC1", "AC: 2", "AC #3".
// NOTE: /g regex is stateful (lastIndex); callers must reset before scanning.
const EXPLICIT_AC_REF = /\bAC\s*:?\s*#?\s*(\d+)\b/gi;
// Matches a numbered list item at line start, optionally bulleted and/or a
// checkbox, e.g. "1. foo", "- 2) bar", "* [x] 3. baz".
const NUMBERED_CRITERION = /^\s*(?:[-*]\s*)?(?:\[[ xX]\]\s*)?(\d+)[.)]\s+\S/;
|
|
6515
|
+
/**
 * Normalize a captured numeric string to a canonical `AC<n>` id.
 *
 * @param {string} value digits captured from a regex match
 * @returns {string|undefined} `AC<n>` for positive finite n, else undefined
 */
function normalizeAcId(value) {
	const n = Number.parseInt(value, 10);
	return Number.isFinite(n) && n > 0 ? `AC${n}` : void 0;
}
|
|
6520
|
+
/**
 * Sort AC ids numerically (so "AC2" precedes "AC10") and return a new array.
 *
 * @param {Iterable<string>} ids AC ids of the form `AC<n>`
 * @returns {string[]} fresh array sorted by numeric suffix
 */
function sortAcIds(ids) {
	const numericSuffix = (id) => Number.parseInt(id.replace(/^AC/i, ""), 10);
	return Array.from(ids).sort((x, y) => numericSuffix(x) - numericSuffix(y));
}
|
|
6527
|
+
/**
 * Scan `text` for explicit "AC" references and add each normalized id to
 * the `ids` set. Resets the shared /g regex's `lastIndex` first so repeated
 * calls always scan from the beginning of the input.
 *
 * @param {string} text text to scan for AC references
 * @param {Set<string>} ids accumulator set mutated in place
 */
function addExplicitAcRefs(text, ids) {
	EXPLICIT_AC_REF.lastIndex = 0;
	for (let hit = EXPLICIT_AC_REF.exec(text); hit !== null; hit = EXPLICIT_AC_REF.exec(text)) {
		const id = normalizeAcId(hit[1] ?? "");
		if (id !== void 0) ids.add(id);
	}
}
|
|
6535
|
+
/**
 * Return the body of the `## Acceptance Criteria` section of a markdown
 * story, or undefined when no such heading exists. The section runs from
 * the line after the heading up to the next `##` heading (or end of input);
 * deeper headings like `### AC1:` do not terminate it.
 *
 * @param {string} storyContent full story markdown
 * @returns {string|undefined} section body joined with "\n", or undefined
 */
function extractAcceptanceSection(storyContent) {
	const lines = storyContent.split(/\r?\n/);
	const headingIdx = lines.findIndex((line) => /^##\s+Acceptance Criteria\s*$/i.test(line.trim()));
	if (headingIdx === -1) return void 0;
	let sectionEnd = lines.length;
	for (let i = headingIdx + 1; i < lines.length; i += 1) {
		if (/^##\s+\S/.test(lines[i] ?? "")) {
			sectionEnd = i;
			break;
		}
	}
	return lines.slice(headingIdx + 1, sectionEnd).join("\n");
}
|
|
6546
|
+
/**
 * Extract normalized AC ids from story markdown.
 *
 * Supports the BMAD default format (`### AC1:`), explicit references such as
 * `AC: #1`, and plain numbered criteria inside the Acceptance Criteria
 * section. Results are de-duplicated and sorted numerically.
 *
 * @param {string} storyContent full story markdown
 * @returns {string[]} sorted unique AC ids, e.g. ["AC1", "AC2"]
 */
function extractAcceptanceCriteriaIds(storyContent) {
	const ids = new Set();
	const section = extractAcceptanceSection(storyContent);
	// Prefer scanning only the AC section; fall back to the whole story.
	addExplicitAcRefs(section ?? storyContent, ids);
	if (section !== void 0) {
		for (const line of section.split(/\r?\n/)) {
			const hit = line.match(NUMBERED_CRITERION);
			if (hit?.[1] !== void 0) {
				const id = normalizeAcId(hit[1]);
				if (id !== void 0) ids.add(id);
			}
		}
	}
	return sortAcIds(ids);
}
|
|
6566
|
+
/**
 * Extract normalized AC ids claimed by dev-story output. Accepts both
 * explicit "AC n" references and entries led by a bare number such as
 * "#2" or "3: ...". A nullish `values` yields an empty result.
 *
 * @param {string[]|undefined|null} values dev-story `ac_met` entries
 * @returns {string[]} sorted unique AC ids
 */
function extractClaimedAcceptanceCriteriaIds(values) {
	const ids = new Set();
	for (const entry of values ?? []) {
		addExplicitAcRefs(entry, ids);
		const leadingNumber = entry.trim().match(/^#?(\d+)\b/);
		if (leadingNumber?.[1] !== void 0) {
			const id = normalizeAcId(leadingNumber[1]);
			if (id !== void 0) ids.add(id);
		}
	}
	return sortAcIds(ids);
}
|
|
6578
|
+
/**
 * Collapse a free-form dev-story test outcome string to "pass"/"fail".
 * Any value containing "fail" (case-insensitive) counts as a failure; every
 * other non-undefined value is treated as passing. Undefined stays undefined.
 *
 * @param {string|undefined} value raw `tests` field from dev-story output
 * @returns {"pass"|"fail"|undefined}
 */
function normalizeTestOutcome(value) {
	if (value === void 0) return void 0;
	const failed = value.toLowerCase().includes("fail");
	return failed ? "fail" : "pass";
}
|
|
6582
|
+
/** Render a list of AC ids as a comma-separated string for check details. */
function formatIds(ids) {
	return ids.join(", ");
}
|
|
6585
|
+
/**
 * Tier A verification check comparing a story's declared acceptance criteria
 * against structured dev-story output. Intentionally deterministic: no LLM
 * calls, no shell commands, no repository inspection. Each early return
 * reports status, a detail string, and elapsed time.
 */
var AcceptanceCriteriaEvidenceCheck = class {
	name = "acceptance-criteria-evidence";
	tier = "A";
	async run(context) {
		const start = Date.now();
		// Build a check result; duration is measured from the start of run().
		const outcome = (status, details) => ({
			status,
			details,
			duration_ms: Date.now() - start
		});
		const storyContent = context.storyContent?.trim();
		if (!storyContent) return outcome("warn", "acceptance-criteria-evidence: story content unavailable - skipping AC evidence check");
		const expectedIds = extractAcceptanceCriteriaIds(storyContent);
		if (expectedIds.length === 0) return outcome("warn", "acceptance-criteria-evidence: no numbered acceptance criteria found in story");
		const devResult = context.devStoryResult;
		if (devResult === void 0) return outcome("warn", `acceptance-criteria-evidence: dev-story result unavailable for ${formatIds(expectedIds)}`);
		// Explicit AC failures reported by dev-story trump everything else.
		const acFailures = devResult.ac_failures ?? [];
		if (acFailures.length > 0) return outcome("fail", `acceptance-criteria-evidence: dev-story reported AC failures: ${acFailures.join("; ")}`);
		const testOutcome = normalizeTestOutcome(devResult.tests);
		if (testOutcome === "fail") return outcome("fail", "acceptance-criteria-evidence: dev-story reported failing tests");
		// Every declared criterion must be claimed as met in the dev output.
		const claimedIds = new Set(extractClaimedAcceptanceCriteriaIds(devResult.ac_met));
		const missingIds = expectedIds.filter((id) => !claimedIds.has(id));
		if (missingIds.length > 0) return outcome("fail", `acceptance-criteria-evidence: missing dev-story AC evidence for ${formatIds(missingIds)}; expected ${formatIds(expectedIds)}, claimed ${formatIds(sortAcIds(claimedIds)) || "none"}`);
		// Coverage is complete but the test signal itself is absent.
		if (testOutcome === void 0) return outcome("warn", `acceptance-criteria-evidence: AC evidence covers ${formatIds(expectedIds)} but test outcome is unavailable`);
		return outcome("pass", `acceptance-criteria-evidence: AC evidence covers ${formatIds(expectedIds)}; tests=${testOutcome}`);
	}
};
|
|
6639
|
+
|
|
6504
6640
|
//#endregion
|
|
6505
6641
|
//#region packages/sdlc/dist/verification/checks/build-check.js
|
|
6506
6642
|
/** Hard timeout for the build command in milliseconds (FR-V11). */
|
|
@@ -6705,7 +6841,8 @@ var VerificationPipeline = class {
|
|
|
6705
6841
|
* Canonical Tier A check order (architecture section 3.5):
|
|
6706
6842
|
* 1. PhantomReviewCheck — story 51-2 (runs first: unreviewed stories skipped)
|
|
6707
6843
|
* 2. TrivialOutputCheck — story 51-3
|
|
6708
|
-
* 3.
|
|
6844
|
+
* 3. AcceptanceCriteriaEvidenceCheck
|
|
6845
|
+
* 4. BuildCheck — story 51-4
|
|
6709
6846
|
*
|
|
6710
6847
|
* @param bus Typed event bus for verification events.
|
|
6711
6848
|
* @param config Optional config (used to forward threshold to TrivialOutputCheck).
|
|
@@ -6714,6 +6851,7 @@ function createDefaultVerificationPipeline(bus, config) {
|
|
|
6714
6851
|
const checks = [
|
|
6715
6852
|
new PhantomReviewCheck(),
|
|
6716
6853
|
new TrivialOutputCheck(config),
|
|
6854
|
+
new AcceptanceCriteriaEvidenceCheck(),
|
|
6717
6855
|
new BuildCheck()
|
|
6718
6856
|
];
|
|
6719
6857
|
return new VerificationPipeline(bus, checks);
|
|
@@ -10764,6 +10902,8 @@ function assembleVerificationContext(opts) {
|
|
|
10764
10902
|
commitSha,
|
|
10765
10903
|
timeout: 6e4,
|
|
10766
10904
|
reviewResult: opts.reviewResult,
|
|
10905
|
+
storyContent: opts.storyContent,
|
|
10906
|
+
devStoryResult: opts.devStoryResult,
|
|
10767
10907
|
outputTokenCount: opts.outputTokenCount
|
|
10768
10908
|
};
|
|
10769
10909
|
}
|
|
@@ -12597,6 +12737,37 @@ function createImplementationOrchestrator(deps) {
|
|
|
12597
12737
|
const batchFileGroups = [];
|
|
12598
12738
|
let devStoryWasSuccess = false;
|
|
12599
12739
|
let devOutputTokenCount;
|
|
12740
|
+
let storyContentForVerification;
|
|
12741
|
+
let devStorySignals;
|
|
12742
|
+
const normalizeDevStorySignals = (result) => {
|
|
12743
|
+
if (result == null) return void 0;
|
|
12744
|
+
return {
|
|
12745
|
+
result: result.result,
|
|
12746
|
+
ac_met: result.ac_met ?? [],
|
|
12747
|
+
ac_failures: result.ac_failures ?? [],
|
|
12748
|
+
files_modified: result.files_modified ?? [],
|
|
12749
|
+
tests: result.tests
|
|
12750
|
+
};
|
|
12751
|
+
};
|
|
12752
|
+
const replaceDevStorySignals = (result) => {
|
|
12753
|
+
const normalized = normalizeDevStorySignals(result);
|
|
12754
|
+
if (normalized !== void 0) devStorySignals = normalized;
|
|
12755
|
+
};
|
|
12756
|
+
const mergeDevStorySignals = (result) => {
|
|
12757
|
+
const normalized = normalizeDevStorySignals(result);
|
|
12758
|
+
if (normalized === void 0) return;
|
|
12759
|
+
if (devStorySignals === void 0) {
|
|
12760
|
+
devStorySignals = normalized;
|
|
12761
|
+
return;
|
|
12762
|
+
}
|
|
12763
|
+
devStorySignals = {
|
|
12764
|
+
result: devStorySignals.result === "failed" || normalized.result === "failed" ? "failed" : normalized.result ?? devStorySignals.result,
|
|
12765
|
+
ac_met: Array.from(new Set([...devStorySignals.ac_met ?? [], ...normalized.ac_met ?? []])),
|
|
12766
|
+
ac_failures: Array.from(new Set([...devStorySignals.ac_failures ?? [], ...normalized.ac_failures ?? []])),
|
|
12767
|
+
files_modified: Array.from(new Set([...devStorySignals.files_modified ?? [], ...normalized.files_modified ?? []])),
|
|
12768
|
+
tests: devStorySignals.tests === "fail" || normalized.tests === "fail" ? "fail" : normalized.tests ?? devStorySignals.tests
|
|
12769
|
+
};
|
|
12770
|
+
};
|
|
12600
12771
|
let baselineHeadSha;
|
|
12601
12772
|
try {
|
|
12602
12773
|
baselineHeadSha = execSync("git rev-parse HEAD", {
|
|
@@ -12614,6 +12785,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12614
12785
|
let storyContentForAnalysis = "";
|
|
12615
12786
|
try {
|
|
12616
12787
|
storyContentForAnalysis = await readFile$1(storyFilePath ?? "", "utf-8");
|
|
12788
|
+
storyContentForVerification = storyContentForAnalysis;
|
|
12617
12789
|
} catch (err) {
|
|
12618
12790
|
logger$24.error({
|
|
12619
12791
|
storyKey,
|
|
@@ -12681,6 +12853,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12681
12853
|
}
|
|
12682
12854
|
const batchDurationMs = Date.now() - batchStartMs;
|
|
12683
12855
|
const batchFilesModified = batchResult.files_modified ?? [];
|
|
12856
|
+
mergeDevStorySignals(batchResult);
|
|
12684
12857
|
const batchMetrics = {
|
|
12685
12858
|
batchIndex: batch.batchIndex,
|
|
12686
12859
|
taskIds: batch.taskIds,
|
|
@@ -12950,6 +13123,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12950
13123
|
return;
|
|
12951
13124
|
}
|
|
12952
13125
|
const retryParsed = checkpointRetryResult.parsed;
|
|
13126
|
+
replaceDevStorySignals(retryParsed);
|
|
12953
13127
|
devFilesModified = retryParsed?.files_modified ?? checkGitDiffFiles(projectRoot ?? process.cwd());
|
|
12954
13128
|
if (checkpointRetryResult.status === "completed" && retryParsed?.result === "success") devStoryWasSuccess = true;
|
|
12955
13129
|
else logger$24.warn({
|
|
@@ -12958,22 +13132,25 @@ function createImplementationOrchestrator(deps) {
|
|
|
12958
13132
|
}, "Checkpoint retry completed with failure — proceeding to code review");
|
|
12959
13133
|
checkpointHandled = true;
|
|
12960
13134
|
}
|
|
12961
|
-
if (!checkpointHandled)
|
|
12962
|
-
|
|
12963
|
-
|
|
12964
|
-
|
|
12965
|
-
error: devResult.error,
|
|
12966
|
-
filesModified: devFilesModified.length
|
|
12967
|
-
}, "Dev-story reported failure, proceeding to code review");
|
|
12968
|
-
if (!devResult.error?.startsWith("dispatch_timeout")) {
|
|
13135
|
+
if (!checkpointHandled) {
|
|
13136
|
+
replaceDevStorySignals(devResult);
|
|
13137
|
+
if (devResult.result === "success") devStoryWasSuccess = true;
|
|
13138
|
+
else {
|
|
12969
13139
|
logger$24.warn({
|
|
12970
13140
|
storyKey,
|
|
12971
|
-
error: devResult.error
|
|
12972
|
-
|
|
12973
|
-
|
|
12974
|
-
|
|
12975
|
-
|
|
12976
|
-
|
|
13141
|
+
error: devResult.error,
|
|
13142
|
+
filesModified: devFilesModified.length
|
|
13143
|
+
}, "Dev-story reported failure, proceeding to code review");
|
|
13144
|
+
if (!devResult.error?.startsWith("dispatch_timeout")) {
|
|
13145
|
+
logger$24.warn({
|
|
13146
|
+
storyKey,
|
|
13147
|
+
error: devResult.error
|
|
13148
|
+
}, "Agent process failure (non-timeout) — story will proceed to code review with partial work");
|
|
13149
|
+
eventBus.emit("orchestrator:story-warn", {
|
|
13150
|
+
storyKey,
|
|
13151
|
+
msg: "agent process failure (non-timeout)"
|
|
13152
|
+
});
|
|
13153
|
+
}
|
|
12977
13154
|
}
|
|
12978
13155
|
}
|
|
12979
13156
|
}
|
|
@@ -13551,6 +13728,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
13551
13728
|
storyKey,
|
|
13552
13729
|
workingDir: projectRoot ?? process.cwd(),
|
|
13553
13730
|
reviewResult: latestReviewSignals,
|
|
13731
|
+
storyContent: storyContentForVerification,
|
|
13732
|
+
devStoryResult: devStorySignals,
|
|
13554
13733
|
outputTokenCount: devOutputTokenCount
|
|
13555
13734
|
});
|
|
13556
13735
|
const verifSummary = await verificationPipeline.run(verifContext, "A");
|
|
@@ -13791,6 +13970,37 @@ function createImplementationOrchestrator(deps) {
|
|
|
13791
13970
|
}, "Auto-approve fix dispatch failed — approving anyway (issues were minor)");
|
|
13792
13971
|
}
|
|
13793
13972
|
endPhase(storyKey, "code-review");
|
|
13973
|
+
if (config.skipVerification !== true) {
|
|
13974
|
+
const latestReviewSignals = reviewResult != null ? {
|
|
13975
|
+
dispatchFailed: reviewResult.dispatchFailed,
|
|
13976
|
+
error: reviewResult.error,
|
|
13977
|
+
rawOutput: reviewResult.rawOutput
|
|
13978
|
+
} : void 0;
|
|
13979
|
+
const verifContext = assembleVerificationContext({
|
|
13980
|
+
storyKey,
|
|
13981
|
+
workingDir: projectRoot ?? process.cwd(),
|
|
13982
|
+
reviewResult: latestReviewSignals,
|
|
13983
|
+
storyContent: storyContentForVerification,
|
|
13984
|
+
devStoryResult: devStorySignals,
|
|
13985
|
+
outputTokenCount: devOutputTokenCount
|
|
13986
|
+
});
|
|
13987
|
+
const verifSummary = await verificationPipeline.run(verifContext, "A");
|
|
13988
|
+
verificationStore.set(storyKey, verifSummary);
|
|
13989
|
+
persistVerificationResult(storyKey, verifSummary, runManifest);
|
|
13990
|
+
if (verifSummary.status === "fail") {
|
|
13991
|
+
updateStory(storyKey, {
|
|
13992
|
+
phase: "VERIFICATION_FAILED",
|
|
13993
|
+
completedAt: new Date().toISOString()
|
|
13994
|
+
});
|
|
13995
|
+
persistStoryState(storyKey, _stories.get(storyKey)).catch((err) => logger$24.warn({
|
|
13996
|
+
err,
|
|
13997
|
+
storyKey
|
|
13998
|
+
}, "StateStore write failed after verification-failed"));
|
|
13999
|
+
await writeStoryMetricsBestEffort(storyKey, "verification-failed", finalReviewCycles);
|
|
14000
|
+
await persistState();
|
|
14001
|
+
return;
|
|
14002
|
+
}
|
|
14003
|
+
}
|
|
13794
14004
|
eventBus.emit("story:auto-approved", {
|
|
13795
14005
|
storyKey,
|
|
13796
14006
|
verdict,
|
|
@@ -14038,6 +14248,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
14038
14248
|
exitCode: fixResult.exitCode
|
|
14039
14249
|
}, "Fix dispatch failed");
|
|
14040
14250
|
}
|
|
14251
|
+
if (isMajorRework) replaceDevStorySignals(fixResult.parsed);
|
|
14041
14252
|
} catch (err) {
|
|
14042
14253
|
logger$24.warn({
|
|
14043
14254
|
storyKey,
|
|
@@ -43077,4 +43288,4 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
|
|
|
43077
43288
|
|
|
43078
43289
|
//#endregion
|
|
43079
43290
|
export { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, normalizeGraphSummaryToStatus, registerExportCommand, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveMaxReviewCycles, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict, wireNdjsonEmitter };
|
|
43080
|
-
//# sourceMappingURL=run-
|
|
43291
|
+
//# sourceMappingURL=run-dCGeboq9.js.map
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "substrate-ai",
|
|
3
|
-
"version": "0.19.
|
|
3
|
+
"version": "0.19.54",
|
|
4
4
|
"description": "Substrate — multi-agent orchestration daemon for AI coding agents",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"license": "MIT",
|
|
@@ -65,7 +65,8 @@
|
|
|
65
65
|
"typecheck": "tsc --noEmit",
|
|
66
66
|
"typecheck:gate": "tsc --noEmit -p tsconfig.typecheck.json",
|
|
67
67
|
"clean": "rm -rf dist",
|
|
68
|
-
"substrate:dev": "node dist/cli/index.js"
|
|
68
|
+
"substrate:dev": "node dist/cli/index.js",
|
|
69
|
+
"version:sync": "node scripts/sync-workspace-versions.mjs"
|
|
69
70
|
},
|
|
70
71
|
"dependencies": {
|
|
71
72
|
"bmad-method": "^6.2.2",
|