substrate-ai 0.19.7 → 0.19.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli/index.js
CHANGED
|
@@ -2,9 +2,9 @@
|
|
|
2
2
|
import { FileStateStore, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, WorkGraphRepository, buildPipelineStatusOutput, createDatabaseAdapter, createStateStore, findPackageRoot, formatOutput, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, parseDbTimestampAsUtc, registerHealthCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot } from "../health-DJgGZhW-.js";
|
|
3
3
|
import { createLogger } from "../logger-KeHncl-f.js";
|
|
4
4
|
import { createEventBus } from "../helpers-CElYrONe.js";
|
|
5
|
-
import { AdapterRegistry, BudgetConfigSchema, CURRENT_CONFIG_FORMAT_VERSION, CURRENT_TASK_GRAPH_VERSION, ConfigError, CostTrackerConfigSchema, DEFAULT_CONFIG, DoltClient, DoltNotInstalled,
|
|
5
|
+
import { AdapterRegistry, BudgetConfigSchema, CURRENT_CONFIG_FORMAT_VERSION, CURRENT_TASK_GRAPH_VERSION, ConfigError, CostTrackerConfigSchema, DEFAULT_CONFIG, DoltClient, DoltNotInstalled, GlobalSettingsSchema, IngestionServer, MonitorDatabaseImpl, OPERATIONAL_FINDING, PartialGlobalSettingsSchema, PartialProviderConfigSchema, ProvidersSchema, RoutingRecommender, STORY_METRICS, TelemetryConfigSchema, addTokenUsage, aggregateTokenUsageForRun, checkDoltInstalled, compareRunMetrics, createAmendmentRun, createConfigSystem, createDecision, createDoltClient, createPipelineRun, getActiveDecisions, getAllCostEntriesFiltered, getBaselineRunMetrics, getDecisionsByCategory, getDecisionsByPhaseForRun, getLatestCompletedRun, getLatestRun, getPipelineRunById, getPlanningCostTotal, getRetryableEscalations, getRunMetrics, getSessionCostSummary, getSessionCostSummaryFiltered, getStoryMetricsForRun, getTokenUsageSummary, incrementRunRestarts, initSchema, initializeDolt, listRunMetrics, loadParentRunDecisions, supersedeDecision, tagRunAsBaseline, updatePipelineRun } from "../dist-adzGUKPc.js";
|
|
6
6
|
import "../adapter-registry-DXLMTmfD.js";
|
|
7
|
-
import { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runSolutioningPhase, validateStopAfterFromConflict } from "../run-
|
|
7
|
+
import { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, registerExportCommand, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runSolutioningPhase, validateStopAfterFromConflict } from "../run-Dg_BEJB6.js";
|
|
8
8
|
import "../errors-CZdr5Wqb.js";
|
|
9
9
|
import "../routing-CcBOCuC9.js";
|
|
10
10
|
import "../decisions-C0pz9Clx.js";
|
|
@@ -15,12 +15,12 @@ import { fileURLToPath } from "url";
|
|
|
15
15
|
import { dirname, join, resolve } from "path";
|
|
16
16
|
import { access, mkdir, readFile, writeFile } from "fs/promises";
|
|
17
17
|
import yaml from "js-yaml";
|
|
18
|
-
import { existsSync,
|
|
18
|
+
import { existsSync, readFileSync } from "node:fs";
|
|
19
19
|
import { spawn } from "node:child_process";
|
|
20
20
|
import * as path$3 from "node:path";
|
|
21
21
|
import * as path$2 from "node:path";
|
|
22
22
|
import * as path$1 from "node:path";
|
|
23
|
-
import {
|
|
23
|
+
import { join as join$1 } from "node:path";
|
|
24
24
|
import { z } from "zod";
|
|
25
25
|
import * as fs from "node:fs/promises";
|
|
26
26
|
import { access as access$1, readFile as readFile$1, readdir as readdir$1 } from "node:fs/promises";
|
|
@@ -332,12 +332,12 @@ var GitWorktreeManagerImpl = class {
|
|
|
332
332
|
_onTaskReady;
|
|
333
333
|
_onTaskComplete;
|
|
334
334
|
_onTaskFailed;
|
|
335
|
-
constructor(eventBus, projectRoot, baseDirectory = DEFAULT_WORKTREE_BASE, db = null, logger$
|
|
335
|
+
constructor(eventBus, projectRoot, baseDirectory = DEFAULT_WORKTREE_BASE, db = null, logger$15) {
|
|
336
336
|
this._eventBus = eventBus;
|
|
337
337
|
this._projectRoot = projectRoot;
|
|
338
338
|
this._baseDirectory = baseDirectory;
|
|
339
339
|
this._db = db;
|
|
340
|
-
this._logger = logger$
|
|
340
|
+
this._logger = logger$15 ?? console;
|
|
341
341
|
this._onTaskReady = ({ taskId }) => {
|
|
342
342
|
this._handleTaskReady(taskId).catch((err) => {
|
|
343
343
|
this._logger.error({
|
|
@@ -641,14 +641,14 @@ var RecommendationEngine = class {
|
|
|
641
641
|
_filters;
|
|
642
642
|
_historyDays;
|
|
643
643
|
_logger;
|
|
644
|
-
constructor(monitorDb, config = {}, logger$
|
|
644
|
+
constructor(monitorDb, config = {}, logger$15) {
|
|
645
645
|
this._monitorDb = monitorDb;
|
|
646
646
|
this._filters = {
|
|
647
647
|
threshold_percentage: config.recommendation_threshold_percentage ?? 5,
|
|
648
648
|
min_sample_size: config.min_sample_size ?? 10
|
|
649
649
|
};
|
|
650
650
|
this._historyDays = config.recommendation_history_days ?? 90;
|
|
651
|
-
this._logger = logger$
|
|
651
|
+
this._logger = logger$15 ?? console;
|
|
652
652
|
}
|
|
653
653
|
generateRecommendations() {
|
|
654
654
|
const sinceDate = new Date(Date.now() - this._historyDays * 24 * 60 * 60 * 1e3).toISOString();
|
|
@@ -1603,7 +1603,7 @@ function buildStackAwareDevNotes(profile) {
|
|
|
1603
1603
|
|
|
1604
1604
|
//#endregion
|
|
1605
1605
|
//#region src/cli/commands/init.ts
|
|
1606
|
-
const logger$
|
|
1606
|
+
const logger$14 = createLogger("init");
|
|
1607
1607
|
const __dirname = dirname(new URL(import.meta.url).pathname);
|
|
1608
1608
|
const SCAFFOLD_VERSION_REGEX = /<!-- substrate:version=([\d.]+) -->/;
|
|
1609
1609
|
/**
|
|
@@ -1644,7 +1644,7 @@ async function scaffoldBmadFramework(projectRoot, force, outputFormat) {
|
|
|
1644
1644
|
const version = resolveBmadMethodVersion();
|
|
1645
1645
|
if (force && bmadExists) process.stderr.write(`Warning: Replacing existing _bmad/ framework with bmad-method@${version}\n`);
|
|
1646
1646
|
process.stdout.write(`Scaffolding BMAD framework from bmad-method@${version}\n`);
|
|
1647
|
-
logger$
|
|
1647
|
+
logger$14.info({
|
|
1648
1648
|
version,
|
|
1649
1649
|
dest: bmadDest
|
|
1650
1650
|
}, "Scaffolding BMAD framework");
|
|
@@ -1654,7 +1654,7 @@ async function scaffoldBmadFramework(projectRoot, force, outputFormat) {
|
|
|
1654
1654
|
const destDir = join(bmadDest, dir);
|
|
1655
1655
|
mkdirSync$1(destDir, { recursive: true });
|
|
1656
1656
|
cpSync(srcDir, destDir, { recursive: true });
|
|
1657
|
-
logger$
|
|
1657
|
+
logger$14.info({
|
|
1658
1658
|
dir,
|
|
1659
1659
|
dest: destDir
|
|
1660
1660
|
}, "Scaffolded BMAD framework directory");
|
|
@@ -1673,7 +1673,7 @@ async function scaffoldBmadFramework(projectRoot, force, outputFormat) {
|
|
|
1673
1673
|
"document_output_language: English"
|
|
1674
1674
|
].join("\n") + "\n";
|
|
1675
1675
|
await writeFile(configFile, configStub, "utf8");
|
|
1676
|
-
logger$
|
|
1676
|
+
logger$14.info({ configFile }, "Generated _bmad/_config/config.yaml stub");
|
|
1677
1677
|
}
|
|
1678
1678
|
}
|
|
1679
1679
|
const CLAUDE_MD_START_MARKER = "<!-- substrate:start -->";
|
|
@@ -1688,7 +1688,7 @@ async function scaffoldClaudeMd(projectRoot, profile) {
|
|
|
1688
1688
|
try {
|
|
1689
1689
|
sectionContent = await readFile(templatePath, "utf8");
|
|
1690
1690
|
} catch {
|
|
1691
|
-
logger$
|
|
1691
|
+
logger$14.warn({ templatePath }, "CLAUDE.md substrate section template not found; skipping");
|
|
1692
1692
|
return;
|
|
1693
1693
|
}
|
|
1694
1694
|
const substrateVersion = readSubstrateVersion(pkgRoot);
|
|
@@ -1724,7 +1724,7 @@ async function scaffoldClaudeMd(projectRoot, profile) {
|
|
|
1724
1724
|
else newContent = updatedExisting;
|
|
1725
1725
|
}
|
|
1726
1726
|
await writeFile(claudeMdPath, newContent, "utf8");
|
|
1727
|
-
logger$
|
|
1727
|
+
logger$14.info({ claudeMdPath }, "Wrote substrate section to CLAUDE.md");
|
|
1728
1728
|
}
|
|
1729
1729
|
async function scaffoldStatuslineScript(projectRoot) {
|
|
1730
1730
|
const pkgRoot = findPackageRoot(__dirname);
|
|
@@ -1735,7 +1735,7 @@ async function scaffoldStatuslineScript(projectRoot) {
|
|
|
1735
1735
|
try {
|
|
1736
1736
|
content = await readFile(templatePath, "utf8");
|
|
1737
1737
|
} catch {
|
|
1738
|
-
logger$
|
|
1738
|
+
logger$14.warn({ templatePath }, "statusline.sh template not found; skipping");
|
|
1739
1739
|
return;
|
|
1740
1740
|
}
|
|
1741
1741
|
const claudeDir = join(projectRoot, ".claude");
|
|
@@ -1743,7 +1743,7 @@ async function scaffoldStatuslineScript(projectRoot) {
|
|
|
1743
1743
|
mkdirSync$1(claudeDir, { recursive: true });
|
|
1744
1744
|
await writeFile(statuslinePath, content, "utf8");
|
|
1745
1745
|
chmodSync(statuslinePath, 493);
|
|
1746
|
-
logger$
|
|
1746
|
+
logger$14.info({ statuslinePath }, "Wrote .claude/statusline.sh");
|
|
1747
1747
|
}
|
|
1748
1748
|
async function scaffoldClaudeSettings(projectRoot) {
|
|
1749
1749
|
const claudeDir = join(projectRoot, ".claude");
|
|
@@ -1759,7 +1759,7 @@ async function scaffoldClaudeSettings(projectRoot) {
|
|
|
1759
1759
|
if (!merged["$schema"]) merged["$schema"] = "https://json.schemastore.org/claude-code-settings.json";
|
|
1760
1760
|
mkdirSync$1(claudeDir, { recursive: true });
|
|
1761
1761
|
await writeFile(settingsPath, JSON.stringify(merged, null, 2) + "\n", "utf8");
|
|
1762
|
-
logger$
|
|
1762
|
+
logger$14.info({ settingsPath }, "Wrote substrate settings to .claude/settings.json");
|
|
1763
1763
|
}
|
|
1764
1764
|
function resolveBmadMethodInstallerLibPath(fromDir = __dirname) {
|
|
1765
1765
|
try {
|
|
@@ -1831,7 +1831,7 @@ async function compileBmadAgents(bmadDir) {
|
|
|
1831
1831
|
writeFileSync$1(mdPath, result.xml, "utf-8");
|
|
1832
1832
|
compiled++;
|
|
1833
1833
|
} catch (compileErr) {
|
|
1834
|
-
logger$
|
|
1834
|
+
logger$14.debug({
|
|
1835
1835
|
err: compileErr,
|
|
1836
1836
|
file
|
|
1837
1837
|
}, "Failed to compile agent YAML");
|
|
@@ -1978,9 +1978,9 @@ async function scaffoldClaudeCommands(projectRoot, outputFormat) {
|
|
|
1978
1978
|
const _require = createRequire(join(__dirname, "synthetic.js"));
|
|
1979
1979
|
try {
|
|
1980
1980
|
const compiledCount = await compileBmadAgents(bmadDir);
|
|
1981
|
-
if (compiledCount > 0) logger$
|
|
1981
|
+
if (compiledCount > 0) logger$14.info({ compiledCount }, "Compiled agent YAML files to MD");
|
|
1982
1982
|
} catch (compileErr) {
|
|
1983
|
-
logger$
|
|
1983
|
+
logger$14.warn({ err: compileErr }, "Agent compilation failed; agent commands may be incomplete");
|
|
1984
1984
|
}
|
|
1985
1985
|
const resolveExport = (mod, name) => {
|
|
1986
1986
|
if (typeof mod[name] === "function") return mod[name];
|
|
@@ -1994,7 +1994,7 @@ async function scaffoldClaudeCommands(projectRoot, outputFormat) {
|
|
|
1994
1994
|
const manifestGenPath = join(installerLibPath, "core", "manifest-generator.js");
|
|
1995
1995
|
const pathUtilsPath = join(installerLibPath, "ide", "shared", "path-utils.js");
|
|
1996
1996
|
if (!existsSync$1(agentGenPath)) {
|
|
1997
|
-
logger$
|
|
1997
|
+
logger$14.info("bmad-method generators not available (requires bmad-method with agent/workflow/task-tool generators)");
|
|
1998
1998
|
return;
|
|
1999
1999
|
}
|
|
2000
2000
|
const agentMod = _require(agentGenPath);
|
|
@@ -2004,11 +2004,11 @@ async function scaffoldClaudeCommands(projectRoot, outputFormat) {
|
|
|
2004
2004
|
if (existsSync$1(workflowGenPath)) {
|
|
2005
2005
|
const workflowMod = _require(workflowGenPath);
|
|
2006
2006
|
WorkflowCommandGenerator = resolveExport(workflowMod, "WorkflowCommandGenerator");
|
|
2007
|
-
} else logger$
|
|
2007
|
+
} else logger$14.info("bmad-method workflow-command-generator not available; will try skill-based installation");
|
|
2008
2008
|
if (existsSync$1(taskToolGenPath)) {
|
|
2009
2009
|
const taskToolMod = _require(taskToolGenPath);
|
|
2010
2010
|
TaskToolCommandGenerator = resolveExport(taskToolMod, "TaskToolCommandGenerator");
|
|
2011
|
-
} else logger$
|
|
2011
|
+
} else logger$14.info("bmad-method task-tool-command-generator not available; will try skill-based installation");
|
|
2012
2012
|
let ManifestGenerator = null;
|
|
2013
2013
|
if (existsSync$1(manifestGenPath)) {
|
|
2014
2014
|
const manifestMod = _require(manifestGenPath);
|
|
@@ -2040,7 +2040,7 @@ async function scaffoldClaudeCommands(projectRoot, outputFormat) {
|
|
|
2040
2040
|
const manifestGen = new ManifestGenerator();
|
|
2041
2041
|
await manifestGen.generateManifests(bmadDir, allModules, [], { ides: ["claude-code"] });
|
|
2042
2042
|
} catch (manifestErr) {
|
|
2043
|
-
logger$
|
|
2043
|
+
logger$14.warn({ err: manifestErr }, "ManifestGenerator failed; workflow/task commands may be incomplete");
|
|
2044
2044
|
}
|
|
2045
2045
|
const commandsDir = join(projectRoot, ".claude", "commands");
|
|
2046
2046
|
mkdirSync$1(commandsDir, { recursive: true });
|
|
@@ -2068,7 +2068,7 @@ async function scaffoldClaudeCommands(projectRoot, outputFormat) {
|
|
|
2068
2068
|
const total = agentCount + workflowCount + taskToolCount + skillCount;
|
|
2069
2069
|
if (outputFormat !== "json") if (skillCount > 0) process.stdout.write(`Generated ${String(total)} Claude Code commands (${String(agentCount)} agents, ${String(skillCount)} skills)\n`);
|
|
2070
2070
|
else process.stdout.write(`Generated ${String(total)} Claude Code commands (${String(agentCount)} agents, ${String(workflowCount)} workflows, ${String(taskToolCount)} tasks/tools)\n`);
|
|
2071
|
-
logger$
|
|
2071
|
+
logger$14.info({
|
|
2072
2072
|
agentCount,
|
|
2073
2073
|
workflowCount,
|
|
2074
2074
|
taskToolCount,
|
|
@@ -2079,7 +2079,7 @@ async function scaffoldClaudeCommands(projectRoot, outputFormat) {
|
|
|
2079
2079
|
} catch (err) {
|
|
2080
2080
|
const msg = err instanceof Error ? err.message : String(err);
|
|
2081
2081
|
if (outputFormat !== "json") process.stderr.write(`Warning: .claude/commands/ generation failed: ${msg}\n`);
|
|
2082
|
-
logger$
|
|
2082
|
+
logger$14.warn({ err }, "scaffoldClaudeCommands failed; init continues");
|
|
2083
2083
|
}
|
|
2084
2084
|
}
|
|
2085
2085
|
const PROVIDER_DEFAULTS = DEFAULT_CONFIG.providers;
|
|
@@ -2199,7 +2199,7 @@ async function runInitAction(options) {
|
|
|
2199
2199
|
discoveryReport = await registry.discoverAndRegister();
|
|
2200
2200
|
} catch (err) {
|
|
2201
2201
|
const message = err instanceof Error ? err.message : String(err);
|
|
2202
|
-
logger$
|
|
2202
|
+
logger$14.error({ err }, "Adapter discovery failed");
|
|
2203
2203
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, `Adapter discovery failed: ${message}`) + "\n");
|
|
2204
2204
|
else process.stderr.write(` Error: adapter discovery failed — ${message}\n`);
|
|
2205
2205
|
return INIT_EXIT_ERROR;
|
|
@@ -2242,7 +2242,7 @@ async function runInitAction(options) {
|
|
|
2242
2242
|
try {
|
|
2243
2243
|
detectedProfile = await detectProjectProfile(dbRoot);
|
|
2244
2244
|
} catch (err) {
|
|
2245
|
-
logger$
|
|
2245
|
+
logger$14.warn({ err }, "Project profile detection failed; skipping");
|
|
2246
2246
|
}
|
|
2247
2247
|
if (detectedProfile === null) {
|
|
2248
2248
|
if (outputFormat !== "json") process.stdout.write(" No project stack detected. Create .substrate/project-profile.yaml manually to enable polyglot support.\n");
|
|
@@ -2276,12 +2276,12 @@ async function runInitAction(options) {
|
|
|
2276
2276
|
return INIT_EXIT_ERROR;
|
|
2277
2277
|
}
|
|
2278
2278
|
if (force && existsSync$1(localManifest)) {
|
|
2279
|
-
logger$
|
|
2279
|
+
logger$14.info({ pack: packName }, "Replacing existing pack with bundled version");
|
|
2280
2280
|
process.stderr.write(`Warning: Replacing existing pack '${packName}' with bundled version\n`);
|
|
2281
2281
|
}
|
|
2282
2282
|
mkdirSync$1(dirname(packPath), { recursive: true });
|
|
2283
2283
|
cpSync(bundledPackPath, packPath, { recursive: true });
|
|
2284
|
-
logger$
|
|
2284
|
+
logger$14.info({
|
|
2285
2285
|
pack: packName,
|
|
2286
2286
|
dest: packPath
|
|
2287
2287
|
}, "Scaffolded methodology pack");
|
|
@@ -2320,10 +2320,10 @@ async function runInitAction(options) {
|
|
|
2320
2320
|
if (missing.length > 0) {
|
|
2321
2321
|
const block = "\n# Substrate runtime files\n" + missing.join("\n") + "\n";
|
|
2322
2322
|
appendFileSync(gitignorePath, block);
|
|
2323
|
-
logger$
|
|
2323
|
+
logger$14.info({ entries: missing }, "Added substrate runtime files to .gitignore");
|
|
2324
2324
|
}
|
|
2325
2325
|
} catch (err) {
|
|
2326
|
-
logger$
|
|
2326
|
+
logger$14.debug({ err }, "Could not update .gitignore (non-fatal)");
|
|
2327
2327
|
}
|
|
2328
2328
|
const doltMode = options.doltMode ?? "auto";
|
|
2329
2329
|
let doltInitialized = false;
|
|
@@ -2340,7 +2340,7 @@ async function runInitAction(options) {
|
|
|
2340
2340
|
process.stderr.write(`${err.message}\n`);
|
|
2341
2341
|
return INIT_EXIT_ERROR;
|
|
2342
2342
|
}
|
|
2343
|
-
logger$
|
|
2343
|
+
logger$14.debug("Dolt not installed, skipping auto-init");
|
|
2344
2344
|
} else {
|
|
2345
2345
|
const msg = err instanceof Error ? err.message : String(err);
|
|
2346
2346
|
if (doltMode === "force") {
|
|
@@ -2350,7 +2350,7 @@ async function runInitAction(options) {
|
|
|
2350
2350
|
process.stderr.write(`⚠ Dolt state store initialization failed: ${msg}\n Pipeline metrics, cost tracking, and health monitoring will not persist.\n Fix the issue and re-run: substrate init --dolt\n`);
|
|
2351
2351
|
}
|
|
2352
2352
|
}
|
|
2353
|
-
else logger$
|
|
2353
|
+
else logger$14.debug("Dolt step was skipped (--no-dolt)");
|
|
2354
2354
|
const successMsg = `Pack '${packName}' and database initialized successfully at ${dbPath}`;
|
|
2355
2355
|
if (outputFormat === "json") process.stdout.write(formatOutput({
|
|
2356
2356
|
pack: packName,
|
|
@@ -2384,7 +2384,7 @@ async function runInitAction(options) {
|
|
|
2384
2384
|
const msg = err instanceof Error ? err.message : String(err);
|
|
2385
2385
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
|
|
2386
2386
|
else process.stderr.write(`Error: ${msg}\n`);
|
|
2387
|
-
logger$
|
|
2387
|
+
logger$14.error({ err }, "init failed");
|
|
2388
2388
|
return INIT_EXIT_ERROR;
|
|
2389
2389
|
}
|
|
2390
2390
|
}
|
|
@@ -2407,7 +2407,7 @@ function registerInitCommand(program, _version, registry) {
|
|
|
2407
2407
|
|
|
2408
2408
|
//#endregion
|
|
2409
2409
|
//#region src/cli/commands/config.ts
|
|
2410
|
-
const logger$
|
|
2410
|
+
const logger$13 = createLogger("config-cmd");
|
|
2411
2411
|
const CONFIG_EXIT_SUCCESS = 0;
|
|
2412
2412
|
const CONFIG_EXIT_ERROR = 1;
|
|
2413
2413
|
const CONFIG_EXIT_INVALID = 2;
|
|
@@ -2433,7 +2433,7 @@ async function runConfigShow(opts = {}) {
|
|
|
2433
2433
|
return CONFIG_EXIT_INVALID;
|
|
2434
2434
|
}
|
|
2435
2435
|
const message = err instanceof Error ? err.message : String(err);
|
|
2436
|
-
logger$
|
|
2436
|
+
logger$13.error({ err }, "Failed to load configuration");
|
|
2437
2437
|
process.stderr.write(` Error loading configuration: ${message}\n`);
|
|
2438
2438
|
return CONFIG_EXIT_ERROR;
|
|
2439
2439
|
}
|
|
@@ -2507,7 +2507,7 @@ async function runConfigExport(opts = {}) {
|
|
|
2507
2507
|
return CONFIG_EXIT_INVALID;
|
|
2508
2508
|
}
|
|
2509
2509
|
const message = err instanceof Error ? err.message : String(err);
|
|
2510
|
-
logger$
|
|
2510
|
+
logger$13.error({ err }, "Failed to load configuration");
|
|
2511
2511
|
process.stderr.write(`Error loading configuration: ${message}\n`);
|
|
2512
2512
|
return CONFIG_EXIT_ERROR;
|
|
2513
2513
|
}
|
|
@@ -2661,7 +2661,7 @@ function registerConfigCommand(program, _version) {
|
|
|
2661
2661
|
|
|
2662
2662
|
//#endregion
|
|
2663
2663
|
//#region src/cli/commands/resume.ts
|
|
2664
|
-
const logger$
|
|
2664
|
+
const logger$12 = createLogger("resume-cmd");
|
|
2665
2665
|
/**
|
|
2666
2666
|
* Map internal orchestrator phase names to pipeline event protocol phase names.
|
|
2667
2667
|
*/
|
|
@@ -2766,7 +2766,7 @@ async function runResumeAction(options) {
|
|
|
2766
2766
|
const msg = err instanceof Error ? err.message : String(err);
|
|
2767
2767
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
|
|
2768
2768
|
else process.stderr.write(`Error: ${msg}\n`);
|
|
2769
|
-
logger$
|
|
2769
|
+
logger$12.error({ err }, "auto resume failed");
|
|
2770
2770
|
return 1;
|
|
2771
2771
|
} finally {
|
|
2772
2772
|
try {
|
|
@@ -3010,11 +3010,11 @@ async function runFullPipelineFromPhase(options) {
|
|
|
3010
3010
|
output_tokens: output,
|
|
3011
3011
|
cost_usd: costUsd
|
|
3012
3012
|
}).catch((err) => {
|
|
3013
|
-
logger$
|
|
3013
|
+
logger$12.warn({ err }, "Failed to record token usage");
|
|
3014
3014
|
});
|
|
3015
3015
|
}
|
|
3016
3016
|
} catch (err) {
|
|
3017
|
-
logger$
|
|
3017
|
+
logger$12.warn({ err }, "Failed to record token usage");
|
|
3018
3018
|
}
|
|
3019
3019
|
});
|
|
3020
3020
|
const storyKeys = await resolveStoryKeys(adapter, projectRoot, {
|
|
@@ -3083,7 +3083,7 @@ async function runFullPipelineFromPhase(options) {
|
|
|
3083
3083
|
const msg = err instanceof Error ? err.message : String(err);
|
|
3084
3084
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
|
|
3085
3085
|
else process.stderr.write(`Error: ${msg}\n`);
|
|
3086
|
-
logger$
|
|
3086
|
+
logger$12.error({ err }, "pipeline from phase failed");
|
|
3087
3087
|
return 1;
|
|
3088
3088
|
} finally {
|
|
3089
3089
|
try {
|
|
@@ -3111,7 +3111,7 @@ function registerResumeCommand(program, _version = "0.0.0", projectRoot = proces
|
|
|
3111
3111
|
|
|
3112
3112
|
//#endregion
|
|
3113
3113
|
//#region src/cli/commands/status.ts
|
|
3114
|
-
const logger$
|
|
3114
|
+
const logger$11 = createLogger("status-cmd");
|
|
3115
3115
|
async function runStatusAction(options) {
|
|
3116
3116
|
const { outputFormat, runId, projectRoot, stateStore, history } = options;
|
|
3117
3117
|
if (history === true) {
|
|
@@ -3190,7 +3190,7 @@ async function runStatusAction(options) {
|
|
|
3190
3190
|
};
|
|
3191
3191
|
}
|
|
3192
3192
|
} catch (err) {
|
|
3193
|
-
logger$
|
|
3193
|
+
logger$11.debug({ err }, "Work graph query failed, continuing without work graph data");
|
|
3194
3194
|
}
|
|
3195
3195
|
let run;
|
|
3196
3196
|
if (runId !== void 0 && runId !== "") run = await getPipelineRunById(adapter, runId);
|
|
@@ -3236,7 +3236,7 @@ async function runStatusAction(options) {
|
|
|
3236
3236
|
if (stateStore) try {
|
|
3237
3237
|
storeStories = await stateStore.queryStories({});
|
|
3238
3238
|
} catch (err) {
|
|
3239
|
-
logger$
|
|
3239
|
+
logger$11.debug({ err }, "StateStore query failed, continuing without store data");
|
|
3240
3240
|
}
|
|
3241
3241
|
if (outputFormat === "json") {
|
|
3242
3242
|
const statusOutput = buildPipelineStatusOutput(run, tokenSummary, decisionsCount, storiesCount);
|
|
@@ -3359,7 +3359,7 @@ async function runStatusAction(options) {
|
|
|
3359
3359
|
const msg = err instanceof Error ? err.message : String(err);
|
|
3360
3360
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
|
|
3361
3361
|
else process.stderr.write(`Error: ${msg}\n`);
|
|
3362
|
-
logger$
|
|
3362
|
+
logger$11.error({ err }, "status action failed");
|
|
3363
3363
|
return 1;
|
|
3364
3364
|
} finally {
|
|
3365
3365
|
try {
|
|
@@ -3706,7 +3706,7 @@ Analyze thoroughly and return ONLY the JSON array with no additional text.`;
|
|
|
3706
3706
|
|
|
3707
3707
|
//#endregion
|
|
3708
3708
|
//#region src/cli/commands/amend.ts
|
|
3709
|
-
const logger$
|
|
3709
|
+
const logger$10 = createLogger("amend-cmd");
|
|
3710
3710
|
/**
|
|
3711
3711
|
* Detect and apply supersessions after a phase completes in an amendment run.
|
|
3712
3712
|
*
|
|
@@ -3737,7 +3737,7 @@ async function runPostPhaseSupersessionDetection(adapter, amendmentRunId, curren
|
|
|
3737
3737
|
});
|
|
3738
3738
|
} catch (err) {
|
|
3739
3739
|
const msg = err instanceof Error ? err.message : String(err);
|
|
3740
|
-
logger$
|
|
3740
|
+
logger$10.warn({
|
|
3741
3741
|
err,
|
|
3742
3742
|
originalId: parentMatch.id,
|
|
3743
3743
|
supersedingId: newDec.id
|
|
@@ -3873,7 +3873,7 @@ async function runAmendAction(options) {
|
|
|
3873
3873
|
for (let i = startIdx; i < phaseOrder.length; i++) {
|
|
3874
3874
|
const currentPhase = phaseOrder[i];
|
|
3875
3875
|
const amendmentContext = handler.loadContextForPhase(currentPhase);
|
|
3876
|
-
logger$
|
|
3876
|
+
logger$10.info({
|
|
3877
3877
|
phase: currentPhase,
|
|
3878
3878
|
amendmentContextLen: amendmentContext.length
|
|
3879
3879
|
}, "Amendment context loaded for phase");
|
|
@@ -3994,7 +3994,7 @@ async function runAmendAction(options) {
|
|
|
3994
3994
|
} catch (err) {
|
|
3995
3995
|
const msg = err instanceof Error ? err.message : String(err);
|
|
3996
3996
|
process.stderr.write(`Error: ${msg}\n`);
|
|
3997
|
-
logger$
|
|
3997
|
+
logger$10.error({ err }, "amend failed");
|
|
3998
3998
|
return 1;
|
|
3999
3999
|
} finally {
|
|
4000
4000
|
try {
|
|
@@ -4547,7 +4547,7 @@ async function runSupervisorAction(options, deps = {}) {
|
|
|
4547
4547
|
await initSchema(expAdapter);
|
|
4548
4548
|
const { runRunAction: runPipeline } = await import(
|
|
4549
4549
|
/* @vite-ignore */
|
|
4550
|
-
"../run-
|
|
4550
|
+
"../run-0y5KOffG.js"
|
|
4551
4551
|
);
|
|
4552
4552
|
const runStoryFn = async (opts) => {
|
|
4553
4553
|
const exitCode = await runPipeline({
|
|
@@ -4795,7 +4795,7 @@ function registerSupervisorCommand(program, _version = "0.0.0", projectRoot = pr
|
|
|
4795
4795
|
|
|
4796
4796
|
//#endregion
|
|
4797
4797
|
//#region src/cli/commands/metrics.ts
|
|
4798
|
-
const logger$
|
|
4798
|
+
const logger$9 = createLogger("metrics-cmd");
|
|
4799
4799
|
async function openTelemetryAdapter(basePath) {
|
|
4800
4800
|
try {
|
|
4801
4801
|
const adapter = createDatabaseAdapter({
|
|
@@ -5161,7 +5161,7 @@ async function runMetricsAction(options) {
|
|
|
5161
5161
|
}
|
|
5162
5162
|
}
|
|
5163
5163
|
} catch (err) {
|
|
5164
|
-
logger$
|
|
5164
|
+
logger$9.debug({ err }, "getScenarioResultsForRun failed");
|
|
5165
5165
|
}
|
|
5166
5166
|
if (rows.length === 0) {
|
|
5167
5167
|
const msg = `No factory run found with id: ${run}`;
|
|
@@ -5197,7 +5197,7 @@ async function runMetricsAction(options) {
|
|
|
5197
5197
|
}
|
|
5198
5198
|
}
|
|
5199
5199
|
} catch (err) {
|
|
5200
|
-
logger$
|
|
5200
|
+
logger$9.debug({ err }, "getTwinRunsForRun failed — twin_runs table may not exist yet");
|
|
5201
5201
|
}
|
|
5202
5202
|
}
|
|
5203
5203
|
return 0;
|
|
@@ -5207,7 +5207,7 @@ async function runMetricsAction(options) {
|
|
|
5207
5207
|
try {
|
|
5208
5208
|
factoryRuns$1 = await getFactoryRunSummaries(adapter, limit);
|
|
5209
5209
|
} catch (err) {
|
|
5210
|
-
logger$
|
|
5210
|
+
logger$9.debug({ err }, "getFactoryRunSummaries failed in factory-only mode");
|
|
5211
5211
|
}
|
|
5212
5212
|
if (outputFormat === "json") process.stdout.write(formatOutput({ graph_runs: factoryRuns$1 }, "json", true) + "\n");
|
|
5213
5213
|
else if (factoryRuns$1.length === 0) process.stdout.write("No factory runs recorded yet.\n");
|
|
@@ -5268,7 +5268,7 @@ async function runMetricsAction(options) {
|
|
|
5268
5268
|
doltMetrics = await stateStore.queryMetrics(doltFilter);
|
|
5269
5269
|
await stateStore.close();
|
|
5270
5270
|
} catch (doltErr) {
|
|
5271
|
-
logger$
|
|
5271
|
+
logger$9.warn({ err: doltErr }, "StateStore query failed — falling back to SQLite metrics only");
|
|
5272
5272
|
}
|
|
5273
5273
|
const storyMetricDecisions = await getDecisionsByCategory(adapter, STORY_METRICS);
|
|
5274
5274
|
const storyMetrics = storyMetricDecisions.map((d) => {
|
|
@@ -5311,7 +5311,7 @@ async function runMetricsAction(options) {
|
|
|
5311
5311
|
try {
|
|
5312
5312
|
factoryRuns = await getFactoryRunSummaries(adapter, limit);
|
|
5313
5313
|
} catch (err) {
|
|
5314
|
-
logger$
|
|
5314
|
+
logger$9.debug({ err }, "getFactoryRunSummaries failed — table may not exist in older databases");
|
|
5315
5315
|
}
|
|
5316
5316
|
if (outputFormat === "json") {
|
|
5317
5317
|
const runsWithBreakdown = runs.map((run$1) => ({
|
|
@@ -5420,7 +5420,7 @@ async function runMetricsAction(options) {
|
|
|
5420
5420
|
const msg = err instanceof Error ? err.message : String(err);
|
|
5421
5421
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
|
|
5422
5422
|
else process.stderr.write(`Error: ${msg}\n`);
|
|
5423
|
-
logger$
|
|
5423
|
+
logger$9.error({ err }, "metrics action failed");
|
|
5424
5424
|
return 1;
|
|
5425
5425
|
} finally {
|
|
5426
5426
|
try {
|
|
@@ -5620,7 +5620,7 @@ function registerMigrateCommand(program) {
|
|
|
5620
5620
|
function getLatestSessionId(_adapter) {
|
|
5621
5621
|
return null;
|
|
5622
5622
|
}
|
|
5623
|
-
const logger$
|
|
5623
|
+
const logger$8 = createLogger("cost-cmd");
|
|
5624
5624
|
const COST_EXIT_SUCCESS = 0;
|
|
5625
5625
|
const COST_EXIT_ERROR = 1;
|
|
5626
5626
|
/**
|
|
@@ -5864,7 +5864,7 @@ async function runCostAction(options) {
|
|
|
5864
5864
|
} catch (err) {
|
|
5865
5865
|
const message = err instanceof Error ? err.message : String(err);
|
|
5866
5866
|
process.stderr.write(`Error: ${message}\n`);
|
|
5867
|
-
logger$
|
|
5867
|
+
logger$8.error({ err }, "runCostAction failed");
|
|
5868
5868
|
return COST_EXIT_ERROR;
|
|
5869
5869
|
} finally {
|
|
5870
5870
|
if (adapter !== null) try {
|
|
@@ -5898,7 +5898,7 @@ function registerCostCommand(program, version = "0.0.0", projectRoot = process.c
|
|
|
5898
5898
|
|
|
5899
5899
|
//#endregion
|
|
5900
5900
|
//#region src/cli/commands/monitor.ts
|
|
5901
|
-
const logger$
|
|
5901
|
+
const logger$7 = createLogger("monitor-cmd");
|
|
5902
5902
|
const MONITOR_EXIT_SUCCESS = 0;
|
|
5903
5903
|
const MONITOR_EXIT_ERROR = 1;
|
|
5904
5904
|
/**
|
|
@@ -6101,7 +6101,7 @@ async function runMonitorReportAction(options) {
|
|
|
6101
6101
|
} catch (err) {
|
|
6102
6102
|
const message = err instanceof Error ? err.message : String(err);
|
|
6103
6103
|
process.stderr.write(`Error: ${message}\n`);
|
|
6104
|
-
logger$
|
|
6104
|
+
logger$7.error({ err }, "runMonitorReportAction failed");
|
|
6105
6105
|
return MONITOR_EXIT_ERROR;
|
|
6106
6106
|
} finally {
|
|
6107
6107
|
if (monitorDb !== null) try {
|
|
@@ -6163,7 +6163,7 @@ async function runMonitorStatusAction(options) {
|
|
|
6163
6163
|
} catch (err) {
|
|
6164
6164
|
const message = err instanceof Error ? err.message : String(err);
|
|
6165
6165
|
process.stderr.write(`Error: ${message}\n`);
|
|
6166
|
-
logger$
|
|
6166
|
+
logger$7.error({ err }, "runMonitorStatusAction failed");
|
|
6167
6167
|
return MONITOR_EXIT_ERROR;
|
|
6168
6168
|
} finally {
|
|
6169
6169
|
if (monitorDb !== null) try {
|
|
@@ -6198,7 +6198,7 @@ async function runMonitorResetAction(options) {
|
|
|
6198
6198
|
} catch (err) {
|
|
6199
6199
|
const message = err instanceof Error ? err.message : String(err);
|
|
6200
6200
|
process.stderr.write(`Error: ${message}\n`);
|
|
6201
|
-
logger$
|
|
6201
|
+
logger$7.error({ err }, "runMonitorResetAction failed");
|
|
6202
6202
|
return MONITOR_EXIT_ERROR;
|
|
6203
6203
|
} finally {
|
|
6204
6204
|
if (monitorDb !== null) try {
|
|
@@ -6246,7 +6246,7 @@ async function runMonitorRecommendationsAction(options) {
|
|
|
6246
6246
|
} catch (err) {
|
|
6247
6247
|
const message = err instanceof Error ? err.message : String(err);
|
|
6248
6248
|
process.stderr.write(`Error: ${message}\n`);
|
|
6249
|
-
logger$
|
|
6249
|
+
logger$7.error({ err }, "runMonitorRecommendationsAction failed");
|
|
6250
6250
|
return MONITOR_EXIT_ERROR;
|
|
6251
6251
|
} finally {
|
|
6252
6252
|
if (monitorDb !== null) try {
|
|
@@ -6324,7 +6324,7 @@ function registerMonitorCommand(program, version = "0.0.0", projectRoot = proces
|
|
|
6324
6324
|
|
|
6325
6325
|
//#endregion
|
|
6326
6326
|
//#region src/cli/commands/merge.ts
|
|
6327
|
-
const logger$
|
|
6327
|
+
const logger$6 = createLogger("merge-cmd");
|
|
6328
6328
|
const MERGE_EXIT_SUCCESS = 0;
|
|
6329
6329
|
const MERGE_EXIT_CONFLICT = 1;
|
|
6330
6330
|
const MERGE_EXIT_ERROR = 2;
|
|
@@ -6362,7 +6362,7 @@ async function mergeTask(taskId, targetBranch, projectRoot) {
|
|
|
6362
6362
|
projectRoot
|
|
6363
6363
|
});
|
|
6364
6364
|
try {
|
|
6365
|
-
logger$
|
|
6365
|
+
logger$6.info({
|
|
6366
6366
|
taskId,
|
|
6367
6367
|
targetBranch
|
|
6368
6368
|
}, "Running conflict detection...");
|
|
@@ -6384,7 +6384,7 @@ async function mergeTask(taskId, targetBranch, projectRoot) {
|
|
|
6384
6384
|
} catch (err) {
|
|
6385
6385
|
const message = err instanceof Error ? err.message : String(err);
|
|
6386
6386
|
console.error(`Error merging task "${taskId}": ${message}`);
|
|
6387
|
-
logger$
|
|
6387
|
+
logger$6.error({
|
|
6388
6388
|
taskId,
|
|
6389
6389
|
err
|
|
6390
6390
|
}, "merge --task failed");
|
|
@@ -6438,7 +6438,7 @@ async function mergeAll(targetBranch, projectRoot, taskIds) {
|
|
|
6438
6438
|
error: message
|
|
6439
6439
|
});
|
|
6440
6440
|
console.log(` Error for task "${taskId}": ${message}`);
|
|
6441
|
-
logger$
|
|
6441
|
+
logger$6.error({
|
|
6442
6442
|
taskId,
|
|
6443
6443
|
err
|
|
6444
6444
|
}, "merge --all: task failed");
|
|
@@ -6491,7 +6491,7 @@ function registerMergeCommand(program, projectRoot = process.cwd()) {
|
|
|
6491
6491
|
|
|
6492
6492
|
//#endregion
|
|
6493
6493
|
//#region src/cli/commands/worktrees.ts
|
|
6494
|
-
const logger$
|
|
6494
|
+
const logger$5 = createLogger("worktrees-cmd");
|
|
6495
6495
|
const WORKTREES_EXIT_SUCCESS = 0;
|
|
6496
6496
|
const WORKTREES_EXIT_ERROR = 1;
|
|
6497
6497
|
/** Valid task statuses for filtering */
|
|
@@ -6618,7 +6618,7 @@ async function listWorktreesAction(options) {
|
|
|
6618
6618
|
try {
|
|
6619
6619
|
worktreeInfos = await manager.listWorktrees();
|
|
6620
6620
|
} catch (err) {
|
|
6621
|
-
logger$
|
|
6621
|
+
logger$5.error({ err }, "Failed to list worktrees");
|
|
6622
6622
|
const message = err instanceof Error ? err.message : String(err);
|
|
6623
6623
|
process.stderr.write(`Error listing worktrees: ${message}\n`);
|
|
6624
6624
|
return WORKTREES_EXIT_ERROR;
|
|
@@ -6645,7 +6645,7 @@ async function listWorktreesAction(options) {
|
|
|
6645
6645
|
} catch (err) {
|
|
6646
6646
|
const message = err instanceof Error ? err.message : String(err);
|
|
6647
6647
|
process.stderr.write(`Error: ${message}\n`);
|
|
6648
|
-
logger$
|
|
6648
|
+
logger$5.error({ err }, "listWorktreesAction failed");
|
|
6649
6649
|
return WORKTREES_EXIT_ERROR;
|
|
6650
6650
|
}
|
|
6651
6651
|
}
|
|
@@ -6686,7 +6686,7 @@ function registerWorktreesCommand(program, version = "0.0.0", projectRoot = proc
|
|
|
6686
6686
|
|
|
6687
6687
|
//#endregion
|
|
6688
6688
|
//#region src/cli/commands/brainstorm.ts
|
|
6689
|
-
const logger$
|
|
6689
|
+
const logger$4 = createLogger("brainstorm-cmd");
|
|
6690
6690
|
/**
|
|
6691
6691
|
* Detect whether the project has existing planning artifacts that indicate
|
|
6692
6692
|
* this is an amendment session (vs. a brand-new project brainstorm).
|
|
@@ -6732,13 +6732,13 @@ async function loadAmendmentContextDocuments(projectRoot) {
|
|
|
6732
6732
|
try {
|
|
6733
6733
|
brief = await readFile(briefPath, "utf-8");
|
|
6734
6734
|
} catch {
|
|
6735
|
-
logger$
|
|
6735
|
+
logger$4.warn({ briefPath }, "product-brief.md not found — continuing without brief context");
|
|
6736
6736
|
process.stderr.write(`Warning: product-brief.md not found at ${briefPath}\n`);
|
|
6737
6737
|
}
|
|
6738
6738
|
try {
|
|
6739
6739
|
prd = await readFile(prdPath, "utf-8");
|
|
6740
6740
|
} catch {
|
|
6741
|
-
logger$
|
|
6741
|
+
logger$4.warn({ prdPath }, "requirements.md not found — continuing without PRD context");
|
|
6742
6742
|
process.stderr.write(`Warning: requirements.md not found at ${prdPath}\n`);
|
|
6743
6743
|
}
|
|
6744
6744
|
return {
|
|
@@ -6947,7 +6947,7 @@ async function dispatchToPersonas(userPrompt, context, llmDispatch) {
|
|
|
6947
6947
|
}
|
|
6948
6948
|
];
|
|
6949
6949
|
const defaultDispatch = async (prompt, personaName) => {
|
|
6950
|
-
logger$
|
|
6950
|
+
logger$4.debug({
|
|
6951
6951
|
personaName,
|
|
6952
6952
|
promptLength: prompt.length
|
|
6953
6953
|
}, "Dispatching to persona (stub mode)");
|
|
@@ -6964,7 +6964,7 @@ async function dispatchToPersonas(userPrompt, context, llmDispatch) {
|
|
|
6964
6964
|
};
|
|
6965
6965
|
} catch (err) {
|
|
6966
6966
|
const msg = err instanceof Error ? err.message : String(err);
|
|
6967
|
-
logger$
|
|
6967
|
+
logger$4.error({
|
|
6968
6968
|
err,
|
|
6969
6969
|
personaName: persona.name
|
|
6970
6970
|
}, "Persona dispatch failed");
|
|
@@ -7116,7 +7116,7 @@ async function runBrainstormSession(options, llmDispatch, rlInterface) {
|
|
|
7116
7116
|
}
|
|
7117
7117
|
});
|
|
7118
7118
|
rl.on("error", (err) => {
|
|
7119
|
-
logger$
|
|
7119
|
+
logger$4.error({ err }, "readline error");
|
|
7120
7120
|
if (!sessionEnded) endSession(false);
|
|
7121
7121
|
});
|
|
7122
7122
|
});
|
|
@@ -7155,709 +7155,6 @@ function registerBrainstormCommand(program, _version = "0.0.0", projectRoot = pr
|
|
|
7155
7155
|
});
|
|
7156
7156
|
}
|
|
7157
7157
|
|
|
7158
|
-
//#endregion
|
|
7159
|
-
//#region src/modules/export/renderers.ts
|
|
7160
|
-
/** Fields from analysis/product-brief decisions to render, in display order */
|
|
7161
|
-
const PRODUCT_BRIEF_FIELDS = [
|
|
7162
|
-
"problem_statement",
|
|
7163
|
-
"target_users",
|
|
7164
|
-
"core_features",
|
|
7165
|
-
"success_metrics",
|
|
7166
|
-
"constraints",
|
|
7167
|
-
"technology_constraints"
|
|
7168
|
-
];
|
|
7169
|
-
/**
|
|
7170
|
-
* Known acronyms that should appear fully uppercased when they are a standalone
|
|
7171
|
-
* word in a label (e.g. 'fr_coverage' → 'FR Coverage', 'api_style' → 'API Style').
|
|
7172
|
-
*/
|
|
7173
|
-
const UPPERCASE_ACRONYMS = new Set([
|
|
7174
|
-
"fr",
|
|
7175
|
-
"nfr",
|
|
7176
|
-
"ux",
|
|
7177
|
-
"api",
|
|
7178
|
-
"db",
|
|
7179
|
-
"id",
|
|
7180
|
-
"url"
|
|
7181
|
-
]);
|
|
7182
|
-
/**
|
|
7183
|
-
* Convert a snake_case key to Title Case for display headings.
|
|
7184
|
-
* Known acronyms (fr, nfr, ux, api, db, id, url) are rendered fully uppercased.
|
|
7185
|
-
*/
|
|
7186
|
-
function fieldLabel(key) {
|
|
7187
|
-
return key.replace(/_/g, " ").replace(/\b\w+/g, (word) => {
|
|
7188
|
-
const lower = word.toLowerCase();
|
|
7189
|
-
if (UPPERCASE_ACRONYMS.has(lower)) return lower.toUpperCase();
|
|
7190
|
-
return word.charAt(0).toUpperCase() + word.slice(1).toLowerCase();
|
|
7191
|
-
});
|
|
7192
|
-
}
|
|
7193
|
-
/**
|
|
7194
|
-
* Safely parse a JSON string; returns the original string if parsing fails.
|
|
7195
|
-
*/
|
|
7196
|
-
function safeParseJson(value) {
|
|
7197
|
-
try {
|
|
7198
|
-
return JSON.parse(value);
|
|
7199
|
-
} catch {
|
|
7200
|
-
return value;
|
|
7201
|
-
}
|
|
7202
|
-
}
|
|
7203
|
-
/**
|
|
7204
|
-
* Render a decision value to a markdown-friendly string.
|
|
7205
|
-
* - Arrays → bulleted list items
|
|
7206
|
-
* - Objects → key: value lines
|
|
7207
|
-
* - Primitives → plain string
|
|
7208
|
-
*/
|
|
7209
|
-
function renderValue(rawValue) {
|
|
7210
|
-
const parsed = safeParseJson(rawValue);
|
|
7211
|
-
if (Array.isArray(parsed)) return parsed.map((item) => `- ${String(item)}`).join("\n");
|
|
7212
|
-
if (typeof parsed === "object" && parsed !== null) return Object.entries(parsed).map(([k, v]) => `- **${fieldLabel(k)}**: ${String(v)}`).join("\n");
|
|
7213
|
-
return String(parsed);
|
|
7214
|
-
}
|
|
7215
|
-
/**
|
|
7216
|
-
* Render analysis-phase decisions as a `product-brief.md` file.
|
|
7217
|
-
*
|
|
7218
|
-
* Merges `product-brief` category decisions with `technology-constraints`
|
|
7219
|
-
* category decisions (they are stored separately in the decision store).
|
|
7220
|
-
*
|
|
7221
|
-
* @param decisions - All decisions from the analysis phase (any category)
|
|
7222
|
-
* @returns Formatted markdown content for product-brief.md
|
|
7223
|
-
*/
|
|
7224
|
-
function renderProductBrief(decisions) {
|
|
7225
|
-
const briefDecisions = decisions.filter((d) => d.category === "product-brief");
|
|
7226
|
-
const techConstraintDecisions = decisions.filter((d) => d.category === "technology-constraints");
|
|
7227
|
-
const briefMap = Object.fromEntries(briefDecisions.map((d) => [d.key, d.value]));
|
|
7228
|
-
if (techConstraintDecisions.length > 0 && briefMap["technology_constraints"] === void 0) {
|
|
7229
|
-
const tcBullets = techConstraintDecisions.flatMap((d) => {
|
|
7230
|
-
const parsed = safeParseJson(d.value);
|
|
7231
|
-
if (Array.isArray(parsed)) return parsed.map((item) => String(item));
|
|
7232
|
-
return [String(parsed)];
|
|
7233
|
-
});
|
|
7234
|
-
briefMap["technology_constraints"] = JSON.stringify(tcBullets);
|
|
7235
|
-
}
|
|
7236
|
-
if (briefDecisions.length === 0 && techConstraintDecisions.length === 0) return "";
|
|
7237
|
-
const parts = ["# Product Brief", ""];
|
|
7238
|
-
for (const field of PRODUCT_BRIEF_FIELDS) {
|
|
7239
|
-
const rawValue = briefMap[field];
|
|
7240
|
-
if (rawValue === void 0) continue;
|
|
7241
|
-
parts.push(`## ${fieldLabel(field)}`);
|
|
7242
|
-
parts.push("");
|
|
7243
|
-
parts.push(renderValue(rawValue));
|
|
7244
|
-
parts.push("");
|
|
7245
|
-
}
|
|
7246
|
-
return parts.join("\n");
|
|
7247
|
-
}
|
|
7248
|
-
/**
|
|
7249
|
-
* Render planning-phase decisions (and requirements table) as a `prd.md` file.
|
|
7250
|
-
*
|
|
7251
|
-
* Sections rendered (when data is present):
|
|
7252
|
-
* - Project Classification (classification decisions)
|
|
7253
|
-
* - Functional Requirements (functional-requirements decisions)
|
|
7254
|
-
* - Non-Functional Requirements (non-functional-requirements decisions)
|
|
7255
|
-
* - Domain Model (domain-model decisions)
|
|
7256
|
-
* - User Stories (user-stories decisions)
|
|
7257
|
-
* - Tech Stack (tech-stack decisions)
|
|
7258
|
-
* - Out of Scope (out-of-scope decisions)
|
|
7259
|
-
*
|
|
7260
|
-
* @param decisions - All decisions from the planning phase
|
|
7261
|
-
* @param requirements - Requirements records from the requirements table (optional)
|
|
7262
|
-
* @returns Formatted markdown content for prd.md
|
|
7263
|
-
*/
|
|
7264
|
-
function renderPrd(decisions, requirements = []) {
|
|
7265
|
-
if (decisions.length === 0) return "";
|
|
7266
|
-
const parts = ["# Product Requirements Document", ""];
|
|
7267
|
-
const classificationDecisions = decisions.filter((d) => d.category === "classification");
|
|
7268
|
-
if (classificationDecisions.length > 0) {
|
|
7269
|
-
parts.push("## Project Classification");
|
|
7270
|
-
parts.push("");
|
|
7271
|
-
for (const d of classificationDecisions) {
|
|
7272
|
-
const parsed = safeParseJson(d.value);
|
|
7273
|
-
if (Array.isArray(parsed)) {
|
|
7274
|
-
parts.push(`**${fieldLabel(d.key)}**:`);
|
|
7275
|
-
for (const item of parsed) parts.push(`- ${String(item)}`);
|
|
7276
|
-
} else parts.push(`**${fieldLabel(d.key)}**: ${String(parsed)}`);
|
|
7277
|
-
}
|
|
7278
|
-
parts.push("");
|
|
7279
|
-
}
|
|
7280
|
-
const frDecisions = decisions.filter((d) => d.category === "functional-requirements");
|
|
7281
|
-
if (frDecisions.length > 0) {
|
|
7282
|
-
parts.push("## Functional Requirements");
|
|
7283
|
-
parts.push("");
|
|
7284
|
-
for (const d of frDecisions) {
|
|
7285
|
-
const parsed = safeParseJson(d.value);
|
|
7286
|
-
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
7287
|
-
const fr = parsed;
|
|
7288
|
-
const id = fr.id ?? d.key;
|
|
7289
|
-
const priority = fr.priority ? ` [${fr.priority.toUpperCase()}]` : "";
|
|
7290
|
-
parts.push(`- **${id}**${priority}: ${fr.description ?? d.value}`);
|
|
7291
|
-
if (fr.acceptance_criteria && fr.acceptance_criteria.length > 0) for (const ac of fr.acceptance_criteria) parts.push(` - ${ac}`);
|
|
7292
|
-
} else parts.push(`- **${d.key}**: ${renderValue(d.value)}`);
|
|
7293
|
-
}
|
|
7294
|
-
parts.push("");
|
|
7295
|
-
}
|
|
7296
|
-
const nfrDecisions = decisions.filter((d) => d.category === "non-functional-requirements");
|
|
7297
|
-
if (nfrDecisions.length > 0) {
|
|
7298
|
-
parts.push("## Non-Functional Requirements");
|
|
7299
|
-
parts.push("");
|
|
7300
|
-
for (const d of nfrDecisions) {
|
|
7301
|
-
const parsed = safeParseJson(d.value);
|
|
7302
|
-
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
7303
|
-
const nfr = parsed;
|
|
7304
|
-
const id = nfr.id ?? d.key;
|
|
7305
|
-
const cat = nfr.category ? ` [${nfr.category.toUpperCase()}]` : "";
|
|
7306
|
-
parts.push(`- **${id}**${cat}: ${nfr.description ?? d.value}`);
|
|
7307
|
-
} else parts.push(`- **${d.key}**: ${renderValue(d.value)}`);
|
|
7308
|
-
}
|
|
7309
|
-
parts.push("");
|
|
7310
|
-
}
|
|
7311
|
-
const domainDecisions = decisions.filter((d) => d.category === "domain-model");
|
|
7312
|
-
if (domainDecisions.length > 0) {
|
|
7313
|
-
parts.push("## Domain Model");
|
|
7314
|
-
parts.push("");
|
|
7315
|
-
for (const d of domainDecisions) parts.push(renderValue(d.value));
|
|
7316
|
-
parts.push("");
|
|
7317
|
-
}
|
|
7318
|
-
const userStoryDecisions = decisions.filter((d) => d.category === "user-stories");
|
|
7319
|
-
if (userStoryDecisions.length > 0) {
|
|
7320
|
-
parts.push("## User Stories");
|
|
7321
|
-
parts.push("");
|
|
7322
|
-
for (const d of userStoryDecisions) {
|
|
7323
|
-
const parsed = safeParseJson(d.value);
|
|
7324
|
-
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
7325
|
-
const us = parsed;
|
|
7326
|
-
if (us.title) {
|
|
7327
|
-
parts.push(`### ${us.title}`);
|
|
7328
|
-
parts.push("");
|
|
7329
|
-
if (us.description) {
|
|
7330
|
-
parts.push(us.description);
|
|
7331
|
-
parts.push("");
|
|
7332
|
-
}
|
|
7333
|
-
} else {
|
|
7334
|
-
parts.push(renderValue(d.value));
|
|
7335
|
-
parts.push("");
|
|
7336
|
-
}
|
|
7337
|
-
} else {
|
|
7338
|
-
parts.push(renderValue(d.value));
|
|
7339
|
-
parts.push("");
|
|
7340
|
-
}
|
|
7341
|
-
}
|
|
7342
|
-
}
|
|
7343
|
-
const techStackDecisions = decisions.filter((d) => d.category === "tech-stack");
|
|
7344
|
-
if (techStackDecisions.length > 0) {
|
|
7345
|
-
parts.push("## Tech Stack");
|
|
7346
|
-
parts.push("");
|
|
7347
|
-
for (const d of techStackDecisions) if (d.key === "tech_stack") {
|
|
7348
|
-
const parsed = safeParseJson(d.value);
|
|
7349
|
-
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) for (const [k, v] of Object.entries(parsed)) parts.push(`- **${fieldLabel(k)}**: ${String(v)}`);
|
|
7350
|
-
else parts.push(`- **${fieldLabel(d.key)}**: ${d.value}`);
|
|
7351
|
-
} else parts.push(`- **${fieldLabel(d.key)}**: ${d.value}`);
|
|
7352
|
-
parts.push("");
|
|
7353
|
-
}
|
|
7354
|
-
const outOfScopeDecisions = decisions.filter((d) => d.category === "out-of-scope");
|
|
7355
|
-
if (outOfScopeDecisions.length > 0) {
|
|
7356
|
-
parts.push("## Out of Scope");
|
|
7357
|
-
parts.push("");
|
|
7358
|
-
for (const d of outOfScopeDecisions) parts.push(renderValue(d.value));
|
|
7359
|
-
parts.push("");
|
|
7360
|
-
}
|
|
7361
|
-
const functionalReqs = requirements.filter((r) => r.type === "functional");
|
|
7362
|
-
const nonFunctionalReqs = requirements.filter((r) => r.type === "non_functional");
|
|
7363
|
-
if ((functionalReqs.length > 0 || nonFunctionalReqs.length > 0) && frDecisions.length === 0 && nfrDecisions.length === 0) {
|
|
7364
|
-
parts.push("## Requirements (from Requirements Table)");
|
|
7365
|
-
parts.push("");
|
|
7366
|
-
if (functionalReqs.length > 0) {
|
|
7367
|
-
parts.push("### Functional Requirements");
|
|
7368
|
-
parts.push("");
|
|
7369
|
-
for (const r of functionalReqs) {
|
|
7370
|
-
const priority = r.priority ? ` [${r.priority.toUpperCase()}]` : "";
|
|
7371
|
-
parts.push(`- ${r.source ?? ""}${priority}: ${r.description}`);
|
|
7372
|
-
}
|
|
7373
|
-
parts.push("");
|
|
7374
|
-
}
|
|
7375
|
-
if (nonFunctionalReqs.length > 0) {
|
|
7376
|
-
parts.push("### Non-Functional Requirements");
|
|
7377
|
-
parts.push("");
|
|
7378
|
-
for (const r of nonFunctionalReqs) {
|
|
7379
|
-
const priority = r.priority ? ` [${r.priority.toUpperCase()}]` : "";
|
|
7380
|
-
parts.push(`- ${priority}: ${r.description}`);
|
|
7381
|
-
}
|
|
7382
|
-
parts.push("");
|
|
7383
|
-
}
|
|
7384
|
-
}
|
|
7385
|
-
return parts.join("\n");
|
|
7386
|
-
}
|
|
7387
|
-
/**
|
|
7388
|
-
* Render solutioning-phase architecture decisions as an `architecture.md` file.
|
|
7389
|
-
*
|
|
7390
|
-
* Groups all architecture decisions into a single `## Architecture Decisions`
|
|
7391
|
-
* section, formatting each as `**key**: value` with italicised rationale where
|
|
7392
|
-
* present. The heading pattern matches the regex used by `seedMethodologyContext()`
|
|
7393
|
-
* so that the exported file can be round-tripped back into the decision store.
|
|
7394
|
-
*
|
|
7395
|
-
* @param decisions - All decisions from the solutioning phase (any category)
|
|
7396
|
-
* @returns Formatted markdown content for architecture.md, or '' if no data
|
|
7397
|
-
*/
|
|
7398
|
-
function renderArchitecture(decisions) {
|
|
7399
|
-
const archDecisions = decisions.filter((d) => d.category === "architecture");
|
|
7400
|
-
if (archDecisions.length === 0) return "";
|
|
7401
|
-
const parts = ["# Architecture", ""];
|
|
7402
|
-
parts.push("## Architecture Decisions");
|
|
7403
|
-
parts.push("");
|
|
7404
|
-
for (const d of archDecisions) {
|
|
7405
|
-
const value = safeParseJson(d.value);
|
|
7406
|
-
let displayValue;
|
|
7407
|
-
if (typeof value === "object" && value !== null && !Array.isArray(value)) {
|
|
7408
|
-
displayValue = Object.entries(value).map(([k, v]) => ` - *${fieldLabel(k)}*: ${String(v)}`).join("\n");
|
|
7409
|
-
parts.push(`**${d.key}**:`);
|
|
7410
|
-
parts.push(displayValue);
|
|
7411
|
-
} else if (Array.isArray(value)) {
|
|
7412
|
-
displayValue = value.map((item) => ` - ${String(item)}`).join("\n");
|
|
7413
|
-
parts.push(`**${d.key}**:`);
|
|
7414
|
-
parts.push(displayValue);
|
|
7415
|
-
} else {
|
|
7416
|
-
displayValue = String(value);
|
|
7417
|
-
if (d.rationale) parts.push(`**${d.key}**: ${displayValue} *(${d.rationale})*`);
|
|
7418
|
-
else parts.push(`**${d.key}**: ${displayValue}`);
|
|
7419
|
-
}
|
|
7420
|
-
}
|
|
7421
|
-
parts.push("");
|
|
7422
|
-
return parts.join("\n");
|
|
7423
|
-
}
|
|
7424
|
-
/**
|
|
7425
|
-
* Render solutioning-phase epics and stories decisions as an `epics.md` file.
|
|
7426
|
-
*
|
|
7427
|
-
* Output format:
|
|
7428
|
-
* ```
|
|
7429
|
-
* ## Epic 1: Title
|
|
7430
|
-
* Description
|
|
7431
|
-
*
|
|
7432
|
-
* ### Story 1-1: Title
|
|
7433
|
-
* **Priority**: must
|
|
7434
|
-
* **Description**: ...
|
|
7435
|
-
* **Acceptance Criteria**:
|
|
7436
|
-
* - AC1
|
|
7437
|
-
* - AC2
|
|
7438
|
-
* ```
|
|
7439
|
-
*
|
|
7440
|
-
* The `## Epic N:` heading pattern is parsed by `parseEpicShards()` in
|
|
7441
|
-
* `seed-methodology-context.ts`, satisfying the round-trip contract (AC5).
|
|
7442
|
-
*
|
|
7443
|
-
* Stories are associated with their parent epic by the numeric prefix of the
|
|
7444
|
-
* story key (e.g., story key `2-3` → epic 2).
|
|
7445
|
-
*
|
|
7446
|
-
* @param decisions - All decisions from the solutioning phase (any category)
|
|
7447
|
-
* @returns Formatted markdown content for epics.md, or '' if no data
|
|
7448
|
-
*/
|
|
7449
|
-
function renderEpics(decisions) {
|
|
7450
|
-
const epicDecisions = decisions.filter((d) => d.category === "epics");
|
|
7451
|
-
const storyDecisions = decisions.filter((d) => d.category === "stories");
|
|
7452
|
-
if (epicDecisions.length === 0 && storyDecisions.length === 0) return "";
|
|
7453
|
-
const epicMap = new Map();
|
|
7454
|
-
for (const d of epicDecisions) {
|
|
7455
|
-
const match = /^epic-(\d+)$/i.exec(d.key);
|
|
7456
|
-
if (match === null) continue;
|
|
7457
|
-
const epicNum = parseInt(match[1], 10);
|
|
7458
|
-
const parsed = safeParseJson(d.value);
|
|
7459
|
-
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
7460
|
-
const p = parsed;
|
|
7461
|
-
epicMap.set(epicNum, {
|
|
7462
|
-
num: epicNum,
|
|
7463
|
-
title: p.title ?? `Epic ${epicNum}`,
|
|
7464
|
-
description: p.description ?? ""
|
|
7465
|
-
});
|
|
7466
|
-
} else epicMap.set(epicNum, {
|
|
7467
|
-
num: epicNum,
|
|
7468
|
-
title: String(parsed),
|
|
7469
|
-
description: ""
|
|
7470
|
-
});
|
|
7471
|
-
}
|
|
7472
|
-
const storyMap = new Map();
|
|
7473
|
-
for (const d of storyDecisions) {
|
|
7474
|
-
const parsed = safeParseJson(d.value);
|
|
7475
|
-
let story;
|
|
7476
|
-
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
7477
|
-
const p = parsed;
|
|
7478
|
-
const storyKey = p.key ?? d.key;
|
|
7479
|
-
const keyMatch = /^(\d+)-(\d+)/.exec(storyKey);
|
|
7480
|
-
if (keyMatch === null) continue;
|
|
7481
|
-
const epicNum = parseInt(keyMatch[1], 10);
|
|
7482
|
-
const storyNum = parseInt(keyMatch[2], 10);
|
|
7483
|
-
story = {
|
|
7484
|
-
key: storyKey,
|
|
7485
|
-
epicNum,
|
|
7486
|
-
storyNum,
|
|
7487
|
-
title: p.title ?? `Story ${storyKey}`,
|
|
7488
|
-
description: p.description ?? "",
|
|
7489
|
-
ac: p.acceptance_criteria ?? p.ac ?? [],
|
|
7490
|
-
priority: p.priority ?? "must"
|
|
7491
|
-
};
|
|
7492
|
-
} else {
|
|
7493
|
-
const storyKey = d.key;
|
|
7494
|
-
const keyMatch = /^(\d+)-(\d+)/.exec(storyKey);
|
|
7495
|
-
if (keyMatch === null) continue;
|
|
7496
|
-
const epicNum = parseInt(keyMatch[1], 10);
|
|
7497
|
-
const storyNum = parseInt(keyMatch[2], 10);
|
|
7498
|
-
story = {
|
|
7499
|
-
key: storyKey,
|
|
7500
|
-
epicNum,
|
|
7501
|
-
storyNum,
|
|
7502
|
-
title: `Story ${storyKey}`,
|
|
7503
|
-
description: String(parsed),
|
|
7504
|
-
ac: [],
|
|
7505
|
-
priority: "must"
|
|
7506
|
-
};
|
|
7507
|
-
}
|
|
7508
|
-
if (!storyMap.has(story.epicNum)) storyMap.set(story.epicNum, []);
|
|
7509
|
-
storyMap.get(story.epicNum).push(story);
|
|
7510
|
-
}
|
|
7511
|
-
for (const stories of storyMap.values()) stories.sort((a, b) => a.storyNum - b.storyNum);
|
|
7512
|
-
const allEpicNums = new Set([...epicMap.keys(), ...storyMap.keys()]);
|
|
7513
|
-
const sortedEpicNums = [...allEpicNums].sort((a, b) => a - b);
|
|
7514
|
-
const parts = ["# Epics and Stories", ""];
|
|
7515
|
-
for (const epicNum of sortedEpicNums) {
|
|
7516
|
-
const epic = epicMap.get(epicNum);
|
|
7517
|
-
const epicTitle = epic?.title ?? `Epic ${epicNum}`;
|
|
7518
|
-
const epicDescription = epic?.description ?? "";
|
|
7519
|
-
parts.push(`## Epic ${epicNum}: ${epicTitle}`);
|
|
7520
|
-
parts.push("");
|
|
7521
|
-
if (epicDescription) {
|
|
7522
|
-
parts.push(epicDescription);
|
|
7523
|
-
parts.push("");
|
|
7524
|
-
}
|
|
7525
|
-
const stories = storyMap.get(epicNum) ?? [];
|
|
7526
|
-
for (const story of stories) {
|
|
7527
|
-
parts.push(`### Story ${story.key}: ${story.title}`);
|
|
7528
|
-
parts.push("");
|
|
7529
|
-
parts.push(`**Priority**: ${story.priority}`);
|
|
7530
|
-
if (story.description) parts.push(`**Description**: ${story.description}`);
|
|
7531
|
-
if (story.ac.length > 0) {
|
|
7532
|
-
parts.push("**Acceptance Criteria**:");
|
|
7533
|
-
for (const ac of story.ac) parts.push(`- ${ac}`);
|
|
7534
|
-
}
|
|
7535
|
-
parts.push("");
|
|
7536
|
-
}
|
|
7537
|
-
}
|
|
7538
|
-
return parts.join("\n");
|
|
7539
|
-
}
|
|
7540
|
-
/**
|
|
7541
|
-
* Render `operational-finding` category decisions as an "Operational Findings" section.
|
|
7542
|
-
*
|
|
7543
|
-
* Groups findings by run key (for run-summary decisions) and stall key (for stall decisions).
|
|
7544
|
-
* Returns '' if no matching decisions are found.
|
|
7545
|
-
*
|
|
7546
|
-
* @param decisions - Decisions of any category; filters for 'operational-finding'
|
|
7547
|
-
* @returns Formatted markdown content, or '' if empty
|
|
7548
|
-
*/
|
|
7549
|
-
function renderOperationalFindings(decisions) {
|
|
7550
|
-
const findings = decisions.filter((d) => d.category === "operational-finding");
|
|
7551
|
-
if (findings.length === 0) return "";
|
|
7552
|
-
const parts = ["## Operational Findings", ""];
|
|
7553
|
-
const runSummaries = findings.filter((d) => d.key.startsWith("run-summary:"));
|
|
7554
|
-
const stallFindings = findings.filter((d) => d.key.startsWith("stall:"));
|
|
7555
|
-
const otherFindings = findings.filter((d) => !d.key.startsWith("run-summary:") && !d.key.startsWith("stall:"));
|
|
7556
|
-
if (runSummaries.length > 0) {
|
|
7557
|
-
parts.push("### Run Summaries");
|
|
7558
|
-
parts.push("");
|
|
7559
|
-
for (const d of runSummaries) {
|
|
7560
|
-
const runId = d.key.replace("run-summary:", "");
|
|
7561
|
-
const parsed = safeParseJson(d.value);
|
|
7562
|
-
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
7563
|
-
const s = parsed;
|
|
7564
|
-
parts.push(`**Run: ${runId}**`);
|
|
7565
|
-
parts.push(`- Succeeded: ${(s.succeeded ?? []).join(", ") || "none"}`);
|
|
7566
|
-
parts.push(`- Failed: ${(s.failed ?? []).join(", ") || "none"}`);
|
|
7567
|
-
parts.push(`- Escalated: ${(s.escalated ?? []).join(", ") || "none"}`);
|
|
7568
|
-
parts.push(`- Total restarts: ${s.total_restarts ?? 0}`);
|
|
7569
|
-
parts.push(`- Elapsed: ${s.elapsed_seconds ?? 0}s`);
|
|
7570
|
-
parts.push(`- Tokens: ${s.total_input_tokens ?? 0} in / ${s.total_output_tokens ?? 0} out`);
|
|
7571
|
-
} else parts.push(`**Run: ${runId}**: ${String(parsed)}`);
|
|
7572
|
-
parts.push("");
|
|
7573
|
-
}
|
|
7574
|
-
}
|
|
7575
|
-
if (stallFindings.length > 0) {
|
|
7576
|
-
parts.push("### Stall Events");
|
|
7577
|
-
parts.push("");
|
|
7578
|
-
for (const d of stallFindings) {
|
|
7579
|
-
const parsed = safeParseJson(d.value);
|
|
7580
|
-
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
7581
|
-
const s = parsed;
|
|
7582
|
-
const outcome = s.outcome ?? "unknown";
|
|
7583
|
-
parts.push(`- **${d.key}**: phase=${s.phase ?? "?"} staleness=${s.staleness_secs ?? 0}s attempt=${s.attempt ?? 0} outcome=${outcome}`);
|
|
7584
|
-
} else parts.push(`- **${d.key}**: ${String(parsed)}`);
|
|
7585
|
-
}
|
|
7586
|
-
parts.push("");
|
|
7587
|
-
}
|
|
7588
|
-
if (otherFindings.length > 0) {
|
|
7589
|
-
for (const d of otherFindings) parts.push(`- **${d.key}**: ${renderValue(d.value)}`);
|
|
7590
|
-
parts.push("");
|
|
7591
|
-
}
|
|
7592
|
-
return parts.join("\n");
|
|
7593
|
-
}
|
|
7594
|
-
/**
|
|
7595
|
-
* Render `experiment-result` category decisions as an "Experiments" section.
|
|
7596
|
-
*
|
|
7597
|
-
* Lists each experiment with its verdict, metric delta, and branch name.
|
|
7598
|
-
* Returns '' if no matching decisions are found.
|
|
7599
|
-
*
|
|
7600
|
-
* @param decisions - Decisions of any category; filters for 'experiment-result'
|
|
7601
|
-
* @returns Formatted markdown content, or '' if empty
|
|
7602
|
-
*/
|
|
7603
|
-
function renderExperiments(decisions) {
|
|
7604
|
-
const experiments = decisions.filter((d) => d.category === "experiment-result");
|
|
7605
|
-
if (experiments.length === 0) return "";
|
|
7606
|
-
const parts = ["## Experiments", ""];
|
|
7607
|
-
const improved = experiments.filter((d) => {
|
|
7608
|
-
const p = safeParseJson(d.value);
|
|
7609
|
-
return typeof p === "object" && p !== null && p["verdict"] === "IMPROVED";
|
|
7610
|
-
});
|
|
7611
|
-
const mixed = experiments.filter((d) => {
|
|
7612
|
-
const p = safeParseJson(d.value);
|
|
7613
|
-
return typeof p === "object" && p !== null && p["verdict"] === "MIXED";
|
|
7614
|
-
});
|
|
7615
|
-
const regressed = experiments.filter((d) => {
|
|
7616
|
-
const p = safeParseJson(d.value);
|
|
7617
|
-
return typeof p === "object" && p !== null && p["verdict"] === "REGRESSED";
|
|
7618
|
-
});
|
|
7619
|
-
parts.push(`**Total**: ${experiments.length} | **Improved**: ${improved.length} | **Mixed**: ${mixed.length} | **Regressed**: ${regressed.length}`);
|
|
7620
|
-
parts.push("");
|
|
7621
|
-
for (const d of experiments) {
|
|
7622
|
-
const parsed = safeParseJson(d.value);
|
|
7623
|
-
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
7624
|
-
const e = parsed;
|
|
7625
|
-
const verdict = e.verdict ?? "UNKNOWN";
|
|
7626
|
-
const metric = e.target_metric ?? "unknown";
|
|
7627
|
-
const branch = e.branch_name ? ` → \`${e.branch_name}\`` : "";
|
|
7628
|
-
parts.push(`- **[${verdict}]** ${metric}: before=${e.before ?? "?"} after=${e.after ?? "?"}${branch}`);
|
|
7629
|
-
} else parts.push(`- ${String(parsed)}`);
|
|
7630
|
-
}
|
|
7631
|
-
parts.push("");
|
|
7632
|
-
return parts.join("\n");
|
|
7633
|
-
}
|
|
7634
|
-
/**
|
|
7635
|
-
* Render solutioning-phase readiness-findings decisions as a `readiness-report.md`.
|
|
7636
|
-
*
|
|
7637
|
-
* Groups findings by category, shows severity per finding, and emits an
|
|
7638
|
-
* overall pass/fail verdict based on whether any blockers were found.
|
|
7639
|
-
*
|
|
7640
|
-
* @param decisions - All decisions from the solutioning phase (any category)
|
|
7641
|
-
* @returns Formatted markdown content for readiness-report.md, or '' if no data
|
|
7642
|
-
*/
|
|
7643
|
-
/**
 * Render the solutioning-phase readiness report as markdown.
 *
 * Groups findings by category, tags each finding with its severity, and
 * reports an overall PASS/FAIL verdict (FAIL when any blocker or major
 * finding is present).
 *
 * @param decisions - All decisions from the solutioning phase (any category)
 * @returns Markdown content for readiness-report.md, or '' when no findings exist
 */
function renderReadinessReport(decisions) {
	const rawFindings = decisions.filter((entry) => entry.category === "readiness-findings");
	if (rawFindings.length === 0) return "";
	// Normalize each stored value into { category, severity, description, affected_items }.
	// Values that do not parse into a plain object fall back to a generic minor finding.
	const normalized = rawFindings.map((entry) => {
		const value = safeParseJson(entry.value);
		const isRecord = typeof value === "object" && value !== null && !Array.isArray(value);
		if (!isRecord) return {
			category: "general",
			severity: "minor",
			description: String(value),
			affected_items: []
		};
		return {
			category: value.category ?? "general",
			severity: value.severity ?? "minor",
			description: value.description ?? String(value),
			affected_items: value.affected_items ?? []
		};
	});
	const countBySeverity = (level) => normalized.filter((f) => f.severity === level).length;
	// Blockers and majors both fail the readiness gate.
	const failing = normalized.some((f) => f.severity === "blocker" || f.severity === "major");
	const lines = [
		"# Readiness Report",
		"",
		`**Overall Verdict**: ${failing ? "FAIL" : "PASS"}`,
		"",
		`**Total Findings**: ${normalized.length}`,
		`**Blockers**: ${countBySeverity("blocker")}`,
		`**Major**: ${countBySeverity("major")}`,
		`**Minor**: ${countBySeverity("minor")}`,
		""
	];
	// Group findings by category, preserving insertion order within each bucket.
	const grouped = new Map();
	for (const finding of normalized) {
		const bucket = grouped.get(finding.category);
		if (bucket === undefined) grouped.set(finding.category, [finding]);
		else bucket.push(finding);
	}
	// Known categories render first in this fixed order; unknown ones sort last.
	const PRIORITY = [
		"fr_coverage",
		"architecture_compliance",
		"story_quality",
		"ux_alignment",
		"dependency_validity",
		"general"
	];
	const rank = (name) => {
		const idx = PRIORITY.indexOf(name);
		return idx === -1 ? 999 : idx;
	};
	const orderedCategories = [...grouped.keys()].sort((left, right) => rank(left) - rank(right));
	for (const categoryName of orderedCategories) {
		lines.push(`## ${fieldLabel(categoryName)}`, "");
		for (const finding of grouped.get(categoryName)) {
			lines.push(`- [${finding.severity.toUpperCase()}] ${finding.description}`);
			if (finding.affected_items.length > 0) lines.push(` - *Affected*: ${finding.affected_items.join(", ")}`);
		}
		lines.push("");
	}
	return lines.join("\n");
}
|
|
7706
|
-
|
|
7707
|
-
//#endregion
|
|
7708
|
-
//#region src/cli/commands/export.ts
|
|
7709
|
-
const logger$4 = createLogger("export-cmd");
/**
 * Execute the export action: render decision-store contents for a pipeline
 * run into human-readable markdown files under the output directory.
 *
 * @param options - { runId, outputDir, projectRoot, outputFormat }
 * @returns Exit code (0 = success, 1 = error).
 */
async function runExportAction(options) {
	const { runId, outputDir, projectRoot, outputFormat } = options;
	// Emit an error in the requested format: JSON goes to stdout, human to stderr.
	const emitError = (msg) => {
		if (outputFormat === "json") process.stdout.write(JSON.stringify({ error: msg }) + "\n");
		else process.stderr.write(`Error: ${msg}\n`);
	};
	let adapter;
	try {
		const dbRoot = await resolveMainRepoRoot(projectRoot);
		const dbPath = join$1(dbRoot, ".substrate", "substrate.db");
		const doltDir = join$1(dbRoot, ".substrate", "state", ".dolt");
		// Neither the SQLite db nor the Dolt state dir exists: store was never initialized.
		if (!existsSync(dbPath) && !existsSync(doltDir)) {
			emitError(`Decision store not initialized. Run 'substrate init' first.`);
			return 1;
		}
		adapter = createDatabaseAdapter({
			backend: "auto",
			basePath: dbRoot
		});
		await initSchema(adapter);
		// Resolve the target run: an explicit non-empty --run-id wins, otherwise use the latest run.
		let run;
		if (runId !== void 0 && runId !== "") run = await getPipelineRunById(adapter, runId);
		else run = await getLatestRun(adapter);
		if (run === void 0) {
			emitError(runId !== void 0 ? `Pipeline run '${runId}' not found.` : "No pipeline runs found. Run `substrate run` first.");
			return 1;
		}
		const activeRunId = run.id;
		const resolvedOutputDir = isAbsolute(outputDir) ? outputDir : join$1(projectRoot, outputDir);
		if (!existsSync(resolvedOutputDir)) mkdirSync(resolvedOutputDir, { recursive: true });
		const filesWritten = [];
		const phasesExported = [];
		// Write one rendered artifact into the output dir, skipping empty content,
		// and record the file path, its phase, and (in human mode) a progress line.
		const writeArtifact = (fileName, content, phase) => {
			if (content === "") return;
			const filePath = join$1(resolvedOutputDir, fileName);
			writeFileSync(filePath, content, "utf-8");
			filesWritten.push(filePath);
			if (!phasesExported.includes(phase)) phasesExported.push(phase);
			if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
		};
		const analysisDecisions = await getDecisionsByPhaseForRun(adapter, activeRunId, "analysis");
		if (analysisDecisions.length > 0) writeArtifact("product-brief.md", renderProductBrief(analysisDecisions), "analysis");
		const planningDecisions = await getDecisionsByPhaseForRun(adapter, activeRunId, "planning");
		if (planningDecisions.length > 0) {
			// The PRD needs this run's requirements alongside the planning decisions.
			const requirements = (await listRequirements(adapter)).filter((r) => r.pipeline_run_id === activeRunId);
			writeArtifact("prd.md", renderPrd(planningDecisions, requirements), "planning");
		}
		const solutioningDecisions = await getDecisionsByPhaseForRun(adapter, activeRunId, "solutioning");
		if (solutioningDecisions.length > 0) {
			writeArtifact("architecture.md", renderArchitecture(solutioningDecisions), "solutioning");
			writeArtifact("epics.md", renderEpics(solutioningDecisions), "solutioning");
			writeArtifact("readiness-report.md", renderReadinessReport(solutioningDecisions), "solutioning");
		}
		// Operational findings and experiment results are category-scoped, not run-scoped.
		const operationalDecisions = await getDecisionsByCategory(adapter, OPERATIONAL_FINDING);
		if (operationalDecisions.length > 0) writeArtifact("operational-findings.md", renderOperationalFindings(operationalDecisions), "operational");
		const experimentDecisions = await getDecisionsByCategory(adapter, EXPERIMENT_RESULT);
		if (experimentDecisions.length > 0) writeArtifact("experiments.md", renderExperiments(experimentDecisions), "operational");
		if (outputFormat === "json") {
			const result = {
				files_written: filesWritten,
				run_id: activeRunId,
				phases_exported: phasesExported
			};
			process.stdout.write(JSON.stringify(result) + "\n");
		} else {
			if (filesWritten.length === 0) process.stdout.write(`No data found for run ${activeRunId}. The pipeline may not have completed any phases.\n`);
			else process.stdout.write(`\nExported ${filesWritten.length} file(s) from run ${activeRunId}.\n`);
			// Report the core pipeline phases that produced no artifacts.
			const skippedPhases = ["analysis", "planning", "solutioning"].filter((phase) => !phasesExported.includes(phase));
			if (skippedPhases.length > 0) process.stdout.write(`Phases with no data (skipped): ${skippedPhases.join(", ")}\n`);
		}
		return 0;
	} catch (err) {
		const msg = err instanceof Error ? err.message : String(err);
		emitError(msg);
		logger$4.error({ err }, "export action failed");
		return 1;
	} finally {
		// Best-effort close; a failure here must not mask the action's exit code.
		if (adapter !== void 0) try {
			await adapter.close();
		} catch {}
	}
}
|
|
7847
|
-
/**
 * Register the `export` CLI command on the given Commander program.
 *
 * @param program - Commander program to attach the command to
 * @param _version - CLI version (unused by this command)
 * @param projectRoot - Default project root for the --project-root option
 */
function registerExportCommand(program, _version = "0.0.0", projectRoot = process.cwd()) {
	const cmd = program
		.command("export")
		.description("Export decision store contents as human-readable markdown files")
		.option("--run-id <id>", "Pipeline run ID to export (defaults to latest run)")
		.option("--output-dir <path>", "Directory to write exported files to", "_bmad-output/planning-artifacts/")
		.option("--project-root <path>", "Project root directory", projectRoot)
		.option("--output-format <format>", "Output format: human (default) or json", "human");
	cmd.action(async (opts) => {
		const rawFormat = opts.outputFormat;
		// Unknown formats warn and fall back to human output.
		if (rawFormat !== "json" && rawFormat !== "human") process.stderr.write(`Warning: unknown --output-format '${rawFormat}', defaulting to 'human'\n`);
		const normalizedFormat = rawFormat === "json" ? "json" : "human";
		process.exitCode = await runExportAction({
			runId: opts.runId,
			outputDir: opts.outputDir,
			projectRoot: opts.projectRoot,
			outputFormat: normalizedFormat
		});
	});
}
|
|
7860
|
-
|
|
7861
7158
|
//#endregion
|
|
7862
7159
|
//#region src/cli/commands/retry-escalated.ts
|
|
7863
7160
|
// Scoped logger for the retry-escalated command (src/cli/commands/retry-escalated.ts).
const logger$3 = createLogger("retry-escalated-cmd");
|