substrate-ai 0.19.7 → 0.19.9
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
@@ -1,7 +1,7 @@
 import { BMAD_BASELINE_TOKENS_FULL, DoltMergeConflict, FileStateStore, STOP_AFTER_VALID_PHASES, STORY_KEY_PATTERN, VALID_PHASES, WorkGraphRepository, __commonJS, __require, __toESM, buildPipelineStatusOutput, createDatabaseAdapter, detectCycles, formatOutput, formatPipelineSummary, formatTokenTelemetry, inspectProcessTree, parseDbTimestampAsUtc, resolveMainRepoRoot, validateStoryKey } from "./health-DJgGZhW-.js";
 import { createLogger } from "./logger-KeHncl-f.js";
 import { TypedEventBusImpl, createEventBus, createTuiApp, isTuiCapable, printNonTtyWarning, sleep } from "./helpers-CElYrONe.js";
-import { ADVISORY_NOTES, Categorizer, ConsumerAnalyzer, DEFAULT_GLOBAL_SETTINGS, DispatcherImpl, DoltClient, ESCALATION_DIAGNOSIS, EfficiencyScorer, IngestionServer, LogTurnAnalyzer, OPERATIONAL_FINDING, Recommender, RoutingRecommender, RoutingResolver, RoutingTelemetry, RoutingTokenAccumulator, RoutingTuner, STORY_METRICS, STORY_OUTCOME, SubstrateConfigSchema, TEST_EXPANSION_FINDING, TEST_PLAN, TelemetryNormalizer, TelemetryPipeline, TurnAnalyzer, addTokenUsage, aggregateTokenUsageForRun, aggregateTokenUsageForStory, callLLM, createConfigSystem, createDatabaseAdapter$1, createDecision, createPipelineRun, createRequirement, detectInterfaceChanges, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getPipelineRunById, getRunningPipelineRuns, getStoryMetricsForRun, getTokenUsageSummary, initSchema, loadModelRoutingConfig, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision, writeRunMetrics, writeStoryMetrics } from "./dist-adzGUKPc.js";
+import { ADVISORY_NOTES, Categorizer, ConsumerAnalyzer, DEFAULT_GLOBAL_SETTINGS, DispatcherImpl, DoltClient, ESCALATION_DIAGNOSIS, EXPERIMENT_RESULT, EfficiencyScorer, IngestionServer, LogTurnAnalyzer, OPERATIONAL_FINDING, Recommender, RoutingRecommender, RoutingResolver, RoutingTelemetry, RoutingTokenAccumulator, RoutingTuner, STORY_METRICS, STORY_OUTCOME, SubstrateConfigSchema, TEST_EXPANSION_FINDING, TEST_PLAN, TelemetryNormalizer, TelemetryPipeline, TurnAnalyzer, addTokenUsage, aggregateTokenUsageForRun, aggregateTokenUsageForStory, callLLM, createConfigSystem, createDatabaseAdapter$1, createDecision, createPipelineRun, createRequirement, detectInterfaceChanges, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getStoryMetricsForRun, getTokenUsageSummary, initSchema, listRequirements, loadModelRoutingConfig, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision, writeRunMetrics, writeStoryMetrics } from "./dist-adzGUKPc.js";
 import { basename, dirname, extname, join } from "path";
 import { access, readFile, readdir, stat } from "fs/promises";
 import { EventEmitter } from "node:events";
@@ -9,7 +9,7 @@ import yaml from "js-yaml";
 import * as actualFS from "node:fs";
 import { accessSync, existsSync, mkdirSync, readFileSync, readdirSync, rmSync, unwatchFile, watchFile, writeFileSync } from "node:fs";
 import { exec, execFile, execSync, spawn } from "node:child_process";
-import path, { dirname as dirname$1, extname as extname$1, join as join$1, posix, resolve as resolve$1, win32 } from "node:path";
+import path, { dirname as dirname$1, extname as extname$1, isAbsolute, join as join$1, posix, resolve as resolve$1, win32 } from "node:path";
 import { tmpdir } from "node:os";
 import { createHash, randomUUID } from "node:crypto";
 import { z } from "zod";
@@ -1950,7 +1950,7 @@ function truncateToTokens(text, maxTokens) {
 
 //#endregion
 //#region src/modules/context-compiler/context-compiler-impl.ts
-const logger$
+const logger$21 = createLogger("context-compiler");
 /**
 * Fraction of the original token budget that must remain (after required +
 * important sections) before an optional section is included.
@@ -2011,7 +2011,7 @@ var ContextCompilerImpl = class {
 }
 _applyExclusionFilter(text, sectionName) {
 for (const excludedPath of this._excludedPaths) if (text.includes(excludedPath)) {
-logger$
+logger$21.warn({
 sectionName,
 excludedPath
 }, "ContextCompiler: section excluded — contains path from exclusion list");
@@ -2069,7 +2069,7 @@ var ContextCompilerImpl = class {
 includedParts.push(truncated);
 remainingBudget -= truncatedTokens;
 anyTruncated = true;
-logger$
+logger$21.warn({
 section: section.name,
 originalTokens: tokens,
 budgetTokens: truncatedTokens
@@ -2083,7 +2083,7 @@ var ContextCompilerImpl = class {
 });
 } else {
 anyTruncated = true;
-logger$
+logger$21.warn({
 section: section.name,
 tokens
 }, "Context compiler: omitted \"important\" section — no budget remaining");
@@ -2110,7 +2110,7 @@ var ContextCompilerImpl = class {
 } else {
 if (tokens > 0) {
 anyTruncated = true;
-logger$
+logger$21.warn({
 section: section.name,
 tokens,
 budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
@@ -2204,8 +2204,8 @@ var GrammarLoader = class {
 _extensionMap;
 _cache = new Map();
 _unavailable = false;
-constructor(logger$
-this._logger = logger$
+constructor(logger$22) {
+this._logger = logger$22;
 this._extensionMap = new Map([
 [".ts", "tree-sitter-typescript/typescript"],
 [".tsx", "tree-sitter-typescript/tsx"],
@@ -2291,9 +2291,9 @@ const ERR_REPO_MAP_GIT_FAILED = "ERR_REPO_MAP_GIT_FAILED";
 var SymbolParser = class {
 _grammarLoader;
 _logger;
-constructor(grammarLoader, logger$
+constructor(grammarLoader, logger$22) {
 this._grammarLoader = grammarLoader;
-this._logger = logger$
+this._logger = logger$22;
 }
 async parseFile(filePath) {
 const ext$1 = extname$1(filePath);
@@ -2438,9 +2438,9 @@ async function computeFileHash(filePath) {
 var DoltSymbolRepository = class {
 _client;
 _logger;
-constructor(client, logger$
+constructor(client, logger$22) {
 this._client = client;
-this._logger = logger$
+this._logger = logger$22;
 }
 /**
 * Atomically replace all symbols for filePath.
@@ -2646,11 +2646,11 @@ var RepoMapStorage = class {
 _metaRepo;
 _gitClient;
 _logger;
-constructor(symbolRepo, metaRepo, gitClient, logger$
+constructor(symbolRepo, metaRepo, gitClient, logger$22) {
 this._symbolRepo = symbolRepo;
 this._metaRepo = metaRepo;
 this._gitClient = gitClient;
-this._logger = logger$
+this._logger = logger$22;
 }
 /**
 * Returns true if the file's current content hash differs from the stored hash.
@@ -2767,8 +2767,8 @@ function runGit(args, cwd) {
 */
 var GitClient = class {
 _logger;
-constructor(logger$
-this._logger = logger$
+constructor(logger$22) {
+this._logger = logger$22;
 }
 /**
 * Returns the current HEAD commit SHA.
@@ -4124,9 +4124,9 @@ var RepoMapQueryEngine = class {
 repo;
 logger;
 telemetry;
-constructor(repo, logger$
+constructor(repo, logger$22, telemetry) {
 this.repo = repo;
-this.logger = logger$
+this.logger = logger$22;
 this.telemetry = telemetry;
 }
 async query(q) {
@@ -4346,9 +4346,9 @@ var RepoMapFormatter = class {
 var RepoMapTelemetry = class {
 _telemetry;
 _logger;
-constructor(telemetry, logger$
+constructor(telemetry, logger$22) {
 this._telemetry = telemetry;
-this._logger = logger$
+this._logger = logger$22;
 }
 /**
 * Emit a `repo_map.query` span.
@@ -4373,9 +4373,9 @@ var RepoMapTelemetry = class {
 var RepoMapModule = class {
 _metaRepo;
 _logger;
-constructor(metaRepo, logger$
+constructor(metaRepo, logger$22) {
 this._metaRepo = metaRepo;
-this._logger = logger$
+this._logger = logger$22;
 }
 /**
 * Check whether the stored repo-map is stale relative to the current HEAD commit.
@@ -4419,9 +4419,9 @@ var RepoMapModule = class {
 var RepoMapInjector = class {
 _queryEngine;
 _logger;
-constructor(queryEngine, logger$
+constructor(queryEngine, logger$22) {
 this._queryEngine = queryEngine;
-this._logger = logger$
+this._logger = logger$22;
 }
 /**
 * Build repo-map context by extracting file references from the story content,
@@ -4502,7 +4502,7 @@ const DEFAULT_TIMEOUTS = {
 
 //#endregion
 //#region src/modules/agent-dispatch/dispatcher-impl.ts
-const logger$
+const logger$20 = createLogger("agent-dispatch");
 /**
 * Create a new Dispatcher instance.
 *
@@ -4646,7 +4646,7 @@ function runBuildVerification(options) {
 let cmd;
 if (verifyCommand === void 0) {
 const detection = detectPackageManager(projectRoot);
-logger$
+logger$20.info({
 packageManager: detection.packageManager,
 lockfile: detection.lockfile,
 resolvedCommand: detection.command
@@ -4658,7 +4658,7 @@ function runBuildVerification(options) {
 const filters = deriveTurboFilters(changedFiles, projectRoot);
 if (filters.length > 0) {
 cmd = `${cmd} ${filters.join(" ")}`;
-logger$
+logger$20.info({
 filters,
 originalCmd: options.verifyCommand ?? "(auto-detected)"
 }, "Build verification: scoped turbo build to affected packages");
@@ -4694,7 +4694,7 @@ function runBuildVerification(options) {
 };
 const missingScriptPattern = /Missing script[:\s]|No script found|Command "build" not found/i;
 if (missingScriptPattern.test(combinedOutput)) {
-logger$
+logger$20.warn("Build script not found — skipping pre-flight (greenfield repo)");
 return {
 status: "skipped",
 exitCode,
@@ -4878,7 +4878,7 @@ function pickRecommendation(distribution, profile, totalIssues, reviewCycles, la
 
 //#endregion
 //#region src/modules/compiled-workflows/prompt-assembler.ts
-const logger$
+const logger$19 = createLogger("compiled-workflows:prompt-assembler");
 /**
 * Assemble a final prompt from a template and sections map.
 *
@@ -4903,7 +4903,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
 tokenCount,
 truncated: false
 };
-logger$
+logger$19.warn({
 tokenCount,
 ceiling: tokenCeiling
 }, "Prompt exceeds token ceiling — truncating optional sections");
@@ -4919,10 +4919,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
 const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
 if (targetSectionTokens === 0) {
 contentMap[section.name] = "";
-logger$
+logger$19.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
 } else {
 contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
-logger$
+logger$19.warn({
 sectionName: section.name,
 targetSectionTokens
 }, "Section truncated to fit token budget");
@@ -4933,7 +4933,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
 }
 if (tokenCount <= tokenCeiling) break;
 }
-if (tokenCount > tokenCeiling) logger$
+if (tokenCount > tokenCeiling) logger$19.warn({
 tokenCount,
 ceiling: tokenCeiling
 }, "Required sections alone exceed token ceiling — returning over-budget prompt");
@@ -5235,7 +5235,7 @@ function getTokenCeiling(workflowType, tokenCeilings) {
 
 //#endregion
 //#region src/modules/compiled-workflows/create-story.ts
-const logger$17 = createLogger("compiled-workflows:create-story");
+const logger$18 = createLogger("compiled-workflows:create-story");
 /**
 * Execute the compiled create-story workflow.
 *
@@ -5255,13 +5255,13 @@ const logger$17 = createLogger("compiled-workflows:create-story");
 */
 async function runCreateStory(deps, params) {
 const { epicId, storyKey, pipelineRunId } = params;
-logger$
+logger$18.debug({
 epicId,
 storyKey,
 pipelineRunId
 }, "Starting create-story workflow");
 const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("create-story", deps.tokenCeilings);
-logger$
+logger$18.info({
 workflow: "create-story",
 ceiling: TOKEN_CEILING,
 source: tokenCeilingSource
@@ -5271,7 +5271,7 @@ async function runCreateStory(deps, params) {
 template = await deps.pack.getPrompt("create-story");
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$18.error({ error }, "Failed to retrieve create-story prompt template");
 return {
 result: "failed",
 error: `Failed to retrieve prompt template: ${error}`,
@@ -5313,7 +5313,7 @@ async function runCreateStory(deps, params) {
 priority: "important"
 }
 ], TOKEN_CEILING);
-logger$
+logger$18.debug({
 tokenCount,
 truncated,
 tokenCeiling: TOKEN_CEILING
@@ -5323,6 +5323,7 @@ async function runCreateStory(deps, params) {
 agent: "claude-code",
 taskType: "create-story",
 outputSchema: CreateStoryResultSchema,
+maxTurns: 50,
 ...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {},
 ...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {},
 storyKey
@@ -5332,7 +5333,7 @@ async function runCreateStory(deps, params) {
 dispatchResult = await handle.result;
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$18.error({
 epicId,
 storyKey,
 error
@@ -5353,7 +5354,7 @@ async function runCreateStory(deps, params) {
 if (dispatchResult.status === "failed") {
 const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
 const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
-logger$
+logger$18.warn({
 epicId,
 storyKey,
 exitCode: dispatchResult.exitCode,
@@ -5366,7 +5367,7 @@ async function runCreateStory(deps, params) {
 };
 }
 if (dispatchResult.status === "timeout") {
-logger$
+logger$18.warn({
 epicId,
 storyKey
 }, "Create-story dispatch timed out");
@@ -5379,7 +5380,7 @@ async function runCreateStory(deps, params) {
 if (dispatchResult.parsed === null) {
 const details = dispatchResult.parseError ?? "No YAML block found in output";
 const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
-logger$
+logger$18.warn({
 epicId,
 storyKey,
 details,
@@ -5395,7 +5396,7 @@ async function runCreateStory(deps, params) {
 const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
 if (!parseResult.success) {
 const details = parseResult.error.message;
-logger$
+logger$18.warn({
 epicId,
 storyKey,
 details
@@ -5408,7 +5409,7 @@ async function runCreateStory(deps, params) {
 };
 }
 const parsed = parseResult.data;
-logger$
+logger$18.info({
 epicId,
 storyKey,
 storyFile: parsed.story_file,
@@ -5430,7 +5431,7 @@ async function getImplementationDecisions(deps) {
 try {
 return await getDecisionsByPhase(deps.db, "implementation");
 } catch (err) {
-logger$
+logger$18.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
 return [];
 }
 }
@@ -5478,7 +5479,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
 if (storyKey) {
 const perStoryShard = decisions.find((d) => d.category === "epic-shard" && d.key === storyKey);
 if (perStoryShard?.value) {
-logger$
+logger$18.debug({
 epicId,
 storyKey
 }, "Found per-story epic shard (direct lookup)");
@@ -5491,13 +5492,13 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
 if (storyKey) {
 const storySection = extractStorySection(shardContent, storyKey);
 if (storySection) {
-logger$
+logger$18.debug({
 epicId,
 storyKey
 }, "Extracted per-story section from epic shard (pre-37-0 fallback)");
 return storySection;
 }
-logger$
+logger$18.debug({
 epicId,
 storyKey
 }, "No matching story section found — using full epic shard");
@@ -5507,11 +5508,11 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
 if (projectRoot) {
 const fallback = readEpicShardFromFile(projectRoot, epicId);
 if (fallback) {
-logger$
+logger$18.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
 if (storyKey) {
 const storySection = extractStorySection(fallback, storyKey);
 if (storySection) {
-logger$
+logger$18.debug({
 epicId,
 storyKey
 }, "Extracted per-story section from file-based epic shard");
@@ -5523,7 +5524,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
 }
 return "";
 } catch (err) {
-logger$
+logger$18.warn({
 epicId,
 error: err instanceof Error ? err.message : String(err)
 }, "Failed to retrieve epic shard");
@@ -5540,7 +5541,7 @@ function getPrevDevNotes(decisions, epicId) {
 if (devNotes.length === 0) return "";
 return devNotes[devNotes.length - 1].value;
 } catch (err) {
-logger$
+logger$18.warn({
 epicId,
 error: err instanceof Error ? err.message : String(err)
 }, "Failed to retrieve prev dev notes");
@@ -5573,7 +5574,7 @@ async function getArchConstraints$3(deps) {
 const truncatedBody = body.length > 300 ? body.slice(0, 297) + "..." : body;
 return `${header}\n${truncatedBody}`;
 }).join("\n\n");
-logger$
+logger$18.info({
 fullLength: full.length,
 summarizedLength: summarized.length,
 decisions: constraints.length
@@ -5583,13 +5584,13 @@ async function getArchConstraints$3(deps) {
 if (deps.projectRoot) {
 const fallback = readArchConstraintsFromFile(deps.projectRoot);
 if (fallback) {
-logger$
+logger$18.info("Using file-based fallback for architecture constraints (decisions table empty)");
 return fallback.length > ARCH_CONSTRAINT_MAX_CHARS ? fallback.slice(0, ARCH_CONSTRAINT_MAX_CHARS) + "\n\n[truncated for token budget]" : fallback;
 }
 }
 return "";
 } catch (err) {
-logger$
+logger$18.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
 return "";
 }
 }
@@ -5617,7 +5618,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
 const endIdx = endMatch ? endMatch.index : content.length;
 return content.slice(startIdx, endIdx).trim();
 } catch (err) {
-logger$
+logger$18.warn({
 epicId,
 error: err instanceof Error ? err.message : String(err)
 }, "File-based epic shard fallback failed");
@@ -5640,7 +5641,7 @@ function readArchConstraintsFromFile(projectRoot) {
 const content = readFileSync(archPath, "utf-8");
 return content.slice(0, 1500);
 } catch (err) {
-logger$
+logger$18.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
 return "";
 }
 }
@@ -5653,7 +5654,7 @@ async function getStoryTemplate(deps) {
 try {
 return await deps.pack.getTemplate("story");
 } catch (err) {
-logger$
+logger$18.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
 return "";
 }
 }
@@ -5690,7 +5691,7 @@ async function isValidStoryFile(filePath) {
 
 //#endregion
 //#region src/modules/compiled-workflows/git-helpers.ts
-const logger$
+const logger$17 = createLogger("compiled-workflows:git-helpers");
 /**
 * Check whether the repo at `cwd` has at least one commit (HEAD resolves).
 * Returns false for fresh repos with no commits, avoiding `fatal: bad revision 'HEAD'`.
@@ -5727,7 +5728,7 @@ function hasCommits(cwd) {
 */
 async function getGitDiffSummary(workingDirectory = process.cwd()) {
 if (!hasCommits(workingDirectory)) {
-logger$
+logger$17.debug({ cwd: workingDirectory }, "No commits in repo — returning empty diff");
 return "";
 }
 return runGitCommand(["diff", "HEAD"], workingDirectory, "git-diff-summary");
@@ -5744,7 +5745,7 @@ async function getGitDiffSummary(workingDirectory = process.cwd()) {
 */
 async function getGitDiffStatSummary(workingDirectory = process.cwd()) {
 if (!hasCommits(workingDirectory)) {
-logger$
+logger$17.debug({ cwd: workingDirectory }, "No commits in repo — returning empty stat");
 return "";
 }
 return runGitCommand([
@@ -5770,7 +5771,7 @@ async function getGitDiffStatSummary(workingDirectory = process.cwd()) {
 async function getGitDiffForFiles(files, workingDirectory = process.cwd()) {
 if (files.length === 0) return "";
 if (!hasCommits(workingDirectory)) {
-logger$
+logger$17.debug({ cwd: workingDirectory }, "No commits in repo — returning empty diff for files");
 return "";
 }
 await stageIntentToAdd(files, workingDirectory);
@@ -5797,7 +5798,7 @@ async function getGitDiffForFiles(files, workingDirectory = process.cwd()) {
 async function getGitDiffStatForFiles(files, workingDirectory = process.cwd()) {
 if (files.length === 0) return "";
 if (!hasCommits(workingDirectory)) {
-logger$
+logger$17.debug({ cwd: workingDirectory }, "No commits in repo — returning empty stat for files");
 return "";
 }
 return runGitCommand([
@@ -5846,7 +5847,7 @@ async function stageIntentToAdd(files, workingDirectory) {
 if (files.length === 0) return;
 const existing = files.filter((f$1) => {
 const exists = existsSync(f$1);
-if (!exists) logger$
+if (!exists) logger$17.debug({ file: f$1 }, "Skipping nonexistent file in stageIntentToAdd");
 return exists;
 });
 if (existing.length === 0) return;
@@ -5880,7 +5881,7 @@ async function runGitCommand(args, cwd, logLabel) {
 stderr += chunk.toString("utf-8");
 });
 proc$1.on("error", (err) => {
-logger$
+logger$17.warn({
 label: logLabel,
 cwd,
 error: err.message
@@ -5889,7 +5890,7 @@ async function runGitCommand(args, cwd, logLabel) {
 });
 proc$1.on("close", (code) => {
 if (code !== 0) {
-logger$
+logger$17.warn({
 label: logLabel,
 cwd,
 code,
@@ -5905,7 +5906,7 @@ async function runGitCommand(args, cwd, logLabel) {
 
 //#endregion
 //#region src/modules/implementation-orchestrator/project-findings.ts
-const logger$
+const logger$16 = createLogger("project-findings");
 /** Maximum character length for the findings summary */
 const MAX_CHARS = 2e3;
 /**
@@ -5971,7 +5972,7 @@ async function getProjectFindings(db) {
 if (summary.length > MAX_CHARS) summary = summary.slice(0, MAX_CHARS - 3) + "...";
 return summary;
 } catch (err) {
-logger$
+logger$16.warn({ err }, "Failed to query project findings (graceful fallback)");
 return "";
 }
 }
@@ -5994,7 +5995,7 @@ function extractRecurringPatterns(outcomes) {
 
 //#endregion
 //#region src/modules/compiled-workflows/story-complexity.ts
-const logger$
+const logger$15 = createLogger("compiled-workflows:story-complexity");
 /**
 * Compute a complexity score from story markdown content.
 *
@@ -6046,7 +6047,7 @@ function resolveFixStoryMaxTurns(complexityScore) {
 * @param resolvedMaxTurns - Turn limit resolved for this dispatch
 */
 function logComplexityResult(storyKey, complexity, resolvedMaxTurns) {
-logger$
+logger$15.info({
 storyKey,
 taskCount: complexity.taskCount,
 subtaskCount: complexity.subtaskCount,
@@ -6302,7 +6303,7 @@ function resolveInstallCommand(projectRoot) {
 
 //#endregion
 //#region src/modules/compiled-workflows/dev-story.ts
-const logger$
+const logger$14 = createLogger("compiled-workflows:dev-story");
 /** Default timeout for dev-story dispatches in milliseconds (30 min) */
 const DEFAULT_TIMEOUT_MS$1 = 18e5;
 /**
@@ -6314,12 +6315,12 @@ const DEFAULT_TIMEOUT_MS$1 = 18e5;
 */
 async function runDevStory(deps, params) {
 const { storyKey, storyFilePath, taskScope, priorFiles } = params;
-logger$
+logger$14.info({
 storyKey,
 storyFilePath
 }, "Starting compiled dev-story workflow");
 const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("dev-story", deps.tokenCeilings);
-logger$
+logger$14.info({
 workflow: "dev-story",
 ceiling: TOKEN_CEILING,
 source: tokenCeilingSource
@@ -6362,10 +6363,10 @@ async function runDevStory(deps, params) {
 let template;
 try {
 template = await deps.pack.getPrompt("dev-story");
-logger$
+logger$14.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$14.error({
 storyKey,
 error
 }, "Failed to retrieve dev-story prompt template");
@@ -6376,14 +6377,14 @@ async function runDevStory(deps, params) {
 storyContent = await readFile$1(storyFilePath, "utf-8");
 } catch (err) {
 if (err.code === "ENOENT") {
-logger$
+logger$14.error({
 storyKey,
 storyFilePath
 }, "Story file not found");
 return makeFailureResult("story_file_not_found");
 }
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$14.error({
 storyKey,
 storyFilePath,
 error
@@ -6391,7 +6392,7 @@ async function runDevStory(deps, params) {
 return makeFailureResult(`story_file_read_error: ${error}`);
 }
 if (storyContent.trim().length === 0) {
-logger$
+logger$14.error({
 storyKey,
 storyFilePath
 }, "Story file is empty");
@@ -6399,7 +6400,7 @@ async function runDevStory(deps, params) {
 }
 const staleStatus = detectDeprecatedStatusField(storyContent);
 if (staleStatus !== null) {
-logger$
+logger$14.warn({
 storyFilePath,
 staleStatus
 }, "Story spec contains deprecated Status field — stripped before dispatch (status is managed by Dolt work graph)");
@@ -6414,17 +6415,17 @@ async function runDevStory(deps, params) {
 const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
 if (testPatternDecisions.length > 0) {
 testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
-logger$
+logger$14.debug({
 storyKey,
 count: testPatternDecisions.length
 }, "Loaded test patterns from decision store");
 } else {
 testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
-logger$
+logger$14.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
 }
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$14.warn({
 storyKey,
 error
 }, "Failed to load test patterns — using defaults");
@@ -6438,7 +6439,7 @@ async function runDevStory(deps, params) {
 if (deps.repoMapInjector !== void 0) {
 const injection = await deps.repoMapInjector.buildContext(storyContent, deps.maxRepoMapTokens ?? 2e3);
 repoContextContent = injection.text;
-logger$
+logger$14.info({
 storyKey,
 repoMapTokens: Math.ceil(injection.text.length / 4),
 symbolCount: injection.symbolCount,
@@ -6450,7 +6451,7 @@ async function runDevStory(deps, params) {
 const findings = await getProjectFindings(deps.db);
 if (findings.length > 0) {
 priorFindingsContent = "Previous pipeline runs encountered these issues — avoid repeating them:\n\n" + findings;
-logger$
+logger$14.debug({
 storyKey,
 findingsLen: findings.length
 }, "Injecting prior findings into dev-story prompt");
@@ -6470,7 +6471,7 @@ async function runDevStory(deps, params) {
 if (plan.test_categories && plan.test_categories.length > 0) parts.push(`\n### Categories: ${plan.test_categories.join(", ")}`);
 if (plan.coverage_notes) parts.push(`\n### Coverage Notes\n${plan.coverage_notes}`);
 testPlanContent = parts.join("\n");
-logger$
+logger$14.debug({ storyKey }, "Injecting test plan into dev-story prompt");
 }
 } catch {}
 const sections = [
@@ -6531,7 +6532,7 @@ async function runDevStory(deps, params) {
 }
 ];
 const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING);
-logger$
+logger$14.info({
 storyKey,
 tokenCount,
 ceiling: TOKEN_CEILING,
@@ -6555,7 +6556,7 @@ async function runDevStory(deps, params) {
 dispatchResult = await handle.result;
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$14.error({
 storyKey,
 error
 }, "Dispatch threw an unexpected error");
@@ -6566,11 +6567,11 @@ async function runDevStory(deps, params) {
 output: dispatchResult.tokenEstimate.output
 };
 if (dispatchResult.status === "timeout") {
-logger$
+logger$14.error({
 storyKey,
 durationMs: dispatchResult.durationMs
 }, "Dev-story dispatch timed out");
-if (dispatchResult.output.length > 0) logger$
+if (dispatchResult.output.length > 0) logger$14.info({
 storyKey,
 partialOutput: dispatchResult.output.slice(0, 500)
 }, "Partial output before timeout");
@@ -6580,12 +6581,12 @@ async function runDevStory(deps, params) {
 };
 }
 if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
-logger$
+logger$14.error({
 storyKey,
 exitCode: dispatchResult.exitCode,
 status: dispatchResult.status
 }, "Dev-story dispatch failed");
-if (dispatchResult.output.length > 0) logger$
+if (dispatchResult.output.length > 0) logger$14.info({
 storyKey,
 partialOutput: dispatchResult.output.slice(0, 500)
 }, "Partial output from failed dispatch");
@@ -6597,7 +6598,7 @@ async function runDevStory(deps, params) {
 if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
 const details = dispatchResult.parseError ?? "parsed result was null";
 const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
-logger$
+logger$14.error({
 storyKey,
 parseError: details,
 rawOutputSnippet: rawSnippet
@@ -6605,12 +6606,12 @@ async function runDevStory(deps, params) {
 let filesModified = [];
 try {
 filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
-if (filesModified.length > 0) logger$
+if (filesModified.length > 0) logger$14.info({
 storyKey,
 fileCount: filesModified.length
 }, "Recovered files_modified from git status (YAML fallback)");
 } catch (err) {
-logger$
+logger$14.warn({
 storyKey,
 error: err instanceof Error ? err.message : String(err)
 }, "Failed to recover files_modified from git");
@@ -6627,7 +6628,7 @@ async function runDevStory(deps, params) {
 };
 }
 const parsed = dispatchResult.parsed;
-logger$
+logger$14.info({
 storyKey,
 result: parsed.result,
 acMet: parsed.ac_met.length
@@ -6766,7 +6767,7 @@ function extractFilesInScope(storyContent) {
 
 //#endregion
 //#region src/modules/compiled-workflows/code-review.ts
-const logger$
+const logger$13 = createLogger("compiled-workflows:code-review");
 /**
 * Default fallback result when dispatch fails or times out.
 * Uses NEEDS_MINOR_FIXES (not NEEDS_MAJOR_REWORK) so a parse/schema failure
@@ -6841,14 +6842,14 @@ async function countTestMetrics(filesModified, cwd) {
 async function runCodeReview(deps, params) {
 const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
 const cwd = workingDirectory ?? process.cwd();
-logger$
+logger$13.debug({
 storyKey,
 storyFilePath,
 cwd,
 pipelineRunId
 }, "Starting code-review workflow");
 const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("code-review", deps.tokenCeilings);
-logger$
+logger$13.info({
 workflow: "code-review",
 ceiling: TOKEN_CEILING,
 source: tokenCeilingSource
@@ -6858,7 +6859,7 @@ async function runCodeReview(deps, params) {
 template = await deps.pack.getPrompt("code-review");
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$13.error({ error }, "Failed to retrieve code-review prompt template");
 return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
 input: 0,
 output: 0
@@ -6869,7 +6870,7 @@ async function runCodeReview(deps, params) {
 storyContent = await readFile$1(storyFilePath, "utf-8");
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$13.error({
 storyFilePath,
 error
 }, "Failed to read story file");
@@ -6889,12 +6890,12 @@ async function runCodeReview(deps, params) {
 const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
 if (scopedTotal <= TOKEN_CEILING) {
 gitDiffContent = scopedDiff;
-logger$
+logger$13.debug({
 fileCount: filesModified.length,
 tokenCount: scopedTotal
 }, "Using scoped file diff");
 } else {
-logger$
+logger$13.warn({
 estimatedTotal: scopedTotal,
 ceiling: TOKEN_CEILING,
 fileCount: filesModified.length
@@ -6908,7 +6909,7 @@ async function runCodeReview(deps, params) {
 const fullTotal = nonDiffTokens + countTokens(fullDiff);
 if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
 else {
-logger$
+logger$13.warn({
 estimatedTotal: fullTotal,
 ceiling: TOKEN_CEILING
 }, "Full git diff would exceed token ceiling — using stat-only summary");
@@ -6916,7 +6917,7 @@ async function runCodeReview(deps, params) {
 }
 }
 if (gitDiffContent.trim().length === 0) {
-logger$
+logger$13.info({ storyKey }, "Empty git diff — skipping review with SHIP_IT");
 return {
 verdict: "SHIP_IT",
 issues: 0,
@@ -6932,7 +6933,7 @@ async function runCodeReview(deps, params) {
 if (deps.repoMapInjector !== void 0) {
 const injection = await deps.repoMapInjector.buildContext(storyContent, deps.maxRepoMapTokens ?? 2e3);
 repoContextContent = injection.text;
-logger$
+logger$13.info({
 storyKey,
 repoMapTokens: Math.ceil(injection.text.length / 4),
 symbolCount: injection.symbolCount,
@@ -6952,14 +6953,14 @@ async function runCodeReview(deps, params) {
 const findings = await getProjectFindings(deps.db);
 if (findings.length > 0) {
 priorFindingsContent = "Previous reviews found these recurring patterns — pay special attention:\n\n" + findings;
-logger$
+logger$13.debug({
 storyKey,
 findingsLen: findings.length
 }, "Injecting prior findings into code-review prompt");
 }
 } catch {}
 const testMetricsContent = await countTestMetrics(filesModified, cwd);
-if (testMetricsContent) logger$
+if (testMetricsContent) logger$13.debug({ storyKey }, "Injecting verified test-count metrics into code-review context");
 const sections = [
 {
 name: "story_content",
@@ -6998,11 +6999,11 @@ async function runCodeReview(deps, params) {
 }
 ];
 const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
-if (assembleResult.truncated) logger$
+if (assembleResult.truncated) logger$13.warn({
 storyKey,
 tokenCount: assembleResult.tokenCount
 }, "Code-review prompt truncated to fit token ceiling");
-logger$
+logger$13.debug({
 storyKey,
 tokenCount: assembleResult.tokenCount,
 truncated: assembleResult.truncated
@@ -7023,7 +7024,7 @@ async function runCodeReview(deps, params) {
 dispatchResult = await handle.result;
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$13.error({
 storyKey,
 error
 }, "Code-review dispatch threw unexpected error");
@@ -7039,7 +7040,7 @@ async function runCodeReview(deps, params) {
 const rawOutput = dispatchResult.output ?? void 0;
 if (dispatchResult.status === "failed") {
 const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
-logger$
+logger$13.warn({
 storyKey,
 exitCode: dispatchResult.exitCode
 }, "Code-review dispatch failed");
@@ -7049,7 +7050,7 @@ async function runCodeReview(deps, params) {
 };
 }
 if (dispatchResult.status === "timeout") {
-logger$
+logger$13.warn({ storyKey }, "Code-review dispatch timed out");
 return {
 ...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
 rawOutput
@@ -7057,7 +7058,7 @@ async function runCodeReview(deps, params) {
 }
 if (dispatchResult.parsed === null) {
 const details = dispatchResult.parseError ?? "No YAML block found in output";
-logger$
+logger$13.warn({
 storyKey,
 details
 }, "Code-review output schema validation failed");
@@ -7074,7 +7075,7 @@ async function runCodeReview(deps, params) {
 const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
 if (!parseResult.success) {
 const details = parseResult.error.message;
-logger$
+logger$13.warn({
 storyKey,
 details
 }, "Code-review output failed schema validation");
@@ -7089,13 +7090,13 @@ async function runCodeReview(deps, params) {
 };
 }
 const parsed = parseResult.data;
-if (parsed.agentVerdict !== parsed.verdict) logger$
+if (parsed.agentVerdict !== parsed.verdict) logger$13.info({
 storyKey,
 agentVerdict: parsed.agentVerdict,
 pipelineVerdict: parsed.verdict,
 issues: parsed.issues
 }, "Pipeline overrode agent verdict based on issue severities");
-logger$
+logger$13.info({
 storyKey,
 verdict: parsed.verdict,
 issues: parsed.issues
@@ -7120,14 +7121,14 @@ async function getArchConstraints$2(deps) {
 if (constraints.length === 0) return "";
 return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
 } catch (err) {
-logger$
+logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
 return "";
 }
 }
 
 //#endregion
 //#region src/modules/compiled-workflows/test-plan.ts
-const logger$
+const logger$12 = createLogger("compiled-workflows:test-plan");
 /** Default timeout for test-plan dispatches in milliseconds (5 min — lightweight call) */
 const DEFAULT_TIMEOUT_MS = 3e5;
 /**
@@ -7139,12 +7140,12 @@ const DEFAULT_TIMEOUT_MS = 3e5;
 */
 async function runTestPlan(deps, params) {
 const { storyKey, storyFilePath, pipelineRunId } = params;
-logger$
+logger$12.info({
 storyKey,
 storyFilePath
 }, "Starting compiled test-plan workflow");
 const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-plan", deps.tokenCeilings);
-logger$
+logger$12.info({
 workflow: "test-plan",
 ceiling: TOKEN_CEILING,
 source: tokenCeilingSource
@@ -7152,10 +7153,10 @@ async function runTestPlan(deps, params) {
 let template;
 try {
 template = await deps.pack.getPrompt("test-plan");
-logger$
+logger$12.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$12.warn({
 storyKey,
 error
 }, "Failed to retrieve test-plan prompt template");
@@ -7166,14 +7167,14 @@ async function runTestPlan(deps, params) {
 storyContent = await readFile$1(storyFilePath, "utf-8");
 } catch (err) {
 if (err.code === "ENOENT") {
-logger$
+logger$12.warn({
 storyKey,
 storyFilePath
 }, "Story file not found for test planning");
 return makeTestPlanFailureResult("story_file_not_found");
 }
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$12.warn({
 storyKey,
 storyFilePath,
 error
@@ -7187,13 +7188,13 @@ async function runTestPlan(deps, params) {
 const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
 if (testPatternDecisions.length > 0) {
 testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
-logger$
+logger$12.debug({
 storyKey,
 count: testPatternDecisions.length
 }, "Loaded test patterns from decision store");
 } else {
 testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
-logger$
+logger$12.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
 }
 } catch {
 testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
@@ -7215,7 +7216,7 @@ async function runTestPlan(deps, params) {
 priority: "optional"
 }
 ], TOKEN_CEILING);
-logger$
+logger$12.info({
 storyKey,
 tokenCount,
 ceiling: TOKEN_CEILING,
@@ -7236,7 +7237,7 @@ async function runTestPlan(deps, params) {
 dispatchResult = await handle.result;
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$12.warn({
 storyKey,
 error
 }, "Test-plan dispatch threw an unexpected error");
@@ -7247,7 +7248,7 @@ async function runTestPlan(deps, params) {
 output: dispatchResult.tokenEstimate.output
 };
 if (dispatchResult.status === "timeout") {
-logger$
+logger$12.warn({
 storyKey,
 durationMs: dispatchResult.durationMs
 }, "Test-plan dispatch timed out");
@@ -7257,7 +7258,7 @@ async function runTestPlan(deps, params) {
 };
 }
 if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
-logger$
+logger$12.warn({
 storyKey,
 exitCode: dispatchResult.exitCode,
 status: dispatchResult.status
@@ -7269,7 +7270,7 @@ async function runTestPlan(deps, params) {
 }
 if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
 const details = dispatchResult.parseError ?? "parsed result was null";
-logger$
+logger$12.warn({
 storyKey,
 parseError: details
 }, "Test-plan YAML schema validation failed");
@@ -7292,19 +7293,19 @@ async function runTestPlan(deps, params) {
 }),
 rationale: `Test plan for ${storyKey}: ${parsed.test_files.length} test files, categories: ${parsed.test_categories.join(", ")}`
 });
-logger$
+logger$12.info({
 storyKey,
 fileCount: parsed.test_files.length,
 categories: parsed.test_categories
 }, "Test plan stored in decision store");
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$12.warn({
 storyKey,
 error
 }, "Failed to store test plan in decision store — proceeding anyway");
 }
-logger$
+logger$12.info({
 storyKey,
 result: parsed.result
 }, "Test-plan workflow completed");
@@ -7344,14 +7345,14 @@ async function getArchConstraints$1(deps) {
 if (constraints.length === 0) return "";
 return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
 } catch (err) {
-logger$
+logger$12.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints for test-plan — proceeding without them");
 return "";
 }
 }
 
 //#endregion
 //#region src/modules/compiled-workflows/test-expansion.ts
-const logger$
+const logger$11 = createLogger("compiled-workflows:test-expansion");
 function defaultFallbackResult(error, tokenUsage) {
 return {
 expansion_priority: "low",
@@ -7381,14 +7382,14 @@ function defaultFallbackResult(error, tokenUsage) {
 async function runTestExpansion(deps, params) {
 const { storyKey, storyFilePath, pipelineRunId, filesModified, workingDirectory } = params;
 const cwd = workingDirectory ?? process.cwd();
-logger$
+logger$11.debug({
 storyKey,
 storyFilePath,
 cwd,
 pipelineRunId
 }, "Starting test-expansion workflow");
 const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-expansion", deps.tokenCeilings);
-logger$
+logger$11.info({
 workflow: "test-expansion",
 ceiling: TOKEN_CEILING,
 source: tokenCeilingSource
@@ -7398,7 +7399,7 @@ async function runTestExpansion(deps, params) {
 template = await deps.pack.getPrompt("test-expansion");
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$11.warn({ error }, "Failed to retrieve test-expansion prompt template");
 return defaultFallbackResult(`Failed to retrieve prompt template: ${error}`, {
 input: 0,
 output: 0
@@ -7409,7 +7410,7 @@ async function runTestExpansion(deps, params) {
 storyContent = await readFile$1(storyFilePath, "utf-8");
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
-logger$
+logger$11.warn({
 storyFilePath,
 error
 }, "Failed to read story file");
@@ -7425,13 +7426,13 @@ async function runTestExpansion(deps, params) {
 const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
 if (testPatternDecisions.length > 0) {
 testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
-logger$
+logger$11.debug({
 storyKey,
 count: testPatternDecisions.length
 }, "Loaded test patterns from decision store");
 } else {
 testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
-logger$
+logger$11.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
 }
 } catch {
 testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
@@ -7446,12 +7447,12 @@ async function runTestExpansion(deps, params) {
 const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
 if (scopedTotal <= TOKEN_CEILING) {
 gitDiffContent = scopedDiff;
-logger$
+logger$11.debug({
 fileCount: filesModified.length,
 tokenCount: scopedTotal
 }, "Using scoped file diff");
 } else {
-logger$
+logger$11.warn({
 estimatedTotal: scopedTotal,
 ceiling: TOKEN_CEILING,
 fileCount: filesModified.length
@@ -7459,7 +7460,7 @@ async function runTestExpansion(deps, params) {
 gitDiffContent = await getGitDiffStatForFiles(filesModified, cwd);
 }
 } catch (err) {
-logger$
+logger$11.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
 }
 const sections = [
 {
@@ -7484,11 +7485,11 @@ async function runTestExpansion(deps, params) {
 }
 ];
 const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
-if (assembleResult.truncated) logger$
+if (assembleResult.truncated) logger$11.warn({
 storyKey,
 tokenCount: assembleResult.tokenCount
 }, "Test-expansion prompt truncated to fit token ceiling");
-logger$
+logger$11.debug({
 storyKey,
 tokenCount: assembleResult.tokenCount,
 truncated: assembleResult.truncated
@@ -7508,7 +7509,7 @@ async function runTestExpansion(deps, params) {
 dispatchResult = await handle.result;
 } catch (err) {
 const error = err instanceof Error ? err.message : String(err);
|
|
7511
|
-
logger$
|
|
7512
|
+
logger$11.warn({
|
|
7512
7513
|
storyKey,
|
|
7513
7514
|
error
|
|
7514
7515
|
}, "Test-expansion dispatch threw unexpected error");
|
|
@@ -7523,19 +7524,19 @@ async function runTestExpansion(deps, params) {
|
|
|
7523
7524
|
};
|
|
7524
7525
|
if (dispatchResult.status === "failed") {
|
|
7525
7526
|
const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""}`.trim();
|
|
7526
|
-
logger$
|
|
7527
|
+
logger$11.warn({
|
|
7527
7528
|
storyKey,
|
|
7528
7529
|
exitCode: dispatchResult.exitCode
|
|
7529
7530
|
}, "Test-expansion dispatch failed");
|
|
7530
7531
|
return defaultFallbackResult(errorMsg, tokenUsage);
|
|
7531
7532
|
}
|
|
7532
7533
|
if (dispatchResult.status === "timeout") {
|
|
7533
|
-
logger$
|
|
7534
|
+
logger$11.warn({ storyKey }, "Test-expansion dispatch timed out");
|
|
7534
7535
|
return defaultFallbackResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage);
|
|
7535
7536
|
}
|
|
7536
7537
|
if (dispatchResult.parsed === null) {
|
|
7537
7538
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
7538
|
-
logger$
|
|
7539
|
+
logger$11.warn({
|
|
7539
7540
|
storyKey,
|
|
7540
7541
|
details
|
|
7541
7542
|
}, "Test-expansion output has no parseable YAML");
|
|
@@ -7544,14 +7545,14 @@ async function runTestExpansion(deps, params) {
|
|
|
7544
7545
|
const parseResult = TestExpansionResultSchema.safeParse(dispatchResult.parsed);
|
|
7545
7546
|
if (!parseResult.success) {
|
|
7546
7547
|
const details = parseResult.error.message;
|
|
7547
|
-
logger$
|
|
7548
|
+
logger$11.warn({
|
|
7548
7549
|
storyKey,
|
|
7549
7550
|
details
|
|
7550
7551
|
}, "Test-expansion output failed schema validation");
|
|
7551
7552
|
return defaultFallbackResult(`schema_validation_failed: ${details}`, tokenUsage);
|
|
7552
7553
|
}
|
|
7553
7554
|
const parsed = parseResult.data;
|
|
7554
|
-
logger$
|
|
7555
|
+
logger$11.info({
|
|
7555
7556
|
storyKey,
|
|
7556
7557
|
expansion_priority: parsed.expansion_priority,
|
|
7557
7558
|
coverage_gaps: parsed.coverage_gaps.length,
|
|
@@ -7576,7 +7577,7 @@ async function getArchConstraints(deps) {
|
|
|
7576
7577
|
if (constraints.length === 0) return "";
|
|
7577
7578
|
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
7578
7579
|
} catch (err) {
|
|
7579
|
-
logger$
|
|
7580
|
+
logger$11.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
7580
7581
|
return "";
|
|
7581
7582
|
}
|
|
7582
7583
|
}
|
|
@@ -7884,6 +7885,16 @@ function detectConflictGroups(storyKeys, config) {
|
|
|
7884
7885
|
if (existing !== void 0) existing.push(key);
|
|
7885
7886
|
else moduleToStories.set(module$1, [key]);
|
|
7886
7887
|
}
|
|
7888
|
+
if (moduleToStories.size === 1 && storyKeys.length >= 4) {
|
|
7889
|
+
const epicGroups = new Map();
|
|
7890
|
+
for (const key of storyKeys) {
|
|
7891
|
+
const epicNum = key.split("-")[0] ?? key;
|
|
7892
|
+
const existing = epicGroups.get(epicNum);
|
|
7893
|
+
if (existing !== void 0) existing.push(key);
|
|
7894
|
+
else epicGroups.set(epicNum, [key]);
|
|
7895
|
+
}
|
|
7896
|
+
if (epicGroups.size > 1) return Array.from(epicGroups.values());
|
|
7897
|
+
}
|
|
7887
7898
|
return Array.from(moduleToStories.values());
|
|
7888
7899
|
}
|
|
7889
7900
|
/**
|
|
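The hunk above is the one behavioral change in this stretch of the diff: when every story maps to a single module but four or more stories are queued, `detectConflictGroups` now falls back to grouping by epic number, taken as the leading segment of the story key. A minimal standalone sketch of that fallback; `groupByEpicFallback` is an illustrative name, not an export of this package, and the `"1-2-slug"` key shape is assumed from the `key.split("-")[0]` line above.

```ts
// Hypothetical sketch of the epic-number fallback added above.
// Assumes story keys like "1-2-user-auth" (epic number first).
function groupByEpicFallback(storyKeys: string[]): string[][] | null {
  if (storyKeys.length < 4) return null; // mirrors the storyKeys.length >= 4 gate
  const epicGroups = new Map<string, string[]>();
  for (const key of storyKeys) {
    const epicNum = key.split("-")[0] ?? key; // leading segment = epic number
    const existing = epicGroups.get(epicNum);
    if (existing !== undefined) existing.push(key);
    else epicGroups.set(epicNum, [key]);
  }
  // Splitting is only useful when more than one epic is represented.
  return epicGroups.size > 1 ? Array.from(epicGroups.values()) : null;
}

// groupByEpicFallback(["1-1-a", "1-2-b", "2-1-c", "2-2-d"])
// -> [["1-1-a", "1-2-b"], ["2-1-c", "2-2-d"]]
```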
@@ -8008,7 +8019,7 @@ function detectConflictGroupsWithContracts(storyKeys, config, declarations) {

 //#endregion
 //#region src/modules/implementation-orchestrator/seed-methodology-context.ts
-const logger$
+const logger$10 = createLogger("implementation-orchestrator:seed");
 /** Max chars for the architecture summary seeded into decisions */
 const MAX_ARCH_CHARS = 6e3;
 /** Max chars per epic-shard decision value (per-story or per-epic fallback) */
@@ -8042,12 +8053,12 @@ async function seedMethodologyContext(db, projectRoot) {
 const testCount = await seedTestPatterns(db, projectRoot);
 if (testCount === -1) result.skippedCategories.push("test-patterns");
 else result.decisionsCreated += testCount;
-logger$
+logger$10.info({
 decisionsCreated: result.decisionsCreated,
 skippedCategories: result.skippedCategories
 }, "Methodology context seeding complete");
 } catch (err) {
-logger$
+logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
 }
 return result;
 }
@@ -8091,7 +8102,7 @@ async function seedArchitecture(db, projectRoot) {
 });
 count = 1;
 }
-logger$
+logger$10.debug({ count }, "Seeded architecture decisions");
 return count;
 }
 /**
@@ -8115,11 +8126,11 @@ async function seedEpicShards(db, projectRoot) {
 const storedHashDecision = implementationDecisions.find((d) => d.category === "epic-shard-hash" && d.key === "epics-file");
 const storedHash = storedHashDecision?.value;
 if (storedHash === currentHash) {
-logger$
+logger$10.debug({ hash: currentHash }, "Epic shards up-to-date (hash unchanged) — skipping re-seed");
 return -1;
 }
 if (implementationDecisions.some((d) => d.category === "epic-shard")) {
-logger$
+logger$10.debug({
 storedHash,
 currentHash
 }, "Epics file changed — deleting stale epic-shard decisions");
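The guard above skips re-seeding when the stored hash matches the current one; the next hunk's rationale string identifies it as a SHA-256 of the epics file content. A minimal sketch of that content-hash change detection, assuming Node's built-in `node:crypto` and `node:fs`; the helper names are ours, not the package's.

```ts
import { createHash } from "node:crypto";
import { readFileSync } from "node:fs";

// Hash the epics file so seeding can be skipped when nothing changed.
function hashFileContent(path: string): string {
  return createHash("sha256").update(readFileSync(path)).digest("hex");
}

// Compare against the hash persisted as the "epic-shard-hash" decision;
// a match means the shards are current and re-seeding can be skipped.
function needsReseed(epicsPath: string, storedHash: string | undefined): boolean {
  return hashFileContent(epicsPath) !== storedHash;
}
```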
@@ -8150,7 +8161,7 @@ async function seedEpicShards(db, projectRoot) {
 value: currentHash,
 rationale: "SHA-256 hash of epics file content for change detection"
 });
-logger$
+logger$10.debug({
 count,
 hash: currentHash
 }, "Seeded epic shard decisions");
@@ -8174,7 +8185,7 @@ async function seedTestPatterns(db, projectRoot) {
 value: patterns.slice(0, MAX_TEST_PATTERNS_CHARS),
 rationale: "Detected from project configuration at orchestrator startup"
 });
-logger$
+logger$10.debug("Seeded test patterns decision");
 return 1;
 }
 /**
@@ -8631,7 +8642,7 @@ function findArtifact(projectRoot, candidates) {

 //#endregion
 //#region src/modules/implementation-orchestrator/package-snapshot.ts
-const logger$
+const logger$9 = createLogger("package-snapshot");
 /**
 * Discover all package.json paths in a workspace monorepo.
 * Checks the `workspaces` field in root package.json,
@@ -8723,7 +8734,7 @@ function restorePackageSnapshot(snapshot, options) {
 writeFileSync(filePath, content, "utf-8");
 filesRestored++;
 } catch (err) {
-logger$
+logger$9.warn({
 filePath,
 err
 }, "Failed to restore file from snapshot");
@@ -8734,7 +8745,7 @@ function restorePackageSnapshot(snapshot, options) {
 encoding: "utf-8",
 stdio: "pipe"
 });
-logger$
+logger$9.info({
 filesRestored,
 installCommand: snapshot.installCommand
 }, "Package snapshot restored successfully");
@@ -8745,7 +8756,7 @@ function restorePackageSnapshot(snapshot, options) {
 };
 } catch (err) {
 const exitCode = err.status ?? 1;
-logger$
+logger$9.warn({
 filesRestored,
 exitCode,
 err
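The three `restorePackageSnapshot` hunks above only rename the module logger, but they show the shape of the restore path: write each snapshotted file back with `writeFileSync`, count `filesRestored`, then re-run the snapshot's recorded `installCommand`. A minimal sketch under those assumptions; the `PackageSnapshot` shape is inferred from the fields visible in the diff, not taken from the package's types.

```ts
import { writeFileSync } from "node:fs";
import { execSync } from "node:child_process";

// Shape inferred from the diff: manifest contents keyed by file path,
// plus the install command recorded when the snapshot was taken.
interface PackageSnapshot {
  files: Map<string, string>;
  installCommand: string;
}

function restoreSnapshot(snapshot: PackageSnapshot, projectRoot: string): number {
  let filesRestored = 0;
  for (const [filePath, content] of snapshot.files) {
    try {
      writeFileSync(filePath, content, "utf-8");
      filesRestored++;
    } catch {
      // Best-effort: one unwritable file should not abort the restore.
    }
  }
  // Re-install so the lockfile and node_modules match the restored manifests.
  execSync(snapshot.installCommand, { cwd: projectRoot, encoding: "utf-8", stdio: "pipe" });
  return filesRestored;
}
```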
@@ -9091,7 +9102,7 @@ const RecommendationSchema = z.object({

 //#endregion
 //#region src/modules/telemetry/adapter-persistence.ts
-const logger$
+const logger$8 = createLogger("telemetry:adapter-persistence");
 /**
 * Concrete DatabaseAdapter-backed telemetry persistence.
 *
@@ -9266,7 +9277,7 @@ var AdapterTelemetryPersistence = class {
 ]);
 }
 });
-logger$
+logger$8.debug({
 storyKey,
 count: turns.length
 }, "Stored turn analysis");
@@ -9338,7 +9349,7 @@ var AdapterTelemetryPersistence = class {
 score.taskType ?? null,
 score.phase ?? null
 ]);
-logger$
+logger$8.debug({
 storyKey: score.storyKey,
 compositeScore: score.compositeScore
 }, "Stored efficiency score");
@@ -9406,7 +9417,7 @@ var AdapterTelemetryPersistence = class {
 ]);
 }
 });
-logger$
+logger$8.debug({
 storyKey,
 count: recs.length
 }, "Saved recommendations");
@@ -9485,7 +9496,7 @@ var AdapterTelemetryPersistence = class {
 ]);
 } catch {}
 });
-logger$
+logger$8.debug({
 storyKey,
 count: stats.length
 }, "Stored category stats");
@@ -9530,7 +9541,7 @@ var AdapterTelemetryPersistence = class {
 ]);
 } catch {}
 });
-logger$
+logger$8.debug({
 storyKey,
 count: consumers.length
 }, "Stored consumer stats");
@@ -9562,14 +9573,14 @@ var AdapterTelemetryPersistence = class {
 await adapter.query("DELETE FROM category_stats WHERE story_key = ?", [storyKey]);
 await adapter.query("DELETE FROM consumer_stats WHERE story_key = ?", [storyKey]);
 });
-logger$
+logger$8.debug({ storyKey }, "Purged stale telemetry data for story");
 }
 /**
 * Record a named span with arbitrary attributes.
 * Currently logs the span at debug level; no DB persistence.
 */
 recordSpan(span) {
-logger$
+logger$8.debug({ span }, "recordSpan");
 }
 };

@@ -9648,7 +9659,7 @@ var TelemetryPersistence = class {

 //#endregion
 //#region src/modules/telemetry/telemetry-advisor.ts
-const logger$
+const logger$7 = createLogger("telemetry-advisor");
 /**
 * Reads telemetry efficiency data to support retry gate decisions.
 */
@@ -9670,7 +9681,7 @@ var TelemetryAdvisor = class {
 try {
 const score = await this._persistence.getEfficiencyScore(storyKey);
 if (score === null) {
-logger$
+logger$7.debug({ storyKey }, "No efficiency score found for story");
 return null;
 }
 return {
@@ -9685,7 +9696,7 @@ var TelemetryAdvisor = class {
 coldStartTurnsExcluded: score.coldStartTurnsExcluded ?? 0
 };
 } catch (err) {
-logger$
+logger$7.warn({
 err,
 storyKey
 }, "Failed to retrieve efficiency score");
@@ -9722,7 +9733,7 @@ var TelemetryAdvisor = class {
 merged.sort((a, b) => (severityOrder[a.severity] ?? 3) - (severityOrder[b.severity] ?? 3));
 return merged;
 } catch (err) {
-logger$
+logger$7.warn({ err }, "Failed to retrieve recommendations for run — returning empty");
 return [];
 }
 }
@@ -9752,7 +9763,7 @@ var TelemetryAdvisor = class {
 const lines = actionable.map((r) => `OPTIMIZATION (${r.severity}): ${r.title}. ${r.description}`);
 const full = lines.join("\n");
 if (full.length <= MAX_CHARS$1) {
-logger$
+logger$7.debug({
 count: actionable.length,
 chars: full.length
 }, "Formatting optimization directives");
@@ -9760,7 +9771,7 @@ var TelemetryAdvisor = class {
 }
 const cutAt = full.lastIndexOf(" ", MAX_CHARS$1);
 const truncated = (cutAt > 0 ? full.slice(0, cutAt) : full.slice(0, MAX_CHARS$1)) + "…";
-logger$
+logger$7.debug({
 count: actionable.length,
 chars: truncated.length
 }, "Optimization directives truncated to budget");
@@ -10516,7 +10527,7 @@ function checkProfileStaleness(projectRoot) {
 */
 function createImplementationOrchestrator(deps) {
 const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot, tokenCeilings, stateStore, telemetryPersistence, ingestionServer, repoMapInjector, maxRepoMapTokens } = deps;
-const logger$
+const logger$22 = createLogger("implementation-orchestrator");
 const telemetryAdvisor = db !== void 0 ? createTelemetryAdvisor({ db }) : void 0;
 const wgRepo = new WorkGraphRepository(db);
 const _wgInProgressWritten = new Set();
@@ -10567,7 +10578,7 @@ function createImplementationOrchestrator(deps) {
 const nowMs = Date.now();
 for (const [phase, startMs] of starts) {
 const endMs = ends?.get(phase);
-if (endMs === void 0) logger$
+if (endMs === void 0) logger$22.warn({
 storyKey,
 phase
 }, "Phase has no end time — story may have errored mid-phase. Duration capped to now() and may be inflated.");
@@ -10614,7 +10625,7 @@ function createImplementationOrchestrator(deps) {
 recordedAt: completedAt,
 timestamp: completedAt
 }).catch((storeErr) => {
-logger$
+logger$22.warn({
 err: storeErr,
 storyKey
 }, "Failed to record metric to StateStore (best-effort)");
@@ -10636,7 +10647,7 @@ function createImplementationOrchestrator(deps) {
 rationale: `Story ${storyKey} completed with result=${result} in ${wallClockSeconds}s. Tokens: ${tokenAgg.input}+${tokenAgg.output}. Review cycles: ${reviewCycles}.`
 });
 } catch (decisionErr) {
-logger$
+logger$22.warn({
 err: decisionErr,
 storyKey
 }, "Failed to write story-metrics decision (best-effort)");
@@ -10664,13 +10675,13 @@ function createImplementationOrchestrator(deps) {
 dispatches: _storyDispatches.get(storyKey) ?? 0
 });
 } catch (emitErr) {
-logger$
+logger$22.warn({
 err: emitErr,
 storyKey
 }, "Failed to emit story:metrics event (best-effort)");
 }
 } catch (err) {
-logger$
+logger$22.warn({
 err,
 storyKey
 }, "Failed to write story metrics (best-effort)");
@@ -10699,7 +10710,7 @@ function createImplementationOrchestrator(deps) {
 rationale: `Story ${storyKey} ${outcome} after ${reviewCycles} review cycle(s).`
 });
 } catch (err) {
-logger$
+logger$22.warn({
 err,
 storyKey
 }, "Failed to write story-outcome decision (best-effort)");
@@ -10725,7 +10736,7 @@ function createImplementationOrchestrator(deps) {
 rationale: `Escalation diagnosis for ${payload.storyKey}: ${diagnosis.recommendedAction} — ${diagnosis.rationale}`
 });
 } catch (err) {
-logger$
+logger$22.warn({
 err,
 storyKey: payload.storyKey
 }, "Failed to persist escalation diagnosis (best-effort)");
@@ -10775,7 +10786,7 @@ function createImplementationOrchestrator(deps) {
 const existing = _stories.get(storyKey);
 if (existing !== void 0) {
 Object.assign(existing, updates);
-persistStoryState(storyKey, existing).catch((err) => logger$
+persistStoryState(storyKey, existing).catch((err) => logger$22.warn({
 err,
 storyKey
 }, "StateStore write failed after updateStory"));
@@ -10784,12 +10795,12 @@ function createImplementationOrchestrator(deps) {
 storyKey,
 conflict: err
 });
-else logger$
+else logger$22.warn({
 err,
 storyKey
 }, "mergeStory failed");
 });
-else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$
+else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$22.warn({
 err,
 storyKey
 }, "rollbackStory failed — branch may persist"));
@@ -10801,7 +10812,7 @@ function createImplementationOrchestrator(deps) {
 ...updates
 };
 const opts = targetStatus === "complete" || targetStatus === "escalated" ? { completedAt: fullUpdated.completedAt } : void 0;
-wgRepo.updateStoryStatus(storyKey, targetStatus, opts).catch((err) => logger$
+wgRepo.updateStoryStatus(storyKey, targetStatus, opts).catch((err) => logger$22.warn({
 err,
 storyKey
 }, "wg_stories status update failed (best-effort)"));
@@ -10832,7 +10843,7 @@ function createImplementationOrchestrator(deps) {
 };
 await stateStore.setStoryState(storyKey, record);
 } catch (err) {
-logger$
+logger$22.warn({
 err,
 storyKey
 }, "StateStore.setStoryState failed (best-effort)");
@@ -10848,7 +10859,7 @@ function createImplementationOrchestrator(deps) {
 token_usage_json: serialized
 });
 } catch (err) {
-logger$
+logger$22.warn({ err }, "Failed to persist orchestrator state");
 }
 }
 function recordProgress() {
@@ -10875,7 +10886,7 @@ function createImplementationOrchestrator(deps) {
 queuedDispatches: queued
 });
 if (config.pipelineRunId !== void 0) updatePipelineRun(db, config.pipelineRunId, { current_phase: "implementation" }).catch((err) => {
-logger$
+logger$22.debug({ err }, "Heartbeat: failed to touch updated_at (non-fatal)");
 });
 const elapsed = Date.now() - _lastProgressTs;
 let childPids = [];
@@ -10897,7 +10908,7 @@ function createImplementationOrchestrator(deps) {
 }
 if (childActive) {
 _lastProgressTs = Date.now();
-logger$
+logger$22.debug({
 storyKey: key,
 phase: s$1.phase,
 childPids
@@ -10906,7 +10917,7 @@ function createImplementationOrchestrator(deps) {
 }
 _stalledStories.add(key);
 _storiesWithStall.add(key);
-logger$
+logger$22.warn({
 storyKey: key,
 phase: s$1.phase,
 elapsedMs: elapsed,
@@ -10951,7 +10962,7 @@ function createImplementationOrchestrator(deps) {
 for (let attempt = 0; attempt < MEMORY_PRESSURE_BACKOFF_MS.length; attempt++) {
 const memState = dispatcher.getMemoryState();
 if (!memState.isPressured) return true;
-logger$
+logger$22.warn({
 storyKey,
 freeMB: memState.freeMB,
 thresholdMB: memState.thresholdMB,
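The memory-pressure check above loops over a `MEMORY_PRESSURE_BACKOFF_MS` schedule, re-probing `dispatcher.getMemoryState()` on each attempt and logging the free and threshold megabytes while pressured. A minimal sketch of that gate; the backoff values and helper names here are illustrative, since the real schedule is not visible in this diff.

```ts
// Illustrative schedule; the package's actual values are not shown in this diff.
const MEMORY_PRESSURE_BACKOFF_MS = [1000, 5000, 15000];

const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

interface MemoryState {
  isPressured: boolean;
  freeMB: number;
  thresholdMB: number;
}

// Returns true once pressure clears, false if every backoff step is spent.
async function waitForMemory(getMemoryState: () => MemoryState): Promise<boolean> {
  for (const backoffMs of MEMORY_PRESSURE_BACKOFF_MS) {
    if (!getMemoryState().isPressured) return true;
    await sleep(backoffMs);
  }
  return !getMemoryState().isPressured;
}
```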
@@ -10971,11 +10982,11 @@ function createImplementationOrchestrator(deps) {
 * exhausted retries the story is ESCALATED.
 */
 async function processStory(storyKey, storyOptions) {
-logger$
+logger$22.info({ storyKey }, "Processing story");
 {
 const memoryOk = await checkMemoryPressure(storyKey);
 if (!memoryOk) {
-logger$
+logger$22.warn({ storyKey }, "Memory pressure exhausted — escalating story without dispatch");
 const memPressureState = {
 phase: "ESCALATED",
 reviewCycles: 0,
@@ -10984,7 +10995,7 @@ function createImplementationOrchestrator(deps) {
 completedAt: new Date().toISOString()
 };
 _stories.set(storyKey, memPressureState);
-persistStoryState(storyKey, memPressureState).catch((err) => logger$
+persistStoryState(storyKey, memPressureState).catch((err) => logger$22.warn({
 err,
 storyKey
 }, "StateStore write failed after memory-pressure escalation"));
@@ -11001,7 +11012,7 @@ function createImplementationOrchestrator(deps) {
 }
 await waitIfPaused();
 if (_state !== "RUNNING") return;
-stateStore?.branchForStory(storyKey).catch((err) => logger$
+stateStore?.branchForStory(storyKey).catch((err) => logger$22.warn({
 err,
 storyKey
 }, "branchForStory failed — continuing without branch isolation"));
@@ -11018,14 +11029,14 @@ function createImplementationOrchestrator(deps) {
 if (match$1) {
 const candidatePath = join$1(artifactsDir, match$1);
 const validation = await isValidStoryFile(candidatePath);
-if (!validation.valid) logger$
+if (!validation.valid) logger$22.warn({
 storyKey,
 storyFilePath: candidatePath,
 reason: validation.reason
 }, `Existing story file for ${storyKey} is invalid (${validation.reason}) — re-creating`);
 else {
 storyFilePath = candidatePath;
-logger$
+logger$22.info({
 storyKey,
 storyFilePath
 }, "Found existing story file — skipping create-story");
@@ -11044,7 +11055,7 @@ function createImplementationOrchestrator(deps) {
 }
 } catch {}
 if (storyFilePath === void 0 && projectRoot && isImplicitlyCovered(storyKey, projectRoot)) {
-logger$
+logger$22.info({ storyKey }, `Story ${storyKey} appears implicitly covered — all expected new files already exist. Skipping create-story.`);
 endPhase(storyKey, "create-story");
 eventBus.emit("orchestrator:story-phase-complete", {
 storyKey,
@@ -11093,7 +11104,7 @@ function createImplementationOrchestrator(deps) {
 metadata: JSON.stringify({ storyKey })
 });
 } catch (tokenErr) {
-logger$
+logger$22.warn({
 storyKey,
 err: tokenErr
 }, "Failed to record create-story token usage");
@@ -11102,7 +11113,7 @@ function createImplementationOrchestrator(deps) {
 if (createResult.result === "failed") {
 const errMsg = createResult.error ?? "create-story failed";
 const stderrSnippet = errMsg.includes("--- stderr ---") ? errMsg.slice(errMsg.indexOf("--- stderr ---") + 15, errMsg.indexOf("--- stderr ---") + 515) : errMsg.slice(0, 500);
-logger$
+logger$22.error({
 storyKey,
 stderrSnippet
 }, `Create-story failed: ${stderrSnippet.split("\n")[0]}`);
@@ -11155,7 +11166,7 @@ function createImplementationOrchestrator(deps) {
 const overlap = computeTitleOverlap(expectedTitle, createResult.story_title);
 if (overlap < TITLE_OVERLAP_WARNING_THRESHOLD) {
 const msg = `Story title mismatch: expected "${expectedTitle}" but got "${createResult.story_title}" (word overlap: ${Math.round(overlap * 100)}%). This may indicate the create-story agent received truncated context.`;
-logger$
+logger$22.warn({
 storyKey,
 expectedTitle,
 generatedTitle: createResult.story_title,
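The title check above compares `computeTitleOverlap`'s 0 to 1 ratio against `TITLE_OVERLAP_WARNING_THRESHOLD` to flag stories whose generated title drifted from the epic's expected one. The implementation is not part of this diff; a plausible word-overlap measure, labeled hypothetical, might look like this:

```ts
// Hypothetical word-overlap measure: the fraction of expected-title words
// that also appear in the generated title, in [0, 1].
function wordOverlap(expected: string, generated: string): number {
  const words = (s: string) =>
    new Set(s.toLowerCase().split(/\W+/).filter((w) => w.length > 0));
  const expectedWords = words(expected);
  if (expectedWords.size === 0) return 1;
  const generatedWords = words(generated);
  let shared = 0;
  for (const w of expectedWords) if (generatedWords.has(w)) shared++;
  return shared / expectedWords.size;
}

// wordOverlap("User login flow", "Login flow for users") === 2 / 3
```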
@@ -11165,7 +11176,7 @@ function createImplementationOrchestrator(deps) {
 storyKey,
 msg
 });
-} else logger$
+} else logger$22.debug({
 storyKey,
 expectedTitle,
 generatedTitle: createResult.story_title,
@@ -11174,7 +11185,7 @@ function createImplementationOrchestrator(deps) {
 }
 }
 } catch (titleValidationErr) {
-logger$
+logger$22.debug({
 storyKey,
 err: titleValidationErr
 }, "Story title validation skipped due to error");
@@ -11222,14 +11233,14 @@ function createImplementationOrchestrator(deps) {
 ...contract.transport !== void 0 ? { transport: contract.transport } : {}
 })
 });
-logger$
+logger$22.info({
 storyKey,
 contractCount: contracts.length,
 contracts
 }, "Stored interface contract declarations");
 }
 } catch (err) {
-logger$
+logger$22.warn({
 storyKey,
 error: err instanceof Error ? err.message : String(err)
 }, "Failed to parse interface contracts — continuing without contract declarations");
@@ -11257,10 +11268,10 @@ function createImplementationOrchestrator(deps) {
 });
 testPlanPhaseResult = testPlanResult.result;
 testPlanTokenUsage = testPlanResult.tokenUsage;
-if (testPlanResult.result === "success") logger$
-else logger$
+if (testPlanResult.result === "success") logger$22.info({ storyKey }, "Test plan generated successfully");
+else logger$22.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
 } catch (err) {
-logger$
+logger$22.warn({
 storyKey,
 err
 }, "Test planning failed — proceeding to dev-story without test plan");
@@ -11276,7 +11287,7 @@ function createImplementationOrchestrator(deps) {
 metadata: JSON.stringify({ storyKey })
 });
 } catch (tokenErr) {
-logger$
+logger$22.warn({
 storyKey,
 err: tokenErr
 }, "Failed to record test-plan token usage");
@@ -11312,7 +11323,7 @@ function createImplementationOrchestrator(deps) {
 try {
 storyContentForAnalysis = await readFile$1(storyFilePath ?? "", "utf-8");
 } catch (err) {
-logger$
+logger$22.error({
 storyKey,
 storyFilePath,
 error: err instanceof Error ? err.message : String(err)
@@ -11320,7 +11331,7 @@ function createImplementationOrchestrator(deps) {
 }
 const analysis = analyzeStoryComplexity(storyContentForAnalysis);
 const batches = planTaskBatches(analysis);
-logger$
+logger$22.info({
 storyKey,
 estimatedScope: analysis.estimatedScope,
 batchCount: batches.length,
@@ -11338,7 +11349,7 @@ function createImplementationOrchestrator(deps) {
 if (_state !== "RUNNING") break;
 const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
 const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
-logger$
+logger$22.info({
 storyKey,
 batchIndex: batch.batchIndex,
 taskCount: batch.taskIds.length
@@ -11368,7 +11379,7 @@ function createImplementationOrchestrator(deps) {
 });
 } catch (batchErr) {
 const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
-logger$
+logger$22.warn({
 storyKey,
 batchIndex: batch.batchIndex,
 error: errMsg
@@ -11388,7 +11399,7 @@ function createImplementationOrchestrator(deps) {
 filesModified: batchFilesModified,
 result: batchResult.result === "success" ? "success" : "failed"
 };
-logger$
+logger$22.info(batchMetrics, "Batch dev-story metrics");
 for (const f$1 of batchFilesModified) allFilesModified.add(f$1);
 if (batchFilesModified.length > 0) batchFileGroups.push({
 batchIndex: batch.batchIndex,
@@ -11410,13 +11421,13 @@ function createImplementationOrchestrator(deps) {
 })
 });
 } catch (tokenErr) {
-logger$
+logger$22.warn({
 storyKey,
 batchIndex: batch.batchIndex,
 err: tokenErr
 }, "Failed to record batch token usage");
 }
-if (batchResult.result === "failed") logger$
+if (batchResult.result === "failed") logger$22.warn({
 storyKey,
 batchIndex: batch.batchIndex,
 error: batchResult.error
@@ -11460,7 +11471,7 @@ function createImplementationOrchestrator(deps) {
 metadata: JSON.stringify({ storyKey })
 });
 } catch (tokenErr) {
-logger$
+logger$22.warn({
 storyKey,
 err: tokenErr
 }, "Failed to record dev-story token usage");
@@ -11476,7 +11487,7 @@ function createImplementationOrchestrator(deps) {
 endPhase(storyKey, "dev-story");
 const timeoutFiles = checkGitDiffFiles(projectRoot ?? process.cwd());
 if (timeoutFiles.length === 0) {
-logger$
+logger$22.warn({ storyKey }, "Dev-story timeout with zero modified files — escalating immediately (no checkpoint)");
 updateStory(storyKey, {
 phase: "ESCALATED",
 error: "timeout-no-files",
@@ -11492,7 +11503,7 @@ function createImplementationOrchestrator(deps) {
 await persistState();
 return;
 }
-logger$
+logger$22.info({
 storyKey,
 filesCount: timeoutFiles.length
 }, "Dev-story timeout with partial files — capturing checkpoint");
@@ -11509,7 +11520,7 @@ function createImplementationOrchestrator(deps) {
 ]
 }).trim();
 } catch (diffErr) {
-logger$
+logger$22.warn({
 storyKey,
 error: diffErr instanceof Error ? diffErr.message : String(diffErr)
 }, "Failed to capture git diff for checkpoint — proceeding with empty diff");
@@ -11536,7 +11547,7 @@ function createImplementationOrchestrator(deps) {
 recordedAt: new Date().toISOString(),
 sprint: config.sprint
 }).catch((storeErr) => {
-logger$
+logger$22.warn({
 err: storeErr,
 storyKey
 }, "Failed to record timeout metric to StateStore (best-effort)");
@@ -11595,9 +11606,9 @@ function createImplementationOrchestrator(deps) {
 checkpointRetryPrompt = assembled.prompt;
 } catch {
 checkpointRetryPrompt = `Continue story ${storyKey} from checkpoint. Your prior attempt timed out. Do not redo completed work.`;
-logger$
+logger$22.warn({ storyKey }, "Failed to assemble checkpoint retry prompt — using fallback");
 }
-logger$
+logger$22.info({
 storyKey,
 filesCount: checkpointData.filesModified.length
 }, "Dispatching checkpoint retry for timed-out story");
@@ -11626,7 +11637,7 @@ function createImplementationOrchestrator(deps) {
 } : void 0 }
 });
 if (checkpointRetryResult.status === "timeout") {
-logger$
+logger$22.warn({ storyKey }, "Checkpoint retry dispatch timed out — escalating story");
 updateStory(storyKey, {
 phase: "ESCALATED",
 error: "checkpoint-retry-timeout",
@@ -11645,7 +11656,7 @@ function createImplementationOrchestrator(deps) {
 const retryParsed = checkpointRetryResult.parsed;
 devFilesModified = retryParsed?.files_modified ?? checkGitDiffFiles(projectRoot ?? process.cwd());
 if (checkpointRetryResult.status === "completed" && retryParsed?.result === "success") devStoryWasSuccess = true;
-else logger$
+else logger$22.warn({
 storyKey,
 status: checkpointRetryResult.status
 }, "Checkpoint retry completed with failure — proceeding to code review");
@@ -11653,13 +11664,13 @@ function createImplementationOrchestrator(deps) {
 }
 if (!checkpointHandled) if (devResult.result === "success") devStoryWasSuccess = true;
 else {
-logger$
+logger$22.warn({
 storyKey,
 error: devResult.error,
 filesModified: devFilesModified.length
 }, "Dev-story reported failure, proceeding to code review");
 if (!devResult.error?.startsWith("dispatch_timeout")) {
-logger$
+logger$22.warn({
 storyKey,
 error: devResult.error
 }, "Agent process failure (non-timeout) — story will proceed to code review with partial work");
@@ -11706,12 +11717,12 @@ function createImplementationOrchestrator(deps) {
 }).trim();
 hasNewCommits = currentHead !== baselineHeadSha;
 } catch {}
-if (hasNewCommits) logger$
+if (hasNewCommits) logger$22.info({
 storyKey,
 baselineHeadSha
 }, "Working tree clean but new commits detected since dispatch — skipping zero-diff escalation");
 else {
-logger$
+logger$22.warn({ storyKey }, "Zero-diff detected after COMPLETE dev-story — no file changes and no new commits");
 eventBus.emit("orchestrator:zero-diff-escalation", {
 storyKey,
 reason: "zero-diff-on-complete"
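The zero-diff guard above separates "the agent committed its work" from "the agent produced nothing": escalation only fires when the working tree is clean and HEAD still equals the SHA captured before dispatch. A minimal sketch of that check, assuming plain `git` subprocess calls; the function names are ours, not the package's.

```ts
import { execSync } from "node:child_process";

const git = (cwd: string, args: string): string =>
  execSync(`git ${args}`, { cwd, encoding: "utf-8", stdio: "pipe" }).trim();

// True when the story produced no observable change: a clean working tree
// and HEAD still at the SHA recorded before the agent was dispatched.
function isZeroDiff(cwd: string, baselineHeadSha: string): boolean {
  if (git(cwd, "status --porcelain").length > 0) return false; // uncommitted changes exist
  return git(cwd, "rev-parse HEAD") === baselineHeadSha; // no new commits either
}
```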
@@ -11760,10 +11771,10 @@ function createImplementationOrchestrator(deps) {
 "pipe"
 ]
 });
-logger$
+logger$22.info({ storyKey }, "Secondary typecheck (tsc --noEmit) passed");
 } catch (tscErr) {
 const tscOutput = tscErr instanceof Error && "stdout" in tscErr ? String(tscErr.stdout ?? "").slice(0, 2e3) : "";
-logger$
+logger$22.warn({
 storyKey,
 tscOutput
 }, "Secondary typecheck (tsc --noEmit) failed — treating as build failure");
@@ -11777,7 +11788,7 @@ function createImplementationOrchestrator(deps) {
 }
 if (buildVerifyResult.status === "passed") {
 eventBus.emit("story:build-verification-passed", { storyKey });
-logger$
+logger$22.info({ storyKey }, "Build verification passed");
 } else if (buildVerifyResult.status === "failed" || buildVerifyResult.status === "timeout") {
 const truncatedOutput = (buildVerifyResult.output ?? "").slice(0, 2e3);
 const reason = buildVerifyResult.reason ?? "build-verification-failed";
@@ -11786,7 +11797,7 @@ function createImplementationOrchestrator(deps) {
 const resolvedRoot = projectRoot ?? process.cwd();
 const hasChanges = detectPackageChanges(_packageSnapshot, resolvedRoot);
 if (hasChanges) {
-logger$
+logger$22.warn({ storyKey }, "Package files changed since snapshot — restoring to prevent cascade");
 const restoreResult = restorePackageSnapshot(_packageSnapshot, { projectRoot: resolvedRoot });
 if (restoreResult.restored) {
 const retryAfterRestore = runBuildVerification({
@@ -11798,11 +11809,11 @@ function createImplementationOrchestrator(deps) {
 if (retryAfterRestore.status === "passed") {
 retryPassed = true;
 eventBus.emit("story:build-verification-passed", { storyKey });
-logger$
+logger$22.warn({
 storyKey,
 filesRestored: restoreResult.filesRestored
 }, "Build passed after package snapshot restore — cross-story pollution detected and cleaned");
-} else logger$
+} else logger$22.warn({
 storyKey,
 filesRestored: restoreResult.filesRestored
 }, "Build still fails after snapshot restore — story has its own build errors");
@@ -11814,7 +11825,7 @@ function createImplementationOrchestrator(deps) {
 if (missingPkgMatch && buildVerifyResult.status !== "timeout") {
 const missingPkg = missingPkgMatch[1].replace(/^(@[^/]+\/[^/]+)\/.*$/, "$1").replace(/^([^@][^/]*)\/.*$/, "$1");
 const resolvedRoot = projectRoot ?? process.cwd();
-logger$
+logger$22.warn({
 storyKey,
 missingPkg
 }, "Build-fix retry: detected missing npm package — attempting npm install");
@@ -11825,7 +11836,7 @@ function createImplementationOrchestrator(deps) {
 encoding: "utf-8",
 stdio: "pipe"
 });
-logger$
+logger$22.warn({
 storyKey,
 missingPkg
 }, "Build-fix retry: npm install succeeded — retrying build verification");
@@ -11838,18 +11849,18 @@ function createImplementationOrchestrator(deps) {
 if (retryResult.status === "passed") {
 retryPassed = true;
 eventBus.emit("story:build-verification-passed", { storyKey });
-logger$
+logger$22.warn({
 storyKey,
 missingPkg
 }, "Build-fix retry: build verification passed after installing missing package");
-} else logger$
+} else logger$22.warn({
 storyKey,
 missingPkg,
 retryStatus: retryResult.status
 }, "Build-fix retry: build still fails after installing missing package — escalating");
 } catch (installErr) {
 const installMsg = installErr instanceof Error ? installErr.message : String(installErr);
-logger$
+logger$22.warn({
 storyKey,
 missingPkg,
 error: installMsg
@@ -11859,7 +11870,7 @@ function createImplementationOrchestrator(deps) {
 if (!retryPassed) {
 let buildFixPassed = false;
 if (buildVerifyResult.status === "failed" && storyFilePath !== void 0) try {
-logger$
+logger$22.info({ storyKey }, "Dispatching build-fix agent");
 startPhase(storyKey, "build-fix");
 const storyContent = await readFile$1(storyFilePath, "utf-8");
 let buildFixTemplate;
@@ -11895,11 +11906,11 @@ function createImplementationOrchestrator(deps) {
 if (retryAfterFix.status === "passed") {
 buildFixPassed = true;
 eventBus.emit("story:build-verification-passed", { storyKey });
-logger$
-} else logger$
+logger$22.info({ storyKey }, "Build passed after build-fix dispatch");
+} else logger$22.warn({ storyKey }, "Build still fails after build-fix dispatch — escalating");
 } catch (fixErr) {
 const fixMsg = fixErr instanceof Error ? fixErr.message : String(fixErr);
-logger$
+logger$22.warn({
 storyKey,
 error: fixMsg
 }, "Build-fix dispatch failed — escalating");
@@ -11910,7 +11921,7 @@ function createImplementationOrchestrator(deps) {
 exitCode: buildVerifyResult.exitCode ?? 1,
 output: truncatedOutput
 });
-logger$
+logger$22.warn({
 storyKey,
 reason,
 exitCode: buildVerifyResult.exitCode
@@ -11942,7 +11953,7 @@ function createImplementationOrchestrator(deps) {
 storyKey
 });
 if (icResult.potentiallyAffectedTests.length > 0) {
-logger$
+logger$22.warn({
 storyKey,
 modifiedInterfaces: icResult.modifiedInterfaces,
 potentiallyAffectedTests: icResult.potentiallyAffectedTests
@@ -11988,7 +11999,7 @@ function createImplementationOrchestrator(deps) {
 "NEEDS_MAJOR_REWORK": 2
 };
 for (const group of batchFileGroups) {
-logger$
+logger$22.info({
 storyKey,
 batchIndex: group.batchIndex,
 fileCount: group.files.length
@@ -12030,7 +12041,7 @@ function createImplementationOrchestrator(deps) {
 rawOutput: lastRawOutput,
 tokenUsage: aggregateTokens
 };
-logger$
+logger$22.info({
 storyKey,
 batchCount: batchFileGroups.length,
 verdict: worstVerdict,
@@ -12071,7 +12082,7 @@ function createImplementationOrchestrator(deps) {
 })
 });
 } catch (tokenErr) {
-logger$
+logger$22.warn({
 storyKey,
 err: tokenErr
 }, "Failed to record code-review token usage");
@@ -12079,7 +12090,7 @@ function createImplementationOrchestrator(deps) {
 const isPhantomReview = reviewResult.dispatchFailed === true || reviewResult.verdict !== "SHIP_IT" && reviewResult.verdict !== "LGTM_WITH_NOTES" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
 if (isPhantomReview && !timeoutRetried) {
 timeoutRetried = true;
-logger$
+logger$22.warn({
 storyKey,
 reviewCycles,
 error: reviewResult.error
@@ -12087,7 +12098,7 @@ function createImplementationOrchestrator(deps) {
 continue;
 }
 if (isPhantomReview && timeoutRetried) {
-logger$
+logger$22.warn({
 storyKey,
 reviewCycles,
 error: reviewResult.error
@@ -12111,7 +12122,7 @@ function createImplementationOrchestrator(deps) {
 verdict = reviewResult.verdict;
 issueList = reviewResult.issue_list ?? [];
 if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
-logger$
+logger$22.info({
 storyKey,
 originalVerdict: verdict,
 issuesBefore: previousIssueList.length,
@@ -12147,7 +12158,7 @@ function createImplementationOrchestrator(deps) {
 if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
 parts.push(`${fileCount} files`);
 parts.push(`${totalTokensK} tokens`);
-logger$
+logger$22.info({
 storyKey,
 verdict,
 agentVerdict: reviewResult.agentVerdict
@@ -12196,9 +12207,9 @@ function createImplementationOrchestrator(deps) {
 }),
 rationale: `Advisory notes from LGTM_WITH_NOTES review of ${storyKey}`
 });
-logger$
+logger$22.info({ storyKey }, "Advisory notes persisted to decision store");
 } catch (advisoryErr) {
-logger$
+logger$22.warn({
 storyKey,
 error: advisoryErr instanceof Error ? advisoryErr.message : String(advisoryErr)
 }, "Failed to persist advisory notes (best-effort)");
@@ -12206,27 +12217,27 @@ function createImplementationOrchestrator(deps) {
 if (telemetryPersistence !== void 0) try {
 const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
 if (turns.length > 0) {
-const scorer = new EfficiencyScorer(logger$
+const scorer = new EfficiencyScorer(logger$22);
 const effScore = scorer.score(storyKey, turns);
 await telemetryPersistence.storeEfficiencyScore(effScore);
-logger$
+logger$22.info({
 storyKey,
 compositeScore: effScore.compositeScore,
 modelCount: effScore.perModelBreakdown.length
 }, "Efficiency score computed and persisted");
-} else logger$
+} else logger$22.debug({ storyKey }, "No turn analysis data available — skipping efficiency scoring");
 } catch (effErr) {
-logger$
+logger$22.warn({
 storyKey,
 error: effErr instanceof Error ? effErr.message : String(effErr)
 }, "Efficiency scoring failed — story verdict unchanged");
 }
 if (telemetryPersistence !== void 0) try {
 const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
-if (turns.length === 0) logger$
+if (turns.length === 0) logger$22.debug({ storyKey }, "No turn analysis data for telemetry categorization — skipping");
 else {
-const categorizer = new Categorizer(logger$
-const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$
+const categorizer = new Categorizer(logger$22);
+const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$22);
 const categoryStats = categorizer.computeCategoryStatsFromTurns(turns);
 const consumerStats = consumerAnalyzer.analyzeFromTurns(turns);
 await telemetryPersistence.storeCategoryStats(storyKey, categoryStats);
@@ -12234,7 +12245,7 @@ function createImplementationOrchestrator(deps) {
 const growingCount = categoryStats.filter((c) => c.trend === "growing").length;
 const topCategory = categoryStats[0]?.category ?? "none";
 const topConsumer = consumerStats[0]?.consumerKey ?? "none";
-logger$
+logger$22.info({
 storyKey,
 topCategory,
 topConsumer,
@@ -12242,7 +12253,7 @@ function createImplementationOrchestrator(deps) {
 }, "Semantic categorization and consumer analysis complete");
 }
 } catch (catErr) {
-logger$
+logger$22.warn({
 storyKey,
 error: catErr instanceof Error ? catErr.message : String(catErr)
 }, "Semantic categorization failed — story verdict unchanged");
@@ -12263,7 +12274,7 @@ function createImplementationOrchestrator(deps) {
 filesModified: devFilesModified,
 workingDirectory: projectRoot
 });
-logger$
+logger$22.debug({
 storyKey,
 expansion_priority: expansionResult.expansion_priority,
 coverage_gaps: expansionResult.coverage_gaps.length
@@ -12276,7 +12287,7 @@ function createImplementationOrchestrator(deps) {
 value: JSON.stringify(expansionResult)
 });
 } catch (expansionErr) {
-logger$
+logger$22.warn({
 storyKey,
 error: expansionErr instanceof Error ? expansionErr.message : String(expansionErr)
 }, "Test expansion failed — story verdict unchanged");
@@ -12303,7 +12314,7 @@ function createImplementationOrchestrator(deps) {
 await persistState();
 return;
 }
-logger$
+logger$22.info({
 storyKey,
 reviewCycles: finalReviewCycles,
 issueCount: issueList.length
@@ -12363,7 +12374,7 @@ function createImplementationOrchestrator(deps) {
 fixPrompt = assembled.prompt;
 } catch {
 fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
-logger$
+logger$22.warn({ storyKey }, "Failed to assemble auto-approve fix prompt, using fallback");
 }
 const handle = dispatcher.dispatch({
 prompt: fixPrompt,
@@ -12384,9 +12395,9 @@ function createImplementationOrchestrator(deps) {
 output: fixResult.tokenEstimate.output
 } : void 0 }
 });
-if (fixResult.status === "timeout") logger$
+if (fixResult.status === "timeout") logger$22.warn({ storyKey }, "Auto-approve fix timed out — approving anyway (issues were minor)");
 } catch (err) {
-logger$
+logger$22.warn({
 storyKey,
 err
 }, "Auto-approve fix dispatch failed — approving anyway (issues were minor)");
@@ -12518,7 +12529,7 @@ function createImplementationOrchestrator(deps) {
 fixPrompt = assembled.prompt;
 } catch {
 fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
-logger$
+logger$22.warn({
 storyKey,
 taskType
 }, "Failed to assemble fix prompt, using fallback");
@@ -12554,7 +12565,7 @@ function createImplementationOrchestrator(deps) {
 } : void 0 }
 });
 if (fixResult.status === "timeout") {
-logger$
+logger$22.warn({
 storyKey,
 taskType
 }, "Fix dispatch timed out — escalating story");
@@ -12576,7 +12587,7 @@ function createImplementationOrchestrator(deps) {
 }
 if (fixResult.status === "failed") {
 if (isMajorRework) {
-logger$
+logger$22.warn({
 storyKey,
 exitCode: fixResult.exitCode
 }, "Major rework dispatch failed — escalating story");
@@ -12596,14 +12607,14 @@ function createImplementationOrchestrator(deps) {
 await persistState();
 return;
 }
-logger$
+logger$22.warn({
 storyKey,
 taskType,
 exitCode: fixResult.exitCode
 }, "Fix dispatch failed");
 }
 } catch (err) {
-logger$
+logger$22.warn({
 storyKey,
 taskType,
 err
@@ -12637,13 +12648,13 @@ function createImplementationOrchestrator(deps) {
 const directives = telemetryAdvisor.formatOptimizationDirectives(recs);
 if (directives.length > 0) {
 optimizationDirectives = directives;
-logger$
+logger$22.debug({
 storyKey,
 directiveCount: recs.filter((r) => r.severity !== "info").length
 }, "Optimization directives ready for dispatch");
 }
 } catch (err) {
-logger$
+logger$22.debug({
 err,
 storyKey
 }, "Failed to fetch optimization directives — proceeding without");
@@ -12681,11 +12692,11 @@ function createImplementationOrchestrator(deps) {
 }
 async function run(storyKeys) {
 if (_state === "RUNNING" || _state === "PAUSED") {
-logger$
+logger$22.warn({ state: _state }, "run() called while orchestrator is already running or paused — ignoring");
 return getStatus();
 }
 if (_state === "COMPLETE") {
-logger$
+logger$22.warn({ state: _state }, "run() called on a COMPLETE orchestrator — ignoring");
 return getStatus();
 }
 _state = "RUNNING";
@@ -12709,7 +12720,7 @@ function createImplementationOrchestrator(deps) {
 const seedStart = Date.now();
 const seedResult = await seedMethodologyContext(db, projectRoot);
 _startupTimings.seedMethodologyMs = Date.now() - seedStart;
-if (seedResult.decisionsCreated > 0) logger$
+if (seedResult.decisionsCreated > 0) logger$22.info({
 decisionsCreated: seedResult.decisionsCreated,
 skippedCategories: seedResult.skippedCategories,
 durationMs: _startupTimings.seedMethodologyMs
@@ -12719,12 +12730,12 @@
|
|
|
12719
12730
|
const ingestStart = Date.now();
|
|
12720
12731
|
try {
|
|
12721
12732
|
const ingestResult = await autoIngestEpicsDependencies(db, projectRoot);
|
|
12722
|
-
if (ingestResult.storiesIngested > 0 || ingestResult.dependenciesIngested > 0) logger$
|
|
12733
|
+
if (ingestResult.storiesIngested > 0 || ingestResult.dependenciesIngested > 0) logger$22.info({
|
|
12723
12734
|
...ingestResult,
|
|
12724
12735
|
durationMs: Date.now() - ingestStart
|
|
12725
12736
|
}, "Auto-ingested stories and dependencies from epics document");
|
|
12726
12737
|
} catch (err) {
|
|
12727
|
-
logger$
|
|
12738
|
+
logger$22.debug({ err }, "Auto-ingest from epics document skipped — work graph may be unavailable");
|
|
12728
12739
|
}
|
|
12729
12740
|
}
|
|
12730
12741
|
try {
|
|
@@ -12734,7 +12745,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12734
12745
|
_startupTimings.stateStoreInitMs = Date.now() - stateStoreInitStart;
|
|
12735
12746
|
for (const key of storyKeys) {
|
|
12736
12747
|
const pendingState = _stories.get(key);
|
|
12737
|
-
if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$
|
|
12748
|
+
if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$22.warn({
|
|
12738
12749
|
err,
|
|
12739
12750
|
storyKey: key
|
|
12740
12751
|
}, "StateStore write failed during PENDING init"));
|
|
@@ -12745,12 +12756,12 @@ function createImplementationOrchestrator(deps) {
|
|
|
12745
12756
|
_startupTimings.queryStoriesMs = Date.now() - queryStoriesStart;
|
|
12746
12757
|
for (const record of existingRecords) _stateStoreCache.set(record.storyKey, record);
|
|
12747
12758
|
} catch (err) {
|
|
12748
|
-
logger$
|
|
12759
|
+
logger$22.warn({ err }, "StateStore.queryStories() failed during init — status merge will be empty (best-effort)");
|
|
12749
12760
|
}
|
|
12750
12761
|
}
|
|
12751
12762
|
if (ingestionServer !== void 0) {
|
|
12752
12763
|
if (telemetryPersistence !== void 0) try {
|
|
12753
|
-
const pipelineLogger = logger$
|
|
12764
|
+
const pipelineLogger = logger$22;
|
|
12754
12765
|
const telemetryPipeline = new TelemetryPipeline({
|
|
12755
12766
|
normalizer: new TelemetryNormalizer(pipelineLogger),
|
|
12756
12767
|
turnAnalyzer: new TurnAnalyzer(pipelineLogger),
|
|
@@ -12762,14 +12773,14 @@ function createImplementationOrchestrator(deps) {
|
|
|
12762
12773
|
persistence: telemetryPersistence
|
|
12763
12774
|
});
|
|
12764
12775
|
ingestionServer.setPipeline(telemetryPipeline);
|
|
12765
|
-
logger$
|
|
12776
|
+
logger$22.info("TelemetryPipeline wired to IngestionServer");
|
|
12766
12777
|
} catch (pipelineErr) {
|
|
12767
|
-
logger$
|
|
12778
|
+
logger$22.warn({ err: pipelineErr }, "Failed to create TelemetryPipeline — continuing without analysis pipeline");
|
|
12768
12779
|
}
|
|
12769
|
-
await ingestionServer.start().catch((err) => logger$
|
|
12780
|
+
await ingestionServer.start().catch((err) => logger$22.warn({ err }, "IngestionServer.start() failed — continuing without telemetry (best-effort)"));
|
|
12770
12781
|
try {
|
|
12771
12782
|
_otlpEndpoint = ingestionServer.getOtlpEnvVars().OTEL_EXPORTER_OTLP_ENDPOINT;
|
|
12772
|
-
logger$
|
|
12783
|
+
logger$22.info({ otlpEndpoint: _otlpEndpoint }, "OTLP telemetry ingestion active");
|
|
12773
12784
|
} catch {}
|
|
12774
12785
|
}
|
|
12775
12786
|
let contractDeclarations = [];
|
|
@@ -12809,12 +12820,12 @@ function createImplementationOrchestrator(deps) {
|
|
|
12809
12820
|
const conflictDetectStart = Date.now();
|
|
12810
12821
|
const { batches, edges: contractEdges } = detectConflictGroupsWithContracts(storyKeys, { moduleMap: pack.manifest.conflictGroups }, contractDeclarations);
|
|
12811
12822
|
_startupTimings.conflictDetectMs = Date.now() - conflictDetectStart;
|
|
12812
|
-
if (contractEdges.length > 0) logger$
|
|
12823
|
+
if (contractEdges.length > 0) logger$22.info({
|
|
12813
12824
|
contractEdges,
|
|
12814
12825
|
edgeCount: contractEdges.length
|
|
12815
12826
|
}, "Contract dependency edges detected — applying contract-aware dispatch ordering");
|
|
12816
|
-
wgRepo.addContractDependencies(contractEdges).catch((err) => logger$
|
|
12817
|
-
logger$
|
|
12827
|
+
wgRepo.addContractDependencies(contractEdges).catch((err) => logger$22.warn({ err }, "contract dep persistence failed (best-effort)"));
|
|
12828
|
+
logger$22.info({
|
|
12818
12829
|
storyCount: storyKeys.length,
|
|
12819
12830
|
groupCount: batches.reduce((sum, b) => sum + b.length, 0),
|
|
12820
12831
|
batchCount: batches.length,
|
|
@@ -12824,7 +12835,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12824
12835
|
groups: batch.map((g) => g.join(","))
|
|
12825
12836
|
}))
|
|
12826
12837
|
}, "Orchestrator starting");
|
|
12827
|
-
logger$
|
|
12838
|
+
logger$22.info({
|
|
12828
12839
|
storyCount: storyKeys.length,
|
|
12829
12840
|
conflictGroups: batches.length,
|
|
12830
12841
|
maxConcurrency: config.maxConcurrency
|
|
@@ -12845,7 +12856,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12845
12856
|
exitCode,
|
|
12846
12857
|
output: truncatedOutput
|
|
12847
12858
|
});
|
|
12848
|
-
logger$
|
|
12859
|
+
logger$22.error({
|
|
12849
12860
|
exitCode,
|
|
12850
12861
|
reason: preFlightResult.reason
|
|
12851
12862
|
}, "Pre-flight build check failed — aborting pipeline before any story dispatch");
|
|
@@ -12854,19 +12865,19 @@ function createImplementationOrchestrator(deps) {
|
|
|
12854
12865
|
await persistState();
|
|
12855
12866
|
return getStatus();
|
|
12856
12867
|
}
|
|
12857
|
-
if (preFlightResult.status !== "skipped") logger$
|
|
12868
|
+
if (preFlightResult.status !== "skipped") logger$22.info("Pre-flight build check passed");
|
|
12858
12869
|
}
|
|
12859
|
-
logger$
|
|
12870
|
+
logger$22.info(_startupTimings, "Orchestrator startup timings (ms)");
|
|
12860
12871
|
const totalGroups = batches.reduce((sum, b) => sum + b.length, 0);
|
|
12861
12872
|
const actualConcurrency = Math.min(config.maxConcurrency, totalGroups);
|
|
12862
12873
|
if (actualConcurrency > 1 && projectRoot !== void 0) try {
|
|
12863
12874
|
_packageSnapshot = capturePackageSnapshot({ projectRoot });
|
|
12864
|
-
logger$
|
|
12875
|
+
logger$22.info({
|
|
12865
12876
|
fileCount: _packageSnapshot.files.size,
|
|
12866
12877
|
installCommand: _packageSnapshot.installCommand
|
|
12867
12878
|
}, "Package snapshot captured for concurrent story protection");
|
|
12868
12879
|
} catch (snapErr) {
|
|
12869
|
-
logger$
|
|
12880
|
+
logger$22.warn({ err: snapErr }, "Failed to capture package snapshot — continuing without protection");
|
|
12870
12881
|
}
|
|
12871
12882
|
try {
|
|
12872
12883
|
for (const batchGroups of batches) await runWithConcurrency(batchGroups, config.maxConcurrency);
|
|
@@ -12875,7 +12886,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12875
12886
|
_state = "FAILED";
|
|
12876
12887
|
_completedAt = new Date().toISOString();
|
|
12877
12888
|
await persistState();
|
|
12878
|
-
logger$
|
|
12889
|
+
logger$22.error({ err }, "Orchestrator failed with unhandled error");
|
|
12879
12890
|
return getStatus();
|
|
12880
12891
|
}
|
|
12881
12892
|
stopHeartbeat();
|
|
@@ -12885,7 +12896,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12885
12896
|
const totalDeclarations = contractDeclarations.length;
|
|
12886
12897
|
const currentSprintDeclarations = contractDeclarations.filter((d) => storyKeys.includes(d.storyKey));
|
|
12887
12898
|
const stalePruned = totalDeclarations - currentSprintDeclarations.length;
|
|
12888
|
-
if (stalePruned > 0) logger$
|
|
12899
|
+
if (stalePruned > 0) logger$22.info({
|
|
12889
12900
|
stalePruned,
|
|
12890
12901
|
remaining: currentSprintDeclarations.length
|
|
12891
12902
|
}, "Pruned stale contract declarations from previous epics");
|
|
@@ -12899,11 +12910,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
12899
12910
|
contractName: mismatch.contractName,
|
|
12900
12911
|
mismatchDescription: mismatch.mismatchDescription
|
|
12901
12912
|
});
|
|
12902
|
-
logger$
|
|
12913
|
+
logger$22.warn({
|
|
12903
12914
|
mismatchCount: mismatches.length,
|
|
12904
12915
|
mismatches
|
|
12905
12916
|
}, "Post-sprint contract verification found mismatches — manual review required");
|
|
12906
|
-
} else if (currentSprintDeclarations.length > 0) logger$
|
|
12917
|
+
} else if (currentSprintDeclarations.length > 0) logger$22.info("Post-sprint contract verification passed — all declared contracts satisfied");
|
|
12907
12918
|
eventBus.emit("pipeline:contract-verification-summary", {
|
|
12908
12919
|
verified: currentSprintDeclarations.length,
|
|
12909
12920
|
stalePruned,
|
|
@@ -12938,12 +12949,12 @@ function createImplementationOrchestrator(deps) {
|
|
|
12938
12949
|
});
|
|
12939
12950
|
await stateStore.setContractVerification(sk, records);
|
|
12940
12951
|
}
|
|
12941
|
-
logger$
|
|
12952
|
+
logger$22.info({ storyCount: contractsByStory.size }, "Contract verification results persisted to StateStore");
|
|
12942
12953
|
} catch (persistErr) {
|
|
12943
|
-
logger$
|
|
12954
|
+
logger$22.warn({ err: persistErr }, "Failed to persist contract verification results to StateStore");
|
|
12944
12955
|
}
|
|
12945
12956
|
} catch (err) {
|
|
12946
|
-
logger$
|
|
12957
|
+
logger$22.error({ err }, "Post-sprint contract verification threw an error — skipping");
|
|
12947
12958
|
}
|
|
12948
12959
|
if (projectRoot !== void 0) try {
|
|
12949
12960
|
const indicators = checkProfileStaleness(projectRoot);
|
|
@@ -12953,10 +12964,10 @@ function createImplementationOrchestrator(deps) {
|
|
|
12953
12964
|
message,
|
|
12954
12965
|
indicators
|
|
12955
12966
|
});
|
|
12956
|
-
logger$
|
|
12967
|
+
logger$22.warn({ indicators }, message);
|
|
12957
12968
|
}
|
|
12958
12969
|
} catch (err) {
|
|
12959
|
-
logger$
|
|
12970
|
+
logger$22.debug({ err }, "Profile staleness check failed (best-effort)");
|
|
12960
12971
|
}
|
|
12961
12972
|
let completed = 0;
|
|
12962
12973
|
let escalated = 0;
|
|
@@ -12973,8 +12984,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
12973
12984
|
await persistState();
|
|
12974
12985
|
return getStatus();
|
|
12975
12986
|
} finally {
|
|
12976
|
-
if (stateStore !== void 0) await stateStore.close().catch((err) => logger$
|
|
12977
|
-
if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$
|
|
12987
|
+
if (stateStore !== void 0) await stateStore.close().catch((err) => logger$22.warn({ err }, "StateStore.close() failed (best-effort)"));
|
|
12988
|
+
if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$22.warn({ err }, "IngestionServer.stop() failed (best-effort)"));
|
|
12978
12989
|
}
|
|
12979
12990
|
}
|
|
12980
12991
|
function pause() {
|
|
@@ -12983,7 +12994,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12983
12994
|
_pauseGate = createPauseGate();
|
|
12984
12995
|
_state = "PAUSED";
|
|
12985
12996
|
eventBus.emit("orchestrator:paused", {});
|
|
12986
|
-
logger$
|
|
12997
|
+
logger$22.info("Orchestrator paused");
|
|
12987
12998
|
}
|
|
12988
12999
|
function resume() {
|
|
12989
13000
|
if (_state !== "PAUSED") return;
|
|
@@ -12994,7 +13005,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12994
13005
|
}
|
|
12995
13006
|
_state = "RUNNING";
|
|
12996
13007
|
eventBus.emit("orchestrator:resumed", {});
|
|
12997
|
-
logger$
|
|
13008
|
+
logger$22.info("Orchestrator resumed");
|
|
12998
13009
|
}
|
|
12999
13010
|
return {
|
|
13000
13011
|
run,
|
|
@@ -13673,7 +13684,7 @@ const CritiqueOutputSchema = z.object({
 
 //#endregion
 //#region src/modules/phase-orchestrator/critique-loop.ts
-const logger$
+const logger$6 = createLogger("critique-loop");
 /**
  * Maps a phase name to the critique prompt template name.
  * Falls back to `critique-${phase}` for unknown phases.
@@ -13727,7 +13738,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
 critiquePrompt = critiqueTemplate.replace("{{artifact_content}}", currentArtifact).replace("{{project_context}}", projectContext);
 } catch (err) {
 const message = err instanceof Error ? err.message : String(err);
-logger$
+logger$6.warn({
 phaseId,
 promptName: critiquePromptName,
 err: message
@@ -13755,7 +13766,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
 critiqueTokens.output += result.tokenEstimate.output;
 if (result.status !== "completed" || result.parsed === null) {
 const errMsg = result.parseError ?? `Critique dispatch ended with status '${result.status}'`;
-logger$
+logger$6.warn({
 phaseId,
 iteration: i + 1,
 err: errMsg
@@ -13774,7 +13785,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
 lastCritiqueOutput = critiqueOutput;
 } catch (err) {
 const message = err instanceof Error ? err.message : String(err);
-logger$
+logger$6.warn({
 phaseId,
 iteration: i + 1,
 err: message
@@ -13814,14 +13825,14 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
 });
 } catch (err) {
 const message = err instanceof Error ? err.message : String(err);
-logger$
+logger$6.warn({
 phaseId,
 iteration: i + 1,
 err: message
 }, "Critique loop: failed to store critique decision — continuing");
 }
 if (critiqueOutput.verdict === "pass") {
-logger$
+logger$6.info({
 phaseId,
 iteration: i + 1
 }, "Critique loop: artifact passed critique — loop complete");
@@ -13834,7 +13845,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
 totalMs: Date.now() - startMs
 };
 }
-logger$
+logger$6.info({
 phaseId,
 iteration: i + 1,
 issueCount: critiqueOutput.issue_count
@@ -13847,7 +13858,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
 refinePrompt = refineTemplate.replace("{{original_artifact}}", currentArtifact).replace("{{critique_issues}}", issuesText).replace("{{phase_context}}", phaseContext);
 } catch (err) {
 const message = err instanceof Error ? err.message : String(err);
-logger$
+logger$6.warn({
 phaseId,
 iteration: i + 1,
 err: message
@@ -13868,7 +13879,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
 const originalLength = currentArtifact.length;
 const refinedLength = refineResult.output.length;
 const delta = refinedLength - originalLength;
-logger$
+logger$6.info({
 phaseId,
 iteration: i + 1,
 originalLength,
@@ -13877,7 +13888,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
 }, "Critique loop: refinement complete");
 currentArtifact = refineResult.output;
 } else {
-logger$
+logger$6.warn({
 phaseId,
 iteration: i + 1,
 status: refineResult.status
@@ -13886,7 +13897,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
 }
 } catch (err) {
 const message = err instanceof Error ? err.message : String(err);
-logger$
+logger$6.warn({
 phaseId,
 iteration: i + 1,
 err: message
@@ -13897,12 +13908,12 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
 }
 const remainingIssues = lastCritiqueOutput?.issues ?? [];
 if (remainingIssues.length > 0) {
-logger$
+logger$6.warn({
 phaseId,
 maxIterations,
 issueCount: remainingIssues.length
 }, "Critique loop: max iterations reached with unresolved issues");
-for (const issue of remainingIssues) logger$
+for (const issue of remainingIssues) logger$6.warn({
 phaseId,
 severity: issue.severity,
 category: issue.category,
@@ -13921,7 +13932,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
 
 //#endregion
 //#region src/modules/phase-orchestrator/elicitation-selector.ts
-const logger$
+const logger$5 = createLogger("elicitation-selector");
 /**
  * Affinity scores (0.0–1.0) for each category per content type.
  *
@@ -14043,10 +14054,10 @@ function loadElicitationMethods() {
 try {
 const content = readFileSync$1(csvPath, "utf-8");
 const methods = parseMethodsCsv(content);
-logger$
+logger$5.debug({ count: methods.length }, "Loaded elicitation methods");
 return methods;
 } catch (err) {
-logger$
+logger$5.warn({
 csvPath,
 err
 }, "Failed to load elicitation methods CSV");
@@ -14366,7 +14377,7 @@ const ElicitationOutputSchema = z.object({
 
 //#endregion
 //#region src/modules/phase-orchestrator/step-runner.ts
-const logger$
+const logger$4 = createLogger("step-runner");
 /**
  * Format an array of decision records into a markdown section for injection.
  *
@@ -14473,7 +14484,7 @@ async function runSteps(steps, deps, runId, phase, params) {
 if (estimatedTokens > budgetTokens) {
 const decisionRefs = step.context.filter((ref) => ref.source.startsWith("decision:"));
 if (decisionRefs.length > 0) {
-logger$
+logger$4.warn({
 step: step.name,
 estimatedTokens,
 budgetTokens
@@ -14500,7 +14511,7 @@ async function runSteps(steps, deps, runId, phase, params) {
 }
 prompt = summarizedPrompt;
 estimatedTokens = Math.ceil(prompt.length / 4);
-if (estimatedTokens <= budgetTokens) logger$
+if (estimatedTokens <= budgetTokens) logger$4.info({
 step: step.name,
 estimatedTokens,
 budgetTokens
@@ -14681,7 +14692,7 @@ async function runSteps(steps, deps, runId, phase, params) {
 const critiqueResult = await runCritiqueLoop(artifactContent, phase, runId, phase, deps);
 totalInput += critiqueResult.critiqueTokens.input + critiqueResult.refinementTokens.input;
 totalOutput += critiqueResult.critiqueTokens.output + critiqueResult.refinementTokens.output;
-logger$
+logger$4.info({
 step: step.name,
 verdict: critiqueResult.verdict,
 iterations: critiqueResult.iterations,
@@ -14689,7 +14700,7 @@ async function runSteps(steps, deps, runId, phase, params) {
 }, "Step critique loop complete");
 } catch (critiqueErr) {
 const critiqueMsg = critiqueErr instanceof Error ? critiqueErr.message : String(critiqueErr);
-logger$
+logger$4.warn({
 step: step.name,
 err: critiqueMsg
 }, "Step critique loop threw an error — continuing without critique");
@@ -14699,7 +14710,7 @@ async function runSteps(steps, deps, runId, phase, params) {
 const contentType = deriveContentType(phase, step.name);
 const selectedMethods = selectMethods({ content_type: contentType }, usedElicitationMethods);
 if (selectedMethods.length > 0) {
-logger$
+logger$4.info({
 step: step.name,
 methods: selectedMethods.map((m) => m.name),
 contentType
@@ -14738,13 +14749,13 @@ async function runSteps(steps, deps, runId, phase, params) {
 key: `${phase}-round-${roundIndex}-insights`,
 value: elicitParsed.insights
 });
-logger$
+logger$4.info({
 step: step.name,
 method: method.name,
 roundIndex
 }, "Elicitation insights stored in decision store");
 }
-} else logger$
+} else logger$4.warn({
 step: step.name,
 method: method.name,
 status: elicitResult.status
@@ -14760,7 +14771,7 @@ async function runSteps(steps, deps, runId, phase, params) {
 }
 } catch (elicitErr) {
 const elicitMsg = elicitErr instanceof Error ? elicitErr.message : String(elicitErr);
-logger$
+logger$4.warn({
 step: step.name,
 err: elicitMsg
 }, "Step elicitation threw an error — continuing without elicitation");
@@ -15128,7 +15139,7 @@ async function runAnalysisPhase(deps, params) {
 
 //#endregion
 //#region src/modules/phase-orchestrator/phases/planning.ts
-const logger$
+const logger$3 = createLogger("planning-phase");
 /** Maximum total prompt length in tokens (3,500 tokens × 4 chars/token = 14,000 chars) */
 const MAX_PROMPT_TOKENS = 3500;
 const MAX_PROMPT_CHARS = MAX_PROMPT_TOKENS * 4;
@@ -15182,7 +15193,7 @@ function formatProductBriefFromDecisions(decisions) {
 for (const field of BRIEF_FIELDS) {
 const rawValue = briefMap[field];
 if (rawValue === void 0) continue;
-const fieldLabel = field.replace(/_/g, " ").replace(/\b\w/g, (c) => c.toUpperCase());
+const fieldLabel$1 = field.replace(/_/g, " ").replace(/\b\w/g, (c) => c.toUpperCase());
 let displayValue;
 try {
 const parsed = JSON.parse(rawValue);
@@ -15191,7 +15202,7 @@ function formatProductBriefFromDecisions(decisions) {
 } catch {
 displayValue = rawValue;
 }
-parts.push(`### ${fieldLabel}\n${displayValue}`);
+parts.push(`### ${fieldLabel$1}\n${displayValue}`);
 }
 return parts.join("\n\n");
 }
@@ -15355,7 +15366,7 @@ async function runPlanningMultiStep(deps, params) {
 const techConstraintDecisions = allAnalysisDecisions.filter((d) => d.category === "technology-constraints");
 const violation = detectTechStackViolation(techStack, techConstraintDecisions);
 if (violation) {
-logger$
+logger$3.warn({ violation }, "Tech stack constraint violation detected — retrying step 3 with correction");
 const correctionPrefix = `CRITICAL CORRECTION: Your previous output was rejected because it violates the stated technology constraints.\n\nViolation: ${violation}\n\nYou MUST NOT use TypeScript, JavaScript, or Node.js for ANY backend service. Choose from Go, Kotlin/JVM, or Rust as stated in the technology constraints.\n\nRe-generate your output with a compliant tech stack. Everything else (NFRs, domain model, out-of-scope) can remain the same.\n\n---\n\n`;
 const step3Template = await deps.pack.getPrompt("planning-step-3-nfrs");
 const stepOutputs = new Map();
@@ -15382,10 +15393,10 @@ async function runPlanningMultiStep(deps, params) {
 const retryTechStack = retryParsed.tech_stack;
 const retryViolation = retryTechStack ? detectTechStackViolation(retryTechStack, techConstraintDecisions) : null;
 if (!retryViolation) {
-logger$
+logger$3.info("Retry produced compliant tech stack — using corrected output");
 nfrsOutput = retryParsed;
-} else logger$
-} else logger$
+} else logger$3.warn({ retryViolation }, "Retry still violates constraints — using original output");
+} else logger$3.warn("Retry dispatch failed — using original output");
 }
 }
 const frs = frsOutput.functional_requirements;
@@ -15671,7 +15682,7 @@ const ReadinessOutputSchema = z.object({
 
 //#endregion
 //#region src/modules/phase-orchestrator/phases/solutioning.ts
-const logger$
+const logger$2 = createLogger("solutioning");
 /** Base token budget for architecture generation (covers template + requirements) */
 const BASE_ARCH_PROMPT_TOKENS = 3e3;
 /** Base token budget for story generation (covers template + requirements + architecture) */
@@ -16080,7 +16091,7 @@ async function runReadinessCheck(deps, runId) {
 input: tokenEstimate.input,
 output: tokenEstimate.output
 };
-logger$
+logger$2.info({
 runId,
 durationMs: dispatchResult.durationMs,
 tokens: tokenEstimate
@@ -16341,7 +16352,7 @@ async function runSolutioningPhase(deps, params) {
 let archResult;
 if (existingArchArtifact) {
 const existingDecisions = (await getDecisionsByPhaseForRun(deps.db, params.runId, "solutioning")).filter((d) => d.category === "architecture");
-logger$
+logger$2.info({
 runId: params.runId,
 artifactId: existingArchArtifact.id,
 decisionCount: existingDecisions.length
@@ -16372,7 +16383,7 @@ async function runSolutioningPhase(deps, params) {
 output: totalOutput
 }
 };
-logger$
+logger$2.info({
 runId: params.runId,
 decisionCount: archResult.decisions.length,
 mode: hasSteps ? "multi-step" : "single-dispatch"
@@ -16394,7 +16405,7 @@ async function runSolutioningPhase(deps, params) {
 totalInput += readinessResult.tokenUsage.input;
 totalOutput += readinessResult.tokenUsage.output;
 if (readinessResult.verdict === "error") {
-logger$
+logger$2.error({
 runId: params.runId,
 error: readinessResult.error
 }, "Readiness check agent failed");
@@ -16410,7 +16421,7 @@ async function runSolutioningPhase(deps, params) {
 }
 };
 }
-logger$
+logger$2.info({
 runId: params.runId,
 verdict: readinessResult.verdict,
 coverageScore: readinessResult.coverageScore,
@@ -16426,7 +16437,7 @@ async function runSolutioningPhase(deps, params) {
 key: `finding-${i + 1}`,
 value: JSON.stringify(finding)
 });
-logger$
+logger$2.error({
 runId: params.runId,
 verdict: "NOT_READY",
 coverageScore: readinessResult.coverageScore,
@@ -16492,7 +16503,7 @@ async function runSolutioningPhase(deps, params) {
 "",
 "Please generate additional or revised stories to specifically address each blocker above."
 ].join("\n");
-logger$
+logger$2.info({
 runId: params.runId,
 blockerCount: blockers.length
 }, "Readiness NEEDS_WORK with blockers — retrying story generation with gap analysis");
@@ -16531,7 +16542,7 @@ async function runSolutioningPhase(deps, params) {
 };
 if (retryReadiness.verdict === "NOT_READY" || retryReadiness.verdict === "NEEDS_WORK") {
 const retryBlockers = retryReadiness.findings.filter((f$1) => f$1.severity === "blocker");
-logger$
+logger$2.error({
 runId: params.runId,
 verdict: retryReadiness.verdict,
 retryBlockers: retryBlockers.length
@@ -16555,7 +16566,7 @@ async function runSolutioningPhase(deps, params) {
 }
 const retryStories = retryResult.epics.reduce((sum, epic) => sum + epic.stories.length, 0);
 const minorFindings$1 = retryReadiness.findings.filter((f$1) => f$1.severity === "minor");
-if (minorFindings$1.length > 0) logger$
+if (minorFindings$1.length > 0) logger$2.warn({
 runId: params.runId,
 minorFindings: minorFindings$1
 }, "Readiness READY with minor findings after retry");
@@ -16584,7 +16595,7 @@ async function runSolutioningPhase(deps, params) {
 };
 }
 const majorFindings = readinessResult.findings.filter((f$1) => f$1.severity === "major");
-logger$
+logger$2.warn({
 runId: params.runId,
 majorCount: majorFindings.length,
 findings: readinessResult.findings
@@ -16600,7 +16611,7 @@ async function runSolutioningPhase(deps, params) {
 const minorFindings = readinessResult.findings.filter((f$1) => f$1.severity === "minor");
 if (minorFindings.length > 0) {
 const verdictLabel = readinessResult.verdict === "READY" ? "READY" : "NEEDS_WORK (no blockers)";
-logger$
+logger$2.warn({
 runId: params.runId,
 verdict: readinessResult.verdict,
 minorFindings
@@ -17011,6 +17022,709 @@ async function runResearchPhase(deps, params) {
 }
 }
 
+//#endregion
+//#region src/modules/export/renderers.ts
+/** Fields from analysis/product-brief decisions to render, in display order */
+const PRODUCT_BRIEF_FIELDS = [
+	"problem_statement",
+	"target_users",
+	"core_features",
+	"success_metrics",
+	"constraints",
+	"technology_constraints"
+];
+/**
+ * Known acronyms that should appear fully uppercased when they are a standalone
+ * word in a label (e.g. 'fr_coverage' → 'FR Coverage', 'api_style' → 'API Style').
+ */
+const UPPERCASE_ACRONYMS = new Set([
+	"fr",
+	"nfr",
+	"ux",
+	"api",
+	"db",
+	"id",
+	"url"
+]);
+/**
+ * Convert a snake_case key to Title Case for display headings.
+ * Known acronyms (fr, nfr, ux, api, db, id, url) are rendered fully uppercased.
+ */
+function fieldLabel(key) {
+	return key.replace(/_/g, " ").replace(/\b\w+/g, (word) => {
+		const lower = word.toLowerCase();
+		if (UPPERCASE_ACRONYMS.has(lower)) return lower.toUpperCase();
+		return word.charAt(0).toUpperCase() + word.slice(1).toLowerCase();
+	});
+}
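A minimal usage sketch of the labelling rule above; the expected strings follow directly from the regex and the acronym set, assuming `fieldLabel` and `UPPERCASE_ACRONYMS` from this region are in scope:

```js
// Standalone words that match the acronym set are fully uppercased;
// every other word gets plain Title Case.
fieldLabel("fr_coverage");        // "FR Coverage"
fieldLabel("api_style");          // "API Style"
fieldLabel("problem_statement");  // "Problem Statement"
```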
+/**
+ * Safely parse a JSON string; returns the original string if parsing fails.
+ */
+function safeParseJson(value) {
+	try {
+		return JSON.parse(value);
+	} catch {
+		return value;
+	}
+}
+/**
+ * Render a decision value to a markdown-friendly string.
+ * - Arrays → bulleted list items
+ * - Objects → key: value lines
+ * - Primitives → plain string
+ */
+function renderValue(rawValue) {
+	const parsed = safeParseJson(rawValue);
+	if (Array.isArray(parsed)) return parsed.map((item) => `- ${String(item)}`).join("\n");
+	if (typeof parsed === "object" && parsed !== null) return Object.entries(parsed).map(([k, v]) => `- **${fieldLabel(k)}**: ${String(v)}`).join("\n");
+	return String(parsed);
+}
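A sketch of the three branches of `renderValue`, assuming the definitions above are in scope (inputs are illustrative):

```js
renderValue('["a","b"]');             // "- a\n- b"              (array → bullets)
renderValue('{"api_style":"REST"}');  // "- **API Style**: REST" (object → labelled lines)
renderValue('plain text');            // "plain text"            (JSON.parse fails → passthrough)
```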
17082
|
+
/**
|
|
17083
|
+
* Render analysis-phase decisions as a `product-brief.md` file.
|
|
17084
|
+
*
|
|
17085
|
+
* Merges `product-brief` category decisions with `technology-constraints`
|
|
17086
|
+
* category decisions (they are stored separately in the decision store).
|
|
17087
|
+
*
|
|
17088
|
+
* @param decisions - All decisions from the analysis phase (any category)
|
|
17089
|
+
* @returns Formatted markdown content for product-brief.md
|
|
17090
|
+
*/
|
|
17091
|
+
function renderProductBrief(decisions) {
|
|
17092
|
+
const briefDecisions = decisions.filter((d) => d.category === "product-brief");
|
|
17093
|
+
const techConstraintDecisions = decisions.filter((d) => d.category === "technology-constraints");
|
|
17094
|
+
const briefMap = Object.fromEntries(briefDecisions.map((d) => [d.key, d.value]));
|
|
17095
|
+
if (techConstraintDecisions.length > 0 && briefMap["technology_constraints"] === void 0) {
|
|
17096
|
+
const tcBullets = techConstraintDecisions.flatMap((d) => {
|
|
17097
|
+
const parsed = safeParseJson(d.value);
|
|
17098
|
+
if (Array.isArray(parsed)) return parsed.map((item) => String(item));
|
|
17099
|
+
return [String(parsed)];
|
|
17100
|
+
});
|
|
17101
|
+
briefMap["technology_constraints"] = JSON.stringify(tcBullets);
|
|
17102
|
+
}
|
|
17103
|
+
if (briefDecisions.length === 0 && techConstraintDecisions.length === 0) return "";
|
|
17104
|
+
const parts = ["# Product Brief", ""];
|
|
17105
|
+
for (const field of PRODUCT_BRIEF_FIELDS) {
|
|
17106
|
+
const rawValue = briefMap[field];
|
|
17107
|
+
if (rawValue === void 0) continue;
|
|
17108
|
+
parts.push(`## ${fieldLabel(field)}`);
|
|
17109
|
+
parts.push("");
|
|
17110
|
+
parts.push(renderValue(rawValue));
|
|
17111
|
+
parts.push("");
|
|
17112
|
+
}
|
|
17113
|
+
return parts.join("\n");
|
|
17114
|
+
}
|
|
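A usage sketch with hypothetical decision records; the record shape here is inferred from the filters above, not from the package's type definitions:

```js
const brief = renderProductBrief([
	{ category: "product-brief", key: "problem_statement", value: '"Exports are assembled by hand"' },
	{ category: "technology-constraints", key: "backend", value: '["Go", "Rust"]' }
]);
// # Product Brief
//
// ## Problem Statement
//
// Exports are assembled by hand
//
// ## Technology Constraints
//
// - Go
// - Rust
```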
+/**
+ * Render planning-phase decisions (and requirements table) as a `prd.md` file.
+ *
+ * Sections rendered (when data is present):
+ * - Project Classification (classification decisions)
+ * - Functional Requirements (functional-requirements decisions)
+ * - Non-Functional Requirements (non-functional-requirements decisions)
+ * - Domain Model (domain-model decisions)
+ * - User Stories (user-stories decisions)
+ * - Tech Stack (tech-stack decisions)
+ * - Out of Scope (out-of-scope decisions)
+ *
+ * @param decisions - All decisions from the planning phase
+ * @param requirements - Requirements records from the requirements table (optional)
+ * @returns Formatted markdown content for prd.md
+ */
+function renderPrd(decisions, requirements = []) {
+	if (decisions.length === 0) return "";
+	const parts = ["# Product Requirements Document", ""];
+	const classificationDecisions = decisions.filter((d) => d.category === "classification");
+	if (classificationDecisions.length > 0) {
+		parts.push("## Project Classification");
+		parts.push("");
+		for (const d of classificationDecisions) {
+			const parsed = safeParseJson(d.value);
+			if (Array.isArray(parsed)) {
+				parts.push(`**${fieldLabel(d.key)}**:`);
+				for (const item of parsed) parts.push(`- ${String(item)}`);
+			} else parts.push(`**${fieldLabel(d.key)}**: ${String(parsed)}`);
+		}
+		parts.push("");
+	}
+	const frDecisions = decisions.filter((d) => d.category === "functional-requirements");
+	if (frDecisions.length > 0) {
+		parts.push("## Functional Requirements");
+		parts.push("");
+		for (const d of frDecisions) {
+			const parsed = safeParseJson(d.value);
+			if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
+				const fr = parsed;
+				const id = fr.id ?? d.key;
+				const priority = fr.priority ? ` [${fr.priority.toUpperCase()}]` : "";
+				parts.push(`- **${id}**${priority}: ${fr.description ?? d.value}`);
+				if (fr.acceptance_criteria && fr.acceptance_criteria.length > 0) for (const ac of fr.acceptance_criteria) parts.push(` - ${ac}`);
+			} else parts.push(`- **${d.key}**: ${renderValue(d.value)}`);
+		}
+		parts.push("");
+	}
+	const nfrDecisions = decisions.filter((d) => d.category === "non-functional-requirements");
+	if (nfrDecisions.length > 0) {
+		parts.push("## Non-Functional Requirements");
+		parts.push("");
+		for (const d of nfrDecisions) {
+			const parsed = safeParseJson(d.value);
+			if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
+				const nfr = parsed;
+				const id = nfr.id ?? d.key;
+				const cat = nfr.category ? ` [${nfr.category.toUpperCase()}]` : "";
+				parts.push(`- **${id}**${cat}: ${nfr.description ?? d.value}`);
+			} else parts.push(`- **${d.key}**: ${renderValue(d.value)}`);
+		}
+		parts.push("");
+	}
+	const domainDecisions = decisions.filter((d) => d.category === "domain-model");
+	if (domainDecisions.length > 0) {
+		parts.push("## Domain Model");
+		parts.push("");
+		for (const d of domainDecisions) parts.push(renderValue(d.value));
+		parts.push("");
+	}
+	const userStoryDecisions = decisions.filter((d) => d.category === "user-stories");
+	if (userStoryDecisions.length > 0) {
+		parts.push("## User Stories");
+		parts.push("");
+		for (const d of userStoryDecisions) {
+			const parsed = safeParseJson(d.value);
+			if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
+				const us = parsed;
+				if (us.title) {
+					parts.push(`### ${us.title}`);
+					parts.push("");
+					if (us.description) {
+						parts.push(us.description);
+						parts.push("");
+					}
+				} else {
+					parts.push(renderValue(d.value));
+					parts.push("");
+				}
+			} else {
+				parts.push(renderValue(d.value));
+				parts.push("");
+			}
+		}
+	}
+	const techStackDecisions = decisions.filter((d) => d.category === "tech-stack");
+	if (techStackDecisions.length > 0) {
+		parts.push("## Tech Stack");
+		parts.push("");
+		for (const d of techStackDecisions) if (d.key === "tech_stack") {
+			const parsed = safeParseJson(d.value);
+			if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) for (const [k, v] of Object.entries(parsed)) parts.push(`- **${fieldLabel(k)}**: ${String(v)}`);
+			else parts.push(`- **${fieldLabel(d.key)}**: ${d.value}`);
+		} else parts.push(`- **${fieldLabel(d.key)}**: ${d.value}`);
+		parts.push("");
+	}
+	const outOfScopeDecisions = decisions.filter((d) => d.category === "out-of-scope");
+	if (outOfScopeDecisions.length > 0) {
+		parts.push("## Out of Scope");
+		parts.push("");
+		for (const d of outOfScopeDecisions) parts.push(renderValue(d.value));
+		parts.push("");
+	}
+	const functionalReqs = requirements.filter((r) => r.type === "functional");
+	const nonFunctionalReqs = requirements.filter((r) => r.type === "non_functional");
+	if ((functionalReqs.length > 0 || nonFunctionalReqs.length > 0) && frDecisions.length === 0 && nfrDecisions.length === 0) {
+		parts.push("## Requirements (from Requirements Table)");
+		parts.push("");
+		if (functionalReqs.length > 0) {
+			parts.push("### Functional Requirements");
+			parts.push("");
+			for (const r of functionalReqs) {
+				const priority = r.priority ? ` [${r.priority.toUpperCase()}]` : "";
+				parts.push(`- ${r.source ?? ""}${priority}: ${r.description}`);
+			}
+			parts.push("");
+		}
+		if (nonFunctionalReqs.length > 0) {
+			parts.push("### Non-Functional Requirements");
+			parts.push("");
+			for (const r of nonFunctionalReqs) {
+				const priority = r.priority ? ` [${r.priority.toUpperCase()}]` : "";
+				parts.push(`- ${priority}: ${r.description}`);
+			}
+			parts.push("");
+		}
+	}
+	return parts.join("\n");
+}
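One hypothetical `functional-requirements` decision and the list item it produces; the field names mirror the properties read above:

```js
renderPrd([{
	category: "functional-requirements",
	key: "fr-1",
	value: JSON.stringify({
		id: "FR-1",
		priority: "must",
		description: "Export planning decisions as prd.md",
		acceptance_criteria: ["Writes prd.md", "Skips empty sections"]
	})
}]);
// # Product Requirements Document
//
// ## Functional Requirements
//
// - **FR-1** [MUST]: Export planning decisions as prd.md
//  - Writes prd.md
//  - Skips empty sections
```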
17254
|
+
/**
|
|
17255
|
+
* Render solutioning-phase architecture decisions as an `architecture.md` file.
|
|
17256
|
+
*
|
|
17257
|
+
* Groups all architecture decisions into a single `## Architecture Decisions`
|
|
17258
|
+
* section, formatting each as `**key**: value` with italicised rationale where
|
|
17259
|
+
* present. The heading pattern matches the regex used by `seedMethodologyContext()`
|
|
17260
|
+
* so that the exported file can be round-tripped back into the decision store.
|
|
17261
|
+
*
|
|
17262
|
+
* @param decisions - All decisions from the solutioning phase (any category)
|
|
17263
|
+
* @returns Formatted markdown content for architecture.md, or '' if no data
|
|
17264
|
+
*/
|
|
17265
|
+
function renderArchitecture(decisions) {
|
|
17266
|
+
const archDecisions = decisions.filter((d) => d.category === "architecture");
|
|
17267
|
+
if (archDecisions.length === 0) return "";
|
|
17268
|
+
const parts = ["# Architecture", ""];
|
|
17269
|
+
parts.push("## Architecture Decisions");
|
|
17270
|
+
parts.push("");
|
|
17271
|
+
for (const d of archDecisions) {
|
|
17272
|
+
const value = safeParseJson(d.value);
|
|
17273
|
+
let displayValue;
|
|
17274
|
+
if (typeof value === "object" && value !== null && !Array.isArray(value)) {
|
|
17275
|
+
displayValue = Object.entries(value).map(([k, v]) => ` - *${fieldLabel(k)}*: ${String(v)}`).join("\n");
|
|
17276
|
+
parts.push(`**${d.key}**:`);
|
|
17277
|
+
parts.push(displayValue);
|
|
17278
|
+
} else if (Array.isArray(value)) {
|
|
17279
|
+
displayValue = value.map((item) => ` - ${String(item)}`).join("\n");
|
|
17280
|
+
parts.push(`**${d.key}**:`);
|
|
17281
|
+
parts.push(displayValue);
|
|
17282
|
+
} else {
|
|
17283
|
+
displayValue = String(value);
|
|
17284
|
+
if (d.rationale) parts.push(`**${d.key}**: ${displayValue} *(${d.rationale})*`);
|
|
17285
|
+
else parts.push(`**${d.key}**: ${displayValue}`);
|
|
17286
|
+
}
|
|
17287
|
+
}
|
|
17288
|
+
parts.push("");
|
|
17289
|
+
return parts.join("\n");
|
|
17290
|
+
}
|
|
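A sketch of the scalar branch, which is the one that carries the italicised rationale (hypothetical record):

```js
renderArchitecture([{
	category: "architecture",
	key: "database",
	value: '"PostgreSQL"',
	rationale: "relational model fits the decision store"
}]);
// # Architecture
//
// ## Architecture Decisions
//
// **database**: PostgreSQL *(relational model fits the decision store)*
```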
+/**
+ * Render solutioning-phase epics and stories decisions as an `epics.md` file.
+ *
+ * Output format:
+ * ```
+ * ## Epic 1: Title
+ * Description
+ *
+ * ### Story 1-1: Title
+ * **Priority**: must
+ * **Description**: ...
+ * **Acceptance Criteria**:
+ * - AC1
+ * - AC2
+ * ```
+ *
+ * The `## Epic N:` heading pattern is parsed by `parseEpicShards()` in
+ * `seed-methodology-context.ts`, satisfying the round-trip contract (AC5).
+ *
+ * Stories are associated with their parent epic by the numeric prefix of the
+ * story key (e.g., story key `2-3` → epic 2).
+ *
+ * @param decisions - All decisions from the solutioning phase (any category)
+ * @returns Formatted markdown content for epics.md, or '' if no data
+ */
+function renderEpics(decisions) {
+	const epicDecisions = decisions.filter((d) => d.category === "epics");
+	const storyDecisions = decisions.filter((d) => d.category === "stories");
+	if (epicDecisions.length === 0 && storyDecisions.length === 0) return "";
+	const epicMap = new Map();
+	for (const d of epicDecisions) {
+		const match$1 = /^epic-(\d+)$/i.exec(d.key);
+		if (match$1 === null) continue;
+		const epicNum = parseInt(match$1[1], 10);
+		const parsed = safeParseJson(d.value);
+		if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
+			const p = parsed;
+			epicMap.set(epicNum, {
+				num: epicNum,
+				title: p.title ?? `Epic ${epicNum}`,
+				description: p.description ?? ""
+			});
+		} else epicMap.set(epicNum, {
+			num: epicNum,
+			title: String(parsed),
+			description: ""
+		});
+	}
+	const storyMap = new Map();
+	for (const d of storyDecisions) {
+		const parsed = safeParseJson(d.value);
+		let story;
+		if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
+			const p = parsed;
+			const storyKey = p.key ?? d.key;
+			const keyMatch = /^(\d+)-(\d+)/.exec(storyKey);
+			if (keyMatch === null) continue;
+			const epicNum = parseInt(keyMatch[1], 10);
+			const storyNum = parseInt(keyMatch[2], 10);
+			story = {
+				key: storyKey,
+				epicNum,
+				storyNum,
+				title: p.title ?? `Story ${storyKey}`,
+				description: p.description ?? "",
+				ac: p.acceptance_criteria ?? p.ac ?? [],
+				priority: p.priority ?? "must"
+			};
+		} else {
+			const storyKey = d.key;
+			const keyMatch = /^(\d+)-(\d+)/.exec(storyKey);
+			if (keyMatch === null) continue;
+			const epicNum = parseInt(keyMatch[1], 10);
+			const storyNum = parseInt(keyMatch[2], 10);
+			story = {
+				key: storyKey,
+				epicNum,
+				storyNum,
+				title: `Story ${storyKey}`,
+				description: String(parsed),
+				ac: [],
+				priority: "must"
+			};
+		}
+		if (!storyMap.has(story.epicNum)) storyMap.set(story.epicNum, []);
+		storyMap.get(story.epicNum).push(story);
+	}
+	for (const stories of storyMap.values()) stories.sort((a, b) => a.storyNum - b.storyNum);
+	const allEpicNums = new Set([...epicMap.keys(), ...storyMap.keys()]);
+	const sortedEpicNums = [...allEpicNums].sort((a, b) => a - b);
+	const parts = ["# Epics and Stories", ""];
+	for (const epicNum of sortedEpicNums) {
+		const epic = epicMap.get(epicNum);
+		const epicTitle = epic?.title ?? `Epic ${epicNum}`;
+		const epicDescription = epic?.description ?? "";
+		parts.push(`## Epic ${epicNum}: ${epicTitle}`);
+		parts.push("");
+		if (epicDescription) {
+			parts.push(epicDescription);
+			parts.push("");
+		}
+		const stories = storyMap.get(epicNum) ?? [];
+		for (const story of stories) {
+			parts.push(`### Story ${story.key}: ${story.title}`);
+			parts.push("");
+			parts.push(`**Priority**: ${story.priority}`);
+			if (story.description) parts.push(`**Description**: ${story.description}`);
+			if (story.ac.length > 0) {
+				parts.push("**Acceptance Criteria**:");
+				for (const ac of story.ac) parts.push(`- ${ac}`);
+			}
+			parts.push("");
+		}
+	}
+	return parts.join("\n");
+}
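A sketch of the epic/story association by key prefix (hypothetical records; the story's `2-3` key is what binds it to epic 2):

```js
renderEpics([
	{ category: "epics", key: "epic-2", value: '{"title":"Export","description":"Ship the exporters"}' },
	{ category: "stories", key: "2-3", value: '{"title":"Render PRD","priority":"should"}' }
]);
// # Epics and Stories
//
// ## Epic 2: Export
//
// Ship the exporters
//
// ### Story 2-3: Render PRD
//
// **Priority**: should
```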
17407
|
+
/**
|
|
17408
|
+
* Render `operational-finding` category decisions as an "Operational Findings" section.
|
|
17409
|
+
*
|
|
17410
|
+
* Groups findings by run key (for run-summary decisions) and stall key (for stall decisions).
|
|
17411
|
+
* Returns '' if no matching decisions are found.
|
|
17412
|
+
*
|
|
17413
|
+
* @param decisions - Decisions of any category; filters for 'operational-finding'
|
|
17414
|
+
* @returns Formatted markdown content, or '' if empty
|
|
17415
|
+
*/
|
|
17416
|
+
function renderOperationalFindings(decisions) {
|
|
17417
|
+
const findings = decisions.filter((d) => d.category === "operational-finding");
|
|
17418
|
+
if (findings.length === 0) return "";
|
|
17419
|
+
const parts = ["## Operational Findings", ""];
|
|
17420
|
+
const runSummaries = findings.filter((d) => d.key.startsWith("run-summary:"));
|
|
17421
|
+
const stallFindings = findings.filter((d) => d.key.startsWith("stall:"));
|
|
17422
|
+
const otherFindings = findings.filter((d) => !d.key.startsWith("run-summary:") && !d.key.startsWith("stall:"));
|
|
17423
|
+
if (runSummaries.length > 0) {
|
|
17424
|
+
parts.push("### Run Summaries");
|
|
17425
|
+
parts.push("");
|
|
17426
|
+
for (const d of runSummaries) {
|
|
17427
|
+
const runId = d.key.replace("run-summary:", "");
|
|
17428
|
+
const parsed = safeParseJson(d.value);
|
|
17429
|
+
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
17430
|
+
const s$1 = parsed;
|
|
17431
|
+
parts.push(`**Run: ${runId}**`);
|
|
17432
|
+
parts.push(`- Succeeded: ${(s$1.succeeded ?? []).join(", ") || "none"}`);
|
|
17433
|
+
parts.push(`- Failed: ${(s$1.failed ?? []).join(", ") || "none"}`);
|
|
17434
|
+
parts.push(`- Escalated: ${(s$1.escalated ?? []).join(", ") || "none"}`);
|
|
17435
|
+
parts.push(`- Total restarts: ${s$1.total_restarts ?? 0}`);
|
|
17436
|
+
parts.push(`- Elapsed: ${s$1.elapsed_seconds ?? 0}s`);
|
|
17437
|
+
parts.push(`- Tokens: ${s$1.total_input_tokens ?? 0} in / ${s$1.total_output_tokens ?? 0} out`);
|
|
17438
|
+
} else parts.push(`**Run: ${runId}**: ${String(parsed)}`);
|
|
17439
|
+
parts.push("");
|
|
17440
|
+
}
|
|
17441
|
+
}
|
|
17442
|
+
if (stallFindings.length > 0) {
|
|
17443
|
+
parts.push("### Stall Events");
|
|
17444
|
+
parts.push("");
|
|
17445
|
+
for (const d of stallFindings) {
|
|
17446
|
+
const parsed = safeParseJson(d.value);
|
|
17447
|
+
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
17448
|
+
const s$1 = parsed;
|
|
17449
|
+
const outcome = s$1.outcome ?? "unknown";
|
|
17450
|
+
parts.push(`- **${d.key}**: phase=${s$1.phase ?? "?"} staleness=${s$1.staleness_secs ?? 0}s attempt=${s$1.attempt ?? 0} outcome=${outcome}`);
|
|
17451
|
+
} else parts.push(`- **${d.key}**: ${String(parsed)}`);
|
|
17452
|
+
}
|
|
17453
|
+
parts.push("");
|
|
17454
|
+
}
|
|
17455
|
+
if (otherFindings.length > 0) {
|
|
17456
|
+
for (const d of otherFindings) parts.push(`- **${d.key}**: ${renderValue(d.value)}`);
|
|
17457
|
+
parts.push("");
|
|
17458
|
+
}
|
|
17459
|
+
return parts.join("\n");
|
|
17460
|
+
}
|
|
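Grouping is driven purely by key prefix, as the JSDoc notes; a sketch with hypothetical records:

```js
renderOperationalFindings([
	{ category: "operational-finding", key: "run-summary:run-42", value: '{"succeeded":["1-1"],"failed":[],"total_restarts":1,"elapsed_seconds":90}' },
	{ category: "operational-finding", key: "stall:1-2", value: '{"phase":"dev","staleness_secs":120,"attempt":2,"outcome":"restarted"}' }
]);
// ## Operational Findings
//
// ### Run Summaries
//
// **Run: run-42**
// - Succeeded: 1-1
// - Failed: none
// (remaining summary lines omitted here)
//
// ### Stall Events
//
// - **stall:1-2**: phase=dev staleness=120s attempt=2 outcome=restarted
```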
17461
|
+
/**
|
|
17462
|
+
* Render `experiment-result` category decisions as an "Experiments" section.
|
|
17463
|
+
*
|
|
17464
|
+
* Lists each experiment with its verdict, metric delta, and branch name.
|
|
17465
|
+
* Returns '' if no matching decisions are found.
|
|
17466
|
+
*
|
|
17467
|
+
* @param decisions - Decisions of any category; filters for 'experiment-result'
|
|
17468
|
+
* @returns Formatted markdown content, or '' if empty
|
|
17469
|
+
*/
|
|
17470
|
+
function renderExperiments(decisions) {
|
|
17471
|
+
const experiments = decisions.filter((d) => d.category === "experiment-result");
|
|
17472
|
+
if (experiments.length === 0) return "";
|
|
17473
|
+
const parts = ["## Experiments", ""];
|
|
17474
|
+
const improved = experiments.filter((d) => {
|
|
17475
|
+
const p = safeParseJson(d.value);
|
|
17476
|
+
return typeof p === "object" && p !== null && p["verdict"] === "IMPROVED";
|
|
17477
|
+
});
|
|
17478
|
+
const mixed = experiments.filter((d) => {
|
|
17479
|
+
const p = safeParseJson(d.value);
|
|
17480
|
+
return typeof p === "object" && p !== null && p["verdict"] === "MIXED";
|
|
17481
|
+
});
|
|
17482
|
+
const regressed = experiments.filter((d) => {
|
|
17483
|
+
const p = safeParseJson(d.value);
|
|
17484
|
+
return typeof p === "object" && p !== null && p["verdict"] === "REGRESSED";
|
|
17485
|
+
});
|
|
17486
|
+
parts.push(`**Total**: ${experiments.length} | **Improved**: ${improved.length} | **Mixed**: ${mixed.length} | **Regressed**: ${regressed.length}`);
|
|
17487
|
+
parts.push("");
|
|
17488
|
+
for (const d of experiments) {
|
|
17489
|
+
const parsed = safeParseJson(d.value);
|
|
17490
|
+
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
17491
|
+
const e = parsed;
|
|
17492
|
+
const verdict = e.verdict ?? "UNKNOWN";
|
|
17493
|
+
const metric = e.target_metric ?? "unknown";
|
|
17494
|
+
const branch = e.branch_name ? ` → \`${e.branch_name}\`` : "";
|
|
17495
|
+
parts.push(`- **[${verdict}]** ${metric}: before=${e.before ?? "?"} after=${e.after ?? "?"}${branch}`);
|
|
17496
|
+
} else parts.push(`- ${String(parsed)}`);
|
|
17497
|
+
}
|
|
17498
|
+
parts.push("");
|
|
17499
|
+
return parts.join("\n");
|
|
17500
|
+
}
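A sketch under the same assumptions (the verdict values IMPROVED/MIXED/REGRESSED come from the filters above; the metric name, numbers, and branch are hypothetical):

// One hypothetical experiment-result decision.
const sampleExperiments = [
  { category: "experiment-result", key: "exp-1", value: JSON.stringify({
    verdict: "IMPROVED", target_metric: "tokens_per_story",
    before: 5200, after: 4100, branch_name: "exp/prompt-trim"
  }) }
];
// renderExperiments(sampleExperiments) produces roughly:
//   ## Experiments
//   **Total**: 1 | **Improved**: 1 | **Mixed**: 0 | **Regressed**: 0
//   - **[IMPROVED]** tokens_per_story: before=5200 after=4100 → `exp/prompt-trim`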
|
|
17501
|
+
/**
|
|
17502
|
+
* Render solutioning-phase readiness-findings decisions as a `readiness-report.md`.
|
|
17503
|
+
*
|
|
17504
|
+
* Groups findings by category, shows severity per finding, and emits an
|
|
17505
|
+
* overall pass/fail verdict based on whether any blocker- or major-severity findings were found.
|
|
17506
|
+
*
|
|
17507
|
+
* @param decisions - All decisions from the solutioning phase (any category)
|
|
17508
|
+
* @returns Formatted markdown content for readiness-report.md, or '' if no data
|
|
17509
|
+
*/
|
|
17510
|
+
function renderReadinessReport(decisions) {
|
|
17511
|
+
const findingDecisions = decisions.filter((d) => d.category === "readiness-findings");
|
|
17512
|
+
if (findingDecisions.length === 0) return "";
|
|
17513
|
+
const findings = [];
|
|
17514
|
+
for (const d of findingDecisions) {
|
|
17515
|
+
const parsed = safeParseJson(d.value);
|
|
17516
|
+
if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
|
|
17517
|
+
const p = parsed;
|
|
17518
|
+
findings.push({
|
|
17519
|
+
category: p.category ?? "general",
|
|
17520
|
+
severity: p.severity ?? "minor",
|
|
17521
|
+
description: p.description ?? String(parsed),
|
|
17522
|
+
affected_items: p.affected_items ?? []
|
|
17523
|
+
});
|
|
17524
|
+
} else findings.push({
|
|
17525
|
+
category: "general",
|
|
17526
|
+
severity: "minor",
|
|
17527
|
+
description: String(parsed),
|
|
17528
|
+
affected_items: []
|
|
17529
|
+
});
|
|
17530
|
+
}
|
|
17531
|
+
const hasCritical = findings.some((f$1) => f$1.severity === "blocker" || f$1.severity === "major");
|
|
17532
|
+
const verdict = hasCritical ? "FAIL" : "PASS";
|
|
17533
|
+
const parts = ["# Readiness Report", ""];
|
|
17534
|
+
parts.push(`**Overall Verdict**: ${verdict}`);
|
|
17535
|
+
parts.push("");
|
|
17536
|
+
parts.push(`**Total Findings**: ${findings.length}`);
|
|
17537
|
+
parts.push(`**Blockers**: ${findings.filter((f$1) => f$1.severity === "blocker").length}`);
|
|
17538
|
+
parts.push(`**Major**: ${findings.filter((f$1) => f$1.severity === "major").length}`);
|
|
17539
|
+
parts.push(`**Minor**: ${findings.filter((f$1) => f$1.severity === "minor").length}`);
|
|
17540
|
+
parts.push("");
|
|
17541
|
+
const byCategory = new Map();
|
|
17542
|
+
for (const finding of findings) {
|
|
17543
|
+
if (!byCategory.has(finding.category)) byCategory.set(finding.category, []);
|
|
17544
|
+
byCategory.get(finding.category).push(finding);
|
|
17545
|
+
}
|
|
17546
|
+
const categoryOrder = [
|
|
17547
|
+
"fr_coverage",
|
|
17548
|
+
"architecture_compliance",
|
|
17549
|
+
"story_quality",
|
|
17550
|
+
"ux_alignment",
|
|
17551
|
+
"dependency_validity",
|
|
17552
|
+
"general"
|
|
17553
|
+
];
|
|
17554
|
+
const sortedCategories = [...byCategory.keys()].sort((a, b) => {
|
|
17555
|
+
const ai = categoryOrder.indexOf(a);
|
|
17556
|
+
const bi = categoryOrder.indexOf(b);
|
|
17557
|
+
return (ai === -1 ? 999 : ai) - (bi === -1 ? 999 : bi);
|
|
17558
|
+
});
|
|
17559
|
+
for (const category of sortedCategories) {
|
|
17560
|
+
const categoryFindings = byCategory.get(category);
|
|
17561
|
+
const categoryLabel = fieldLabel(category);
|
|
17562
|
+
parts.push(`## ${categoryLabel}`);
|
|
17563
|
+
parts.push("");
|
|
17564
|
+
for (const finding of categoryFindings) {
|
|
17565
|
+
const severityTag = `[${finding.severity.toUpperCase()}]`;
|
|
17566
|
+
parts.push(`- ${severityTag} ${finding.description}`);
|
|
17567
|
+
if (finding.affected_items.length > 0) parts.push(` - *Affected*: ${finding.affected_items.join(", ")}`);
|
|
17568
|
+
}
|
|
17569
|
+
parts.push("");
|
|
17570
|
+
}
|
|
17571
|
+
return parts.join("\n");
|
|
17572
|
+
}
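A sketch of the verdict logic (the finding shape mirrors the fields read above; fieldLabel is defined elsewhere in the bundle, so the exact heading text is an assumption):

// One hypothetical blocker-severity finding forces an overall FAIL.
const sampleFindings = [
  { category: "readiness-findings", key: "finding-1", value: JSON.stringify({
    category: "fr_coverage", severity: "blocker",
    description: "FR-7 has no covering story", affected_items: ["FR-7"]
  }) }
];
// renderReadinessReport(sampleFindings) yields approximately:
//   # Readiness Report
//   **Overall Verdict**: FAIL
//   **Total Findings**: 1
//   **Blockers**: 1
//   ...
//   ## <fieldLabel("fr_coverage")>
//   - [BLOCKER] FR-7 has no covering story
//     - *Affected*: FR-7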
|
|
17573
|
+
|
|
17574
|
+
//#endregion
|
|
17575
|
+
//#region src/cli/commands/export.ts
|
|
17576
|
+
const logger$1 = createLogger("export-cmd");
|
|
17577
|
+
/**
|
|
17578
|
+
* Execute the export action.
|
|
17579
|
+
* Returns an exit code (0 = success, 1 = error).
|
|
17580
|
+
*/
|
|
17581
|
+
async function runExportAction(options) {
|
|
17582
|
+
const { runId, outputDir, projectRoot, outputFormat } = options;
|
|
17583
|
+
let adapter;
|
|
17584
|
+
try {
|
|
17585
|
+
const dbRoot = await resolveMainRepoRoot(projectRoot);
|
|
17586
|
+
const dbPath = join$1(dbRoot, ".substrate", "substrate.db");
|
|
17587
|
+
const doltDir = join$1(dbRoot, ".substrate", "state", ".dolt");
|
|
17588
|
+
if (!existsSync(dbPath) && !existsSync(doltDir)) {
|
|
17589
|
+
const errorMsg = `Decision store not initialized. Run 'substrate init' first.`;
|
|
17590
|
+
if (outputFormat === "json") process.stdout.write(JSON.stringify({ error: errorMsg }) + "\n");
|
|
17591
|
+
else process.stderr.write(`Error: ${errorMsg}\n`);
|
|
17592
|
+
return 1;
|
|
17593
|
+
}
|
|
17594
|
+
adapter = createDatabaseAdapter({
|
|
17595
|
+
backend: "auto",
|
|
17596
|
+
basePath: dbRoot
|
|
17597
|
+
});
|
|
17598
|
+
await initSchema(adapter);
|
|
17599
|
+
let run;
|
|
17600
|
+
if (runId !== void 0 && runId !== "") run = await getPipelineRunById(adapter, runId);
|
|
17601
|
+
else run = await getLatestRun(adapter);
|
|
17602
|
+
if (run === void 0) {
|
|
17603
|
+
const errorMsg = runId !== void 0 ? `Pipeline run '${runId}' not found.` : "No pipeline runs found. Run `substrate run` first.";
|
|
17604
|
+
if (outputFormat === "json") process.stdout.write(JSON.stringify({ error: errorMsg }) + "\n");
|
|
17605
|
+
else process.stderr.write(`Error: ${errorMsg}\n`);
|
|
17606
|
+
return 1;
|
|
17607
|
+
}
|
|
17608
|
+
const activeRunId = run.id;
|
|
17609
|
+
const resolvedOutputDir = isAbsolute(outputDir) ? outputDir : join$1(projectRoot, outputDir);
|
|
17610
|
+
if (!existsSync(resolvedOutputDir)) mkdirSync(resolvedOutputDir, { recursive: true });
|
|
17611
|
+
const filesWritten = [];
|
|
17612
|
+
const phasesExported = [];
|
|
17613
|
+
const analysisDecisions = await getDecisionsByPhaseForRun(adapter, activeRunId, "analysis");
|
|
17614
|
+
if (analysisDecisions.length > 0) {
|
|
17615
|
+
const content = renderProductBrief(analysisDecisions);
|
|
17616
|
+
if (content !== "") {
|
|
17617
|
+
const filePath = join$1(resolvedOutputDir, "product-brief.md");
|
|
17618
|
+
writeFileSync(filePath, content, "utf-8");
|
|
17619
|
+
filesWritten.push(filePath);
|
|
17620
|
+
phasesExported.push("analysis");
|
|
17621
|
+
if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
|
|
17622
|
+
}
|
|
17623
|
+
}
|
|
17624
|
+
const planningDecisions = await getDecisionsByPhaseForRun(adapter, activeRunId, "planning");
|
|
17625
|
+
if (planningDecisions.length > 0) {
|
|
17626
|
+
const requirements = (await listRequirements(adapter)).filter((r) => r.pipeline_run_id === activeRunId);
|
|
17627
|
+
const content = renderPrd(planningDecisions, requirements);
|
|
17628
|
+
if (content !== "") {
|
|
17629
|
+
const filePath = join$1(resolvedOutputDir, "prd.md");
|
|
17630
|
+
writeFileSync(filePath, content, "utf-8");
|
|
17631
|
+
filesWritten.push(filePath);
|
|
17632
|
+
if (!phasesExported.includes("planning")) phasesExported.push("planning");
|
|
17633
|
+
if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
|
|
17634
|
+
}
|
|
17635
|
+
}
|
|
17636
|
+
const solutioningDecisions = await getDecisionsByPhaseForRun(adapter, activeRunId, "solutioning");
|
|
17637
|
+
if (solutioningDecisions.length > 0) {
|
|
17638
|
+
const archContent = renderArchitecture(solutioningDecisions);
|
|
17639
|
+
if (archContent !== "") {
|
|
17640
|
+
const filePath = join$1(resolvedOutputDir, "architecture.md");
|
|
17641
|
+
writeFileSync(filePath, archContent, "utf-8");
|
|
17642
|
+
filesWritten.push(filePath);
|
|
17643
|
+
if (!phasesExported.includes("solutioning")) phasesExported.push("solutioning");
|
|
17644
|
+
if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
|
|
17645
|
+
}
|
|
17646
|
+
const epicsContent = renderEpics(solutioningDecisions);
|
|
17647
|
+
if (epicsContent !== "") {
|
|
17648
|
+
const filePath = join$1(resolvedOutputDir, "epics.md");
|
|
17649
|
+
writeFileSync(filePath, epicsContent, "utf-8");
|
|
17650
|
+
filesWritten.push(filePath);
|
|
17651
|
+
if (!phasesExported.includes("solutioning")) phasesExported.push("solutioning");
|
|
17652
|
+
if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
|
|
17653
|
+
}
|
|
17654
|
+
const readinessContent = renderReadinessReport(solutioningDecisions);
|
|
17655
|
+
if (readinessContent !== "") {
|
|
17656
|
+
const filePath = join$1(resolvedOutputDir, "readiness-report.md");
|
|
17657
|
+
writeFileSync(filePath, readinessContent, "utf-8");
|
|
17658
|
+
filesWritten.push(filePath);
|
|
17659
|
+
if (!phasesExported.includes("solutioning")) phasesExported.push("solutioning");
|
|
17660
|
+
if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
|
|
17661
|
+
}
|
|
17662
|
+
}
|
|
17663
|
+
const operationalDecisions = await getDecisionsByCategory(adapter, OPERATIONAL_FINDING);
|
|
17664
|
+
if (operationalDecisions.length > 0) {
|
|
17665
|
+
const operationalContent = renderOperationalFindings(operationalDecisions);
|
|
17666
|
+
if (operationalContent !== "") {
|
|
17667
|
+
const filePath = join$1(resolvedOutputDir, "operational-findings.md");
|
|
17668
|
+
writeFileSync(filePath, operationalContent, "utf-8");
|
|
17669
|
+
filesWritten.push(filePath);
|
|
17670
|
+
if (!phasesExported.includes("operational")) phasesExported.push("operational");
|
|
17671
|
+
if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
|
|
17672
|
+
}
|
|
17673
|
+
}
|
|
17674
|
+
const experimentDecisions = await getDecisionsByCategory(adapter, EXPERIMENT_RESULT);
|
|
17675
|
+
if (experimentDecisions.length > 0) {
|
|
17676
|
+
const experimentsContent = renderExperiments(experimentDecisions);
|
|
17677
|
+
if (experimentsContent !== "") {
|
|
17678
|
+
const filePath = join$1(resolvedOutputDir, "experiments.md");
|
|
17679
|
+
writeFileSync(filePath, experimentsContent, "utf-8");
|
|
17680
|
+
filesWritten.push(filePath);
|
|
17681
|
+
if (!phasesExported.includes("operational")) phasesExported.push("operational");
|
|
17682
|
+
if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
|
|
17683
|
+
}
|
|
17684
|
+
}
|
|
17685
|
+
if (outputFormat === "json") {
|
|
17686
|
+
const result = {
|
|
17687
|
+
files_written: filesWritten,
|
|
17688
|
+
run_id: activeRunId,
|
|
17689
|
+
phases_exported: phasesExported
|
|
17690
|
+
};
|
|
17691
|
+
process.stdout.write(JSON.stringify(result) + "\n");
|
|
17692
|
+
} else {
|
|
17693
|
+
if (filesWritten.length === 0) process.stdout.write(`No data found for run ${activeRunId}. The pipeline may not have completed any phases.\n`);
|
|
17694
|
+
else process.stdout.write(`\nExported ${filesWritten.length} file(s) from run ${activeRunId}.\n`);
|
|
17695
|
+
const skippedPhases = [];
|
|
17696
|
+
if (!phasesExported.includes("analysis")) skippedPhases.push("analysis");
|
|
17697
|
+
if (!phasesExported.includes("planning")) skippedPhases.push("planning");
|
|
17698
|
+
if (!phasesExported.includes("solutioning")) skippedPhases.push("solutioning");
|
|
17699
|
+
if (skippedPhases.length > 0) process.stdout.write(`Phases with no data (skipped): ${skippedPhases.join(", ")}\n`);
|
|
17700
|
+
}
|
|
17701
|
+
return 0;
|
|
17702
|
+
} catch (err) {
|
|
17703
|
+
const msg = err instanceof Error ? err.message : String(err);
|
|
17704
|
+
if (outputFormat === "json") process.stdout.write(JSON.stringify({ error: msg }) + "\n");
|
|
17705
|
+
else process.stderr.write(`Error: ${msg}\n`);
|
|
17706
|
+
logger$1.error({ err }, "export action failed");
|
|
17707
|
+
return 1;
|
|
17708
|
+
} finally {
|
|
17709
|
+
if (adapter !== void 0) try {
|
|
17710
|
+
await adapter.close();
|
|
17711
|
+
} catch {}
|
|
17712
|
+
}
|
|
17713
|
+
}
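A minimal programmatic sketch (the option object matches the destructuring at the top of runExportAction; the output directory mirrors the CLI default registered below):

// Export the latest run's artifacts as human-readable markdown.
const exitCode = await runExportAction({
  runId: undefined,                              // undefined/"" → falls back to getLatestRun
  outputDir: "_bmad-output/planning-artifacts/", // relative → resolved against projectRoot
  projectRoot: process.cwd(),
  outputFormat: "human"                          // or "json" for machine-readable output
});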
|
|
17714
|
+
function registerExportCommand(program, _version = "0.0.0", projectRoot = process.cwd()) {
|
|
17715
|
+
program.command("export").description("Export decision store contents as human-readable markdown files").option("--run-id <id>", "Pipeline run ID to export (defaults to latest run)").option("--output-dir <path>", "Directory to write exported files to", "_bmad-output/planning-artifacts/").option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").action(async (opts) => {
|
|
17716
|
+
if (opts.outputFormat !== "json" && opts.outputFormat !== "human") process.stderr.write(`Warning: unknown --output-format '${opts.outputFormat}', defaulting to 'human'\n`);
|
|
17717
|
+
const outputFormat = opts.outputFormat === "json" ? "json" : "human";
|
|
17718
|
+
const exitCode = await runExportAction({
|
|
17719
|
+
runId: opts.runId,
|
|
17720
|
+
outputDir: opts.outputDir,
|
|
17721
|
+
projectRoot: opts.projectRoot,
|
|
17722
|
+
outputFormat
|
|
17723
|
+
});
|
|
17724
|
+
process.exitCode = exitCode;
|
|
17725
|
+
});
|
|
17726
|
+
}
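Wiring sketch: the .command()/.option()/.action() chain above matches commander's API, so registration would plausibly look like this (the program construction is an assumption, not shown in this diff):

import { Command } from "commander";

const program = new Command();
registerExportCommand(program, "0.19.9", process.cwd());
// e.g. `substrate export --run-id run-42 --output-format json`
await program.parseAsync(process.argv);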
|
|
17727
|
+
|
|
17014
17728
|
//#endregion
|
|
17015
17729
|
//#region packages/sdlc/dist/handlers/sdlc-create-story-handler.js
|
|
17016
17730
|
/**
|
|
@@ -29548,16 +30262,16 @@ var require_ajv = __commonJS({ "node_modules/ajv/lib/ajv.js"(exports, module) {
|
|
|
29548
30262
|
return metaOpts;
|
|
29549
30263
|
}
|
|
29550
30264
|
function setLogger(self) {
|
|
29551
|
-
var logger$
|
|
29552
|
-
if (logger$
|
|
30265
|
+
var logger$22 = self._opts.logger;
|
|
30266
|
+
if (logger$22 === false) self.logger = {
|
|
29553
30267
|
log: noop,
|
|
29554
30268
|
warn: noop,
|
|
29555
30269
|
error: noop
|
|
29556
30270
|
};
|
|
29557
30271
|
else {
|
|
29558
|
-
if (logger$
|
|
29559
|
-
if (!(typeof logger$
|
|
29560
|
-
self.logger = logger$
|
|
30272
|
+
if (logger$22 === void 0) logger$22 = console;
|
|
30273
|
+
if (!(typeof logger$22 == "object" && logger$22.log && logger$22.warn && logger$22.error)) throw new Error("logger must implement log, warn and error methods");
|
|
30274
|
+
self.logger = logger$22;
|
|
29561
30275
|
}
|
|
29562
30276
|
}
|
|
29563
30277
|
function noop() {}
|
|
@@ -40186,7 +40900,8 @@ async function runRunAction(options) {
|
|
|
40186
40900
|
ts: new Date().toISOString(),
|
|
40187
40901
|
run_id: pipelineRun.id,
|
|
40188
40902
|
stories: storyKeys,
|
|
40189
|
-
concurrency
|
|
40903
|
+
concurrency,
|
|
40904
|
+
engine: resolvedEngine
|
|
40190
40905
|
});
|
|
40191
40906
|
wireNdjsonEmitter(eventBus, ndjsonEmitter);
|
|
40192
40907
|
}
|
|
@@ -40486,6 +41201,13 @@ async function runFullPipeline(options) {
|
|
|
40486
41201
|
db: adapter,
|
|
40487
41202
|
pack
|
|
40488
41203
|
});
|
|
41204
|
+
try {
|
|
41205
|
+
const staleRuns = await getRunningPipelineRuns(adapter) ?? [];
|
|
41206
|
+
if (staleRuns.length > 0) {
|
|
41207
|
+
for (const stale of staleRuns) await updatePipelineRun(adapter, stale.id, { status: "failed" });
|
|
41208
|
+
logger.info({ count: staleRuns.length }, "Swept stale pipeline run(s) from crashed orchestrator");
|
|
41209
|
+
}
|
|
41210
|
+
} catch {}
|
|
40489
41211
|
const startedAt = Date.now();
|
|
40490
41212
|
const runId = await phaseOrchestrator.startRun(concept ?? "", startPhase);
|
|
40491
41213
|
const runIdFilePath = join(dbDir, "current-run-id");
|
|
@@ -40770,6 +41492,19 @@ async function runFullPipeline(options) {
|
|
|
40770
41492
|
ts: new Date().toISOString(),
|
|
40771
41493
|
phase: currentPhase
|
|
40772
41494
|
});
|
|
41495
|
+
if ([
|
|
41496
|
+
"analysis",
|
|
41497
|
+
"planning",
|
|
41498
|
+
"solutioning"
|
|
41499
|
+
].includes(currentPhase)) try {
|
|
41500
|
+
const exportDir = join(projectRoot, "_bmad-output", "planning-artifacts");
|
|
41501
|
+
await runExportAction({
|
|
41502
|
+
runId,
|
|
41503
|
+
outputDir: exportDir,
|
|
41504
|
+
projectRoot,
|
|
41505
|
+
outputFormat: "json"
|
|
41506
|
+
});
|
|
41507
|
+
} catch {}
|
|
40773
41508
|
if (stopAfter !== void 0 && currentPhase === stopAfter) {
|
|
40774
41509
|
const gate = createStopAfterGate(stopAfter);
|
|
40775
41510
|
if (gate.shouldHalt()) {
|
|
@@ -40884,5 +41619,5 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
|
|
|
40884
41619
|
}
|
|
40885
41620
|
|
|
40886
41621
|
//#endregion
|
|
40887
|
-
export { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, normalizeGraphSummaryToStatus, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
|
|
40888
|
-
//# sourceMappingURL=run-
|
|
41622
|
+
export { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, normalizeGraphSummaryToStatus, registerExportCommand, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
|
|
41623
|
+
//# sourceMappingURL=run-Dg_BEJB6.js.map
|