substrate-ai 0.19.15 → 0.19.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapter-registry-DcoerWf8.js +4 -0
- package/dist/cli/index.js +21 -15
- package/dist/{decisions-CEiirgql.js → decisions-DG-DLD3C.js} +1 -1
- package/dist/{dist-DpghaU2s.js → dist-RlHA9HPJ.js} +141 -8
- package/dist/{errors-CSmGS72Q.js → errors-C3Z7RYu9.js} +2 -2
- package/dist/{experimenter-C67kKXdl.js → experimenter-k8BhW6rB.js} +1 -1
- package/dist/{health-B96IquxJ.js → health-BfwizUAY.js} +2 -2
- package/dist/{health-BBou8ghl.js → health-D8V9_Pn7.js} +2 -2
- package/dist/index.d.ts +24 -0
- package/dist/index.js +2 -2
- package/dist/{routing-QcKaORfX.js → routing-CyuCd4j4.js} +1 -1
- package/dist/{run-DfaDV-t1.js → run-C3F7ylXg.js} +3 -3
- package/dist/{run-CgAga3yD.js → run-tWQlCR2A.js} +192 -143
- package/dist/{upgrade-CM1xHoeF.js → upgrade-2LJZtYUC.js} +2 -2
- package/dist/{upgrade-BMP2XBPm.js → upgrade-BsF4sxEE.js} +2 -2
- package/dist/{version-manager-impl-BwWSH7hR.js → version-manager-impl-tcnWjYl8.js} +1 -1
- package/package.json +1 -1
- package/packs/bmad/prompts/fix-story.md +3 -0
- package/packs/bmad/prompts/rework-story.md +3 -0
- package/dist/adapter-registry-CakXFBjt.js +0 -4
package/dist/cli/index.js
CHANGED
|
@@ -1,15 +1,15 @@
|
|
|
1
1
|
#!/usr/bin/env node
|
|
2
|
-
import { FileStateStore, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, WorkGraphRepository, buildPipelineStatusOutput, createDatabaseAdapter, createStateStore, findPackageRoot, formatOutput, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, parseDbTimestampAsUtc, registerHealthCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot } from "../health-
|
|
2
|
+
import { FileStateStore, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, WorkGraphRepository, buildPipelineStatusOutput, createDatabaseAdapter, createStateStore, findPackageRoot, formatOutput, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, parseDbTimestampAsUtc, registerHealthCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot } from "../health-D8V9_Pn7.js";
|
|
3
3
|
import { createLogger } from "../logger-KeHncl-f.js";
|
|
4
4
|
import { createEventBus } from "../helpers-CElYrONe.js";
|
|
5
|
-
import { AdapterRegistry, BudgetConfigSchema, CURRENT_CONFIG_FORMAT_VERSION, CURRENT_TASK_GRAPH_VERSION, ConfigError, CostTrackerConfigSchema, DEFAULT_CONFIG, DoltClient, DoltNotInstalled, GlobalSettingsSchema, IngestionServer, MonitorDatabaseImpl, OPERATIONAL_FINDING, PartialGlobalSettingsSchema, PartialProviderConfigSchema, ProvidersSchema, RoutingRecommender, STORY_METRICS, TelemetryConfigSchema, addTokenUsage, aggregateTokenUsageForRun, checkDoltInstalled, compareRunMetrics, createAmendmentRun, createConfigSystem, createDecision, createDoltClient, createPipelineRun, getActiveDecisions, getAllCostEntriesFiltered, getBaselineRunMetrics, getDecisionsByCategory, getDecisionsByPhaseForRun, getLatestCompletedRun, getLatestRun, getPipelineRunById, getPlanningCostTotal, getRetryableEscalations, getRunMetrics, getSessionCostSummary, getSessionCostSummaryFiltered, getStoryMetricsForRun, getTokenUsageSummary, incrementRunRestarts, initSchema, initializeDolt, listRunMetrics, loadParentRunDecisions, supersedeDecision, tagRunAsBaseline, updatePipelineRun } from "../dist-
|
|
5
|
+
import { AdapterRegistry, BudgetConfigSchema, CURRENT_CONFIG_FORMAT_VERSION, CURRENT_TASK_GRAPH_VERSION, ConfigError, CostTrackerConfigSchema, DEFAULT_CONFIG, DoltClient, DoltNotInstalled, GlobalSettingsSchema, IngestionServer, MonitorDatabaseImpl, OPERATIONAL_FINDING, PartialGlobalSettingsSchema, PartialProviderConfigSchema, ProvidersSchema, RoutingRecommender, STORY_METRICS, TelemetryConfigSchema, addTokenUsage, aggregateTokenUsageForRun, checkDoltInstalled, compareRunMetrics, createAmendmentRun, createConfigSystem, createDecision, createDoltClient, createPipelineRun, getActiveDecisions, getAllCostEntriesFiltered, getBaselineRunMetrics, getDecisionsByCategory, getDecisionsByPhaseForRun, getLatestCompletedRun, getLatestRun, getPipelineRunById, getPlanningCostTotal, getRetryableEscalations, getRunMetrics, getSessionCostSummary, getSessionCostSummaryFiltered, getStoryMetricsForRun, getTokenUsageSummary, incrementRunRestarts, initSchema, initializeDolt, listRunMetrics, loadParentRunDecisions, supersedeDecision, tagRunAsBaseline, updatePipelineRun } from "../dist-RlHA9HPJ.js";
|
|
6
6
|
import "../adapter-registry-DXLMTmfD.js";
|
|
7
|
-
import { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, registerExportCommand, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runSolutioningPhase, validateStopAfterFromConflict } from "../run-
|
|
8
|
-
import "../errors-
|
|
7
|
+
import { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, registerExportCommand, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runSolutioningPhase, validateStopAfterFromConflict } from "../run-tWQlCR2A.js";
|
|
8
|
+
import "../errors-C3Z7RYu9.js";
|
|
9
9
|
import "../routing-CcBOCuC9.js";
|
|
10
10
|
import "../decisions-C0pz9Clx.js";
|
|
11
11
|
import "../version-manager-impl-BmOWu8ml.js";
|
|
12
|
-
import { registerUpgradeCommand } from "../upgrade-
|
|
12
|
+
import { registerUpgradeCommand } from "../upgrade-2LJZtYUC.js";
|
|
13
13
|
import { Command } from "commander";
|
|
14
14
|
import { fileURLToPath } from "url";
|
|
15
15
|
import { dirname, join, resolve } from "path";
|
|
@@ -2811,6 +2811,9 @@ async function runFullPipelineFromPhase(options) {
|
|
|
2811
2811
|
dispatcher,
|
|
2812
2812
|
agentId
|
|
2813
2813
|
};
|
|
2814
|
+
const agentAdapter = agentId != null ? injectedRegistry.get(agentId) : void 0;
|
|
2815
|
+
const adapterDefaultCycles = agentAdapter?.getCapabilities?.()?.defaultMaxReviewCycles;
|
|
2816
|
+
const effectiveMaxReviewCycles = adapterDefaultCycles != null ? Math.max(maxReviewCycles, adapterDefaultCycles) : maxReviewCycles;
|
|
2814
2817
|
const phaseOrchestrator = createPhaseOrchestrator({
|
|
2815
2818
|
db: adapter,
|
|
2816
2819
|
pack
|
|
@@ -2916,7 +2919,7 @@ async function runFullPipelineFromPhase(options) {
|
|
|
2916
2919
|
eventBus,
|
|
2917
2920
|
config: {
|
|
2918
2921
|
maxConcurrency: concurrency,
|
|
2919
|
-
maxReviewCycles,
|
|
2922
|
+
maxReviewCycles: effectiveMaxReviewCycles,
|
|
2920
2923
|
pipelineRunId: runId,
|
|
2921
2924
|
enableHeartbeat: eventsFlag === true
|
|
2922
2925
|
},
|
|
@@ -3212,7 +3215,7 @@ async function runStatusAction(options) {
|
|
|
3212
3215
|
if (run === void 0) run = await getLatestRun(adapter);
|
|
3213
3216
|
}
|
|
3214
3217
|
if (run === void 0) {
|
|
3215
|
-
const { inspectProcessTree } = await import("../health-
|
|
3218
|
+
const { inspectProcessTree } = await import("../health-BfwizUAY.js");
|
|
3216
3219
|
const substrateDirPath = join(projectRoot, ".substrate");
|
|
3217
3220
|
const processInfo = inspectProcessTree({
|
|
3218
3221
|
projectRoot,
|
|
@@ -4099,7 +4102,7 @@ function defaultSupervisorDeps() {
|
|
|
4099
4102
|
if (cached === null) {
|
|
4100
4103
|
const { AdapterRegistry: AR } = await import(
|
|
4101
4104
|
/* @vite-ignore */
|
|
4102
|
-
"../adapter-registry-
|
|
4105
|
+
"../adapter-registry-DcoerWf8.js"
|
|
4103
4106
|
);
|
|
4104
4107
|
cached = new AR();
|
|
4105
4108
|
await cached.discoverAndRegister();
|
|
@@ -4541,11 +4544,11 @@ async function runSupervisorAction(options, deps = {}) {
|
|
|
4541
4544
|
try {
|
|
4542
4545
|
const { createExperimenter } = await import(
|
|
4543
4546
|
/* @vite-ignore */
|
|
4544
|
-
"../experimenter-
|
|
4547
|
+
"../experimenter-k8BhW6rB.js"
|
|
4545
4548
|
);
|
|
4546
4549
|
const { getLatestRun: getLatest } = await import(
|
|
4547
4550
|
/* @vite-ignore */
|
|
4548
|
-
"../decisions-
|
|
4551
|
+
"../decisions-DG-DLD3C.js"
|
|
4549
4552
|
);
|
|
4550
4553
|
const expAdapter = createDatabaseAdapter({
|
|
4551
4554
|
backend: "auto",
|
|
@@ -4555,7 +4558,7 @@ async function runSupervisorAction(options, deps = {}) {
|
|
|
4555
4558
|
await initSchema(expAdapter);
|
|
4556
4559
|
const { runRunAction: runPipeline } = await import(
|
|
4557
4560
|
/* @vite-ignore */
|
|
4558
|
-
"../run-
|
|
4561
|
+
"../run-C3F7ylXg.js"
|
|
4559
4562
|
);
|
|
4560
4563
|
const runStoryFn = async (opts) => {
|
|
4561
4564
|
const exitCode = await runPipeline({
|
|
@@ -5085,7 +5088,7 @@ async function runMetricsAction(options) {
|
|
|
5085
5088
|
const routingConfigPath = join(dbDir, "routing.yml");
|
|
5086
5089
|
let routingConfig = null;
|
|
5087
5090
|
if (existsSync$1(routingConfigPath)) try {
|
|
5088
|
-
const { loadModelRoutingConfig } = await import("../routing-
|
|
5091
|
+
const { loadModelRoutingConfig } = await import("../routing-CyuCd4j4.js");
|
|
5089
5092
|
routingConfig = loadModelRoutingConfig(routingConfigPath);
|
|
5090
5093
|
} catch {}
|
|
5091
5094
|
if (routingConfig === null) routingConfig = {
|
|
@@ -7258,6 +7261,9 @@ async function runRetryEscalatedAction(options) {
|
|
|
7258
7261
|
eventBus,
|
|
7259
7262
|
adapterRegistry: injectedRegistry
|
|
7260
7263
|
});
|
|
7264
|
+
const agentAdapter = agentId != null ? injectedRegistry.get(agentId) : void 0;
|
|
7265
|
+
const adapterDefaultCycles = agentAdapter?.getCapabilities?.()?.defaultMaxReviewCycles;
|
|
7266
|
+
const effectiveMaxReviewCycles = adapterDefaultCycles != null ? Math.max(2, adapterDefaultCycles) : 2;
|
|
7261
7267
|
const orchestrator = createImplementationOrchestrator({
|
|
7262
7268
|
db: adapter,
|
|
7263
7269
|
pack,
|
|
@@ -7266,7 +7272,7 @@ async function runRetryEscalatedAction(options) {
|
|
|
7266
7272
|
eventBus,
|
|
7267
7273
|
config: {
|
|
7268
7274
|
maxConcurrency: concurrency,
|
|
7269
|
-
maxReviewCycles:
|
|
7275
|
+
maxReviewCycles: effectiveMaxReviewCycles,
|
|
7270
7276
|
pipelineRunId: pipelineRun.id,
|
|
7271
7277
|
...Object.keys(perStoryContextCeilings).length > 0 ? { perStoryContextCeilings } : {}
|
|
7272
7278
|
},
|
|
@@ -8163,8 +8169,8 @@ async function createProgram() {
|
|
|
8163
8169
|
/** Fire-and-forget startup version check (story 8.3, AC3/AC5) */
|
|
8164
8170
|
function checkForUpdatesInBackground(currentVersion) {
|
|
8165
8171
|
if (process.env.SUBSTRATE_NO_UPDATE_CHECK === "1") return;
|
|
8166
|
-
import("../upgrade-
|
|
8167
|
-
const { createVersionManager } = await import("../version-manager-impl-
|
|
8172
|
+
import("../upgrade-BsF4sxEE.js").then(async () => {
|
|
8173
|
+
const { createVersionManager } = await import("../version-manager-impl-tcnWjYl8.js");
|
|
8168
8174
|
const vm = createVersionManager();
|
|
8169
8175
|
const result = await vm.checkForUpdates();
|
|
8170
8176
|
if (result.updateAvailable) {
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { addTokenUsage, createDecision, createPipelineRun, createRequirement, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getTokenUsageSummary, listRequirements, registerArtifact, updateDecision, updatePipelineRun, updatePipelineRunConfig, upsertDecision } from "./dist-
|
|
1
|
+
import { addTokenUsage, createDecision, createPipelineRun, createRequirement, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getTokenUsageSummary, listRequirements, registerArtifact, updateDecision, updatePipelineRun, updatePipelineRunConfig, upsertDecision } from "./dist-RlHA9HPJ.js";
|
|
2
2
|
import "./decisions-C0pz9Clx.js";
|
|
3
3
|
|
|
4
4
|
export { getLatestRun };
|
|
@@ -394,6 +394,116 @@ function detectInterfaceChanges(options) {
|
|
|
394
394
|
}
|
|
395
395
|
}
|
|
396
396
|
|
|
397
|
+
//#endregion
|
|
398
|
+
//#region packages/core/dist/dispatch/output-quality.js
|
|
399
|
+
/**
|
|
400
|
+
* OutputQualityEstimator — lightweight pre-schema quality signal extraction.
|
|
401
|
+
*
|
|
402
|
+
* Analyzes raw agent stdout for indicators of output quality before the YAML
|
|
403
|
+
* extraction and schema validation pipeline runs. Provides early signals for
|
|
404
|
+
* backends that lack OTLP telemetry (e.g., Codex, Gemini).
|
|
405
|
+
*
|
|
406
|
+
* Signals detected:
|
|
407
|
+
* - Hedging language ("I couldn't", "I was unable", "I'm not sure")
|
|
408
|
+
* - Completeness indicators (test results, file modification mentions)
|
|
409
|
+
* - Error/failure mentions in the narrative
|
|
410
|
+
* - Output length relative to expectations
|
|
411
|
+
*/
|
|
412
|
+
const HEDGING_PATTERNS = [
|
|
413
|
+
/I (?:couldn't|could not|can't|cannot|was unable to|am unable to)/i,
|
|
414
|
+
/I'm not sure/i,
|
|
415
|
+
/I (?:don't|do not) (?:know|understand) how/i,
|
|
416
|
+
/(?:unfortunately|regrettably),? I/i,
|
|
417
|
+
/I (?:wasn't|was not) able to/i,
|
|
418
|
+
/this is beyond/i,
|
|
419
|
+
/I need (?:more information|clarification|help)/i,
|
|
420
|
+
/I (?:skipped|omitted|left out)/i,
|
|
421
|
+
/TODO:? (?:implement|fix|add|complete)/i
|
|
422
|
+
];
|
|
423
|
+
const TEST_EXECUTION_PATTERNS = [
|
|
424
|
+
/(?:running|ran|executing|executed) (?:the )?tests/i,
|
|
425
|
+
/npm (?:run )?test/i,
|
|
426
|
+
/npx (?:vitest|jest|mocha|pytest)/i,
|
|
427
|
+
/turbo (?:run )?test/i,
|
|
428
|
+
/test suite/i
|
|
429
|
+
];
|
|
430
|
+
const TEST_PASS_PATTERNS = [
|
|
431
|
+
/tests? pass(?:ed|ing)?/i,
|
|
432
|
+
/all tests pass/i,
|
|
433
|
+
/\d+ pass(?:ed|ing)/i,
|
|
434
|
+
/test(?:s)? (?:are )?(?:all )?(?:passing|green)/i
|
|
435
|
+
];
|
|
436
|
+
const TEST_FAILURE_PATTERNS = [
|
|
437
|
+
/tests? fail(?:ed|ing|ure)?/i,
|
|
438
|
+
/\d+ fail(?:ed|ing|ure)/i,
|
|
439
|
+
/test(?:s)? (?:are )?(?:failing|red|broken)/i,
|
|
440
|
+
/FAIL\s/
|
|
441
|
+
];
|
|
442
|
+
const FILE_MODIFICATION_PATTERNS = [/(?:created|modified|updated|wrote|wrote to|editing|changed) (?:file |the file )?[`"']?[\w/.]+\.\w+/i, /writing (?:to )?[`"']?[\w/.]+\.\w+/i];
|
|
443
|
+
const ERROR_PATTERNS = [
|
|
444
|
+
/(?:error|exception|stack trace|traceback):/i,
|
|
445
|
+
/(?:TypeError|SyntaxError|ReferenceError|ImportError|ModuleNotFoundError)/,
|
|
446
|
+
/compilation (?:error|failed)/i,
|
|
447
|
+
/build (?:error|failed)/i
|
|
448
|
+
];
|
|
449
|
+
/**
|
|
450
|
+
* Analyze raw agent output for quality signals.
|
|
451
|
+
*
|
|
452
|
+
* This is intentionally lightweight — it scans for patterns in the text
|
|
453
|
+
* without parsing structure. The goal is early detection of problematic
|
|
454
|
+
* outputs (agent gave up, didn't run tests, hit errors) before the
|
|
455
|
+
* heavier YAML extraction + schema validation pipeline runs.
|
|
456
|
+
*/
|
|
457
|
+
function estimateOutputQuality(output) {
|
|
458
|
+
if (!output || output.trim() === "") return {
|
|
459
|
+
hedgingCount: 0,
|
|
460
|
+
hedgingPhrases: [],
|
|
461
|
+
mentionsTestExecution: false,
|
|
462
|
+
mentionsTestPass: false,
|
|
463
|
+
mentionsTestFailure: false,
|
|
464
|
+
fileModificationMentions: 0,
|
|
465
|
+
mentionsErrors: false,
|
|
466
|
+
outputLength: 0,
|
|
467
|
+
qualityScore: 0
|
|
468
|
+
};
|
|
469
|
+
const hedgingPhrases = [];
|
|
470
|
+
for (const pattern of HEDGING_PATTERNS) {
|
|
471
|
+
const match = output.match(pattern);
|
|
472
|
+
if (match) hedgingPhrases.push(match[0]);
|
|
473
|
+
}
|
|
474
|
+
const mentionsTestExecution = TEST_EXECUTION_PATTERNS.some((p) => p.test(output));
|
|
475
|
+
const mentionsTestPass = TEST_PASS_PATTERNS.some((p) => p.test(output));
|
|
476
|
+
const mentionsTestFailure = TEST_FAILURE_PATTERNS.some((p) => p.test(output));
|
|
477
|
+
let fileModificationMentions = 0;
|
|
478
|
+
for (const pattern of FILE_MODIFICATION_PATTERNS) {
|
|
479
|
+
const matches = output.match(new RegExp(pattern.source, "gi"));
|
|
480
|
+
if (matches) fileModificationMentions += matches.length;
|
|
481
|
+
}
|
|
482
|
+
const mentionsErrors = ERROR_PATTERNS.some((p) => p.test(output));
|
|
483
|
+
let score = 50;
|
|
484
|
+
if (mentionsTestExecution) score += 10;
|
|
485
|
+
if (mentionsTestPass) score += 15;
|
|
486
|
+
if (fileModificationMentions > 0) score += Math.min(15, fileModificationMentions * 5);
|
|
487
|
+
if (output.length > 1e3) score += 5;
|
|
488
|
+
if (output.length > 5e3) score += 5;
|
|
489
|
+
score -= hedgingPhrases.length * 10;
|
|
490
|
+
if (mentionsTestFailure) score -= 10;
|
|
491
|
+
if (mentionsErrors) score -= 10;
|
|
492
|
+
if (output.length < 200) score -= 15;
|
|
493
|
+
score = Math.max(0, Math.min(100, score));
|
|
494
|
+
return {
|
|
495
|
+
hedgingCount: hedgingPhrases.length,
|
|
496
|
+
hedgingPhrases,
|
|
497
|
+
mentionsTestExecution,
|
|
498
|
+
mentionsTestPass,
|
|
499
|
+
mentionsTestFailure,
|
|
500
|
+
fileModificationMentions,
|
|
501
|
+
mentionsErrors,
|
|
502
|
+
outputLength: output.length,
|
|
503
|
+
qualityScore: score
|
|
504
|
+
};
|
|
505
|
+
}
|
|
506
|
+
|
|
397
507
|
//#endregion
|
|
398
508
|
//#region packages/core/dist/dispatch/dispatcher-impl.js
|
|
399
509
|
const SHUTDOWN_GRACE_MS = 1e4;
|
|
@@ -709,7 +819,8 @@ var DispatcherImpl = class {
|
|
|
709
819
|
}
|
|
710
820
|
const worktreePath = workingDirectory ?? process.cwd();
|
|
711
821
|
const resolvedMaxTurns = maxTurns ?? DEFAULT_MAX_TURNS[taskType];
|
|
712
|
-
const
|
|
822
|
+
const capabilities = adapter.getCapabilities();
|
|
823
|
+
const effectivePrompt = capabilities.requiresYamlSuffix === true ? prompt + buildYamlOutputSuffix(outputSchema) : prompt;
|
|
713
824
|
const cmd = adapter.buildCommand(effectivePrompt, {
|
|
714
825
|
worktreePath,
|
|
715
826
|
billingMode: "subscription",
|
|
@@ -723,7 +834,7 @@ var DispatcherImpl = class {
|
|
|
723
834
|
dispatchId: id
|
|
724
835
|
});
|
|
725
836
|
const baseTimeoutMs = timeout ?? this._config.defaultTimeouts[taskType] ?? DEFAULT_TIMEOUTS[taskType] ?? 3e5;
|
|
726
|
-
const timeoutMultiplier =
|
|
837
|
+
const timeoutMultiplier = capabilities.timeoutMultiplier ?? 1;
|
|
727
838
|
const timeoutMs = Math.round(baseTimeoutMs * timeoutMultiplier);
|
|
728
839
|
const env = { ...process.env };
|
|
729
840
|
const parentNodeOpts = env["NODE_OPTIONS"] ?? "";
|
|
@@ -857,18 +968,28 @@ var DispatcherImpl = class {
|
|
|
857
968
|
parsed = parseResult.parsed;
|
|
858
969
|
parseError = parseResult.error;
|
|
859
970
|
} else parseError = "no_yaml_block";
|
|
971
|
+
const quality = estimateOutputQuality(stdout);
|
|
972
|
+
if (quality.hedgingCount > 0 || quality.qualityScore < 40) this._logger.warn({
|
|
973
|
+
id,
|
|
974
|
+
agent,
|
|
975
|
+
taskType,
|
|
976
|
+
qualityScore: quality.qualityScore,
|
|
977
|
+
hedging: quality.hedgingPhrases
|
|
978
|
+
}, "Low output quality detected");
|
|
860
979
|
this._eventBus.emit("agent:completed", {
|
|
861
980
|
dispatchId: id,
|
|
862
981
|
exitCode: code,
|
|
863
982
|
output: stdout,
|
|
864
983
|
inputTokens,
|
|
865
|
-
outputTokens: Math.ceil(stdout.length / CHARS_PER_TOKEN$3)
|
|
984
|
+
outputTokens: Math.ceil(stdout.length / CHARS_PER_TOKEN$3),
|
|
985
|
+
qualityScore: quality.qualityScore
|
|
866
986
|
});
|
|
867
987
|
this._logger.debug({
|
|
868
988
|
id,
|
|
869
989
|
agent,
|
|
870
990
|
taskType,
|
|
871
|
-
durationMs
|
|
991
|
+
durationMs,
|
|
992
|
+
qualityScore: quality.qualityScore
|
|
872
993
|
}, "Agent completed");
|
|
873
994
|
resolve$2({
|
|
874
995
|
id,
|
|
@@ -8790,7 +8911,11 @@ var ClaudeCodeAdapter = class {
|
|
|
8790
8911
|
"document",
|
|
8791
8912
|
"analyze"
|
|
8792
8913
|
],
|
|
8793
|
-
supportedLanguages: ["*"]
|
|
8914
|
+
supportedLanguages: ["*"],
|
|
8915
|
+
supportsSystemPrompt: true,
|
|
8916
|
+
supportsOtlpExport: true,
|
|
8917
|
+
requiresYamlSuffix: false,
|
|
8918
|
+
defaultMaxReviewCycles: 2
|
|
8794
8919
|
};
|
|
8795
8920
|
}
|
|
8796
8921
|
_detectBillingModes(versionOutput) {
|
|
@@ -9013,7 +9138,11 @@ var CodexCLIAdapter = class {
|
|
|
9013
9138
|
"analyze"
|
|
9014
9139
|
],
|
|
9015
9140
|
supportedLanguages: ["*"],
|
|
9016
|
-
timeoutMultiplier: 3
|
|
9141
|
+
timeoutMultiplier: 3,
|
|
9142
|
+
supportsSystemPrompt: false,
|
|
9143
|
+
supportsOtlpExport: false,
|
|
9144
|
+
requiresYamlSuffix: true,
|
|
9145
|
+
defaultMaxReviewCycles: 3
|
|
9017
9146
|
};
|
|
9018
9147
|
}
|
|
9019
9148
|
_buildPlanningPrompt(request) {
|
|
@@ -9253,7 +9382,11 @@ var GeminiCLIAdapter = class {
|
|
|
9253
9382
|
"document",
|
|
9254
9383
|
"analyze"
|
|
9255
9384
|
],
|
|
9256
|
-
supportedLanguages: ["*"]
|
|
9385
|
+
supportedLanguages: ["*"],
|
|
9386
|
+
supportsSystemPrompt: false,
|
|
9387
|
+
supportsOtlpExport: false,
|
|
9388
|
+
requiresYamlSuffix: true,
|
|
9389
|
+
defaultMaxReviewCycles: 3
|
|
9257
9390
|
};
|
|
9258
9391
|
}
|
|
9259
9392
|
_detectBillingModes(versionOutput) {
|
|
@@ -10305,4 +10438,4 @@ async function callLLM(params) {
|
|
|
10305
10438
|
|
|
10306
10439
|
//#endregion
|
|
10307
10440
|
export { ADVISORY_NOTES, AdapterRegistry, AdtError, BudgetConfigSchema, CURRENT_CONFIG_FORMAT_VERSION, CURRENT_TASK_GRAPH_VERSION, Categorizer, ClaudeCodeAdapter, CodexCLIAdapter, ConfigError, ConfigIncompatibleFormatError, ConsumerAnalyzer, CostTrackerConfigSchema, DEFAULT_CONFIG, DEFAULT_GLOBAL_SETTINGS, DispatcherImpl, DoltClient, DoltNotInstalled, DoltQueryError, ESCALATION_DIAGNOSIS, EXPERIMENT_RESULT, EfficiencyScorer, GeminiCLIAdapter, GlobalSettingsSchema, IngestionServer, LogTurnAnalyzer, ModelRoutingConfigSchema, MonitorDatabaseImpl, OPERATIONAL_FINDING, PartialGlobalSettingsSchema, PartialProviderConfigSchema, ProviderPolicySchema, ProvidersSchema, Recommender, RoutingConfigError, RoutingRecommender, RoutingResolver, RoutingTelemetry, RoutingTokenAccumulator, RoutingTuner, STORY_METRICS, STORY_OUTCOME, SubstrateConfigSchema, TASK_TYPE_PHASE_MAP, TEST_EXPANSION_FINDING, TEST_PLAN, TelemetryConfigSchema, TelemetryNormalizer, TelemetryPipeline, TurnAnalyzer, VersionManagerImpl, addTokenUsage, aggregateTokenUsageForRun, aggregateTokenUsageForStory, buildAuditLogEntry, buildBranchName, buildModificationDirective, buildPRBody, buildWorktreePath, callLLM, checkDoltInstalled, compareRunMetrics, createAmendmentRun, createConfigSystem, createDatabaseAdapter as createDatabaseAdapter$1, createDecision, createDoltClient, createExperimenter, createPipelineRun, createRequirement, createVersionManager, detectInterfaceChanges, determineVerdict, getActiveDecisions, getAllCostEntriesFiltered, getArtifactByTypeForRun, getArtifactsByRun, getBaselineRunMetrics, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestCompletedRun, getLatestRun, getModelTier, getPipelineRunById, getPlanningCostTotal, getRetryableEscalations, getRunMetrics, getRunningPipelineRuns, getSessionCostSummary, getSessionCostSummaryFiltered, getStoryMetricsForRun, getTokenUsageSummary, incrementRunRestarts, initSchema, initializeDolt, listRequirements, listRunMetrics, 
loadModelRoutingConfig, loadParentRunDecisions, registerArtifact, resolvePromptFile, supersedeDecision, tagRunAsBaseline, updateDecision, updatePipelineRun, updatePipelineRunConfig, upsertDecision, writeRunMetrics, writeStoryMetrics };
|
|
10308
|
-
//# sourceMappingURL=dist-
|
|
10441
|
+
//# sourceMappingURL=dist-RlHA9HPJ.js.map
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { AdtError } from "./dist-
|
|
1
|
+
import { AdtError } from "./dist-RlHA9HPJ.js";
|
|
2
2
|
|
|
3
3
|
//#region src/core/errors.ts
|
|
4
4
|
/** Error thrown when task configuration is invalid */
|
|
@@ -71,4 +71,4 @@ var TaskGraphIncompatibleFormatError = class extends AdtError {
|
|
|
71
71
|
|
|
72
72
|
//#endregion
|
|
73
73
|
export { BudgetExceededError, GitError, RecoveryError, TaskConfigError, TaskGraphCycleError, TaskGraphError, TaskGraphIncompatibleFormatError, WorkerError, WorkerNotFoundError };
|
|
74
|
-
//# sourceMappingURL=errors-
|
|
74
|
+
//# sourceMappingURL=errors-C3Z7RYu9.js.map
|
|
@@ -1,3 +1,3 @@
|
|
|
1
|
-
import { buildAuditLogEntry, buildBranchName, buildModificationDirective, buildPRBody, buildWorktreePath, createExperimenter, determineVerdict, resolvePromptFile } from "./dist-
|
|
1
|
+
import { buildAuditLogEntry, buildBranchName, buildModificationDirective, buildPRBody, buildWorktreePath, createExperimenter, determineVerdict, resolvePromptFile } from "./dist-RlHA9HPJ.js";
|
|
2
2
|
|
|
3
3
|
export { createExperimenter };
|
|
@@ -1,6 +1,6 @@
|
|
|
1
|
-
import { DEFAULT_STALL_THRESHOLD_SECONDS, getAllDescendantPids, getAutoHealthData, inspectProcessTree, isOrchestratorProcessLine, registerHealthCommand, runHealthAction } from "./health-
|
|
1
|
+
import { DEFAULT_STALL_THRESHOLD_SECONDS, getAllDescendantPids, getAutoHealthData, inspectProcessTree, isOrchestratorProcessLine, registerHealthCommand, runHealthAction } from "./health-D8V9_Pn7.js";
|
|
2
2
|
import "./logger-KeHncl-f.js";
|
|
3
|
-
import "./dist-
|
|
3
|
+
import "./dist-RlHA9HPJ.js";
|
|
4
4
|
import "./decisions-C0pz9Clx.js";
|
|
5
5
|
|
|
6
6
|
export { inspectProcessTree };
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import { createLogger } from "./logger-KeHncl-f.js";
|
|
2
|
-
import { DoltClient, DoltQueryError, createDatabaseAdapter$1 as createDatabaseAdapter, getLatestRun, getPipelineRunById, initSchema } from "./dist-
|
|
2
|
+
import { DoltClient, DoltQueryError, createDatabaseAdapter$1 as createDatabaseAdapter, getLatestRun, getPipelineRunById, initSchema } from "./dist-RlHA9HPJ.js";
|
|
3
3
|
import { createRequire } from "module";
|
|
4
4
|
import { dirname, join } from "path";
|
|
5
5
|
import { existsSync, readFileSync } from "node:fs";
|
|
@@ -1930,4 +1930,4 @@ function registerHealthCommand(program, _version = "0.0.0", projectRoot = proces
|
|
|
1930
1930
|
|
|
1931
1931
|
//#endregion
|
|
1932
1932
|
export { BMAD_BASELINE_TOKENS_FULL, DEFAULT_STALL_THRESHOLD_SECONDS, DoltMergeConflict, FileStateStore, STOP_AFTER_VALID_PHASES, STORY_KEY_PATTERN$1 as STORY_KEY_PATTERN, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, WorkGraphRepository, __commonJS, __require, __toESM, buildPipelineStatusOutput, createDatabaseAdapter$1 as createDatabaseAdapter, createStateStore, detectCycles, findPackageRoot, formatOutput, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, inspectProcessTree, isOrchestratorProcessLine, parseDbTimestampAsUtc, registerHealthCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, runHealthAction, validateStoryKey };
|
|
1933
|
-
//# sourceMappingURL=health-
|
|
1933
|
+
//# sourceMappingURL=health-D8V9_Pn7.js.map
|
package/dist/index.d.ts
CHANGED
|
@@ -847,6 +847,30 @@ interface AdapterCapabilities$1 {
|
|
|
847
847
|
* Default: 1.0 (no scaling).
|
|
848
848
|
*/
|
|
849
849
|
timeoutMultiplier?: number;
|
|
850
|
+
/**
|
|
851
|
+
* Whether the agent supports a --system-prompt flag.
|
|
852
|
+
* Claude Code: true. Codex/Gemini: false.
|
|
853
|
+
* When false, system-level instructions must be embedded in the prompt itself.
|
|
854
|
+
*/
|
|
855
|
+
supportsSystemPrompt?: boolean;
|
|
856
|
+
/**
|
|
857
|
+
* Whether the agent exports OTLP telemetry (spans, metrics, logs).
|
|
858
|
+
* Claude Code: true. Codex/Gemini: false.
|
|
859
|
+
* When false, telemetry is heuristic-only (character-based token estimates).
|
|
860
|
+
*/
|
|
861
|
+
supportsOtlpExport?: boolean;
|
|
862
|
+
/**
|
|
863
|
+
* Whether the dispatcher should append a YAML output format reminder to prompts.
|
|
864
|
+
* Claude Code: false (follows methodology pack format instructions reliably).
|
|
865
|
+
* Codex/Gemini: true (need explicit final nudge to emit fenced YAML blocks).
|
|
866
|
+
*/
|
|
867
|
+
requiresYamlSuffix?: boolean;
|
|
868
|
+
/**
|
|
869
|
+
* Default maximum review cycles for this agent backend.
|
|
870
|
+
* Claude Code: 2 (converges quickly). Codex: 3 (needs more iterations).
|
|
871
|
+
* Overridden by explicit --max-review-cycles CLI flag.
|
|
872
|
+
*/
|
|
873
|
+
defaultMaxReviewCycles?: number;
|
|
850
874
|
}
|
|
851
875
|
/**
|
|
852
876
|
* Result returned from an adapter health check.
|
package/dist/index.js
CHANGED
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
import { childLogger, createLogger, logger } from "./logger-KeHncl-f.js";
|
|
2
2
|
import { assertDefined, createEventBus, createTuiApp, deepClone, formatDuration, generateId, isPlainObject, isTuiCapable, printNonTtyWarning, sleep, withRetry } from "./helpers-CElYrONe.js";
|
|
3
|
-
import { AdapterRegistry, AdtError, ClaudeCodeAdapter, CodexCLIAdapter, ConfigError, ConfigIncompatibleFormatError, GeminiCLIAdapter } from "./dist-
|
|
3
|
+
import { AdapterRegistry, AdtError, ClaudeCodeAdapter, CodexCLIAdapter, ConfigError, ConfigIncompatibleFormatError, GeminiCLIAdapter } from "./dist-RlHA9HPJ.js";
|
|
4
4
|
import "./adapter-registry-DXLMTmfD.js";
|
|
5
|
-
import { BudgetExceededError, GitError, RecoveryError, TaskConfigError, TaskGraphCycleError, TaskGraphError, TaskGraphIncompatibleFormatError, WorkerError, WorkerNotFoundError } from "./errors-
|
|
5
|
+
import { BudgetExceededError, GitError, RecoveryError, TaskConfigError, TaskGraphCycleError, TaskGraphError, TaskGraphIncompatibleFormatError, WorkerError, WorkerNotFoundError } from "./errors-C3Z7RYu9.js";
|
|
6
6
|
|
|
7
7
|
//#region src/core/di.ts
|
|
8
8
|
/**
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { ModelRoutingConfigSchema, ProviderPolicySchema, RoutingConfigError, RoutingRecommender, RoutingResolver, RoutingTelemetry, RoutingTokenAccumulator, RoutingTuner, TASK_TYPE_PHASE_MAP, getModelTier, loadModelRoutingConfig } from "./dist-
|
|
1
|
+
import { ModelRoutingConfigSchema, ProviderPolicySchema, RoutingConfigError, RoutingRecommender, RoutingResolver, RoutingTelemetry, RoutingTokenAccumulator, RoutingTuner, TASK_TYPE_PHASE_MAP, getModelTier, loadModelRoutingConfig } from "./dist-RlHA9HPJ.js";
|
|
2
2
|
import "./routing-CcBOCuC9.js";
|
|
3
3
|
|
|
4
4
|
export { loadModelRoutingConfig };
|
|
@@ -1,8 +1,8 @@
|
|
|
1
|
-
import "./health-
|
|
1
|
+
import "./health-D8V9_Pn7.js";
|
|
2
2
|
import "./logger-KeHncl-f.js";
|
|
3
3
|
import "./helpers-CElYrONe.js";
|
|
4
|
-
import "./dist-
|
|
5
|
-
import { normalizeGraphSummaryToStatus, registerRunCommand, runRunAction } from "./run-
|
|
4
|
+
import "./dist-RlHA9HPJ.js";
|
|
5
|
+
import { normalizeGraphSummaryToStatus, registerRunCommand, runRunAction } from "./run-tWQlCR2A.js";
|
|
6
6
|
import "./routing-CcBOCuC9.js";
|
|
7
7
|
import "./decisions-C0pz9Clx.js";
|
|
8
8
|
|
|
@@ -1,7 +1,7 @@
|
|
|
1
|
-
import { BMAD_BASELINE_TOKENS_FULL, DoltMergeConflict, FileStateStore, STOP_AFTER_VALID_PHASES, STORY_KEY_PATTERN, VALID_PHASES, WorkGraphRepository, __commonJS, __require, __toESM, buildPipelineStatusOutput, createDatabaseAdapter, detectCycles, formatOutput, formatPipelineSummary, formatTokenTelemetry, inspectProcessTree, parseDbTimestampAsUtc, resolveMainRepoRoot, validateStoryKey } from "./health-
|
|
1
|
+
import { BMAD_BASELINE_TOKENS_FULL, DoltMergeConflict, FileStateStore, STOP_AFTER_VALID_PHASES, STORY_KEY_PATTERN, VALID_PHASES, WorkGraphRepository, __commonJS, __require, __toESM, buildPipelineStatusOutput, createDatabaseAdapter, detectCycles, formatOutput, formatPipelineSummary, formatTokenTelemetry, inspectProcessTree, parseDbTimestampAsUtc, resolveMainRepoRoot, validateStoryKey } from "./health-D8V9_Pn7.js";
|
|
2
2
|
import { createLogger } from "./logger-KeHncl-f.js";
|
|
3
3
|
import { TypedEventBusImpl, createEventBus, createTuiApp, isTuiCapable, printNonTtyWarning, sleep } from "./helpers-CElYrONe.js";
|
|
4
|
-
import { ADVISORY_NOTES, Categorizer, ConsumerAnalyzer, DEFAULT_GLOBAL_SETTINGS, DispatcherImpl, DoltClient, ESCALATION_DIAGNOSIS, EXPERIMENT_RESULT, EfficiencyScorer, IngestionServer, LogTurnAnalyzer, OPERATIONAL_FINDING, Recommender, RoutingRecommender, RoutingResolver, RoutingTelemetry, RoutingTokenAccumulator, RoutingTuner, STORY_METRICS, STORY_OUTCOME, SubstrateConfigSchema, TEST_EXPANSION_FINDING, TEST_PLAN, TelemetryNormalizer, TelemetryPipeline, TurnAnalyzer, addTokenUsage, aggregateTokenUsageForRun, aggregateTokenUsageForStory, callLLM, createConfigSystem, createDatabaseAdapter$1, createDecision, createPipelineRun, createRequirement, detectInterfaceChanges, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getStoryMetricsForRun, getTokenUsageSummary, initSchema, listRequirements, loadModelRoutingConfig, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision, writeRunMetrics, writeStoryMetrics } from "./dist-
|
|
4
|
+
import { ADVISORY_NOTES, Categorizer, ConsumerAnalyzer, DEFAULT_GLOBAL_SETTINGS, DispatcherImpl, DoltClient, ESCALATION_DIAGNOSIS, EXPERIMENT_RESULT, EfficiencyScorer, IngestionServer, LogTurnAnalyzer, OPERATIONAL_FINDING, Recommender, RoutingRecommender, RoutingResolver, RoutingTelemetry, RoutingTokenAccumulator, RoutingTuner, STORY_METRICS, STORY_OUTCOME, SubstrateConfigSchema, TEST_EXPANSION_FINDING, TEST_PLAN, TelemetryNormalizer, TelemetryPipeline, TurnAnalyzer, addTokenUsage, aggregateTokenUsageForRun, aggregateTokenUsageForStory, callLLM, createConfigSystem, createDatabaseAdapter$1, createDecision, createPipelineRun, createRequirement, detectInterfaceChanges, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getStoryMetricsForRun, getTokenUsageSummary, initSchema, listRequirements, loadModelRoutingConfig, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision, writeRunMetrics, writeStoryMetrics } from "./dist-RlHA9HPJ.js";
|
|
5
5
|
import { basename, dirname, extname, join } from "path";
|
|
6
6
|
import { access, readFile, readdir, stat } from "fs/promises";
|
|
7
7
|
import { EventEmitter } from "node:events";
|
|
@@ -4876,9 +4876,105 @@ function pickRecommendation(distribution, profile, totalIssues, reviewCycles, la
|
|
|
4876
4876
|
};
|
|
4877
4877
|
}
|
|
4878
4878
|
|
|
4879
|
+
//#endregion
|
|
4880
|
+
//#region src/modules/implementation-orchestrator/project-findings.ts
|
|
4881
|
+
const logger$19 = createLogger("project-findings");
|
|
4882
|
+
/** Maximum character length for the findings summary */
|
|
4883
|
+
const MAX_CHARS = 2e3;
|
|
4884
|
+
/**
|
|
4885
|
+
* Query the decision store for prior project findings and return a formatted
|
|
4886
|
+
* markdown summary suitable for prompt injection.
|
|
4887
|
+
*
|
|
4888
|
+
* Returns an empty string if no findings exist (AC5: graceful fallback).
|
|
4889
|
+
*/
|
|
4890
|
+
async function getProjectFindings(db) {
|
|
4891
|
+
try {
|
|
4892
|
+
const outcomes = await getDecisionsByCategory(db, STORY_OUTCOME);
|
|
4893
|
+
const operational = await getDecisionsByCategory(db, OPERATIONAL_FINDING);
|
|
4894
|
+
const metrics = await getDecisionsByCategory(db, STORY_METRICS);
|
|
4895
|
+
const diagnoses = await getDecisionsByCategory(db, ESCALATION_DIAGNOSIS);
|
|
4896
|
+
const advisoryNotes = await getDecisionsByCategory(db, ADVISORY_NOTES);
|
|
4897
|
+
if (outcomes.length === 0 && operational.length === 0 && metrics.length === 0 && diagnoses.length === 0 && advisoryNotes.length === 0) return "";
|
|
4898
|
+
const sections = [];
|
|
4899
|
+
if (outcomes.length > 0) {
|
|
4900
|
+
const patterns = extractRecurringPatterns(outcomes);
|
|
4901
|
+
if (patterns.length > 0) {
|
|
4902
|
+
sections.push("**Recurring patterns from prior runs:**");
|
|
4903
|
+
for (const p of patterns) sections.push(`- ${p}`);
|
|
4904
|
+
}
|
|
4905
|
+
}
|
|
4906
|
+
if (diagnoses.length > 0) {
|
|
4907
|
+
sections.push("**Prior escalations:**");
|
|
4908
|
+
for (const d of diagnoses.slice(-3)) try {
|
|
4909
|
+
const val = JSON.parse(d.value);
|
|
4910
|
+
const storyId = (d.key ?? "").split(":")[0];
|
|
4911
|
+
sections.push(`- ${storyId}: ${val.recommendedAction} — ${val.rationale}`);
|
|
4912
|
+
if (Array.isArray(val.issues) && val.issues.length > 0) for (const issue of val.issues.slice(0, 5)) {
|
|
4913
|
+
const sev = issue.severity ? `[${issue.severity}]` : "";
|
|
4914
|
+
const file = issue.file ? ` (${issue.file})` : "";
|
|
4915
|
+
const desc = issue.description ?? "no description";
|
|
4916
|
+
sections.push(` - ${sev} ${desc}${file}`);
|
|
4917
|
+
}
|
|
4918
|
+
} catch {
|
|
4919
|
+
sections.push(`- ${d.key ?? "unknown"}: escalated`);
|
|
4920
|
+
}
|
|
4921
|
+
}
|
|
4922
|
+
const highCycleStories = metrics.filter((m) => {
|
|
4923
|
+
try {
|
|
4924
|
+
const val = JSON.parse(m.value);
|
|
4925
|
+
return val.review_cycles >= 2;
|
|
4926
|
+
} catch {
|
|
4927
|
+
return false;
|
|
4928
|
+
}
|
|
4929
|
+
}).slice(-5);
|
|
4930
|
+
if (highCycleStories.length > 0) {
|
|
4931
|
+
sections.push("**Stories with high review cycles:**");
|
|
4932
|
+
for (const m of highCycleStories) try {
|
|
4933
|
+
const val = JSON.parse(m.value);
|
|
4934
|
+
sections.push(`- ${(m.key ?? "").split(":")[0]}: ${val.review_cycles} cycles`);
|
|
4935
|
+
} catch {}
|
|
4936
|
+
}
|
|
4937
|
+
const stalls = operational.filter((o) => o.key?.startsWith("stall:"));
|
|
4938
|
+
if (stalls.length > 0) sections.push(`**Prior stalls:** ${stalls.length} stall event(s) recorded`);
|
|
4939
|
+
if (advisoryNotes.length > 0) {
|
|
4940
|
+
sections.push("**Advisory notes from prior reviews (LGTM_WITH_NOTES):**");
|
|
4941
|
+
for (const n$1 of advisoryNotes.slice(-3)) try {
|
|
4942
|
+
const val = JSON.parse(n$1.value);
|
|
4943
|
+
const storyId = (n$1.key ?? "").split(":")[0];
|
|
4944
|
+
if (typeof val.notes === "string" && val.notes.length > 0) sections.push(`- ${storyId}: ${val.notes}`);
|
|
4945
|
+
} catch {
|
|
4946
|
+
sections.push(`- ${n$1.key}: advisory notes available`);
|
|
4947
|
+
}
|
|
4948
|
+
}
|
|
4949
|
+
if (sections.length === 0) return "";
|
|
4950
|
+
let summary = sections.join("\n");
|
|
4951
|
+
if (summary.length > MAX_CHARS) summary = summary.slice(0, MAX_CHARS - 3) + "...";
|
|
4952
|
+
return summary;
|
|
4953
|
+
} catch (err) {
|
|
4954
|
+
logger$19.warn({ err }, "Failed to query project findings (graceful fallback)");
|
|
4955
|
+
return "";
|
|
4956
|
+
}
|
|
4957
|
+
}
|
|
4958
|
+
/**
|
|
4959
|
+
* Extract recurring patterns from story-outcome decisions.
|
|
4960
|
+
*
|
|
4961
|
+
* Looks for patterns that appear across multiple story outcomes
|
|
4962
|
+
* (e.g., "missing error handling" flagged in 3/5 stories).
|
|
4963
|
+
*/
|
|
4964
|
+
function extractRecurringPatterns(outcomes) {
|
|
4965
|
+
const patternCounts = new Map();
|
|
4966
|
+
for (const o of outcomes) try {
|
|
4967
|
+
const val = JSON.parse(o.value);
|
|
4968
|
+
if (Array.isArray(val.recurringPatterns)) {
|
|
4969
|
+
for (const pattern of val.recurringPatterns) if (typeof pattern === "string") patternCounts.set(pattern, (patternCounts.get(pattern) ?? 0) + 1);
|
|
4970
|
+
}
|
|
4971
|
+
} catch {}
|
|
4972
|
+
return [...patternCounts.entries()].filter(([, count]) => count >= 2).sort((a, b) => b[1] - a[1]).slice(0, 5).map(([pattern, count]) => `${pattern} (${count} occurrences)`);
|
|
4973
|
+
}
|
|
4974
|
+
|
|
4879
4975
|
//#endregion
|
|
4880
4976
|
//#region src/modules/compiled-workflows/prompt-assembler.ts
|
|
4881
|
-
const logger$
|
|
4977
|
+
const logger$18 = createLogger("compiled-workflows:prompt-assembler");
|
|
4882
4978
|
/**
|
|
4883
4979
|
* Assemble a final prompt from a template and sections map.
|
|
4884
4980
|
*
|
|
@@ -4903,7 +4999,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4903
4999
|
tokenCount,
|
|
4904
5000
|
truncated: false
|
|
4905
5001
|
};
|
|
4906
|
-
logger$
|
|
5002
|
+
logger$18.warn({
|
|
4907
5003
|
tokenCount,
|
|
4908
5004
|
ceiling: tokenCeiling
|
|
4909
5005
|
}, "Prompt exceeds token ceiling — truncating optional sections");
|
|
@@ -4919,10 +5015,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4919
5015
|
const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
|
|
4920
5016
|
if (targetSectionTokens === 0) {
|
|
4921
5017
|
contentMap[section.name] = "";
|
|
4922
|
-
logger$
|
|
5018
|
+
logger$18.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
|
|
4923
5019
|
} else {
|
|
4924
5020
|
contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
|
|
4925
|
-
logger$
|
|
5021
|
+
logger$18.warn({
|
|
4926
5022
|
sectionName: section.name,
|
|
4927
5023
|
targetSectionTokens
|
|
4928
5024
|
}, "Section truncated to fit token budget");
|
|
@@ -4933,7 +5029,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4933
5029
|
}
|
|
4934
5030
|
if (tokenCount <= tokenCeiling) break;
|
|
4935
5031
|
}
|
|
4936
|
-
if (tokenCount > tokenCeiling) logger$
|
|
5032
|
+
if (tokenCount > tokenCeiling) logger$18.warn({
|
|
4937
5033
|
tokenCount,
|
|
4938
5034
|
ceiling: tokenCeiling
|
|
4939
5035
|
}, "Required sections alone exceed token ceiling — returning over-budget prompt");
|
|
@@ -5235,7 +5331,7 @@ function getTokenCeiling(workflowType, tokenCeilings) {
|
|
|
5235
5331
|
|
|
5236
5332
|
//#endregion
|
|
5237
5333
|
//#region src/modules/compiled-workflows/create-story.ts
|
|
5238
|
-
const logger$
|
|
5334
|
+
const logger$17 = createLogger("compiled-workflows:create-story");
|
|
5239
5335
|
/**
|
|
5240
5336
|
* Execute the compiled create-story workflow.
|
|
5241
5337
|
*
|
|
@@ -5255,13 +5351,13 @@ const logger$18 = createLogger("compiled-workflows:create-story");
|
|
|
5255
5351
|
*/
|
|
5256
5352
|
async function runCreateStory(deps, params) {
|
|
5257
5353
|
const { epicId, storyKey, pipelineRunId } = params;
|
|
5258
|
-
logger$
|
|
5354
|
+
logger$17.debug({
|
|
5259
5355
|
epicId,
|
|
5260
5356
|
storyKey,
|
|
5261
5357
|
pipelineRunId
|
|
5262
5358
|
}, "Starting create-story workflow");
|
|
5263
5359
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("create-story", deps.tokenCeilings);
|
|
5264
|
-
logger$
|
|
5360
|
+
logger$17.info({
|
|
5265
5361
|
workflow: "create-story",
|
|
5266
5362
|
ceiling: TOKEN_CEILING,
|
|
5267
5363
|
source: tokenCeilingSource
|
|
@@ -5271,7 +5367,7 @@ async function runCreateStory(deps, params) {
|
|
|
5271
5367
|
template = await deps.pack.getPrompt("create-story");
|
|
5272
5368
|
} catch (err) {
|
|
5273
5369
|
const error = err instanceof Error ? err.message : String(err);
|
|
5274
|
-
logger$
|
|
5370
|
+
logger$17.error({ error }, "Failed to retrieve create-story prompt template");
|
|
5275
5371
|
return {
|
|
5276
5372
|
result: "failed",
|
|
5277
5373
|
error: `Failed to retrieve prompt template: ${error}`,
|
|
@@ -5290,7 +5386,7 @@ async function runCreateStory(deps, params) {
|
|
|
5290
5386
|
const storyDef = storyDecisions.find((d) => d.category === "stories" && d.key === storyKey);
|
|
5291
5387
|
if (storyDef) {
|
|
5292
5388
|
storyDefinitionContent = storyDef.value;
|
|
5293
|
-
logger$
|
|
5389
|
+
logger$17.debug({ storyKey }, "Injected story definition from solutioning decisions");
|
|
5294
5390
|
}
|
|
5295
5391
|
} catch {}
|
|
5296
5392
|
const archConstraintsContent = await getArchConstraints$3(deps);
|
|
@@ -5327,7 +5423,7 @@ async function runCreateStory(deps, params) {
|
|
|
5327
5423
|
priority: "important"
|
|
5328
5424
|
}
|
|
5329
5425
|
], TOKEN_CEILING);
|
|
5330
|
-
logger$
|
|
5426
|
+
logger$17.debug({
|
|
5331
5427
|
tokenCount,
|
|
5332
5428
|
truncated,
|
|
5333
5429
|
tokenCeiling: TOKEN_CEILING
|
|
@@ -5347,7 +5443,7 @@ async function runCreateStory(deps, params) {
|
|
|
5347
5443
|
dispatchResult = await handle.result;
|
|
5348
5444
|
} catch (err) {
|
|
5349
5445
|
const error = err instanceof Error ? err.message : String(err);
|
|
5350
|
-
logger$
|
|
5446
|
+
logger$17.error({
|
|
5351
5447
|
epicId,
|
|
5352
5448
|
storyKey,
|
|
5353
5449
|
error
|
|
@@ -5368,7 +5464,7 @@ async function runCreateStory(deps, params) {
|
|
|
5368
5464
|
if (dispatchResult.status === "failed") {
|
|
5369
5465
|
const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
|
|
5370
5466
|
const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
|
|
5371
|
-
logger$
|
|
5467
|
+
logger$17.warn({
|
|
5372
5468
|
epicId,
|
|
5373
5469
|
storyKey,
|
|
5374
5470
|
exitCode: dispatchResult.exitCode,
|
|
@@ -5381,7 +5477,7 @@ async function runCreateStory(deps, params) {
|
|
|
5381
5477
|
};
|
|
5382
5478
|
}
|
|
5383
5479
|
if (dispatchResult.status === "timeout") {
|
|
5384
|
-
logger$
|
|
5480
|
+
logger$17.warn({
|
|
5385
5481
|
epicId,
|
|
5386
5482
|
storyKey
|
|
5387
5483
|
}, "Create-story dispatch timed out");
|
|
@@ -5394,7 +5490,7 @@ async function runCreateStory(deps, params) {
|
|
|
5394
5490
|
if (dispatchResult.parsed === null) {
|
|
5395
5491
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
5396
5492
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
5397
|
-
logger$
|
|
5493
|
+
logger$17.warn({
|
|
5398
5494
|
epicId,
|
|
5399
5495
|
storyKey,
|
|
5400
5496
|
details,
|
|
@@ -5410,7 +5506,7 @@ async function runCreateStory(deps, params) {
|
|
|
5410
5506
|
const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
|
|
5411
5507
|
if (!parseResult.success) {
|
|
5412
5508
|
const details = parseResult.error.message;
|
|
5413
|
-
logger$
|
|
5509
|
+
logger$17.warn({
|
|
5414
5510
|
epicId,
|
|
5415
5511
|
storyKey,
|
|
5416
5512
|
details
|
|
@@ -5423,7 +5519,7 @@ async function runCreateStory(deps, params) {
|
|
|
5423
5519
|
};
|
|
5424
5520
|
}
|
|
5425
5521
|
const parsed = parseResult.data;
|
|
5426
|
-
logger$
|
|
5522
|
+
logger$17.info({
|
|
5427
5523
|
epicId,
|
|
5428
5524
|
storyKey,
|
|
5429
5525
|
storyFile: parsed.story_file,
|
|
@@ -5445,7 +5541,7 @@ async function getImplementationDecisions(deps) {
|
|
|
5445
5541
|
try {
|
|
5446
5542
|
return await getDecisionsByPhase(deps.db, "implementation");
|
|
5447
5543
|
} catch (err) {
|
|
5448
|
-
logger$
|
|
5544
|
+
logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
|
|
5449
5545
|
return [];
|
|
5450
5546
|
}
|
|
5451
5547
|
}
|
|
@@ -5493,7 +5589,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
5493
5589
|
if (storyKey) {
|
|
5494
5590
|
const perStoryShard = decisions.find((d) => d.category === "epic-shard" && d.key === storyKey);
|
|
5495
5591
|
if (perStoryShard?.value) {
|
|
5496
|
-
logger$
|
|
5592
|
+
logger$17.debug({
|
|
5497
5593
|
epicId,
|
|
5498
5594
|
storyKey
|
|
5499
5595
|
}, "Found per-story epic shard (direct lookup)");
|
|
@@ -5506,13 +5602,13 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
5506
5602
|
if (storyKey) {
|
|
5507
5603
|
const storySection = extractStorySection(shardContent, storyKey);
|
|
5508
5604
|
if (storySection) {
|
|
5509
|
-
logger$
|
|
5605
|
+
logger$17.debug({
|
|
5510
5606
|
epicId,
|
|
5511
5607
|
storyKey
|
|
5512
5608
|
}, "Extracted per-story section from epic shard (pre-37-0 fallback)");
|
|
5513
5609
|
return storySection;
|
|
5514
5610
|
}
|
|
5515
|
-
logger$
|
|
5611
|
+
logger$17.debug({
|
|
5516
5612
|
epicId,
|
|
5517
5613
|
storyKey
|
|
5518
5614
|
}, "No matching story section found — using full epic shard");
|
|
@@ -5522,11 +5618,11 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
5522
5618
|
if (projectRoot) {
|
|
5523
5619
|
const fallback = readEpicShardFromFile(projectRoot, epicId);
|
|
5524
5620
|
if (fallback) {
|
|
5525
|
-
logger$
|
|
5621
|
+
logger$17.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
|
|
5526
5622
|
if (storyKey) {
|
|
5527
5623
|
const storySection = extractStorySection(fallback, storyKey);
|
|
5528
5624
|
if (storySection) {
|
|
5529
|
-
logger$
|
|
5625
|
+
logger$17.debug({
|
|
5530
5626
|
epicId,
|
|
5531
5627
|
storyKey
|
|
5532
5628
|
}, "Extracted per-story section from file-based epic shard");
|
|
@@ -5538,7 +5634,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
5538
5634
|
}
|
|
5539
5635
|
return "";
|
|
5540
5636
|
} catch (err) {
|
|
5541
|
-
logger$
|
|
5637
|
+
logger$17.warn({
|
|
5542
5638
|
epicId,
|
|
5543
5639
|
error: err instanceof Error ? err.message : String(err)
|
|
5544
5640
|
}, "Failed to retrieve epic shard");
|
|
@@ -5555,7 +5651,7 @@ function getPrevDevNotes(decisions, epicId) {
|
|
|
5555
5651
|
if (devNotes.length === 0) return "";
|
|
5556
5652
|
return devNotes[devNotes.length - 1].value;
|
|
5557
5653
|
} catch (err) {
|
|
5558
|
-
logger$
|
|
5654
|
+
logger$17.warn({
|
|
5559
5655
|
epicId,
|
|
5560
5656
|
error: err instanceof Error ? err.message : String(err)
|
|
5561
5657
|
}, "Failed to retrieve prev dev notes");
|
|
@@ -5588,7 +5684,7 @@ async function getArchConstraints$3(deps) {
|
|
|
5588
5684
|
const truncatedBody = body.length > 300 ? body.slice(0, 297) + "..." : body;
|
|
5589
5685
|
return `${header}\n${truncatedBody}`;
|
|
5590
5686
|
}).join("\n\n");
|
|
5591
|
-
logger$
|
|
5687
|
+
logger$17.info({
|
|
5592
5688
|
fullLength: full.length,
|
|
5593
5689
|
summarizedLength: summarized.length,
|
|
5594
5690
|
decisions: constraints.length
|
|
@@ -5598,13 +5694,13 @@ async function getArchConstraints$3(deps) {
|
|
|
5598
5694
|
if (deps.projectRoot) {
|
|
5599
5695
|
const fallback = readArchConstraintsFromFile(deps.projectRoot);
|
|
5600
5696
|
if (fallback) {
|
|
5601
|
-
logger$
|
|
5697
|
+
logger$17.info("Using file-based fallback for architecture constraints (decisions table empty)");
|
|
5602
5698
|
return fallback.length > ARCH_CONSTRAINT_MAX_CHARS ? fallback.slice(0, ARCH_CONSTRAINT_MAX_CHARS) + "\n\n[truncated for token budget]" : fallback;
|
|
5603
5699
|
}
|
|
5604
5700
|
}
|
|
5605
5701
|
return "";
|
|
5606
5702
|
} catch (err) {
|
|
5607
|
-
logger$
|
|
5703
|
+
logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
5608
5704
|
return "";
|
|
5609
5705
|
}
|
|
5610
5706
|
}
|
|
@@ -5632,7 +5728,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
|
|
|
5632
5728
|
const endIdx = endMatch ? endMatch.index : content.length;
|
|
5633
5729
|
return content.slice(startIdx, endIdx).trim();
|
|
5634
5730
|
} catch (err) {
|
|
5635
|
-
logger$
|
|
5731
|
+
logger$17.warn({
|
|
5636
5732
|
epicId,
|
|
5637
5733
|
error: err instanceof Error ? err.message : String(err)
|
|
5638
5734
|
}, "File-based epic shard fallback failed");
|
|
@@ -5655,7 +5751,7 @@ function readArchConstraintsFromFile(projectRoot) {
|
|
|
5655
5751
|
const content = readFileSync(archPath, "utf-8");
|
|
5656
5752
|
return content.slice(0, 1500);
|
|
5657
5753
|
} catch (err) {
|
|
5658
|
-
logger$
|
|
5754
|
+
logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
|
|
5659
5755
|
return "";
|
|
5660
5756
|
}
|
|
5661
5757
|
}
|
|
@@ -5668,7 +5764,7 @@ async function getStoryTemplate(deps) {
|
|
|
5668
5764
|
try {
|
|
5669
5765
|
return await deps.pack.getTemplate("story");
|
|
5670
5766
|
} catch (err) {
|
|
5671
|
-
logger$
|
|
5767
|
+
logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
|
|
5672
5768
|
return "";
|
|
5673
5769
|
}
|
|
5674
5770
|
}
|
|
@@ -5705,7 +5801,7 @@ async function isValidStoryFile(filePath) {
|
|
|
5705
5801
|
|
|
5706
5802
|
//#endregion
|
|
5707
5803
|
//#region src/modules/compiled-workflows/git-helpers.ts
|
|
5708
|
-
const logger$
|
|
5804
|
+
const logger$16 = createLogger("compiled-workflows:git-helpers");
|
|
5709
5805
|
/**
|
|
5710
5806
|
* Check whether the repo at `cwd` has at least one commit (HEAD resolves).
|
|
5711
5807
|
* Returns false for fresh repos with no commits, avoiding `fatal: bad revision 'HEAD'`.
|
|
@@ -5742,7 +5838,7 @@ function hasCommits(cwd) {
|
|
|
5742
5838
|
*/
|
|
5743
5839
|
async function getGitDiffSummary(workingDirectory = process.cwd()) {
|
|
5744
5840
|
if (!hasCommits(workingDirectory)) {
|
|
5745
|
-
logger$
|
|
5841
|
+
logger$16.debug({ cwd: workingDirectory }, "No commits in repo — returning empty diff");
|
|
5746
5842
|
return "";
|
|
5747
5843
|
}
|
|
5748
5844
|
return runGitCommand(["diff", "HEAD"], workingDirectory, "git-diff-summary");
|
|
@@ -5759,7 +5855,7 @@ async function getGitDiffSummary(workingDirectory = process.cwd()) {
|
|
|
5759
5855
|
*/
|
|
5760
5856
|
async function getGitDiffStatSummary(workingDirectory = process.cwd()) {
|
|
5761
5857
|
if (!hasCommits(workingDirectory)) {
|
|
5762
|
-
logger$
|
|
5858
|
+
logger$16.debug({ cwd: workingDirectory }, "No commits in repo — returning empty stat");
|
|
5763
5859
|
return "";
|
|
5764
5860
|
}
|
|
5765
5861
|
return runGitCommand([
|
|
@@ -5785,7 +5881,7 @@ async function getGitDiffStatSummary(workingDirectory = process.cwd()) {
|
|
|
5785
5881
|
async function getGitDiffForFiles(files, workingDirectory = process.cwd()) {
|
|
5786
5882
|
if (files.length === 0) return "";
|
|
5787
5883
|
if (!hasCommits(workingDirectory)) {
|
|
5788
|
-
logger$
|
|
5884
|
+
logger$16.debug({ cwd: workingDirectory }, "No commits in repo — returning empty diff for files");
|
|
5789
5885
|
return "";
|
|
5790
5886
|
}
|
|
5791
5887
|
await stageIntentToAdd(files, workingDirectory);
|
|
@@ -5812,7 +5908,7 @@ async function getGitDiffForFiles(files, workingDirectory = process.cwd()) {
|
|
|
5812
5908
|
async function getGitDiffStatForFiles(files, workingDirectory = process.cwd()) {
|
|
5813
5909
|
if (files.length === 0) return "";
|
|
5814
5910
|
if (!hasCommits(workingDirectory)) {
|
|
5815
|
-
logger$
|
|
5911
|
+
logger$16.debug({ cwd: workingDirectory }, "No commits in repo — returning empty stat for files");
|
|
5816
5912
|
return "";
|
|
5817
5913
|
}
|
|
5818
5914
|
return runGitCommand([
|
|
@@ -5861,7 +5957,7 @@ async function stageIntentToAdd(files, workingDirectory) {
|
|
|
5861
5957
|
if (files.length === 0) return;
|
|
5862
5958
|
const existing = files.filter((f$1) => {
|
|
5863
5959
|
const exists = existsSync(f$1);
|
|
5864
|
-
if (!exists) logger$
|
|
5960
|
+
if (!exists) logger$16.debug({ file: f$1 }, "Skipping nonexistent file in stageIntentToAdd");
|
|
5865
5961
|
return exists;
|
|
5866
5962
|
});
|
|
5867
5963
|
if (existing.length === 0) return;
|
|
@@ -5895,7 +5991,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5895
5991
|
stderr += chunk.toString("utf-8");
|
|
5896
5992
|
});
|
|
5897
5993
|
proc$1.on("error", (err) => {
|
|
5898
|
-
logger$
|
|
5994
|
+
logger$16.warn({
|
|
5899
5995
|
label: logLabel,
|
|
5900
5996
|
cwd,
|
|
5901
5997
|
error: err.message
|
|
@@ -5904,7 +6000,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5904
6000
|
});
|
|
5905
6001
|
proc$1.on("close", (code) => {
|
|
5906
6002
|
if (code !== 0) {
|
|
5907
|
-
logger$
|
|
6003
|
+
logger$16.warn({
|
|
5908
6004
|
label: logLabel,
|
|
5909
6005
|
cwd,
|
|
5910
6006
|
code,
|
|
@@ -5918,102 +6014,6 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5918
6014
|
});
|
|
5919
6015
|
}
|
|
5920
6016
|
|
|
5921
|
-
//#endregion
|
|
5922
|
-
//#region src/modules/implementation-orchestrator/project-findings.ts
|
|
5923
|
-
const logger$16 = createLogger("project-findings");
|
|
5924
|
-
/** Maximum character length for the findings summary */
|
|
5925
|
-
const MAX_CHARS = 2e3;
|
|
5926
|
-
/**
|
|
5927
|
-
* Query the decision store for prior project findings and return a formatted
|
|
5928
|
-
* markdown summary suitable for prompt injection.
|
|
5929
|
-
*
|
|
5930
|
-
* Returns an empty string if no findings exist (AC5: graceful fallback).
|
|
5931
|
-
*/
|
|
5932
|
-
async function getProjectFindings(db) {
|
|
5933
|
-
try {
|
|
5934
|
-
const outcomes = await getDecisionsByCategory(db, STORY_OUTCOME);
|
|
5935
|
-
const operational = await getDecisionsByCategory(db, OPERATIONAL_FINDING);
|
|
5936
|
-
const metrics = await getDecisionsByCategory(db, STORY_METRICS);
|
|
5937
|
-
const diagnoses = await getDecisionsByCategory(db, ESCALATION_DIAGNOSIS);
|
|
5938
|
-
const advisoryNotes = await getDecisionsByCategory(db, ADVISORY_NOTES);
|
|
5939
|
-
if (outcomes.length === 0 && operational.length === 0 && metrics.length === 0 && diagnoses.length === 0 && advisoryNotes.length === 0) return "";
|
|
5940
|
-
const sections = [];
|
|
5941
|
-
if (outcomes.length > 0) {
|
|
5942
|
-
const patterns = extractRecurringPatterns(outcomes);
|
|
5943
|
-
if (patterns.length > 0) {
|
|
5944
|
-
sections.push("**Recurring patterns from prior runs:**");
|
|
5945
|
-
for (const p of patterns) sections.push(`- ${p}`);
|
|
5946
|
-
}
|
|
5947
|
-
}
|
|
5948
|
-
if (diagnoses.length > 0) {
|
|
5949
|
-
sections.push("**Prior escalations:**");
|
|
5950
|
-
for (const d of diagnoses.slice(-3)) try {
|
|
5951
|
-
const val = JSON.parse(d.value);
|
|
5952
|
-
const storyId = (d.key ?? "").split(":")[0];
|
|
5953
|
-
sections.push(`- ${storyId}: ${val.recommendedAction} — ${val.rationale}`);
|
|
5954
|
-
if (Array.isArray(val.issues) && val.issues.length > 0) for (const issue of val.issues.slice(0, 5)) {
|
|
5955
|
-
const sev = issue.severity ? `[${issue.severity}]` : "";
|
|
5956
|
-
const file = issue.file ? ` (${issue.file})` : "";
|
|
5957
|
-
const desc = issue.description ?? "no description";
|
|
5958
|
-
sections.push(` - ${sev} ${desc}${file}`);
|
|
5959
|
-
}
|
|
5960
|
-
} catch {
|
|
5961
|
-
sections.push(`- ${d.key ?? "unknown"}: escalated`);
|
|
5962
|
-
}
|
|
5963
|
-
}
|
|
5964
|
-
const highCycleStories = metrics.filter((m) => {
|
|
5965
|
-
try {
|
|
5966
|
-
const val = JSON.parse(m.value);
|
|
5967
|
-
return val.review_cycles >= 2;
|
|
5968
|
-
} catch {
|
|
5969
|
-
return false;
|
|
5970
|
-
}
|
|
5971
|
-
}).slice(-5);
|
|
5972
|
-
if (highCycleStories.length > 0) {
|
|
5973
|
-
sections.push("**Stories with high review cycles:**");
|
|
5974
|
-
for (const m of highCycleStories) try {
|
|
5975
|
-
const val = JSON.parse(m.value);
|
|
5976
|
-
sections.push(`- ${(m.key ?? "").split(":")[0]}: ${val.review_cycles} cycles`);
|
|
5977
|
-
} catch {}
|
|
5978
|
-
}
|
|
5979
|
-
const stalls = operational.filter((o) => o.key?.startsWith("stall:"));
|
|
5980
|
-
if (stalls.length > 0) sections.push(`**Prior stalls:** ${stalls.length} stall event(s) recorded`);
|
|
5981
|
-
if (advisoryNotes.length > 0) {
|
|
5982
|
-
sections.push("**Advisory notes from prior reviews (LGTM_WITH_NOTES):**");
|
|
5983
|
-
for (const n$1 of advisoryNotes.slice(-3)) try {
|
|
5984
|
-
const val = JSON.parse(n$1.value);
|
|
5985
|
-
const storyId = (n$1.key ?? "").split(":")[0];
|
|
5986
|
-
if (typeof val.notes === "string" && val.notes.length > 0) sections.push(`- ${storyId}: ${val.notes}`);
|
|
5987
|
-
} catch {
|
|
5988
|
-
sections.push(`- ${n$1.key}: advisory notes available`);
|
|
5989
|
-
}
|
|
5990
|
-
}
|
|
5991
|
-
if (sections.length === 0) return "";
|
|
5992
|
-
let summary = sections.join("\n");
|
|
5993
|
-
if (summary.length > MAX_CHARS) summary = summary.slice(0, MAX_CHARS - 3) + "...";
|
|
5994
|
-
return summary;
|
|
5995
|
-
} catch (err) {
|
|
5996
|
-
logger$16.warn({ err }, "Failed to query project findings (graceful fallback)");
|
|
5997
|
-
return "";
|
|
5998
|
-
}
|
|
5999
|
-
}
|
|
6000
|
-
/**
|
|
6001
|
-
* Extract recurring patterns from story-outcome decisions.
|
|
6002
|
-
*
|
|
6003
|
-
* Looks for patterns that appear across multiple story outcomes
|
|
6004
|
-
* (e.g., "missing error handling" flagged in 3/5 stories).
|
|
6005
|
-
*/
|
|
6006
|
-
function extractRecurringPatterns(outcomes) {
|
|
6007
|
-
const patternCounts = new Map();
|
|
6008
|
-
for (const o of outcomes) try {
|
|
6009
|
-
const val = JSON.parse(o.value);
|
|
6010
|
-
if (Array.isArray(val.recurringPatterns)) {
|
|
6011
|
-
for (const pattern of val.recurringPatterns) if (typeof pattern === "string") patternCounts.set(pattern, (patternCounts.get(pattern) ?? 0) + 1);
|
|
6012
|
-
}
|
|
6013
|
-
} catch {}
|
|
6014
|
-
return [...patternCounts.entries()].filter(([, count]) => count >= 2).sort((a, b) => b[1] - a[1]).slice(0, 5).map(([pattern, count]) => `${pattern} (${count} occurrences)`);
|
|
6015
|
-
}
|
|
6016
|
-
|
|
6017
6017
|
//#endregion
|
|
6018
6018
|
//#region src/modules/compiled-workflows/story-complexity.ts
|
|
6019
6019
|
const logger$15 = createLogger("compiled-workflows:story-complexity");
|
|
@@ -10684,6 +10684,28 @@ function createImplementationOrchestrator(deps) {
|
|
|
10684
10684
|
phaseBreakdown[phase] = endMs !== void 0 ? endMs - startMs : nowMs - startMs;
|
|
10685
10685
|
}
|
|
10686
10686
|
}
|
|
10687
|
+
let diffStats;
|
|
10688
|
+
try {
|
|
10689
|
+
const statOutput = execSync("git diff --stat HEAD", {
|
|
10690
|
+
cwd: projectRoot ?? process.cwd(),
|
|
10691
|
+
encoding: "utf-8",
|
|
10692
|
+
timeout: 5e3,
|
|
10693
|
+
stdio: [
|
|
10694
|
+
"ignore",
|
|
10695
|
+
"pipe",
|
|
10696
|
+
"pipe"
|
|
10697
|
+
]
|
|
10698
|
+
});
|
|
10699
|
+
const summaryLine = statOutput.trim().split("\n").pop() ?? "";
|
|
10700
|
+
const filesMatch = summaryLine.match(/(\d+)\s+files?\s+changed/);
|
|
10701
|
+
const insMatch = summaryLine.match(/(\d+)\s+insertions?/);
|
|
10702
|
+
const delMatch = summaryLine.match(/(\d+)\s+deletions?/);
|
|
10703
|
+
diffStats = {
|
|
10704
|
+
filesChanged: filesMatch ? parseInt(filesMatch[1], 10) : 0,
|
|
10705
|
+
insertions: insMatch ? parseInt(insMatch[1], 10) : 0,
|
|
10706
|
+
deletions: delMatch ? parseInt(delMatch[1], 10) : 0
|
|
10707
|
+
};
|
|
10708
|
+
} catch {}
|
|
10687
10709
|
eventBus.emit("story:metrics", {
|
|
10688
10710
|
storyKey,
|
|
10689
10711
|
wallClockMs,
|
|
@@ -10693,7 +10715,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
10693
10715
|
output: tokenAgg.output
|
|
10694
10716
|
},
|
|
10695
10717
|
reviewCycles,
|
|
10696
|
-
dispatches: _storyDispatches.get(storyKey) ?? 0
|
|
10718
|
+
dispatches: _storyDispatches.get(storyKey) ?? 0,
|
|
10719
|
+
...diffStats !== void 0 ? { diffStats } : {}
|
|
10697
10720
|
});
|
|
10698
10721
|
} catch (emitErr) {
|
|
10699
10722
|
logger$22.warn({
|
|
@@ -12519,6 +12542,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
12519
12542
|
]
|
|
12520
12543
|
}).trim();
|
|
12521
12544
|
} catch {}
|
|
12545
|
+
let priorFindingsContent = "";
|
|
12546
|
+
try {
|
|
12547
|
+
const findings = await getProjectFindings(db);
|
|
12548
|
+
if (findings !== "") priorFindingsContent = "Prior pipeline findings — avoid repeating these patterns:\n\n" + findings;
|
|
12549
|
+
} catch {}
|
|
12522
12550
|
const sections = isMajorRework ? [
|
|
12523
12551
|
{
|
|
12524
12552
|
name: "story_content",
|
|
@@ -12539,6 +12567,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
12539
12567
|
name: "git_diff",
|
|
12540
12568
|
content: gitDiffContent,
|
|
12541
12569
|
priority: "optional"
|
|
12570
|
+
},
|
|
12571
|
+
{
|
|
12572
|
+
name: "prior_findings",
|
|
12573
|
+
content: priorFindingsContent,
|
|
12574
|
+
priority: "optional"
|
|
12542
12575
|
}
|
|
12543
12576
|
] : (() => {
|
|
12544
12577
|
const targetedFilesContent = buildTargetedFilesContent(issueList);
|
|
@@ -12562,7 +12595,12 @@ function createImplementationOrchestrator(deps) {
|
|
|
12562
12595
|
name: "targeted_files",
|
|
12563
12596
|
content: targetedFilesContent,
|
|
12564
12597
|
priority: "important"
|
|
12565
|
-
}] : []
|
|
12598
|
+
}] : [],
|
|
12599
|
+
{
|
|
12600
|
+
name: "prior_findings",
|
|
12601
|
+
content: priorFindingsContent,
|
|
12602
|
+
priority: "optional"
|
|
12603
|
+
}
|
|
12566
12604
|
];
|
|
12567
12605
|
})();
|
|
12568
12606
|
const assembled = assemblePrompt(fixTemplate, sections, 24e3);
|
|
@@ -40184,6 +40222,16 @@ function buildSdlcHandlerRegistry(deps) {
|
|
|
40184
40222
|
//#endregion
|
|
40185
40223
|
//#region src/cli/commands/run.ts
|
|
40186
40224
|
const logger = createLogger("run-cmd");
|
|
40225
|
+
/**
 * Compute the max review cycles actually in effect for a run.
 *
 * The adapter registered under `agentId` may advertise a recommended
 * `defaultMaxReviewCycles` through its capabilities. That recommendation is
 * treated as a floor, not an override: the larger of the CLI value and the
 * adapter default wins, so an adapter can raise but never lower what the
 * user asked for.
 *
 * @param {number} cliValue - max review cycles supplied on the command line
 * @param {string|null|undefined} agentId - selected adapter id, if any
 * @param {{ get(id: string): any }|null|undefined} registry - adapter registry
 * @returns {number} the effective max review cycles
 */
function resolveMaxReviewCycles(cliValue, agentId, registry) {
	// Without an agent or a registry there is nothing to consult.
	if (agentId == null) return cliValue;
	if (registry == null) return cliValue;
	// Adapter, capabilities, and the default are all optional; any missing
	// link in the chain yields undefined and we keep the CLI value.
	const recommended = registry.get(agentId)?.getCapabilities?.()?.defaultMaxReviewCycles;
	if (recommended == null) return cliValue;
	return Math.max(cliValue, recommended);
}
|
|
40187
40235
|
const VALID_ENGINES = ["linear", "graph"];
|
|
40188
40236
|
/**
|
|
40189
40237
|
* Normalizes a `GraphRunSummary` (from GraphOrchestrator.run()) into the same
|
|
@@ -40408,6 +40456,7 @@ async function runRunAction(options) {
|
|
|
40408
40456
|
else process.stderr.write(`Error: ${errorMsg}\n`);
|
|
40409
40457
|
return 1;
|
|
40410
40458
|
}
|
|
40459
|
+
const effectiveMaxReviewCycles = resolveMaxReviewCycles(maxReviewCycles, agentId, injectedRegistry);
|
|
40411
40460
|
if (startPhase !== void 0 && !VALID_PHASES.includes(startPhase)) {
|
|
40412
40461
|
const errorMsg = `Invalid phase '${startPhase}'. Valid phases: ${VALID_PHASES.join(", ")}`;
|
|
40413
40462
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
|
|
@@ -40564,7 +40613,7 @@ async function runRunAction(options) {
|
|
|
40564
40613
|
telemetryEnabled: true,
|
|
40565
40614
|
telemetryPort
|
|
40566
40615
|
} : {},
|
|
40567
|
-
maxReviewCycles,
|
|
40616
|
+
maxReviewCycles: effectiveMaxReviewCycles,
|
|
40568
40617
|
agentId
|
|
40569
40618
|
});
|
|
40570
40619
|
let storyKeys = [...parsedStoryKeys];
|
|
@@ -41051,7 +41100,7 @@ async function runRunAction(options) {
|
|
|
41051
41100
|
runId: pipelineRun.id,
|
|
41052
41101
|
eventBus,
|
|
41053
41102
|
pipelineRunId: pipelineRun.id,
|
|
41054
|
-
maxReviewCycles,
|
|
41103
|
+
maxReviewCycles: effectiveMaxReviewCycles,
|
|
41055
41104
|
gcPauseMs: 0
|
|
41056
41105
|
});
|
|
41057
41106
|
if (outputFormat === "human" && progressRenderer === void 0 && ndjsonEmitter === void 0) {
|
|
@@ -41070,7 +41119,7 @@ async function runRunAction(options) {
|
|
|
41070
41119
|
eventBus,
|
|
41071
41120
|
config: {
|
|
41072
41121
|
maxConcurrency: concurrency,
|
|
41073
|
-
maxReviewCycles,
|
|
41122
|
+
maxReviewCycles: effectiveMaxReviewCycles,
|
|
41074
41123
|
pipelineRunId: pipelineRun.id,
|
|
41075
41124
|
enableHeartbeat: eventsFlag === true,
|
|
41076
41125
|
skipPreflight: skipPreflight === true
|
|
@@ -41667,4 +41716,4 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
|
|
|
41667
41716
|
|
|
41668
41717
|
//#endregion
|
|
41669
41718
|
export { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, normalizeGraphSummaryToStatus, registerExportCommand, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
|
|
41670
|
-
//# sourceMappingURL=run-
|
|
41719
|
+
//# sourceMappingURL=run-tWQlCR2A.js.map
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { createVersionManager } from "./dist-
|
|
1
|
+
import { createVersionManager } from "./dist-RlHA9HPJ.js";
|
|
2
2
|
import { execSync, spawn } from "child_process";
|
|
3
3
|
import * as readline from "readline";
|
|
4
4
|
|
|
@@ -123,4 +123,4 @@ function registerUpgradeCommand(program) {
|
|
|
123
123
|
|
|
124
124
|
//#endregion
|
|
125
125
|
export { isGlobalInstall, registerUpgradeCommand, runUpgradeCommand };
|
|
126
|
-
//# sourceMappingURL=upgrade-
|
|
126
|
+
//# sourceMappingURL=upgrade-2LJZtYUC.js.map
|
|
@@ -1,5 +1,5 @@
|
|
|
1
|
-
import "./dist-
|
|
1
|
+
import "./dist-RlHA9HPJ.js";
|
|
2
2
|
import "./version-manager-impl-BmOWu8ml.js";
|
|
3
|
-
import { isGlobalInstall, registerUpgradeCommand, runUpgradeCommand } from "./upgrade-
|
|
3
|
+
import { isGlobalInstall, registerUpgradeCommand, runUpgradeCommand } from "./upgrade-2LJZtYUC.js";
|
|
4
4
|
|
|
5
5
|
export { isGlobalInstall, registerUpgradeCommand, runUpgradeCommand };
|
package/package.json
CHANGED