substrate-ai 0.3.0 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{adapter-registry-PsWhP_1Q.js → adapter-registry-DHl0W-YB.js} +8 -1
- package/dist/cli/index.js +254 -24
- package/dist/{config-migrator-DSi8KhQC.js → config-migrator-CQmBdKeG.js} +9 -3
- package/dist/index.d.ts +16 -0
- package/dist/index.js +1 -1
- package/dist/{run-DP932Mmn.js → run-BJ5z_b2J.js} +2 -2
- package/dist/{run-DO9n3cwy.js → run-Fzhz3-mv.js} +1535 -277
- package/dist/{upgrade-Cvwtnwl4.js → upgrade-DO307rFf.js} +2 -2
- package/dist/{upgrade-CImByfkk.js → upgrade-Ex1ukwsm.js} +3 -3
- package/dist/{version-manager-impl-CizNmmLT.js → version-manager-impl-33JYXsqa.js} +2 -2
- package/dist/version-manager-impl-Dk3S31y6.js +4 -0
- package/package.json +1 -1
- package/dist/version-manager-impl-aL5IemIm.js +0 -4
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import { createLogger, deepMask } from "./logger-D2fS2ccL.js";
|
|
2
|
-
import { CURRENT_CONFIG_FORMAT_VERSION, PartialSubstrateConfigSchema, SUPPORTED_CONFIG_FORMAT_VERSIONS, SubstrateConfigSchema, defaultConfigMigrator } from "./config-migrator-
|
|
2
|
+
import { CURRENT_CONFIG_FORMAT_VERSION, PartialSubstrateConfigSchema, SUPPORTED_CONFIG_FORMAT_VERSIONS, SubstrateConfigSchema, defaultConfigMigrator } from "./config-migrator-CQmBdKeG.js";
|
|
3
3
|
import { ConfigError, ConfigIncompatibleFormatError, createEventBus, createTuiApp, isTuiCapable, printNonTtyWarning, sleep } from "./helpers-RL22dYtn.js";
|
|
4
4
|
import { addTokenUsage, createDecision, createPipelineRun, createRequirement, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getTokenUsageSummary, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision } from "./decisions-Dq4cAA2L.js";
|
|
5
5
|
import { ADVISORY_NOTES, ESCALATION_DIAGNOSIS, OPERATIONAL_FINDING, STORY_METRICS, STORY_OUTCOME, TEST_EXPANSION_FINDING, TEST_PLAN, aggregateTokenUsageForRun, aggregateTokenUsageForStory, getStoryMetricsForRun, writeRunMetrics, writeStoryMetrics } from "./operational-Bovj4fS-.js";
|
|
@@ -19,6 +19,7 @@ import { existsSync as existsSync$1, readFileSync as readFileSync$1, readdirSync
|
|
|
19
19
|
import { homedir } from "os";
|
|
20
20
|
import { freemem, platform } from "node:os";
|
|
21
21
|
import { createHash, randomUUID } from "node:crypto";
|
|
22
|
+
import { createServer } from "node:http";
|
|
22
23
|
|
|
23
24
|
//#region rolldown:runtime
|
|
24
25
|
var __require = /* @__PURE__ */ createRequire(import.meta.url);
|
|
@@ -605,7 +606,7 @@ const migration010RunMetrics = {
|
|
|
605
606
|
|
|
606
607
|
//#endregion
|
|
607
608
|
//#region src/persistence/migrations/index.ts
|
|
608
|
-
const logger$
|
|
609
|
+
const logger$25 = createLogger("persistence:migrations");
|
|
609
610
|
const MIGRATIONS = [
|
|
610
611
|
initialSchemaMigration,
|
|
611
612
|
costTrackerSchemaMigration,
|
|
@@ -623,7 +624,7 @@ const MIGRATIONS = [
|
|
|
623
624
|
* Safe to call multiple times — already-applied migrations are skipped.
|
|
624
625
|
*/
|
|
625
626
|
function runMigrations(db) {
|
|
626
|
-
logger$
|
|
627
|
+
logger$25.info("Starting migration runner");
|
|
627
628
|
db.exec(`
|
|
628
629
|
CREATE TABLE IF NOT EXISTS schema_migrations (
|
|
629
630
|
version INTEGER PRIMARY KEY,
|
|
@@ -634,12 +635,12 @@ function runMigrations(db) {
|
|
|
634
635
|
const appliedVersions = new Set(db.prepare("SELECT version FROM schema_migrations").all().map((row) => row.version));
|
|
635
636
|
const pending = MIGRATIONS.filter((m) => !appliedVersions.has(m.version)).sort((a, b) => a.version - b.version);
|
|
636
637
|
if (pending.length === 0) {
|
|
637
|
-
logger$
|
|
638
|
+
logger$25.info("No pending migrations");
|
|
638
639
|
return;
|
|
639
640
|
}
|
|
640
641
|
const insertMigration = db.prepare("INSERT INTO schema_migrations (version, name) VALUES (?, ?)");
|
|
641
642
|
for (const migration of pending) {
|
|
642
|
-
logger$
|
|
643
|
+
logger$25.info({
|
|
643
644
|
version: migration.version,
|
|
644
645
|
name: migration.name
|
|
645
646
|
}, "Applying migration");
|
|
@@ -653,14 +654,14 @@ function runMigrations(db) {
|
|
|
653
654
|
});
|
|
654
655
|
applyMigration();
|
|
655
656
|
}
|
|
656
|
-
logger$
|
|
657
|
+
logger$25.info({ version: migration.version }, "Migration applied successfully");
|
|
657
658
|
}
|
|
658
|
-
logger$
|
|
659
|
+
logger$25.info({ count: pending.length }, "All pending migrations applied");
|
|
659
660
|
}
|
|
660
661
|
|
|
661
662
|
//#endregion
|
|
662
663
|
//#region src/persistence/database.ts
|
|
663
|
-
const logger$
|
|
664
|
+
const logger$24 = createLogger("persistence:database");
|
|
664
665
|
/**
|
|
665
666
|
* Thin wrapper that opens a SQLite database, applies required PRAGMAs,
|
|
666
667
|
* and exposes the raw BetterSqlite3 instance.
|
|
@@ -677,14 +678,14 @@ var DatabaseWrapper = class {
|
|
|
677
678
|
*/
|
|
678
679
|
open() {
|
|
679
680
|
if (this._db !== null) return;
|
|
680
|
-
logger$
|
|
681
|
+
logger$24.info({ path: this._path }, "Opening SQLite database");
|
|
681
682
|
this._db = new Database(this._path);
|
|
682
683
|
const walResult = this._db.pragma("journal_mode = WAL");
|
|
683
|
-
if (walResult?.[0]?.journal_mode !== "wal") logger$
|
|
684
|
+
if (walResult?.[0]?.journal_mode !== "wal") logger$24.warn({ result: walResult?.[0]?.journal_mode }, "WAL pragma did not return expected \"wal\" — journal_mode may be \"memory\" or unsupported");
|
|
684
685
|
this._db.pragma("busy_timeout = 5000");
|
|
685
686
|
this._db.pragma("synchronous = NORMAL");
|
|
686
687
|
this._db.pragma("foreign_keys = ON");
|
|
687
|
-
logger$
|
|
688
|
+
logger$24.info({ path: this._path }, "SQLite database opened with WAL mode");
|
|
688
689
|
}
|
|
689
690
|
/**
|
|
690
691
|
* Close the database. Idempotent — calling close() when already closed is a no-op.
|
|
@@ -693,7 +694,7 @@ var DatabaseWrapper = class {
|
|
|
693
694
|
if (this._db === null) return;
|
|
694
695
|
this._db.close();
|
|
695
696
|
this._db = null;
|
|
696
|
-
logger$
|
|
697
|
+
logger$24.info({ path: this._path }, "SQLite database closed");
|
|
697
698
|
}
|
|
698
699
|
/**
|
|
699
700
|
* Return the raw BetterSqlite3 instance.
|
|
@@ -1610,7 +1611,7 @@ function formatUnsupportedVersionError(formatType, version, supported) {
|
|
|
1610
1611
|
|
|
1611
1612
|
//#endregion
|
|
1612
1613
|
//#region src/modules/config/config-system-impl.ts
|
|
1613
|
-
const logger$
|
|
1614
|
+
const logger$23 = createLogger("config");
|
|
1614
1615
|
function deepMerge(base, override) {
|
|
1615
1616
|
const result = { ...base };
|
|
1616
1617
|
for (const [key, val] of Object.entries(override)) if (val !== null && val !== void 0 && typeof val === "object" && !Array.isArray(val) && typeof result[key] === "object" && result[key] !== null && !Array.isArray(result[key])) result[key] = deepMerge(result[key], val);
|
|
@@ -1655,7 +1656,7 @@ function readEnvOverrides() {
|
|
|
1655
1656
|
}
|
|
1656
1657
|
const parsed = PartialSubstrateConfigSchema.safeParse(overrides);
|
|
1657
1658
|
if (!parsed.success) {
|
|
1658
|
-
logger$
|
|
1659
|
+
logger$23.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
|
|
1659
1660
|
return {};
|
|
1660
1661
|
}
|
|
1661
1662
|
return parsed.data;
|
|
@@ -1719,7 +1720,7 @@ var ConfigSystemImpl = class {
|
|
|
1719
1720
|
throw new ConfigError(`Configuration validation failed:\n${issues}`, { issues: result.error.issues });
|
|
1720
1721
|
}
|
|
1721
1722
|
this._config = result.data;
|
|
1722
|
-
logger$
|
|
1723
|
+
logger$23.debug("Configuration loaded successfully");
|
|
1723
1724
|
}
|
|
1724
1725
|
getConfig() {
|
|
1725
1726
|
if (this._config === null) throw new ConfigError("Configuration has not been loaded. Call load() before getConfig().", {});
|
|
@@ -1782,7 +1783,7 @@ var ConfigSystemImpl = class {
|
|
|
1782
1783
|
if (version !== void 0 && typeof version === "string" && !isVersionSupported(version, SUPPORTED_CONFIG_FORMAT_VERSIONS)) if (defaultConfigMigrator.canMigrate(version, CURRENT_CONFIG_FORMAT_VERSION)) {
|
|
1783
1784
|
const migrationOutput = defaultConfigMigrator.migrate(rawObj, version, CURRENT_CONFIG_FORMAT_VERSION, filePath);
|
|
1784
1785
|
if (migrationOutput.result.success) {
|
|
1785
|
-
logger$
|
|
1786
|
+
logger$23.info({
|
|
1786
1787
|
from: version,
|
|
1787
1788
|
to: CURRENT_CONFIG_FORMAT_VERSION,
|
|
1788
1789
|
backup: migrationOutput.result.backupPath
|
|
@@ -3191,7 +3192,7 @@ function truncateToTokens(text, maxTokens) {
|
|
|
3191
3192
|
|
|
3192
3193
|
//#endregion
|
|
3193
3194
|
//#region src/modules/context-compiler/context-compiler-impl.ts
|
|
3194
|
-
const logger$
|
|
3195
|
+
const logger$22 = createLogger("context-compiler");
|
|
3195
3196
|
/**
|
|
3196
3197
|
* Fraction of the original token budget that must remain (after required +
|
|
3197
3198
|
* important sections) before an optional section is included.
|
|
@@ -3283,7 +3284,7 @@ var ContextCompilerImpl = class {
|
|
|
3283
3284
|
includedParts.push(truncated);
|
|
3284
3285
|
remainingBudget -= truncatedTokens;
|
|
3285
3286
|
anyTruncated = true;
|
|
3286
|
-
logger$
|
|
3287
|
+
logger$22.warn({
|
|
3287
3288
|
section: section.name,
|
|
3288
3289
|
originalTokens: tokens,
|
|
3289
3290
|
budgetTokens: truncatedTokens
|
|
@@ -3297,7 +3298,7 @@ var ContextCompilerImpl = class {
|
|
|
3297
3298
|
});
|
|
3298
3299
|
} else {
|
|
3299
3300
|
anyTruncated = true;
|
|
3300
|
-
logger$
|
|
3301
|
+
logger$22.warn({
|
|
3301
3302
|
section: section.name,
|
|
3302
3303
|
tokens
|
|
3303
3304
|
}, "Context compiler: omitted \"important\" section — no budget remaining");
|
|
@@ -3324,7 +3325,7 @@ var ContextCompilerImpl = class {
|
|
|
3324
3325
|
} else {
|
|
3325
3326
|
if (tokens > 0) {
|
|
3326
3327
|
anyTruncated = true;
|
|
3327
|
-
logger$
|
|
3328
|
+
logger$22.warn({
|
|
3328
3329
|
section: section.name,
|
|
3329
3330
|
tokens,
|
|
3330
3331
|
budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
|
|
@@ -3609,7 +3610,7 @@ function parseYamlResult(yamlText, schema) {
|
|
|
3609
3610
|
|
|
3610
3611
|
//#endregion
|
|
3611
3612
|
//#region src/modules/agent-dispatch/dispatcher-impl.ts
|
|
3612
|
-
const logger$
|
|
3613
|
+
const logger$21 = createLogger("agent-dispatch");
|
|
3613
3614
|
const SHUTDOWN_GRACE_MS = 1e4;
|
|
3614
3615
|
const SHUTDOWN_MAX_WAIT_MS = 3e4;
|
|
3615
3616
|
const CHARS_PER_TOKEN = 4;
|
|
@@ -3654,7 +3655,7 @@ function getAvailableMemory() {
|
|
|
3654
3655
|
}).trim(), 10);
|
|
3655
3656
|
_lastKnownPressureLevel = pressureLevel;
|
|
3656
3657
|
if (pressureLevel >= 4) {
|
|
3657
|
-
logger$
|
|
3658
|
+
logger$21.warn({ pressureLevel }, "macOS kernel reports critical memory pressure");
|
|
3658
3659
|
return 0;
|
|
3659
3660
|
}
|
|
3660
3661
|
} catch {}
|
|
@@ -3669,7 +3670,7 @@ function getAvailableMemory() {
|
|
|
3669
3670
|
const speculative = parseInt(vmstat.match(/Pages speculative:\s+(\d+)/)?.[1] ?? "0", 10);
|
|
3670
3671
|
const available = (free + purgeable + speculative) * pageSize;
|
|
3671
3672
|
if (pressureLevel >= 2) {
|
|
3672
|
-
logger$
|
|
3673
|
+
logger$21.warn({
|
|
3673
3674
|
pressureLevel,
|
|
3674
3675
|
availableBeforeDiscount: available
|
|
3675
3676
|
}, "macOS kernel reports memory pressure — discounting estimate");
|
|
@@ -3749,7 +3750,7 @@ var DispatcherImpl = class {
|
|
|
3749
3750
|
resolve: typedResolve,
|
|
3750
3751
|
reject
|
|
3751
3752
|
});
|
|
3752
|
-
logger$
|
|
3753
|
+
logger$21.debug({
|
|
3753
3754
|
id,
|
|
3754
3755
|
queueLength: this._queue.length
|
|
3755
3756
|
}, "Dispatch queued");
|
|
@@ -3780,7 +3781,7 @@ var DispatcherImpl = class {
|
|
|
3780
3781
|
async shutdown() {
|
|
3781
3782
|
this._shuttingDown = true;
|
|
3782
3783
|
this._stopMemoryPressureTimer();
|
|
3783
|
-
logger$
|
|
3784
|
+
logger$21.info({
|
|
3784
3785
|
running: this._running.size,
|
|
3785
3786
|
queued: this._queue.length
|
|
3786
3787
|
}, "Dispatcher shutting down");
|
|
@@ -3813,13 +3814,13 @@ var DispatcherImpl = class {
|
|
|
3813
3814
|
}
|
|
3814
3815
|
}, 50);
|
|
3815
3816
|
});
|
|
3816
|
-
logger$
|
|
3817
|
+
logger$21.info("Dispatcher shutdown complete");
|
|
3817
3818
|
}
|
|
3818
3819
|
async _startDispatch(id, request, resolve$2) {
|
|
3819
|
-
const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns } = request;
|
|
3820
|
+
const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns, otlpEndpoint } = request;
|
|
3820
3821
|
const adapter = this._adapterRegistry.get(agent);
|
|
3821
3822
|
if (adapter === void 0) {
|
|
3822
|
-
logger$
|
|
3823
|
+
logger$21.warn({
|
|
3823
3824
|
id,
|
|
3824
3825
|
agent
|
|
3825
3826
|
}, "No adapter found for agent");
|
|
@@ -3846,7 +3847,8 @@ var DispatcherImpl = class {
|
|
|
3846
3847
|
worktreePath,
|
|
3847
3848
|
billingMode: "subscription",
|
|
3848
3849
|
...model !== void 0 ? { model } : {},
|
|
3849
|
-
...resolvedMaxTurns !== void 0 ? { maxTurns: resolvedMaxTurns } : {}
|
|
3850
|
+
...resolvedMaxTurns !== void 0 ? { maxTurns: resolvedMaxTurns } : {},
|
|
3851
|
+
...otlpEndpoint !== void 0 ? { otlpEndpoint } : {}
|
|
3850
3852
|
});
|
|
3851
3853
|
const timeoutMs = timeout ?? this._config.defaultTimeouts[taskType] ?? DEFAULT_TIMEOUTS[taskType] ?? 3e5;
|
|
3852
3854
|
const env = { ...process.env };
|
|
@@ -3865,7 +3867,7 @@ var DispatcherImpl = class {
|
|
|
3865
3867
|
});
|
|
3866
3868
|
const startedAt = Date.now();
|
|
3867
3869
|
proc.on("error", (err) => {
|
|
3868
|
-
logger$
|
|
3870
|
+
logger$21.error({
|
|
3869
3871
|
id,
|
|
3870
3872
|
binary: cmd.binary,
|
|
3871
3873
|
error: err.message
|
|
@@ -3873,7 +3875,7 @@ var DispatcherImpl = class {
|
|
|
3873
3875
|
});
|
|
3874
3876
|
if (proc.stdin !== null) {
|
|
3875
3877
|
proc.stdin.on("error", (err) => {
|
|
3876
|
-
if (err.code !== "EPIPE") logger$
|
|
3878
|
+
if (err.code !== "EPIPE") logger$21.warn({
|
|
3877
3879
|
id,
|
|
3878
3880
|
error: err.message
|
|
3879
3881
|
}, "stdin write error");
|
|
@@ -3915,7 +3917,7 @@ var DispatcherImpl = class {
|
|
|
3915
3917
|
agent,
|
|
3916
3918
|
taskType
|
|
3917
3919
|
});
|
|
3918
|
-
logger$
|
|
3920
|
+
logger$21.debug({
|
|
3919
3921
|
id,
|
|
3920
3922
|
agent,
|
|
3921
3923
|
taskType,
|
|
@@ -3932,7 +3934,7 @@ var DispatcherImpl = class {
|
|
|
3932
3934
|
dispatchId: id,
|
|
3933
3935
|
timeoutMs
|
|
3934
3936
|
});
|
|
3935
|
-
logger$
|
|
3937
|
+
logger$21.warn({
|
|
3936
3938
|
id,
|
|
3937
3939
|
agent,
|
|
3938
3940
|
taskType,
|
|
@@ -3986,7 +3988,7 @@ var DispatcherImpl = class {
|
|
|
3986
3988
|
exitCode: code,
|
|
3987
3989
|
output: stdout
|
|
3988
3990
|
});
|
|
3989
|
-
logger$
|
|
3991
|
+
logger$21.debug({
|
|
3990
3992
|
id,
|
|
3991
3993
|
agent,
|
|
3992
3994
|
taskType,
|
|
@@ -4012,7 +4014,7 @@ var DispatcherImpl = class {
|
|
|
4012
4014
|
error: stderr || `Process exited with code ${String(code)}`,
|
|
4013
4015
|
exitCode: code
|
|
4014
4016
|
});
|
|
4015
|
-
logger$
|
|
4017
|
+
logger$21.debug({
|
|
4016
4018
|
id,
|
|
4017
4019
|
agent,
|
|
4018
4020
|
taskType,
|
|
@@ -4071,7 +4073,7 @@ var DispatcherImpl = class {
|
|
|
4071
4073
|
const next = this._queue.shift();
|
|
4072
4074
|
if (next === void 0) return;
|
|
4073
4075
|
next.handle.status = "running";
|
|
4074
|
-
logger$
|
|
4076
|
+
logger$21.debug({
|
|
4075
4077
|
id: next.id,
|
|
4076
4078
|
queueLength: this._queue.length
|
|
4077
4079
|
}, "Dequeued dispatch");
|
|
@@ -4084,7 +4086,7 @@ var DispatcherImpl = class {
|
|
|
4084
4086
|
_isMemoryPressured() {
|
|
4085
4087
|
const free = getAvailableMemory();
|
|
4086
4088
|
if (free < MIN_FREE_MEMORY_BYTES) {
|
|
4087
|
-
logger$
|
|
4089
|
+
logger$21.warn({
|
|
4088
4090
|
freeMB: Math.round(free / 1024 / 1024),
|
|
4089
4091
|
thresholdMB: Math.round(MIN_FREE_MEMORY_BYTES / 1024 / 1024),
|
|
4090
4092
|
pressureLevel: _lastKnownPressureLevel
|
|
@@ -4200,7 +4202,7 @@ function runBuildVerification(options) {
|
|
|
4200
4202
|
let cmd;
|
|
4201
4203
|
if (verifyCommand === void 0) {
|
|
4202
4204
|
const detection = detectPackageManager(projectRoot);
|
|
4203
|
-
logger$
|
|
4205
|
+
logger$21.info({
|
|
4204
4206
|
packageManager: detection.packageManager,
|
|
4205
4207
|
lockfile: detection.lockfile,
|
|
4206
4208
|
resolvedCommand: detection.command
|
|
@@ -4399,7 +4401,7 @@ function pickRecommendation(distribution, profile, totalIssues, reviewCycles, la
|
|
|
4399
4401
|
|
|
4400
4402
|
//#endregion
|
|
4401
4403
|
//#region src/modules/compiled-workflows/prompt-assembler.ts
|
|
4402
|
-
const logger$
|
|
4404
|
+
const logger$20 = createLogger("compiled-workflows:prompt-assembler");
|
|
4403
4405
|
/**
|
|
4404
4406
|
* Assemble a final prompt from a template and sections map.
|
|
4405
4407
|
*
|
|
@@ -4424,7 +4426,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4424
4426
|
tokenCount,
|
|
4425
4427
|
truncated: false
|
|
4426
4428
|
};
|
|
4427
|
-
logger$
|
|
4429
|
+
logger$20.warn({
|
|
4428
4430
|
tokenCount,
|
|
4429
4431
|
ceiling: tokenCeiling
|
|
4430
4432
|
}, "Prompt exceeds token ceiling — truncating optional sections");
|
|
@@ -4440,10 +4442,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4440
4442
|
const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
|
|
4441
4443
|
if (targetSectionTokens === 0) {
|
|
4442
4444
|
contentMap[section.name] = "";
|
|
4443
|
-
logger$
|
|
4445
|
+
logger$20.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
|
|
4444
4446
|
} else {
|
|
4445
4447
|
contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
|
|
4446
|
-
logger$
|
|
4448
|
+
logger$20.warn({
|
|
4447
4449
|
sectionName: section.name,
|
|
4448
4450
|
targetSectionTokens
|
|
4449
4451
|
}, "Section truncated to fit token budget");
|
|
@@ -4454,7 +4456,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4454
4456
|
}
|
|
4455
4457
|
if (tokenCount <= tokenCeiling) break;
|
|
4456
4458
|
}
|
|
4457
|
-
if (tokenCount > tokenCeiling) logger$
|
|
4459
|
+
if (tokenCount > tokenCeiling) logger$20.warn({
|
|
4458
4460
|
tokenCount,
|
|
4459
4461
|
ceiling: tokenCeiling
|
|
4460
4462
|
}, "Required sections alone exceed token ceiling — returning over-budget prompt");
|
|
@@ -4752,7 +4754,7 @@ function getTokenCeiling(workflowType, tokenCeilings) {
|
|
|
4752
4754
|
|
|
4753
4755
|
//#endregion
|
|
4754
4756
|
//#region src/modules/compiled-workflows/create-story.ts
|
|
4755
|
-
const logger$
|
|
4757
|
+
const logger$19 = createLogger("compiled-workflows:create-story");
|
|
4756
4758
|
/**
|
|
4757
4759
|
* Execute the compiled create-story workflow.
|
|
4758
4760
|
*
|
|
@@ -4772,13 +4774,13 @@ const logger$17 = createLogger("compiled-workflows:create-story");
|
|
|
4772
4774
|
*/
|
|
4773
4775
|
async function runCreateStory(deps, params) {
|
|
4774
4776
|
const { epicId, storyKey, pipelineRunId } = params;
|
|
4775
|
-
logger$
|
|
4777
|
+
logger$19.debug({
|
|
4776
4778
|
epicId,
|
|
4777
4779
|
storyKey,
|
|
4778
4780
|
pipelineRunId
|
|
4779
4781
|
}, "Starting create-story workflow");
|
|
4780
4782
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("create-story", deps.tokenCeilings);
|
|
4781
|
-
logger$
|
|
4783
|
+
logger$19.info({
|
|
4782
4784
|
workflow: "create-story",
|
|
4783
4785
|
ceiling: TOKEN_CEILING,
|
|
4784
4786
|
source: tokenCeilingSource
|
|
@@ -4788,7 +4790,7 @@ async function runCreateStory(deps, params) {
|
|
|
4788
4790
|
template = await deps.pack.getPrompt("create-story");
|
|
4789
4791
|
} catch (err) {
|
|
4790
4792
|
const error = err instanceof Error ? err.message : String(err);
|
|
4791
|
-
logger$
|
|
4793
|
+
logger$19.error({ error }, "Failed to retrieve create-story prompt template");
|
|
4792
4794
|
return {
|
|
4793
4795
|
result: "failed",
|
|
4794
4796
|
error: `Failed to retrieve prompt template: ${error}`,
|
|
@@ -4830,7 +4832,7 @@ async function runCreateStory(deps, params) {
|
|
|
4830
4832
|
priority: "important"
|
|
4831
4833
|
}
|
|
4832
4834
|
], TOKEN_CEILING);
|
|
4833
|
-
logger$
|
|
4835
|
+
logger$19.debug({
|
|
4834
4836
|
tokenCount,
|
|
4835
4837
|
truncated,
|
|
4836
4838
|
tokenCeiling: TOKEN_CEILING
|
|
@@ -4840,14 +4842,15 @@ async function runCreateStory(deps, params) {
|
|
|
4840
4842
|
agent: "claude-code",
|
|
4841
4843
|
taskType: "create-story",
|
|
4842
4844
|
outputSchema: CreateStoryResultSchema,
|
|
4843
|
-
...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {}
|
|
4845
|
+
...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {},
|
|
4846
|
+
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
|
|
4844
4847
|
});
|
|
4845
4848
|
let dispatchResult;
|
|
4846
4849
|
try {
|
|
4847
4850
|
dispatchResult = await handle.result;
|
|
4848
4851
|
} catch (err) {
|
|
4849
4852
|
const error = err instanceof Error ? err.message : String(err);
|
|
4850
|
-
logger$
|
|
4853
|
+
logger$19.error({
|
|
4851
4854
|
epicId,
|
|
4852
4855
|
storyKey,
|
|
4853
4856
|
error
|
|
@@ -4868,7 +4871,7 @@ async function runCreateStory(deps, params) {
|
|
|
4868
4871
|
if (dispatchResult.status === "failed") {
|
|
4869
4872
|
const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
|
|
4870
4873
|
const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
|
|
4871
|
-
logger$
|
|
4874
|
+
logger$19.warn({
|
|
4872
4875
|
epicId,
|
|
4873
4876
|
storyKey,
|
|
4874
4877
|
exitCode: dispatchResult.exitCode
|
|
@@ -4880,7 +4883,7 @@ async function runCreateStory(deps, params) {
|
|
|
4880
4883
|
};
|
|
4881
4884
|
}
|
|
4882
4885
|
if (dispatchResult.status === "timeout") {
|
|
4883
|
-
logger$
|
|
4886
|
+
logger$19.warn({
|
|
4884
4887
|
epicId,
|
|
4885
4888
|
storyKey
|
|
4886
4889
|
}, "Create-story dispatch timed out");
|
|
@@ -4893,7 +4896,7 @@ async function runCreateStory(deps, params) {
|
|
|
4893
4896
|
if (dispatchResult.parsed === null) {
|
|
4894
4897
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
4895
4898
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
4896
|
-
logger$
|
|
4899
|
+
logger$19.warn({
|
|
4897
4900
|
epicId,
|
|
4898
4901
|
storyKey,
|
|
4899
4902
|
details,
|
|
@@ -4909,7 +4912,7 @@ async function runCreateStory(deps, params) {
|
|
|
4909
4912
|
const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
|
|
4910
4913
|
if (!parseResult.success) {
|
|
4911
4914
|
const details = parseResult.error.message;
|
|
4912
|
-
logger$
|
|
4915
|
+
logger$19.warn({
|
|
4913
4916
|
epicId,
|
|
4914
4917
|
storyKey,
|
|
4915
4918
|
details
|
|
@@ -4922,7 +4925,7 @@ async function runCreateStory(deps, params) {
|
|
|
4922
4925
|
};
|
|
4923
4926
|
}
|
|
4924
4927
|
const parsed = parseResult.data;
|
|
4925
|
-
logger$
|
|
4928
|
+
logger$19.info({
|
|
4926
4929
|
epicId,
|
|
4927
4930
|
storyKey,
|
|
4928
4931
|
storyFile: parsed.story_file,
|
|
@@ -4944,7 +4947,7 @@ function getImplementationDecisions(deps) {
|
|
|
4944
4947
|
try {
|
|
4945
4948
|
return getDecisionsByPhase(deps.db, "implementation");
|
|
4946
4949
|
} catch (err) {
|
|
4947
|
-
logger$
|
|
4950
|
+
logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
|
|
4948
4951
|
return [];
|
|
4949
4952
|
}
|
|
4950
4953
|
}
|
|
@@ -4987,13 +4990,13 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
4987
4990
|
if (storyKey) {
|
|
4988
4991
|
const storySection = extractStorySection(shardContent, storyKey);
|
|
4989
4992
|
if (storySection) {
|
|
4990
|
-
logger$
|
|
4993
|
+
logger$19.debug({
|
|
4991
4994
|
epicId,
|
|
4992
4995
|
storyKey
|
|
4993
4996
|
}, "Extracted per-story section from epic shard");
|
|
4994
4997
|
return storySection;
|
|
4995
4998
|
}
|
|
4996
|
-
logger$
|
|
4999
|
+
logger$19.debug({
|
|
4997
5000
|
epicId,
|
|
4998
5001
|
storyKey
|
|
4999
5002
|
}, "No matching story section found — using full epic shard");
|
|
@@ -5003,11 +5006,11 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
5003
5006
|
if (projectRoot) {
|
|
5004
5007
|
const fallback = readEpicShardFromFile(projectRoot, epicId);
|
|
5005
5008
|
if (fallback) {
|
|
5006
|
-
logger$
|
|
5009
|
+
logger$19.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
|
|
5007
5010
|
if (storyKey) {
|
|
5008
5011
|
const storySection = extractStorySection(fallback, storyKey);
|
|
5009
5012
|
if (storySection) {
|
|
5010
|
-
logger$
|
|
5013
|
+
logger$19.debug({
|
|
5011
5014
|
epicId,
|
|
5012
5015
|
storyKey
|
|
5013
5016
|
}, "Extracted per-story section from file-based epic shard");
|
|
@@ -5019,7 +5022,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
5019
5022
|
}
|
|
5020
5023
|
return "";
|
|
5021
5024
|
} catch (err) {
|
|
5022
|
-
logger$
|
|
5025
|
+
logger$19.warn({
|
|
5023
5026
|
epicId,
|
|
5024
5027
|
error: err instanceof Error ? err.message : String(err)
|
|
5025
5028
|
}, "Failed to retrieve epic shard");
|
|
@@ -5036,7 +5039,7 @@ function getPrevDevNotes(decisions, epicId) {
|
|
|
5036
5039
|
if (devNotes.length === 0) return "";
|
|
5037
5040
|
return devNotes[devNotes.length - 1].value;
|
|
5038
5041
|
} catch (err) {
|
|
5039
|
-
logger$
|
|
5042
|
+
logger$19.warn({
|
|
5040
5043
|
epicId,
|
|
5041
5044
|
error: err instanceof Error ? err.message : String(err)
|
|
5042
5045
|
}, "Failed to retrieve prev dev notes");
|
|
@@ -5056,13 +5059,13 @@ function getArchConstraints$3(deps) {
|
|
|
5056
5059
|
if (deps.projectRoot) {
|
|
5057
5060
|
const fallback = readArchConstraintsFromFile(deps.projectRoot);
|
|
5058
5061
|
if (fallback) {
|
|
5059
|
-
logger$
|
|
5062
|
+
logger$19.info("Using file-based fallback for architecture constraints (decisions table empty)");
|
|
5060
5063
|
return fallback;
|
|
5061
5064
|
}
|
|
5062
5065
|
}
|
|
5063
5066
|
return "";
|
|
5064
5067
|
} catch (err) {
|
|
5065
|
-
logger$
|
|
5068
|
+
logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
5066
5069
|
return "";
|
|
5067
5070
|
}
|
|
5068
5071
|
}
|
|
@@ -5082,7 +5085,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
|
|
|
5082
5085
|
const match = pattern.exec(content);
|
|
5083
5086
|
return match ? match[0].trim() : "";
|
|
5084
5087
|
} catch (err) {
|
|
5085
|
-
logger$
|
|
5088
|
+
logger$19.warn({
|
|
5086
5089
|
epicId,
|
|
5087
5090
|
error: err instanceof Error ? err.message : String(err)
|
|
5088
5091
|
}, "File-based epic shard fallback failed");
|
|
@@ -5105,7 +5108,7 @@ function readArchConstraintsFromFile(projectRoot) {
|
|
|
5105
5108
|
const content = readFileSync$1(archPath, "utf-8");
|
|
5106
5109
|
return content.slice(0, 1500);
|
|
5107
5110
|
} catch (err) {
|
|
5108
|
-
logger$
|
|
5111
|
+
logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
|
|
5109
5112
|
return "";
|
|
5110
5113
|
}
|
|
5111
5114
|
}
|
|
@@ -5118,7 +5121,7 @@ async function getStoryTemplate(deps) {
|
|
|
5118
5121
|
try {
|
|
5119
5122
|
return await deps.pack.getTemplate("story");
|
|
5120
5123
|
} catch (err) {
|
|
5121
|
-
logger$
|
|
5124
|
+
logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
|
|
5122
5125
|
return "";
|
|
5123
5126
|
}
|
|
5124
5127
|
}
|
|
@@ -5155,7 +5158,7 @@ async function isValidStoryFile(filePath) {
|
|
|
5155
5158
|
|
|
5156
5159
|
//#endregion
|
|
5157
5160
|
//#region src/modules/compiled-workflows/git-helpers.ts
|
|
5158
|
-
const logger$
|
|
5161
|
+
const logger$18 = createLogger("compiled-workflows:git-helpers");
|
|
5159
5162
|
/**
|
|
5160
5163
|
* Capture the full git diff for HEAD (working tree vs current commit).
|
|
5161
5164
|
*
|
|
@@ -5251,7 +5254,7 @@ async function stageIntentToAdd(files, workingDirectory) {
|
|
|
5251
5254
|
if (files.length === 0) return;
|
|
5252
5255
|
const existing = files.filter((f) => {
|
|
5253
5256
|
const exists = existsSync$1(f);
|
|
5254
|
-
if (!exists) logger$
|
|
5257
|
+
if (!exists) logger$18.debug({ file: f }, "Skipping nonexistent file in stageIntentToAdd");
|
|
5255
5258
|
return exists;
|
|
5256
5259
|
});
|
|
5257
5260
|
if (existing.length === 0) return;
|
|
@@ -5285,7 +5288,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5285
5288
|
stderr += chunk.toString("utf-8");
|
|
5286
5289
|
});
|
|
5287
5290
|
proc.on("error", (err) => {
|
|
5288
|
-
logger$
|
|
5291
|
+
logger$18.warn({
|
|
5289
5292
|
label: logLabel,
|
|
5290
5293
|
cwd,
|
|
5291
5294
|
error: err.message
|
|
@@ -5294,7 +5297,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5294
5297
|
});
|
|
5295
5298
|
proc.on("close", (code) => {
|
|
5296
5299
|
if (code !== 0) {
|
|
5297
|
-
logger$
|
|
5300
|
+
logger$18.warn({
|
|
5298
5301
|
label: logLabel,
|
|
5299
5302
|
cwd,
|
|
5300
5303
|
code,
|
|
@@ -5310,7 +5313,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5310
5313
|
|
|
5311
5314
|
//#endregion
|
|
5312
5315
|
//#region src/modules/implementation-orchestrator/project-findings.ts
|
|
5313
|
-
const logger$
|
|
5316
|
+
const logger$17 = createLogger("project-findings");
|
|
5314
5317
|
/** Maximum character length for the findings summary */
|
|
5315
5318
|
const MAX_CHARS = 2e3;
|
|
5316
5319
|
/**
|
|
@@ -5376,7 +5379,7 @@ function getProjectFindings(db) {
|
|
|
5376
5379
|
if (summary.length > MAX_CHARS) summary = summary.slice(0, MAX_CHARS - 3) + "...";
|
|
5377
5380
|
return summary;
|
|
5378
5381
|
} catch (err) {
|
|
5379
|
-
logger$
|
|
5382
|
+
logger$17.warn({ err }, "Failed to query project findings (graceful fallback)");
|
|
5380
5383
|
return "";
|
|
5381
5384
|
}
|
|
5382
5385
|
}
|
|
@@ -5399,7 +5402,7 @@ function extractRecurringPatterns(outcomes) {
|
|
|
5399
5402
|
|
|
5400
5403
|
//#endregion
|
|
5401
5404
|
//#region src/modules/compiled-workflows/story-complexity.ts
|
|
5402
|
-
const logger$
|
|
5405
|
+
const logger$16 = createLogger("compiled-workflows:story-complexity");
|
|
5403
5406
|
/**
|
|
5404
5407
|
* Compute a complexity score from story markdown content.
|
|
5405
5408
|
*
|
|
@@ -5451,7 +5454,7 @@ function resolveFixStoryMaxTurns(complexityScore) {
|
|
|
5451
5454
|
* @param resolvedMaxTurns - Turn limit resolved for this dispatch
|
|
5452
5455
|
*/
|
|
5453
5456
|
function logComplexityResult(storyKey, complexity, resolvedMaxTurns) {
|
|
5454
|
-
logger$
|
|
5457
|
+
logger$16.info({
|
|
5455
5458
|
storyKey,
|
|
5456
5459
|
taskCount: complexity.taskCount,
|
|
5457
5460
|
subtaskCount: complexity.subtaskCount,
|
|
@@ -5509,7 +5512,7 @@ function countFilesInLayout(content) {
|
|
|
5509
5512
|
|
|
5510
5513
|
//#endregion
|
|
5511
5514
|
//#region src/modules/compiled-workflows/dev-story.ts
|
|
5512
|
-
const logger$
|
|
5515
|
+
const logger$15 = createLogger("compiled-workflows:dev-story");
|
|
5513
5516
|
/** Default timeout for dev-story dispatches in milliseconds (30 min) */
|
|
5514
5517
|
const DEFAULT_TIMEOUT_MS$1 = 18e5;
|
|
5515
5518
|
/** Default Vitest test patterns injected when no test-pattern decisions exist */
|
|
@@ -5532,12 +5535,12 @@ const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
|
|
|
5532
5535
|
*/
|
|
5533
5536
|
async function runDevStory(deps, params) {
|
|
5534
5537
|
const { storyKey, storyFilePath, taskScope, priorFiles } = params;
|
|
5535
|
-
logger$
|
|
5538
|
+
logger$15.info({
|
|
5536
5539
|
storyKey,
|
|
5537
5540
|
storyFilePath
|
|
5538
5541
|
}, "Starting compiled dev-story workflow");
|
|
5539
5542
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("dev-story", deps.tokenCeilings);
|
|
5540
|
-
logger$
|
|
5543
|
+
logger$15.info({
|
|
5541
5544
|
workflow: "dev-story",
|
|
5542
5545
|
ceiling: TOKEN_CEILING,
|
|
5543
5546
|
source: tokenCeilingSource
|
|
@@ -5580,10 +5583,10 @@ async function runDevStory(deps, params) {
|
|
|
5580
5583
|
let template;
|
|
5581
5584
|
try {
|
|
5582
5585
|
template = await deps.pack.getPrompt("dev-story");
|
|
5583
|
-
logger$
|
|
5586
|
+
logger$15.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
|
|
5584
5587
|
} catch (err) {
|
|
5585
5588
|
const error = err instanceof Error ? err.message : String(err);
|
|
5586
|
-
logger$
|
|
5589
|
+
logger$15.error({
|
|
5587
5590
|
storyKey,
|
|
5588
5591
|
error
|
|
5589
5592
|
}, "Failed to retrieve dev-story prompt template");
|
|
@@ -5594,14 +5597,14 @@ async function runDevStory(deps, params) {
|
|
|
5594
5597
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
5595
5598
|
} catch (err) {
|
|
5596
5599
|
if (err.code === "ENOENT") {
|
|
5597
|
-
logger$
|
|
5600
|
+
logger$15.error({
|
|
5598
5601
|
storyKey,
|
|
5599
5602
|
storyFilePath
|
|
5600
5603
|
}, "Story file not found");
|
|
5601
5604
|
return makeFailureResult("story_file_not_found");
|
|
5602
5605
|
}
|
|
5603
5606
|
const error = err instanceof Error ? err.message : String(err);
|
|
5604
|
-
logger$
|
|
5607
|
+
logger$15.error({
|
|
5605
5608
|
storyKey,
|
|
5606
5609
|
storyFilePath,
|
|
5607
5610
|
error
|
|
@@ -5609,7 +5612,7 @@ async function runDevStory(deps, params) {
|
|
|
5609
5612
|
return makeFailureResult(`story_file_read_error: ${error}`);
|
|
5610
5613
|
}
|
|
5611
5614
|
if (storyContent.trim().length === 0) {
|
|
5612
|
-
logger$
|
|
5615
|
+
logger$15.error({
|
|
5613
5616
|
storyKey,
|
|
5614
5617
|
storyFilePath
|
|
5615
5618
|
}, "Story file is empty");
|
|
@@ -5624,17 +5627,17 @@ async function runDevStory(deps, params) {
|
|
|
5624
5627
|
const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
|
|
5625
5628
|
if (testPatternDecisions.length > 0) {
|
|
5626
5629
|
testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
|
|
5627
|
-
logger$
|
|
5630
|
+
logger$15.debug({
|
|
5628
5631
|
storyKey,
|
|
5629
5632
|
count: testPatternDecisions.length
|
|
5630
5633
|
}, "Loaded test patterns from decision store");
|
|
5631
5634
|
} else {
|
|
5632
5635
|
testPatternsContent = DEFAULT_VITEST_PATTERNS;
|
|
5633
|
-
logger$
|
|
5636
|
+
logger$15.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
|
|
5634
5637
|
}
|
|
5635
5638
|
} catch (err) {
|
|
5636
5639
|
const error = err instanceof Error ? err.message : String(err);
|
|
5637
|
-
logger$
|
|
5640
|
+
logger$15.warn({
|
|
5638
5641
|
storyKey,
|
|
5639
5642
|
error
|
|
5640
5643
|
}, "Failed to load test patterns — using defaults");
|
|
@@ -5649,7 +5652,7 @@ async function runDevStory(deps, params) {
|
|
|
5649
5652
|
const findings = getProjectFindings(deps.db);
|
|
5650
5653
|
if (findings.length > 0) {
|
|
5651
5654
|
priorFindingsContent = "Previous pipeline runs encountered these issues — avoid repeating them:\n\n" + findings;
|
|
5652
|
-
logger$
|
|
5655
|
+
logger$15.debug({
|
|
5653
5656
|
storyKey,
|
|
5654
5657
|
findingsLen: findings.length
|
|
5655
5658
|
}, "Injecting prior findings into dev-story prompt");
|
|
@@ -5669,7 +5672,7 @@ async function runDevStory(deps, params) {
|
|
|
5669
5672
|
if (plan.test_categories && plan.test_categories.length > 0) parts.push(`\n### Categories: ${plan.test_categories.join(", ")}`);
|
|
5670
5673
|
if (plan.coverage_notes) parts.push(`\n### Coverage Notes\n${plan.coverage_notes}`);
|
|
5671
5674
|
testPlanContent = parts.join("\n");
|
|
5672
|
-
logger$
|
|
5675
|
+
logger$15.debug({ storyKey }, "Injecting test plan into dev-story prompt");
|
|
5673
5676
|
}
|
|
5674
5677
|
} catch {}
|
|
5675
5678
|
const sections = [
|
|
@@ -5715,7 +5718,7 @@ async function runDevStory(deps, params) {
|
|
|
5715
5718
|
}
|
|
5716
5719
|
];
|
|
5717
5720
|
const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING);
|
|
5718
|
-
logger$
|
|
5721
|
+
logger$15.info({
|
|
5719
5722
|
storyKey,
|
|
5720
5723
|
tokenCount,
|
|
5721
5724
|
ceiling: TOKEN_CEILING,
|
|
@@ -5730,12 +5733,13 @@ async function runDevStory(deps, params) {
|
|
|
5730
5733
|
timeout: DEFAULT_TIMEOUT_MS$1,
|
|
5731
5734
|
outputSchema: DevStoryResultSchema,
|
|
5732
5735
|
maxTurns: resolvedMaxTurns,
|
|
5733
|
-
...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {}
|
|
5736
|
+
...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {},
|
|
5737
|
+
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
|
|
5734
5738
|
});
|
|
5735
5739
|
dispatchResult = await handle.result;
|
|
5736
5740
|
} catch (err) {
|
|
5737
5741
|
const error = err instanceof Error ? err.message : String(err);
|
|
5738
|
-
logger$
|
|
5742
|
+
logger$15.error({
|
|
5739
5743
|
storyKey,
|
|
5740
5744
|
error
|
|
5741
5745
|
}, "Dispatch threw an unexpected error");
|
|
@@ -5746,11 +5750,11 @@ async function runDevStory(deps, params) {
|
|
|
5746
5750
|
output: dispatchResult.tokenEstimate.output
|
|
5747
5751
|
};
|
|
5748
5752
|
if (dispatchResult.status === "timeout") {
|
|
5749
|
-
logger$
|
|
5753
|
+
logger$15.error({
|
|
5750
5754
|
storyKey,
|
|
5751
5755
|
durationMs: dispatchResult.durationMs
|
|
5752
5756
|
}, "Dev-story dispatch timed out");
|
|
5753
|
-
if (dispatchResult.output.length > 0) logger$
|
|
5757
|
+
if (dispatchResult.output.length > 0) logger$15.info({
|
|
5754
5758
|
storyKey,
|
|
5755
5759
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
5756
5760
|
}, "Partial output before timeout");
|
|
@@ -5760,12 +5764,12 @@ async function runDevStory(deps, params) {
|
|
|
5760
5764
|
};
|
|
5761
5765
|
}
|
|
5762
5766
|
if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
|
|
5763
|
-
logger$
|
|
5767
|
+
logger$15.error({
|
|
5764
5768
|
storyKey,
|
|
5765
5769
|
exitCode: dispatchResult.exitCode,
|
|
5766
5770
|
status: dispatchResult.status
|
|
5767
5771
|
}, "Dev-story dispatch failed");
|
|
5768
|
-
if (dispatchResult.output.length > 0) logger$
|
|
5772
|
+
if (dispatchResult.output.length > 0) logger$15.info({
|
|
5769
5773
|
storyKey,
|
|
5770
5774
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
5771
5775
|
}, "Partial output from failed dispatch");
|
|
@@ -5777,7 +5781,7 @@ async function runDevStory(deps, params) {
|
|
|
5777
5781
|
if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
|
|
5778
5782
|
const details = dispatchResult.parseError ?? "parsed result was null";
|
|
5779
5783
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
5780
|
-
logger$
|
|
5784
|
+
logger$15.error({
|
|
5781
5785
|
storyKey,
|
|
5782
5786
|
parseError: details,
|
|
5783
5787
|
rawOutputSnippet: rawSnippet
|
|
@@ -5785,12 +5789,12 @@ async function runDevStory(deps, params) {
|
|
|
5785
5789
|
let filesModified = [];
|
|
5786
5790
|
try {
|
|
5787
5791
|
filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
|
|
5788
|
-
if (filesModified.length > 0) logger$
|
|
5792
|
+
if (filesModified.length > 0) logger$15.info({
|
|
5789
5793
|
storyKey,
|
|
5790
5794
|
fileCount: filesModified.length
|
|
5791
5795
|
}, "Recovered files_modified from git status (YAML fallback)");
|
|
5792
5796
|
} catch (err) {
|
|
5793
|
-
logger$
|
|
5797
|
+
logger$15.warn({
|
|
5794
5798
|
storyKey,
|
|
5795
5799
|
error: err instanceof Error ? err.message : String(err)
|
|
5796
5800
|
}, "Failed to recover files_modified from git");
|
|
@@ -5807,7 +5811,7 @@ async function runDevStory(deps, params) {
|
|
|
5807
5811
|
};
|
|
5808
5812
|
}
|
|
5809
5813
|
const parsed = dispatchResult.parsed;
|
|
5810
|
-
logger$
|
|
5814
|
+
logger$15.info({
|
|
5811
5815
|
storyKey,
|
|
5812
5816
|
result: parsed.result,
|
|
5813
5817
|
acMet: parsed.ac_met.length
|
|
@@ -5946,7 +5950,7 @@ function extractFilesInScope(storyContent) {
|
|
|
5946
5950
|
|
|
5947
5951
|
//#endregion
|
|
5948
5952
|
//#region src/modules/compiled-workflows/code-review.ts
|
|
5949
|
-
const logger$
|
|
5953
|
+
const logger$14 = createLogger("compiled-workflows:code-review");
|
|
5950
5954
|
/**
|
|
5951
5955
|
* Default fallback result when dispatch fails or times out.
|
|
5952
5956
|
* Uses NEEDS_MINOR_FIXES (not NEEDS_MAJOR_REWORK) so a parse/schema failure
|
|
@@ -5984,14 +5988,14 @@ function defaultFailResult(error, tokenUsage) {
|
|
|
5984
5988
|
async function runCodeReview(deps, params) {
|
|
5985
5989
|
const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
|
|
5986
5990
|
const cwd = workingDirectory ?? process.cwd();
|
|
5987
|
-
logger$
|
|
5991
|
+
logger$14.debug({
|
|
5988
5992
|
storyKey,
|
|
5989
5993
|
storyFilePath,
|
|
5990
5994
|
cwd,
|
|
5991
5995
|
pipelineRunId
|
|
5992
5996
|
}, "Starting code-review workflow");
|
|
5993
5997
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("code-review", deps.tokenCeilings);
|
|
5994
|
-
logger$
|
|
5998
|
+
logger$14.info({
|
|
5995
5999
|
workflow: "code-review",
|
|
5996
6000
|
ceiling: TOKEN_CEILING,
|
|
5997
6001
|
source: tokenCeilingSource
|
|
@@ -6001,7 +6005,7 @@ async function runCodeReview(deps, params) {
|
|
|
6001
6005
|
template = await deps.pack.getPrompt("code-review");
|
|
6002
6006
|
} catch (err) {
|
|
6003
6007
|
const error = err instanceof Error ? err.message : String(err);
|
|
6004
|
-
logger$
|
|
6008
|
+
logger$14.error({ error }, "Failed to retrieve code-review prompt template");
|
|
6005
6009
|
return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
|
|
6006
6010
|
input: 0,
|
|
6007
6011
|
output: 0
|
|
@@ -6012,7 +6016,7 @@ async function runCodeReview(deps, params) {
|
|
|
6012
6016
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
6013
6017
|
} catch (err) {
|
|
6014
6018
|
const error = err instanceof Error ? err.message : String(err);
|
|
6015
|
-
logger$
|
|
6019
|
+
logger$14.error({
|
|
6016
6020
|
storyFilePath,
|
|
6017
6021
|
error
|
|
6018
6022
|
}, "Failed to read story file");
|
|
@@ -6032,12 +6036,12 @@ async function runCodeReview(deps, params) {
|
|
|
6032
6036
|
const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
|
|
6033
6037
|
if (scopedTotal <= TOKEN_CEILING) {
|
|
6034
6038
|
gitDiffContent = scopedDiff;
|
|
6035
|
-
logger$
|
|
6039
|
+
logger$14.debug({
|
|
6036
6040
|
fileCount: filesModified.length,
|
|
6037
6041
|
tokenCount: scopedTotal
|
|
6038
6042
|
}, "Using scoped file diff");
|
|
6039
6043
|
} else {
|
|
6040
|
-
logger$
|
|
6044
|
+
logger$14.warn({
|
|
6041
6045
|
estimatedTotal: scopedTotal,
|
|
6042
6046
|
ceiling: TOKEN_CEILING,
|
|
6043
6047
|
fileCount: filesModified.length
|
|
@@ -6051,7 +6055,7 @@ async function runCodeReview(deps, params) {
|
|
|
6051
6055
|
const fullTotal = nonDiffTokens + countTokens(fullDiff);
|
|
6052
6056
|
if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
|
|
6053
6057
|
else {
|
|
6054
|
-
logger$
|
|
6058
|
+
logger$14.warn({
|
|
6055
6059
|
estimatedTotal: fullTotal,
|
|
6056
6060
|
ceiling: TOKEN_CEILING
|
|
6057
6061
|
}, "Full git diff would exceed token ceiling — using stat-only summary");
|
|
@@ -6059,7 +6063,7 @@ async function runCodeReview(deps, params) {
|
|
|
6059
6063
|
}
|
|
6060
6064
|
}
|
|
6061
6065
|
if (gitDiffContent.trim().length === 0) {
|
|
6062
|
-
logger$
|
|
6066
|
+
logger$14.info({ storyKey }, "Empty git diff — skipping review with SHIP_IT");
|
|
6063
6067
|
return {
|
|
6064
6068
|
verdict: "SHIP_IT",
|
|
6065
6069
|
issues: 0,
|
|
@@ -6084,7 +6088,7 @@ async function runCodeReview(deps, params) {
|
|
|
6084
6088
|
const findings = getProjectFindings(deps.db);
|
|
6085
6089
|
if (findings.length > 0) {
|
|
6086
6090
|
priorFindingsContent = "Previous reviews found these recurring patterns — pay special attention:\n\n" + findings;
|
|
6087
|
-
logger$
|
|
6091
|
+
logger$14.debug({
|
|
6088
6092
|
storyKey,
|
|
6089
6093
|
findingsLen: findings.length
|
|
6090
6094
|
}, "Injecting prior findings into code-review prompt");
|
|
@@ -6118,11 +6122,11 @@ async function runCodeReview(deps, params) {
|
|
|
6118
6122
|
}
|
|
6119
6123
|
];
|
|
6120
6124
|
const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
|
|
6121
|
-
if (assembleResult.truncated) logger$
|
|
6125
|
+
if (assembleResult.truncated) logger$14.warn({
|
|
6122
6126
|
storyKey,
|
|
6123
6127
|
tokenCount: assembleResult.tokenCount
|
|
6124
6128
|
}, "Code-review prompt truncated to fit token ceiling");
|
|
6125
|
-
logger$
|
|
6129
|
+
logger$14.debug({
|
|
6126
6130
|
storyKey,
|
|
6127
6131
|
tokenCount: assembleResult.tokenCount,
|
|
6128
6132
|
truncated: assembleResult.truncated
|
|
@@ -6133,14 +6137,15 @@ async function runCodeReview(deps, params) {
|
|
|
6133
6137
|
agent: "claude-code",
|
|
6134
6138
|
taskType: "code-review",
|
|
6135
6139
|
outputSchema: CodeReviewResultSchema,
|
|
6136
|
-
workingDirectory: deps.projectRoot
|
|
6140
|
+
workingDirectory: deps.projectRoot,
|
|
6141
|
+
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
|
|
6137
6142
|
});
|
|
6138
6143
|
let dispatchResult;
|
|
6139
6144
|
try {
|
|
6140
6145
|
dispatchResult = await handle.result;
|
|
6141
6146
|
} catch (err) {
|
|
6142
6147
|
const error = err instanceof Error ? err.message : String(err);
|
|
6143
|
-
logger$
|
|
6148
|
+
logger$14.error({
|
|
6144
6149
|
storyKey,
|
|
6145
6150
|
error
|
|
6146
6151
|
}, "Code-review dispatch threw unexpected error");
|
|
@@ -6156,7 +6161,7 @@ async function runCodeReview(deps, params) {
|
|
|
6156
6161
|
const rawOutput = dispatchResult.output ?? void 0;
|
|
6157
6162
|
if (dispatchResult.status === "failed") {
|
|
6158
6163
|
const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
|
|
6159
|
-
logger$
|
|
6164
|
+
logger$14.warn({
|
|
6160
6165
|
storyKey,
|
|
6161
6166
|
exitCode: dispatchResult.exitCode
|
|
6162
6167
|
}, "Code-review dispatch failed");
|
|
@@ -6166,7 +6171,7 @@ async function runCodeReview(deps, params) {
|
|
|
6166
6171
|
};
|
|
6167
6172
|
}
|
|
6168
6173
|
if (dispatchResult.status === "timeout") {
|
|
6169
|
-
logger$
|
|
6174
|
+
logger$14.warn({ storyKey }, "Code-review dispatch timed out");
|
|
6170
6175
|
return {
|
|
6171
6176
|
...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
|
|
6172
6177
|
rawOutput
|
|
@@ -6174,7 +6179,7 @@ async function runCodeReview(deps, params) {
|
|
|
6174
6179
|
}
|
|
6175
6180
|
if (dispatchResult.parsed === null) {
|
|
6176
6181
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
6177
|
-
logger$
|
|
6182
|
+
logger$14.warn({
|
|
6178
6183
|
storyKey,
|
|
6179
6184
|
details
|
|
6180
6185
|
}, "Code-review output schema validation failed");
|
|
@@ -6191,7 +6196,7 @@ async function runCodeReview(deps, params) {
|
|
|
6191
6196
|
const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
|
|
6192
6197
|
if (!parseResult.success) {
|
|
6193
6198
|
const details = parseResult.error.message;
|
|
6194
|
-
logger$
|
|
6199
|
+
logger$14.warn({
|
|
6195
6200
|
storyKey,
|
|
6196
6201
|
details
|
|
6197
6202
|
}, "Code-review output failed schema validation");
|
|
@@ -6206,13 +6211,13 @@ async function runCodeReview(deps, params) {
|
|
|
6206
6211
|
};
|
|
6207
6212
|
}
|
|
6208
6213
|
const parsed = parseResult.data;
|
|
6209
|
-
if (parsed.agentVerdict !== parsed.verdict) logger$
|
|
6214
|
+
if (parsed.agentVerdict !== parsed.verdict) logger$14.info({
|
|
6210
6215
|
storyKey,
|
|
6211
6216
|
agentVerdict: parsed.agentVerdict,
|
|
6212
6217
|
pipelineVerdict: parsed.verdict,
|
|
6213
6218
|
issues: parsed.issues
|
|
6214
6219
|
}, "Pipeline overrode agent verdict based on issue severities");
|
|
6215
|
-
logger$
|
|
6220
|
+
logger$14.info({
|
|
6216
6221
|
storyKey,
|
|
6217
6222
|
verdict: parsed.verdict,
|
|
6218
6223
|
issues: parsed.issues
|
|
@@ -6237,14 +6242,14 @@ function getArchConstraints$2(deps) {
|
|
|
6237
6242
|
if (constraints.length === 0) return "";
|
|
6238
6243
|
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
6239
6244
|
} catch (err) {
|
|
6240
|
-
logger$
|
|
6245
|
+
logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
6241
6246
|
return "";
|
|
6242
6247
|
}
|
|
6243
6248
|
}
|
|
6244
6249
|
|
|
6245
6250
|
//#endregion
|
|
6246
6251
|
//#region src/modules/compiled-workflows/test-plan.ts
|
|
6247
|
-
const logger$
|
|
6252
|
+
const logger$13 = createLogger("compiled-workflows:test-plan");
|
|
6248
6253
|
/** Default timeout for test-plan dispatches in milliseconds (5 min — lightweight call) */
|
|
6249
6254
|
const DEFAULT_TIMEOUT_MS = 3e5;
|
|
6250
6255
|
/**
|
|
@@ -6256,12 +6261,12 @@ const DEFAULT_TIMEOUT_MS = 3e5;
|
|
|
6256
6261
|
*/
|
|
6257
6262
|
async function runTestPlan(deps, params) {
|
|
6258
6263
|
const { storyKey, storyFilePath, pipelineRunId } = params;
|
|
6259
|
-
logger$
|
|
6264
|
+
logger$13.info({
|
|
6260
6265
|
storyKey,
|
|
6261
6266
|
storyFilePath
|
|
6262
6267
|
}, "Starting compiled test-plan workflow");
|
|
6263
6268
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-plan", deps.tokenCeilings);
|
|
6264
|
-
logger$
|
|
6269
|
+
logger$13.info({
|
|
6265
6270
|
workflow: "test-plan",
|
|
6266
6271
|
ceiling: TOKEN_CEILING,
|
|
6267
6272
|
source: tokenCeilingSource
|
|
@@ -6269,10 +6274,10 @@ async function runTestPlan(deps, params) {
|
|
|
6269
6274
|
let template;
|
|
6270
6275
|
try {
|
|
6271
6276
|
template = await deps.pack.getPrompt("test-plan");
|
|
6272
|
-
logger$
|
|
6277
|
+
logger$13.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
|
|
6273
6278
|
} catch (err) {
|
|
6274
6279
|
const error = err instanceof Error ? err.message : String(err);
|
|
6275
|
-
logger$
|
|
6280
|
+
logger$13.warn({
|
|
6276
6281
|
storyKey,
|
|
6277
6282
|
error
|
|
6278
6283
|
}, "Failed to retrieve test-plan prompt template");
|
|
@@ -6283,14 +6288,14 @@ async function runTestPlan(deps, params) {
|
|
|
6283
6288
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
6284
6289
|
} catch (err) {
|
|
6285
6290
|
if (err.code === "ENOENT") {
|
|
6286
|
-
logger$
|
|
6291
|
+
logger$13.warn({
|
|
6287
6292
|
storyKey,
|
|
6288
6293
|
storyFilePath
|
|
6289
6294
|
}, "Story file not found for test planning");
|
|
6290
6295
|
return makeTestPlanFailureResult("story_file_not_found");
|
|
6291
6296
|
}
|
|
6292
6297
|
const error = err instanceof Error ? err.message : String(err);
|
|
6293
|
-
logger$
|
|
6298
|
+
logger$13.warn({
|
|
6294
6299
|
storyKey,
|
|
6295
6300
|
storyFilePath,
|
|
6296
6301
|
error
|
|
@@ -6307,7 +6312,7 @@ async function runTestPlan(deps, params) {
|
|
|
6307
6312
|
content: archConstraintsContent,
|
|
6308
6313
|
priority: "optional"
|
|
6309
6314
|
}], TOKEN_CEILING);
|
|
6310
|
-
logger$
|
|
6315
|
+
logger$13.info({
|
|
6311
6316
|
storyKey,
|
|
6312
6317
|
tokenCount,
|
|
6313
6318
|
ceiling: TOKEN_CEILING,
|
|
@@ -6321,12 +6326,13 @@ async function runTestPlan(deps, params) {
|
|
|
6321
6326
|
taskType: "test-plan",
|
|
6322
6327
|
timeout: DEFAULT_TIMEOUT_MS,
|
|
6323
6328
|
outputSchema: TestPlanResultSchema,
|
|
6324
|
-
...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {}
|
|
6329
|
+
...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {},
|
|
6330
|
+
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
|
|
6325
6331
|
});
|
|
6326
6332
|
dispatchResult = await handle.result;
|
|
6327
6333
|
} catch (err) {
|
|
6328
6334
|
const error = err instanceof Error ? err.message : String(err);
|
|
6329
|
-
logger$
|
|
6335
|
+
logger$13.warn({
|
|
6330
6336
|
storyKey,
|
|
6331
6337
|
error
|
|
6332
6338
|
}, "Test-plan dispatch threw an unexpected error");
|
|
@@ -6337,7 +6343,7 @@ async function runTestPlan(deps, params) {
|
|
|
6337
6343
|
output: dispatchResult.tokenEstimate.output
|
|
6338
6344
|
};
|
|
6339
6345
|
if (dispatchResult.status === "timeout") {
|
|
6340
|
-
logger$
|
|
6346
|
+
logger$13.warn({
|
|
6341
6347
|
storyKey,
|
|
6342
6348
|
durationMs: dispatchResult.durationMs
|
|
6343
6349
|
}, "Test-plan dispatch timed out");
|
|
@@ -6347,7 +6353,7 @@ async function runTestPlan(deps, params) {
|
|
|
6347
6353
|
};
|
|
6348
6354
|
}
|
|
6349
6355
|
if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
|
|
6350
|
-
logger$
|
|
6356
|
+
logger$13.warn({
|
|
6351
6357
|
storyKey,
|
|
6352
6358
|
exitCode: dispatchResult.exitCode,
|
|
6353
6359
|
status: dispatchResult.status
|
|
@@ -6359,7 +6365,7 @@ async function runTestPlan(deps, params) {
|
|
|
6359
6365
|
}
|
|
6360
6366
|
if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
|
|
6361
6367
|
const details = dispatchResult.parseError ?? "parsed result was null";
|
|
6362
|
-
logger$
|
|
6368
|
+
logger$13.warn({
|
|
6363
6369
|
storyKey,
|
|
6364
6370
|
parseError: details
|
|
6365
6371
|
}, "Test-plan YAML schema validation failed");
|
|
@@ -6382,19 +6388,19 @@ async function runTestPlan(deps, params) {
|
|
|
6382
6388
|
}),
|
|
6383
6389
|
rationale: `Test plan for ${storyKey}: ${parsed.test_files.length} test files, categories: ${parsed.test_categories.join(", ")}`
|
|
6384
6390
|
});
|
|
6385
|
-
logger$
|
|
6391
|
+
logger$13.info({
|
|
6386
6392
|
storyKey,
|
|
6387
6393
|
fileCount: parsed.test_files.length,
|
|
6388
6394
|
categories: parsed.test_categories
|
|
6389
6395
|
}, "Test plan stored in decision store");
|
|
6390
6396
|
} catch (err) {
|
|
6391
6397
|
const error = err instanceof Error ? err.message : String(err);
|
|
6392
|
-
logger$
|
|
6398
|
+
logger$13.warn({
|
|
6393
6399
|
storyKey,
|
|
6394
6400
|
error
|
|
6395
6401
|
}, "Failed to store test plan in decision store — proceeding anyway");
|
|
6396
6402
|
}
|
|
6397
|
-
logger$
|
|
6403
|
+
logger$13.info({
|
|
6398
6404
|
storyKey,
|
|
6399
6405
|
result: parsed.result
|
|
6400
6406
|
}, "Test-plan workflow completed");
|
|
@@ -6434,14 +6440,14 @@ function getArchConstraints$1(deps) {
|
|
|
6434
6440
|
if (constraints.length === 0) return "";
|
|
6435
6441
|
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
6436
6442
|
} catch (err) {
|
|
6437
|
-
logger$
|
|
6443
|
+
logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints for test-plan — proceeding without them");
|
|
6438
6444
|
return "";
|
|
6439
6445
|
}
|
|
6440
6446
|
}
|
|
6441
6447
|
|
|
6442
6448
|
//#endregion
|
|
6443
6449
|
//#region src/modules/compiled-workflows/test-expansion.ts
|
|
6444
|
-
const logger$
|
|
6450
|
+
const logger$12 = createLogger("compiled-workflows:test-expansion");
|
|
6445
6451
|
function defaultFallbackResult(error, tokenUsage) {
|
|
6446
6452
|
return {
|
|
6447
6453
|
expansion_priority: "low",
|
|
@@ -6471,14 +6477,14 @@ function defaultFallbackResult(error, tokenUsage) {
|
|
|
6471
6477
|
async function runTestExpansion(deps, params) {
|
|
6472
6478
|
const { storyKey, storyFilePath, pipelineRunId, filesModified, workingDirectory } = params;
|
|
6473
6479
|
const cwd = workingDirectory ?? process.cwd();
|
|
6474
|
-
logger$
|
|
6480
|
+
logger$12.debug({
|
|
6475
6481
|
storyKey,
|
|
6476
6482
|
storyFilePath,
|
|
6477
6483
|
cwd,
|
|
6478
6484
|
pipelineRunId
|
|
6479
6485
|
}, "Starting test-expansion workflow");
|
|
6480
6486
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-expansion", deps.tokenCeilings);
|
|
6481
|
-
logger$
|
|
6487
|
+
logger$12.info({
|
|
6482
6488
|
workflow: "test-expansion",
|
|
6483
6489
|
ceiling: TOKEN_CEILING,
|
|
6484
6490
|
source: tokenCeilingSource
|
|
@@ -6488,7 +6494,7 @@ async function runTestExpansion(deps, params) {
|
|
|
6488
6494
|
template = await deps.pack.getPrompt("test-expansion");
|
|
6489
6495
|
} catch (err) {
|
|
6490
6496
|
const error = err instanceof Error ? err.message : String(err);
|
|
6491
|
-
logger$
|
|
6497
|
+
logger$12.warn({ error }, "Failed to retrieve test-expansion prompt template");
|
|
6492
6498
|
return defaultFallbackResult(`Failed to retrieve prompt template: ${error}`, {
|
|
6493
6499
|
input: 0,
|
|
6494
6500
|
output: 0
|
|
@@ -6499,7 +6505,7 @@ async function runTestExpansion(deps, params) {
|
|
|
6499
6505
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
6500
6506
|
} catch (err) {
|
|
6501
6507
|
const error = err instanceof Error ? err.message : String(err);
|
|
6502
|
-
logger$
|
|
6508
|
+
logger$12.warn({
|
|
6503
6509
|
storyFilePath,
|
|
6504
6510
|
error
|
|
6505
6511
|
}, "Failed to read story file");
|
|
@@ -6519,12 +6525,12 @@ async function runTestExpansion(deps, params) {
|
|
|
6519
6525
|
const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
|
|
6520
6526
|
if (scopedTotal <= TOKEN_CEILING) {
|
|
6521
6527
|
gitDiffContent = scopedDiff;
|
|
6522
|
-
logger$
|
|
6528
|
+
logger$12.debug({
|
|
6523
6529
|
fileCount: filesModified.length,
|
|
6524
6530
|
tokenCount: scopedTotal
|
|
6525
6531
|
}, "Using scoped file diff");
|
|
6526
6532
|
} else {
|
|
6527
|
-
logger$
|
|
6533
|
+
logger$12.warn({
|
|
6528
6534
|
estimatedTotal: scopedTotal,
|
|
6529
6535
|
ceiling: TOKEN_CEILING,
|
|
6530
6536
|
fileCount: filesModified.length
|
|
@@ -6532,7 +6538,7 @@ async function runTestExpansion(deps, params) {
|
|
|
6532
6538
|
gitDiffContent = await getGitDiffStatSummary(cwd);
|
|
6533
6539
|
}
|
|
6534
6540
|
} catch (err) {
|
|
6535
|
-
logger$
|
|
6541
|
+
logger$12.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
|
|
6536
6542
|
}
|
|
6537
6543
|
const sections = [
|
|
6538
6544
|
{
|
|
@@ -6552,11 +6558,11 @@ async function runTestExpansion(deps, params) {
|
|
|
6552
6558
|
}
|
|
6553
6559
|
];
|
|
6554
6560
|
const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
|
|
6555
|
-
if (assembleResult.truncated) logger$
|
|
6561
|
+
if (assembleResult.truncated) logger$12.warn({
|
|
6556
6562
|
storyKey,
|
|
6557
6563
|
tokenCount: assembleResult.tokenCount
|
|
6558
6564
|
}, "Test-expansion prompt truncated to fit token ceiling");
|
|
6559
|
-
logger$
|
|
6565
|
+
logger$12.debug({
|
|
6560
6566
|
storyKey,
|
|
6561
6567
|
tokenCount: assembleResult.tokenCount,
|
|
6562
6568
|
truncated: assembleResult.truncated
|
|
@@ -6567,14 +6573,15 @@ async function runTestExpansion(deps, params) {
|
|
|
6567
6573
|
agent: "claude-code",
|
|
6568
6574
|
taskType: "test-expansion",
|
|
6569
6575
|
outputSchema: TestExpansionResultSchema,
|
|
6570
|
-
workingDirectory: deps.projectRoot
|
|
6576
|
+
workingDirectory: deps.projectRoot,
|
|
6577
|
+
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
|
|
6571
6578
|
});
|
|
6572
6579
|
let dispatchResult;
|
|
6573
6580
|
try {
|
|
6574
6581
|
dispatchResult = await handle.result;
|
|
6575
6582
|
} catch (err) {
|
|
6576
6583
|
const error = err instanceof Error ? err.message : String(err);
|
|
6577
|
-
logger$
|
|
6584
|
+
logger$12.warn({
|
|
6578
6585
|
storyKey,
|
|
6579
6586
|
error
|
|
6580
6587
|
}, "Test-expansion dispatch threw unexpected error");
|
|
@@ -6589,19 +6596,19 @@ async function runTestExpansion(deps, params) {
|
|
|
6589
6596
|
};
|
|
6590
6597
|
if (dispatchResult.status === "failed") {
|
|
6591
6598
|
const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""}`.trim();
|
|
6592
|
-
logger$
|
|
6599
|
+
logger$12.warn({
|
|
6593
6600
|
storyKey,
|
|
6594
6601
|
exitCode: dispatchResult.exitCode
|
|
6595
6602
|
}, "Test-expansion dispatch failed");
|
|
6596
6603
|
return defaultFallbackResult(errorMsg, tokenUsage);
|
|
6597
6604
|
}
|
|
6598
6605
|
if (dispatchResult.status === "timeout") {
|
|
6599
|
-
logger$
|
|
6606
|
+
logger$12.warn({ storyKey }, "Test-expansion dispatch timed out");
|
|
6600
6607
|
return defaultFallbackResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage);
|
|
6601
6608
|
}
|
|
6602
6609
|
if (dispatchResult.parsed === null) {
|
|
6603
6610
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
6604
|
-
logger$
|
|
6611
|
+
logger$12.warn({
|
|
6605
6612
|
storyKey,
|
|
6606
6613
|
details
|
|
6607
6614
|
}, "Test-expansion output has no parseable YAML");
|
|
@@ -6610,14 +6617,14 @@ async function runTestExpansion(deps, params) {
|
|
|
6610
6617
|
const parseResult = TestExpansionResultSchema.safeParse(dispatchResult.parsed);
|
|
6611
6618
|
if (!parseResult.success) {
|
|
6612
6619
|
const details = parseResult.error.message;
|
|
6613
|
-
logger$
|
|
6620
|
+
logger$12.warn({
|
|
6614
6621
|
storyKey,
|
|
6615
6622
|
details
|
|
6616
6623
|
}, "Test-expansion output failed schema validation");
|
|
6617
6624
|
return defaultFallbackResult(`schema_validation_failed: ${details}`, tokenUsage);
|
|
6618
6625
|
}
|
|
6619
6626
|
const parsed = parseResult.data;
|
|
6620
|
-
logger$
|
|
6627
|
+
logger$12.info({
|
|
6621
6628
|
storyKey,
|
|
6622
6629
|
expansion_priority: parsed.expansion_priority,
|
|
6623
6630
|
coverage_gaps: parsed.coverage_gaps.length,
|
|
@@ -6642,7 +6649,7 @@ function getArchConstraints(deps) {
|
|
|
6642
6649
|
if (constraints.length === 0) return "";
|
|
6643
6650
|
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
6644
6651
|
} catch (err) {
|
|
6645
|
-
logger$
|
|
6652
|
+
logger$12.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
6646
6653
|
return "";
|
|
6647
6654
|
}
|
|
6648
6655
|
}
|
|
@@ -7766,7 +7773,8 @@ var DoltStateStore = class DoltStateStore {
|
|
|
7766
7773
|
const entries = [];
|
|
7767
7774
|
for (const row of rows) {
|
|
7768
7775
|
const hash = String(row.commit_hash ?? "");
|
|
7769
|
-
const
|
|
7776
|
+
const dateVal = row.date;
|
|
7777
|
+
const timestamp = dateVal instanceof Date ? dateVal.toISOString() : String(dateVal ?? "");
|
|
7770
7778
|
const message = String(row.message ?? "");
|
|
7771
7779
|
const author = row.committer ? String(row.committer) : void 0;
|
|
7772
7780
|
const storyKeyMatch = /story\/([0-9]+-[0-9]+)/i.exec(message);
|
|
@@ -7929,7 +7937,7 @@ function createDoltClient(options) {
|
|
|
7929
7937
|
|
|
7930
7938
|
//#endregion
|
|
7931
7939
|
//#region src/modules/state/index.ts
|
|
7932
|
-
const logger$
|
|
7940
|
+
const logger$11 = createLogger("state:factory");
|
|
7933
7941
|
/**
|
|
7934
7942
|
* Synchronously check whether Dolt is available and a Dolt repo exists at the
|
|
7935
7943
|
* canonical state path under `basePath`.
|
|
@@ -7976,14 +7984,14 @@ function createStateStore(config = {}) {
|
|
|
7976
7984
|
const repoPath = config.basePath ?? process.cwd();
|
|
7977
7985
|
const detection = detectDoltAvailableSync(repoPath);
|
|
7978
7986
|
if (detection.available) {
|
|
7979
|
-
logger$
|
|
7987
|
+
logger$11.debug(`Dolt detected, using DoltStateStore (state path: ${join$1(repoPath, ".substrate", "state")})`);
|
|
7980
7988
|
const client = new DoltClient({ repoPath });
|
|
7981
7989
|
return new DoltStateStore({
|
|
7982
7990
|
repoPath,
|
|
7983
7991
|
client
|
|
7984
7992
|
});
|
|
7985
7993
|
} else {
|
|
7986
|
-
logger$
|
|
7994
|
+
logger$11.debug(`Dolt not found, using FileStateStore (reason: ${detection.reason})`);
|
|
7987
7995
|
return new FileStateStore({ basePath: config.basePath });
|
|
7988
7996
|
}
|
|
7989
7997
|
}
|
|
@@ -7992,7 +8000,7 @@ function createStateStore(config = {}) {
|
|
|
7992
8000
|
|
|
7993
8001
|
//#endregion
|
|
7994
8002
|
//#region src/cli/commands/health.ts
|
|
7995
|
-
const logger$
|
|
8003
|
+
const logger$10 = createLogger("health-cmd");
|
|
7996
8004
|
/** Default stall threshold in seconds — also used by supervisor default */
|
|
7997
8005
|
const DEFAULT_STALL_THRESHOLD_SECONDS = 600;
|
|
7998
8006
|
/**
|
|
@@ -8294,7 +8302,7 @@ async function runHealthAction(options) {
|
|
|
8294
8302
|
const msg = err instanceof Error ? err.message : String(err);
|
|
8295
8303
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
|
|
8296
8304
|
else process.stderr.write(`Error: ${msg}\n`);
|
|
8297
|
-
logger$
|
|
8305
|
+
logger$10.error({ err }, "health action failed");
|
|
8298
8306
|
return 1;
|
|
8299
8307
|
}
|
|
8300
8308
|
}
|
|
@@ -8341,7 +8349,7 @@ function registerHealthCommand(program, _version = "0.0.0", projectRoot = proces
|
|
|
8341
8349
|
|
|
8342
8350
|
//#endregion
|
|
8343
8351
|
//#region src/modules/implementation-orchestrator/seed-methodology-context.ts
|
|
8344
|
-
const logger$
|
|
8352
|
+
const logger$9 = createLogger("implementation-orchestrator:seed");
|
|
8345
8353
|
/** Max chars for the architecture summary seeded into decisions */
|
|
8346
8354
|
const MAX_ARCH_CHARS = 6e3;
|
|
8347
8355
|
/** Max chars per epic shard (fallback when per-story extraction returns null) */
|
|
@@ -8375,12 +8383,12 @@ function seedMethodologyContext(db, projectRoot) {
|
|
|
8375
8383
|
const testCount = seedTestPatterns(db, projectRoot);
|
|
8376
8384
|
if (testCount === -1) result.skippedCategories.push("test-patterns");
|
|
8377
8385
|
else result.decisionsCreated += testCount;
|
|
8378
|
-
logger$
|
|
8386
|
+
logger$9.info({
|
|
8379
8387
|
decisionsCreated: result.decisionsCreated,
|
|
8380
8388
|
skippedCategories: result.skippedCategories
|
|
8381
8389
|
}, "Methodology context seeding complete");
|
|
8382
8390
|
} catch (err) {
|
|
8383
|
-
logger$
|
|
8391
|
+
logger$9.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
|
|
8384
8392
|
}
|
|
8385
8393
|
return result;
|
|
8386
8394
|
}
|
|
@@ -8424,7 +8432,7 @@ function seedArchitecture(db, projectRoot) {
|
|
|
8424
8432
|
});
|
|
8425
8433
|
count = 1;
|
|
8426
8434
|
}
|
|
8427
|
-
logger$
|
|
8435
|
+
logger$9.debug({ count }, "Seeded architecture decisions");
|
|
8428
8436
|
return count;
|
|
8429
8437
|
}
|
|
8430
8438
|
/**
|
|
@@ -8448,11 +8456,11 @@ function seedEpicShards(db, projectRoot) {
|
|
|
8448
8456
|
const storedHashDecision = implementationDecisions.find((d) => d.category === "epic-shard-hash" && d.key === "epics-file");
|
|
8449
8457
|
const storedHash = storedHashDecision?.value;
|
|
8450
8458
|
if (storedHash === currentHash) {
|
|
8451
|
-
logger$
|
|
8459
|
+
logger$9.debug({ hash: currentHash }, "Epic shards up-to-date (hash unchanged) — skipping re-seed");
|
|
8452
8460
|
return -1;
|
|
8453
8461
|
}
|
|
8454
8462
|
if (implementationDecisions.some((d) => d.category === "epic-shard")) {
|
|
8455
|
-
logger$
|
|
8463
|
+
logger$9.debug({
|
|
8456
8464
|
storedHash,
|
|
8457
8465
|
currentHash
|
|
8458
8466
|
}, "Epics file changed — deleting stale epic-shard decisions");
|
|
@@ -8480,7 +8488,7 @@ function seedEpicShards(db, projectRoot) {
|
|
|
8480
8488
|
value: currentHash,
|
|
8481
8489
|
rationale: "SHA-256 hash of epics file content for change detection"
|
|
8482
8490
|
});
|
|
8483
|
-
logger$
|
|
8491
|
+
logger$9.debug({
|
|
8484
8492
|
count,
|
|
8485
8493
|
hash: currentHash
|
|
8486
8494
|
}, "Seeded epic shard decisions");
|
|
@@ -8504,7 +8512,7 @@ function seedTestPatterns(db, projectRoot) {
|
|
|
8504
8512
|
value: patterns.slice(0, MAX_TEST_PATTERNS_CHARS),
|
|
8505
8513
|
rationale: "Detected from project configuration at orchestrator startup"
|
|
8506
8514
|
});
|
|
8507
|
-
logger$
|
|
8515
|
+
logger$9.debug("Seeded test patterns decision");
|
|
8508
8516
|
return 1;
|
|
8509
8517
|
}
|
|
8510
8518
|
/**
|
|
@@ -8677,7 +8685,7 @@ function findArtifact(projectRoot, candidates) {
|
|
|
8677
8685
|
|
|
8678
8686
|
//#endregion
|
|
8679
8687
|
//#region src/modules/agent-dispatch/interface-change-detector.ts
|
|
8680
|
-
const logger$
|
|
8688
|
+
const logger$8 = createLogger("interface-change-detector");
|
|
8681
8689
|
/**
|
|
8682
8690
|
* Extract exported interface and type names from TypeScript source content.
|
|
8683
8691
|
*
|
|
@@ -8724,7 +8732,7 @@ function detectInterfaceChanges(options) {
|
|
|
8724
8732
|
for (const name of names) allNames.add(name);
|
|
8725
8733
|
sourceDirs.push(dirname$1(relPath));
|
|
8726
8734
|
} catch {
|
|
8727
|
-
logger$
|
|
8735
|
+
logger$8.debug({
|
|
8728
8736
|
absPath,
|
|
8729
8737
|
storyKey
|
|
8730
8738
|
}, "Could not read modified file for interface extraction");
|
|
@@ -8765,7 +8773,7 @@ function detectInterfaceChanges(options) {
|
|
|
8765
8773
|
potentiallyAffectedTests: Array.from(affectedTests)
|
|
8766
8774
|
};
|
|
8767
8775
|
} catch (err) {
|
|
8768
|
-
logger$
|
|
8776
|
+
logger$8.warn({
|
|
8769
8777
|
err,
|
|
8770
8778
|
storyKey: options.storyKey
|
|
8771
8779
|
}, "Interface change detection failed — skipping");
|
|
@@ -8933,6 +8941,1159 @@ function verifyContracts(declarations, projectRoot) {
|
|
|
8933
8941
|
return mismatches;
|
|
8934
8942
|
}
|
|
8935
8943
|
|
|
8944
|
+
//#endregion
|
|
8945
|
+
//#region src/modules/telemetry/types.ts
|
|
8946
|
+
const ChildSpanSummarySchema = z.object({
|
|
8947
|
+
spanId: z.string(),
|
|
8948
|
+
name: z.string(),
|
|
8949
|
+
toolName: z.string().optional(),
|
|
8950
|
+
inputTokens: z.number(),
|
|
8951
|
+
outputTokens: z.number(),
|
|
8952
|
+
durationMs: z.number()
|
|
8953
|
+
});
|
|
8954
|
+
const TurnAnalysisSchema = z.object({
|
|
8955
|
+
spanId: z.string(),
|
|
8956
|
+
turnNumber: z.number().int().positive(),
|
|
8957
|
+
name: z.string(),
|
|
8958
|
+
timestamp: z.number(),
|
|
8959
|
+
source: z.string(),
|
|
8960
|
+
model: z.string().optional(),
|
|
8961
|
+
inputTokens: z.number(),
|
|
8962
|
+
outputTokens: z.number(),
|
|
8963
|
+
cacheReadTokens: z.number(),
|
|
8964
|
+
freshTokens: z.number(),
|
|
8965
|
+
cacheHitRate: z.number(),
|
|
8966
|
+
costUsd: z.number(),
|
|
8967
|
+
durationMs: z.number(),
|
|
8968
|
+
contextSize: z.number(),
|
|
8969
|
+
contextDelta: z.number(),
|
|
8970
|
+
toolName: z.string().optional(),
|
|
8971
|
+
isContextSpike: z.boolean(),
|
|
8972
|
+
childSpans: z.array(ChildSpanSummarySchema)
|
|
8973
|
+
});
|
|
8974
|
+
const SemanticCategorySchema = z.enum([
|
|
8975
|
+
"tool_outputs",
|
|
8976
|
+
"file_reads",
|
|
8977
|
+
"system_prompts",
|
|
8978
|
+
"conversation_history",
|
|
8979
|
+
"user_prompts",
|
|
8980
|
+
"other"
|
|
8981
|
+
]);
|
|
8982
|
+
const TrendSchema = z.enum([
|
|
8983
|
+
"growing",
|
|
8984
|
+
"stable",
|
|
8985
|
+
"shrinking"
|
|
8986
|
+
]);
|
|
8987
|
+
const TopInvocationSchema = z.object({
|
|
8988
|
+
spanId: z.string(),
|
|
8989
|
+
name: z.string(),
|
|
8990
|
+
toolName: z.string().optional(),
|
|
8991
|
+
totalTokens: z.number(),
|
|
8992
|
+
inputTokens: z.number(),
|
|
8993
|
+
outputTokens: z.number()
|
|
8994
|
+
});
|
|
8995
|
+
const CategoryStatsSchema = z.object({
|
|
8996
|
+
category: SemanticCategorySchema,
|
|
8997
|
+
totalTokens: z.number(),
|
|
8998
|
+
percentage: z.number(),
|
|
8999
|
+
eventCount: z.number(),
|
|
9000
|
+
avgTokensPerEvent: z.number(),
|
|
9001
|
+
trend: TrendSchema
|
|
9002
|
+
});
|
|
9003
|
+
const ConsumerStatsSchema = z.object({
|
|
9004
|
+
consumerKey: z.string(),
|
|
9005
|
+
category: SemanticCategorySchema,
|
|
9006
|
+
totalTokens: z.number(),
|
|
9007
|
+
percentage: z.number(),
|
|
9008
|
+
eventCount: z.number(),
|
|
9009
|
+
topInvocations: z.array(TopInvocationSchema).max(20)
|
|
9010
|
+
});
|
|
9011
|
+
const ModelEfficiencySchema = z.object({
|
|
9012
|
+
model: z.string(),
|
|
9013
|
+
cacheHitRate: z.number(),
|
|
9014
|
+
avgIoRatio: z.number(),
|
|
9015
|
+
costPer1KOutputTokens: z.number()
|
|
9016
|
+
});
|
|
9017
|
+
const SourceEfficiencySchema = z.object({
|
|
9018
|
+
source: z.string(),
|
|
9019
|
+
compositeScore: z.number(),
|
|
9020
|
+
turnCount: z.number()
|
|
9021
|
+
});
|
|
9022
|
+
const EfficiencyScoreSchema = z.object({
|
|
9023
|
+
storyKey: z.string(),
|
|
9024
|
+
timestamp: z.number(),
|
|
9025
|
+
compositeScore: z.number().int().min(0).max(100),
|
|
9026
|
+
cacheHitSubScore: z.number().min(0).max(100),
|
|
9027
|
+
ioRatioSubScore: z.number().min(0).max(100),
|
|
9028
|
+
contextManagementSubScore: z.number().min(0).max(100),
|
|
9029
|
+
avgCacheHitRate: z.number(),
|
|
9030
|
+
avgIoRatio: z.number(),
|
|
9031
|
+
contextSpikeCount: z.number().int().nonnegative(),
|
|
9032
|
+
totalTurns: z.number().int().nonnegative(),
|
|
9033
|
+
perModelBreakdown: z.array(ModelEfficiencySchema),
|
|
9034
|
+
perSourceBreakdown: z.array(SourceEfficiencySchema)
|
|
9035
|
+
});
|
|
9036
|
+
const RuleIdSchema = z.enum([
|
|
9037
|
+
"biggest_consumers",
|
|
9038
|
+
"large_file_reads",
|
|
9039
|
+
"expensive_bash",
|
|
9040
|
+
"repeated_tool_calls",
|
|
9041
|
+
"context_growth_spike",
|
|
9042
|
+
"growing_categories",
|
|
9043
|
+
"cache_efficiency",
|
|
9044
|
+
"per_model_comparison"
|
|
9045
|
+
]);
|
|
9046
|
+
const RecommendationSeveritySchema = z.enum([
|
|
9047
|
+
"critical",
|
|
9048
|
+
"warning",
|
|
9049
|
+
"info"
|
|
9050
|
+
]);
|
|
9051
|
+
const RecommendationSchema = z.object({
|
|
9052
|
+
id: z.string().length(16),
|
|
9053
|
+
storyKey: z.string(),
|
|
9054
|
+
sprintId: z.string().optional(),
|
|
9055
|
+
ruleId: RuleIdSchema,
|
|
9056
|
+
severity: RecommendationSeveritySchema,
|
|
9057
|
+
title: z.string(),
|
|
9058
|
+
description: z.string(),
|
|
9059
|
+
potentialSavingsTokens: z.number().optional(),
|
|
9060
|
+
potentialSavingsUsd: z.number().optional(),
|
|
9061
|
+
actionTarget: z.string().optional(),
|
|
9062
|
+
generatedAt: z.string()
|
|
9063
|
+
});
|
|
9064
|
+
|
|
9065
|
+
//#endregion
|
|
9066
|
+
//#region src/modules/telemetry/persistence.ts
|
|
9067
|
+
const logger$7 = createLogger("telemetry:persistence");
|
|
9068
|
+
/**
|
|
9069
|
+
* Concrete SQLite-backed telemetry persistence.
|
|
9070
|
+
*
|
|
9071
|
+
* All prepared statements are compiled once at construction time.
|
|
9072
|
+
* Call `initSchema()` before using if the tables may not exist yet.
|
|
9073
|
+
*/
|
|
9074
|
+
var TelemetryPersistence = class {
|
|
9075
|
+
_db;
|
|
9076
|
+
_insertTurnAnalysis;
|
|
9077
|
+
_getTurnAnalysis;
|
|
9078
|
+
_insertEfficiencyScore;
|
|
9079
|
+
_getEfficiencyScore;
|
|
9080
|
+
_getEfficiencyScores;
|
|
9081
|
+
_insertRecommendation;
|
|
9082
|
+
_getRecommendations;
|
|
9083
|
+
_getAllRecommendations;
|
|
9084
|
+
_insertCategoryStats;
|
|
9085
|
+
_getCategoryStats;
|
|
9086
|
+
_getAllCategoryStats;
|
|
9087
|
+
_insertConsumerStats;
|
|
9088
|
+
_getConsumerStats;
|
|
9089
|
+
constructor(db) {
|
|
9090
|
+
this._db = db;
|
|
9091
|
+
this._insertTurnAnalysis = this._db.prepare(`
|
|
9092
|
+
INSERT OR REPLACE INTO turn_analysis (
|
|
9093
|
+
story_key, span_id, turn_number, name, timestamp, source, model,
|
|
9094
|
+
input_tokens, output_tokens, cache_read_tokens, fresh_tokens,
|
|
9095
|
+
cache_hit_rate, cost_usd, duration_ms, context_size, context_delta,
|
|
9096
|
+
tool_name, is_context_spike, child_spans_json
|
|
9097
|
+
) VALUES (
|
|
9098
|
+
?, ?, ?, ?, ?, ?, ?,
|
|
9099
|
+
?, ?, ?, ?,
|
|
9100
|
+
?, ?, ?, ?, ?,
|
|
9101
|
+
?, ?, ?
|
|
9102
|
+
)
|
|
9103
|
+
`);
|
|
9104
|
+
this._getTurnAnalysis = this._db.prepare(`
|
|
9105
|
+
SELECT * FROM turn_analysis
|
|
9106
|
+
WHERE story_key = ?
|
|
9107
|
+
ORDER BY turn_number ASC
|
|
9108
|
+
`);
|
|
9109
|
+
this._insertEfficiencyScore = this._db.prepare(`
|
|
9110
|
+
INSERT OR REPLACE INTO efficiency_scores (
|
|
9111
|
+
story_key, timestamp, composite_score,
|
|
9112
|
+
cache_hit_sub_score, io_ratio_sub_score, context_management_sub_score,
|
|
9113
|
+
avg_cache_hit_rate, avg_io_ratio, context_spike_count, total_turns,
|
|
9114
|
+
per_model_json, per_source_json
|
|
9115
|
+
) VALUES (
|
|
9116
|
+
?, ?, ?,
|
|
9117
|
+
?, ?, ?,
|
|
9118
|
+
?, ?, ?, ?,
|
|
9119
|
+
?, ?
|
|
9120
|
+
)
|
|
9121
|
+
`);
|
|
9122
|
+
this._getEfficiencyScore = this._db.prepare(`
|
|
9123
|
+
SELECT * FROM efficiency_scores
|
|
9124
|
+
WHERE story_key = ?
|
|
9125
|
+
ORDER BY timestamp DESC
|
|
9126
|
+
LIMIT 1
|
|
9127
|
+
`);
|
|
9128
|
+
this._getEfficiencyScores = this._db.prepare(`
|
|
9129
|
+
SELECT * FROM efficiency_scores
|
|
9130
|
+
ORDER BY timestamp DESC
|
|
9131
|
+
LIMIT ?
|
|
9132
|
+
`);
|
|
9133
|
+
this._insertRecommendation = this._db.prepare(`
|
|
9134
|
+
INSERT OR REPLACE INTO recommendations (
|
|
9135
|
+
id, story_key, sprint_id, rule_id, severity, title, description,
|
|
9136
|
+
potential_savings_tokens, potential_savings_usd, action_target, generated_at
|
|
9137
|
+
) VALUES (
|
|
9138
|
+
?, ?, ?, ?, ?, ?, ?,
|
|
9139
|
+
?, ?, ?, ?
|
|
9140
|
+
)
|
|
9141
|
+
`);
|
|
9142
|
+
this._getRecommendations = this._db.prepare(`
|
|
9143
|
+
SELECT * FROM recommendations
|
|
9144
|
+
WHERE story_key = ?
|
|
9145
|
+
ORDER BY
|
|
9146
|
+
CASE severity
|
|
9147
|
+
WHEN 'critical' THEN 1
|
|
9148
|
+
WHEN 'warning' THEN 2
|
|
9149
|
+
ELSE 3
|
|
9150
|
+
END,
|
|
9151
|
+
COALESCE(potential_savings_tokens, 0) DESC
|
|
9152
|
+
`);
|
|
9153
|
+
this._getAllRecommendations = this._db.prepare(`
|
|
9154
|
+
SELECT * FROM recommendations
|
|
9155
|
+
ORDER BY
|
|
9156
|
+
CASE severity
|
|
9157
|
+
WHEN 'critical' THEN 1
|
|
9158
|
+
WHEN 'warning' THEN 2
|
|
9159
|
+
ELSE 3
|
|
9160
|
+
END,
|
|
9161
|
+
COALESCE(potential_savings_tokens, 0) DESC
|
|
9162
|
+
LIMIT ?
|
|
9163
|
+
`);
|
|
9164
|
+
this._insertCategoryStats = this._db.prepare(`
|
|
9165
|
+
INSERT OR IGNORE INTO category_stats (
|
|
9166
|
+
story_key, category, total_tokens, percentage, event_count,
|
|
9167
|
+
avg_tokens_per_event, trend
|
|
9168
|
+
) VALUES (?, ?, ?, ?, ?, ?, ?)
|
|
9169
|
+
`);
|
|
9170
|
+
this._getCategoryStats = this._db.prepare(`
|
|
9171
|
+
SELECT * FROM category_stats
|
|
9172
|
+
WHERE story_key = ?
|
|
9173
|
+
ORDER BY total_tokens DESC
|
|
9174
|
+
`);
|
|
9175
|
+
this._getAllCategoryStats = this._db.prepare(`
|
|
9176
|
+
SELECT category, SUM(total_tokens) AS total_tokens,
|
|
9177
|
+
AVG(percentage) AS percentage,
|
|
9178
|
+
SUM(event_count) AS event_count,
|
|
9179
|
+
AVG(avg_tokens_per_event) AS avg_tokens_per_event,
|
|
9180
|
+
MAX(trend) AS trend
|
|
9181
|
+
FROM category_stats
|
|
9182
|
+
GROUP BY category
|
|
9183
|
+
ORDER BY total_tokens DESC
|
|
9184
|
+
`);
|
|
9185
|
+
this._insertConsumerStats = this._db.prepare(`
|
|
9186
|
+
INSERT OR IGNORE INTO consumer_stats (
|
|
9187
|
+
story_key, consumer_key, category, total_tokens, percentage,
|
|
9188
|
+
event_count, top_invocations_json
|
|
9189
|
+
) VALUES (?, ?, ?, ?, ?, ?, ?)
|
|
9190
|
+
`);
|
|
9191
|
+
this._getConsumerStats = this._db.prepare(`
|
|
9192
|
+
SELECT * FROM consumer_stats
|
|
9193
|
+
WHERE story_key = ?
|
|
9194
|
+
ORDER BY total_tokens DESC
|
|
9195
|
+
`);
|
|
9196
|
+
}
|
|
9197
|
+
/**
|
|
9198
|
+
* Apply the telemetry schema DDL to the database.
|
|
9199
|
+
* Idempotent — uses CREATE TABLE IF NOT EXISTS.
|
|
9200
|
+
*/
|
|
9201
|
+
initSchema() {
|
|
9202
|
+
this._db.exec(`
|
|
9203
|
+
CREATE TABLE IF NOT EXISTS turn_analysis (
|
|
9204
|
+
story_key VARCHAR(64) NOT NULL,
|
|
9205
|
+
span_id VARCHAR(128) NOT NULL,
|
|
9206
|
+
turn_number INTEGER NOT NULL,
|
|
9207
|
+
name VARCHAR(255) NOT NULL DEFAULT '',
|
|
9208
|
+
timestamp BIGINT NOT NULL DEFAULT 0,
|
|
9209
|
+
source VARCHAR(32) NOT NULL DEFAULT '',
|
|
9210
|
+
model VARCHAR(64),
|
|
9211
|
+
input_tokens INTEGER NOT NULL DEFAULT 0,
|
|
9212
|
+
output_tokens INTEGER NOT NULL DEFAULT 0,
|
|
9213
|
+
cache_read_tokens INTEGER NOT NULL DEFAULT 0,
|
|
9214
|
+
fresh_tokens INTEGER NOT NULL DEFAULT 0,
|
|
9215
|
+
cache_hit_rate DOUBLE NOT NULL DEFAULT 0,
|
|
9216
|
+
cost_usd DOUBLE NOT NULL DEFAULT 0,
|
|
9217
|
+
duration_ms INTEGER NOT NULL DEFAULT 0,
|
|
9218
|
+
context_size INTEGER NOT NULL DEFAULT 0,
|
|
9219
|
+
context_delta INTEGER NOT NULL DEFAULT 0,
|
|
9220
|
+
tool_name VARCHAR(128),
|
|
9221
|
+
is_context_spike BOOLEAN NOT NULL DEFAULT 0,
|
|
9222
|
+
child_spans_json TEXT NOT NULL DEFAULT '[]',
|
|
9223
|
+
PRIMARY KEY (story_key, span_id)
|
|
9224
|
+
);
|
|
9225
|
+
|
|
9226
|
+
CREATE INDEX IF NOT EXISTS idx_turn_analysis_story
|
|
9227
|
+
ON turn_analysis (story_key, turn_number);
|
|
9228
|
+
|
|
9229
|
+
CREATE TABLE IF NOT EXISTS efficiency_scores (
|
|
9230
|
+
story_key VARCHAR(64) NOT NULL,
|
|
9231
|
+
timestamp BIGINT NOT NULL,
|
|
9232
|
+
composite_score INTEGER NOT NULL DEFAULT 0,
|
|
9233
|
+
cache_hit_sub_score DOUBLE NOT NULL DEFAULT 0,
|
|
9234
|
+
io_ratio_sub_score DOUBLE NOT NULL DEFAULT 0,
|
|
9235
|
+
context_management_sub_score DOUBLE NOT NULL DEFAULT 0,
|
|
9236
|
+
avg_cache_hit_rate DOUBLE NOT NULL DEFAULT 0,
|
|
9237
|
+
avg_io_ratio DOUBLE NOT NULL DEFAULT 0,
|
|
9238
|
+
context_spike_count INTEGER NOT NULL DEFAULT 0,
|
|
9239
|
+
total_turns INTEGER NOT NULL DEFAULT 0,
|
|
9240
|
+
per_model_json TEXT NOT NULL DEFAULT '[]',
|
|
9241
|
+
per_source_json TEXT NOT NULL DEFAULT '[]',
|
|
9242
|
+
PRIMARY KEY (story_key, timestamp)
|
|
9243
|
+
);
|
|
9244
|
+
|
|
9245
|
+
CREATE INDEX IF NOT EXISTS idx_efficiency_story
|
|
9246
|
+
ON efficiency_scores (story_key, timestamp DESC);
|
|
9247
|
+
|
|
9248
|
+
CREATE TABLE IF NOT EXISTS recommendations (
|
|
9249
|
+
id VARCHAR(16) NOT NULL,
|
|
9250
|
+
story_key VARCHAR(64) NOT NULL,
|
|
9251
|
+
sprint_id VARCHAR(64),
|
|
9252
|
+
rule_id VARCHAR(64) NOT NULL,
|
|
9253
|
+
severity VARCHAR(16) NOT NULL,
|
|
9254
|
+
title TEXT NOT NULL,
|
|
9255
|
+
description TEXT NOT NULL,
|
|
9256
|
+
potential_savings_tokens INTEGER,
|
|
9257
|
+
potential_savings_usd DOUBLE,
|
|
9258
|
+
action_target TEXT,
|
|
9259
|
+
generated_at VARCHAR(32) NOT NULL,
|
|
9260
|
+
PRIMARY KEY (id)
|
|
9261
|
+
);
|
|
9262
|
+
|
|
9263
|
+
CREATE INDEX IF NOT EXISTS idx_recommendations_story
|
|
9264
|
+
ON recommendations (story_key, severity);
|
|
9265
|
+
|
|
9266
|
+
CREATE TABLE IF NOT EXISTS category_stats (
|
|
9267
|
+
story_key VARCHAR(100) NOT NULL,
|
|
9268
|
+
category VARCHAR(30) NOT NULL,
|
|
9269
|
+
total_tokens BIGINT NOT NULL DEFAULT 0,
|
|
9270
|
+
percentage DECIMAL(6,3) NOT NULL DEFAULT 0,
|
|
9271
|
+
event_count INTEGER NOT NULL DEFAULT 0,
|
|
9272
|
+
avg_tokens_per_event DECIMAL(12,2) NOT NULL DEFAULT 0,
|
|
9273
|
+
trend VARCHAR(10) NOT NULL DEFAULT 'stable',
|
|
9274
|
+
PRIMARY KEY (story_key, category)
|
|
9275
|
+
);
|
|
9276
|
+
|
|
9277
|
+
CREATE INDEX IF NOT EXISTS idx_category_stats_story
|
|
9278
|
+
ON category_stats (story_key, total_tokens);
|
|
9279
|
+
|
|
9280
|
+
CREATE TABLE IF NOT EXISTS consumer_stats (
|
|
9281
|
+
story_key VARCHAR(100) NOT NULL,
|
|
9282
|
+
consumer_key VARCHAR(300) NOT NULL,
|
|
9283
|
+
category VARCHAR(30) NOT NULL,
|
|
9284
|
+
total_tokens BIGINT NOT NULL DEFAULT 0,
|
|
9285
|
+
percentage DECIMAL(6,3) NOT NULL DEFAULT 0,
|
|
9286
|
+
event_count INTEGER NOT NULL DEFAULT 0,
|
|
9287
|
+
top_invocations_json TEXT,
|
|
9288
|
+
PRIMARY KEY (story_key, consumer_key)
|
|
9289
|
+
);
|
|
9290
|
+
|
|
9291
|
+
CREATE INDEX IF NOT EXISTS idx_consumer_stats_story
|
|
9292
|
+
ON consumer_stats (story_key, total_tokens);
|
|
9293
|
+
`);
|
|
9294
|
+
}
|
|
9295
|
+
async storeTurnAnalysis(storyKey, turns) {
|
|
9296
|
+
if (turns.length === 0) return;
|
|
9297
|
+
const insertAll = this._db.transaction((rows) => {
|
|
9298
|
+
for (const turn of rows) this._insertTurnAnalysis.run(storyKey, turn.spanId, turn.turnNumber, turn.name, turn.timestamp, turn.source, turn.model ?? null, turn.inputTokens, turn.outputTokens, turn.cacheReadTokens, turn.freshTokens, turn.cacheHitRate, turn.costUsd, turn.durationMs, turn.contextSize, turn.contextDelta, turn.toolName ?? null, turn.isContextSpike ? 1 : 0, JSON.stringify(turn.childSpans));
|
|
9299
|
+
});
|
|
9300
|
+
insertAll(turns);
|
|
9301
|
+
logger$7.debug({
|
|
9302
|
+
storyKey,
|
|
9303
|
+
count: turns.length
|
|
9304
|
+
}, "Stored turn analysis");
|
|
9305
|
+
}
|
|
9306
|
+
async getTurnAnalysis(storyKey) {
|
|
9307
|
+
const rows = this._getTurnAnalysis.all(storyKey);
|
|
9308
|
+
if (rows.length === 0) return [];
|
|
9309
|
+
return rows.map((row) => {
|
|
9310
|
+
const raw = {
|
|
9311
|
+
spanId: row.span_id,
|
|
9312
|
+
turnNumber: row.turn_number,
|
|
9313
|
+
name: row.name,
|
|
9314
|
+
timestamp: row.timestamp,
|
|
9315
|
+
source: row.source,
|
|
9316
|
+
model: row.model ?? void 0,
|
|
9317
|
+
inputTokens: row.input_tokens,
|
|
9318
|
+
outputTokens: row.output_tokens,
|
|
9319
|
+
cacheReadTokens: row.cache_read_tokens,
|
|
9320
|
+
freshTokens: row.fresh_tokens,
|
|
9321
|
+
cacheHitRate: row.cache_hit_rate,
|
|
9322
|
+
costUsd: row.cost_usd,
|
|
9323
|
+
durationMs: row.duration_ms,
|
|
9324
|
+
contextSize: row.context_size,
|
|
9325
|
+
contextDelta: row.context_delta,
|
|
9326
|
+
toolName: row.tool_name ?? void 0,
|
|
9327
|
+
isContextSpike: row.is_context_spike === 1,
|
|
9328
|
+
childSpans: JSON.parse(row.child_spans_json)
|
|
9329
|
+
};
|
|
9330
|
+
return TurnAnalysisSchema.parse(raw);
|
|
9331
|
+
});
|
|
9332
|
+
}
|
|
9333
|
+
async storeEfficiencyScore(score) {
|
|
9334
|
+
this._insertEfficiencyScore.run(score.storyKey, score.timestamp, score.compositeScore, score.cacheHitSubScore, score.ioRatioSubScore, score.contextManagementSubScore, score.avgCacheHitRate, score.avgIoRatio, score.contextSpikeCount, score.totalTurns, JSON.stringify(score.perModelBreakdown), JSON.stringify(score.perSourceBreakdown));
|
|
9335
|
+
logger$7.debug({
|
|
9336
|
+
storyKey: score.storyKey,
|
|
9337
|
+
compositeScore: score.compositeScore
|
|
9338
|
+
}, "Stored efficiency score");
|
|
9339
|
+
}
|
|
9340
|
+
async getEfficiencyScore(storyKey) {
|
|
9341
|
+
const row = this._getEfficiencyScore.get(storyKey);
|
|
9342
|
+
if (row === void 0) return null;
|
|
9343
|
+
const raw = {
|
|
9344
|
+
storyKey: row.story_key,
|
|
9345
|
+
timestamp: row.timestamp,
|
|
9346
|
+
compositeScore: row.composite_score,
|
|
9347
|
+
cacheHitSubScore: row.cache_hit_sub_score,
|
|
9348
|
+
ioRatioSubScore: row.io_ratio_sub_score,
|
|
9349
|
+
contextManagementSubScore: row.context_management_sub_score,
|
|
9350
|
+
avgCacheHitRate: row.avg_cache_hit_rate,
|
|
9351
|
+
avgIoRatio: row.avg_io_ratio,
|
|
9352
|
+
contextSpikeCount: row.context_spike_count,
|
|
9353
|
+
totalTurns: row.total_turns,
|
|
9354
|
+
perModelBreakdown: JSON.parse(row.per_model_json),
|
|
9355
|
+
perSourceBreakdown: JSON.parse(row.per_source_json)
|
|
9356
|
+
};
|
|
9357
|
+
return EfficiencyScoreSchema.parse(raw);
|
|
9358
|
+
}
|
|
9359
|
+
/**
|
|
9360
|
+
* Retrieve multiple efficiency scores ordered by timestamp DESC.
|
|
9361
|
+
* Returns up to `limit` records (default 20).
|
|
9362
|
+
*/
|
|
9363
|
+
async getEfficiencyScores(limit = 20) {
|
|
9364
|
+
const rows = this._getEfficiencyScores.all(limit);
|
|
9365
|
+
if (rows.length === 0) return [];
|
|
9366
|
+
return rows.map((row) => {
|
|
9367
|
+
const raw = {
|
|
9368
|
+
storyKey: row.story_key,
|
|
9369
|
+
timestamp: row.timestamp,
|
|
9370
|
+
compositeScore: row.composite_score,
|
|
9371
|
+
cacheHitSubScore: row.cache_hit_sub_score,
|
|
9372
|
+
ioRatioSubScore: row.io_ratio_sub_score,
|
|
9373
|
+
contextManagementSubScore: row.context_management_sub_score,
|
|
9374
|
+
avgCacheHitRate: row.avg_cache_hit_rate,
|
|
9375
|
+
avgIoRatio: row.avg_io_ratio,
|
|
9376
|
+
contextSpikeCount: row.context_spike_count,
|
|
9377
|
+
totalTurns: row.total_turns,
|
|
9378
|
+
perModelBreakdown: JSON.parse(row.per_model_json),
|
|
9379
|
+
perSourceBreakdown: JSON.parse(row.per_source_json)
|
|
9380
|
+
};
|
|
9381
|
+
return EfficiencyScoreSchema.parse(raw);
|
|
9382
|
+
});
|
|
9383
|
+
}
|
|
9384
|
+
/**
|
|
9385
|
+
* Batch-insert all recommendations for a story in a single transaction.
|
|
9386
|
+
* Uses INSERT OR REPLACE for idempotency (IDs are deterministic hashes).
|
|
9387
|
+
*/
|
|
9388
|
+
async saveRecommendations(storyKey, recs) {
|
|
9389
|
+
if (recs.length === 0) return;
|
|
9390
|
+
const insertAll = this._db.transaction((rows) => {
|
|
9391
|
+
for (const rec of rows) this._insertRecommendation.run(rec.id, rec.storyKey, rec.sprintId ?? null, rec.ruleId, rec.severity, rec.title, rec.description, rec.potentialSavingsTokens ?? null, rec.potentialSavingsUsd ?? null, rec.actionTarget ?? null, rec.generatedAt);
|
|
9392
|
+
});
|
|
9393
|
+
insertAll(recs);
|
|
9394
|
+
logger$7.debug({
|
|
9395
|
+
storyKey,
|
|
9396
|
+
count: recs.length
|
|
9397
|
+
}, "Saved recommendations");
|
|
9398
|
+
}
|
|
9399
|
+
/**
|
|
9400
|
+
* Retrieve recommendations for a story ordered by severity (critical first)
|
|
9401
|
+
* then by potentialSavingsTokens descending.
|
|
9402
|
+
* Each row is validated with RecommendationSchema.parse().
|
|
9403
|
+
*/
|
|
9404
|
+
async getRecommendations(storyKey) {
|
|
9405
|
+
const rows = this._getRecommendations.all(storyKey);
|
|
9406
|
+
if (rows.length === 0) return [];
|
|
9407
|
+
return rows.map((row) => {
|
|
9408
|
+
const raw = {
|
|
9409
|
+
id: row.id,
|
|
9410
|
+
storyKey: row.story_key,
|
|
9411
|
+
sprintId: row.sprint_id ?? void 0,
|
|
9412
|
+
ruleId: row.rule_id,
|
|
9413
|
+
severity: row.severity,
|
|
9414
|
+
title: row.title,
|
|
9415
|
+
description: row.description,
|
|
9416
|
+
potentialSavingsTokens: row.potential_savings_tokens != null ? Number(row.potential_savings_tokens) : void 0,
|
|
9417
|
+
potentialSavingsUsd: row.potential_savings_usd != null ? Number(row.potential_savings_usd) : void 0,
|
|
9418
|
+
actionTarget: row.action_target ?? void 0,
|
|
9419
|
+
generatedAt: row.generated_at
|
|
9420
|
+
};
|
|
9421
|
+
return RecommendationSchema.parse(raw);
|
|
9422
|
+
});
|
|
9423
|
+
}
|
|
9424
|
+
/**
|
|
9425
|
+
* Retrieve all recommendations across all stories, ordered by severity (critical first)
|
|
9426
|
+
* then by potentialSavingsTokens descending. Returns up to `limit` records (default 20).
|
|
9427
|
+
*/
|
|
9428
|
+
async getAllRecommendations(limit = 20) {
|
|
9429
|
+
const rows = this._getAllRecommendations.all(limit);
|
|
9430
|
+
if (rows.length === 0) return [];
|
|
9431
|
+
return rows.map((row) => {
|
|
9432
|
+
const raw = {
|
|
9433
|
+
id: row.id,
|
|
9434
|
+
storyKey: row.story_key,
|
|
9435
|
+
sprintId: row.sprint_id ?? void 0,
|
|
9436
|
+
ruleId: row.rule_id,
|
|
9437
|
+
severity: row.severity,
|
|
9438
|
+
title: row.title,
|
|
9439
|
+
description: row.description,
|
|
9440
|
+
potentialSavingsTokens: row.potential_savings_tokens != null ? Number(row.potential_savings_tokens) : void 0,
|
|
9441
|
+
potentialSavingsUsd: row.potential_savings_usd != null ? Number(row.potential_savings_usd) : void 0,
|
|
9442
|
+
actionTarget: row.action_target ?? void 0,
|
|
9443
|
+
generatedAt: row.generated_at
|
|
9444
|
+
};
|
|
9445
|
+
return RecommendationSchema.parse(raw);
|
|
9446
|
+
});
|
|
9447
|
+
}
|
|
9448
|
+
/**
|
|
9449
|
+
* Batch-insert category stats for a story.
|
|
9450
|
+
* Uses INSERT OR IGNORE — existing rows for the same (story_key, category) are preserved.
|
|
9451
|
+
*/
|
|
9452
|
+
async storeCategoryStats(storyKey, stats) {
|
|
9453
|
+
if (stats.length === 0) return;
|
|
9454
|
+
const insertAll = this._db.transaction((rows) => {
|
|
9455
|
+
for (const stat$2 of rows) this._insertCategoryStats.run(storyKey, stat$2.category, stat$2.totalTokens, stat$2.percentage, stat$2.eventCount, stat$2.avgTokensPerEvent, stat$2.trend);
|
|
9456
|
+
});
|
|
9457
|
+
insertAll(stats);
|
|
9458
|
+
logger$7.debug({
|
|
9459
|
+
storyKey,
|
|
9460
|
+
count: stats.length
|
|
9461
|
+
}, "Stored category stats");
|
|
9462
|
+
}
|
|
9463
|
+
/**
|
|
9464
|
+
* Retrieve category stats for a story ordered by total_tokens descending.
|
|
9465
|
+
* Each row is validated with CategoryStatsSchema.parse().
|
|
9466
|
+
* Returns [] when no rows exist for the given storyKey.
|
|
9467
|
+
*/
|
|
9468
|
+
async getCategoryStats(storyKey) {
|
|
9469
|
+
const rows = storyKey === "" ? this._getAllCategoryStats.all() : this._getCategoryStats.all(storyKey);
|
|
9470
|
+
if (rows.length === 0) return [];
|
|
9471
|
+
return rows.map((row) => {
|
|
9472
|
+
const raw = {
|
|
9473
|
+
category: row.category,
|
|
9474
|
+
totalTokens: Number(row.total_tokens),
|
|
9475
|
+
percentage: Number(row.percentage),
|
|
9476
|
+
eventCount: Number(row.event_count),
|
|
9477
|
+
avgTokensPerEvent: Number(row.avg_tokens_per_event),
|
|
9478
|
+
trend: row.trend
|
|
9479
|
+
};
|
|
9480
|
+
return CategoryStatsSchema.parse(raw);
|
|
9481
|
+
});
|
|
9482
|
+
}
|
|
9483
|
+
/**
|
|
9484
|
+
* Batch-insert consumer stats for a story.
|
|
9485
|
+
* topInvocations is serialized to JSON.
|
|
9486
|
+
* Uses INSERT OR IGNORE — existing rows for the same (story_key, consumer_key) are preserved.
|
|
9487
|
+
*/
|
|
9488
|
+
async storeConsumerStats(storyKey, consumers) {
|
|
9489
|
+
if (consumers.length === 0) return;
|
|
9490
|
+
const insertAll = this._db.transaction((rows) => {
|
|
9491
|
+
for (const consumer of rows) this._insertConsumerStats.run(storyKey, consumer.consumerKey, consumer.category, consumer.totalTokens, consumer.percentage, consumer.eventCount, JSON.stringify(consumer.topInvocations));
|
|
9492
|
+
});
|
|
9493
|
+
insertAll(consumers);
|
|
9494
|
+
logger$7.debug({
|
|
9495
|
+
storyKey,
|
|
9496
|
+
count: consumers.length
|
|
9497
|
+
}, "Stored consumer stats");
|
|
9498
|
+
}
|
|
9499
|
+
/**
|
|
9500
|
+
* Retrieve consumer stats for a story ordered by total_tokens descending.
|
|
9501
|
+
* Deserializes top_invocations_json back to TopInvocation[].
|
|
9502
|
+
* Each row is validated with ConsumerStatsSchema.parse().
|
|
9503
|
+
* Returns [] when no rows exist for the given storyKey.
|
|
9504
|
+
*/
|
|
9505
|
+
async getConsumerStats(storyKey) {
|
|
9506
|
+
const rows = this._getConsumerStats.all(storyKey);
|
|
9507
|
+
if (rows.length === 0) return [];
|
|
9508
|
+
return rows.map((row) => {
|
|
9509
|
+
const raw = {
|
|
9510
|
+
consumerKey: row.consumer_key,
|
|
9511
|
+
category: row.category,
|
|
9512
|
+
totalTokens: Number(row.total_tokens),
|
|
9513
|
+
percentage: Number(row.percentage),
|
|
9514
|
+
eventCount: Number(row.event_count),
|
|
9515
|
+
topInvocations: JSON.parse(row.top_invocations_json ?? "[]")
|
|
9516
|
+
};
|
|
9517
|
+
return ConsumerStatsSchema.parse(raw);
|
|
9518
|
+
});
|
|
9519
|
+
}
|
|
9520
|
+
};
|
|
9521
|
+
|
|
9522
|
+
//#endregion
|
|
9523
|
+
//#region src/errors/app-error.ts
|
|
9524
|
+
/**
* AppError — the substrate base error type carrying a machine-readable code
* plus the numeric process exit code the CLI should terminate with
* (0 = success, 1 = user error, 2 = internal error).
*
* All structured errors in substrate should extend this class.
*
* @example
* throw new AppError('ERR_TELEMETRY_NOT_STARTED', 2, 'IngestionServer is not started')
*/
var AppError = class extends Error {
	/** Machine-readable error code, e.g. ERR_DB_LOCKED or ERR_INVALID_INPUT. */
	code;
	/** Exit code for process.exit(): 0 success, 1 user error, 2 internal error. */
	exitCode;
	/**
	 * @param errorCode - Machine-readable error identifier
	 * @param processExitCode - Numeric exit code associated with this failure
	 * @param message - Human-readable description passed to Error
	 */
	constructor(errorCode, processExitCode, message) {
		super(message);
		this.name = "AppError";
		this.code = errorCode;
		this.exitCode = processExitCode;
	}
};
|
|
9549
|
+
|
|
9550
|
+
//#endregion
|
|
9551
|
+
//#region src/modules/telemetry/ingestion-server.ts
|
|
9552
|
+
const logger$6 = createLogger("telemetry:ingestion-server");
|
|
9553
|
+
/**
* Error raised by IngestionServer for server lifecycle violations.
* Extends AppError (the project-standard error base) and always uses exit
* code 2, since lifecycle misuse is an internal error rather than user error.
*/
var TelemetryError = class extends AppError {
	/**
	 * @param errorCode - Machine-readable code, e.g. ERR_TELEMETRY_NOT_STARTED
	 * @param errorMessage - Human-readable description
	 */
	constructor(errorCode, errorMessage) {
		super(errorCode, 2, errorMessage);
		this.name = "TelemetryError";
	}
};
|
|
9564
|
+
/**
* Local HTTP server that accepts OTLP payloads from Claude Code sub-agents.
*
* Binds to 127.0.0.1 on `port` (default 4318, the conventional OTLP/HTTP port).
* Use port 0 in tests for an OS-assigned port.
*/
var IngestionServer = class {
	/** Underlying node:http server, or null whenever the server is not running. */
	_server = null;
	/** Requested port; the actual bound port may differ when 0 is requested. */
	_port;
	constructor(options = {}) {
		this._port = options.port ?? 4318;
	}
	/**
	 * Start the HTTP ingestion server.
	 * Resolves when the server is listening and ready to accept connections.
	 * Calling start() while already running is a warned no-op.
	 */
	async start() {
		if (this._server !== null) {
			logger$6.warn("IngestionServer.start() called while already started — ignoring");
			return;
		}
		return new Promise((resolve$2, reject) => {
			const server = createServer(this._handleRequest.bind(this));
			server.on("error", (err) => {
				logger$6.error({ err }, "IngestionServer failed to start");
				reject(err);
			});
			server.listen(this._port, "127.0.0.1", () => {
				this._server = server;
				const addr = server.address();
				logger$6.info({ port: addr.port }, "IngestionServer listening");
				resolve$2();
			});
		});
	}
	/**
	 * Stop the HTTP ingestion server.
	 * Resolves when the server has closed all connections; no-op when not started.
	 * _server is cleared up-front so a concurrent stop()/start() sees it as down.
	 */
	async stop() {
		const server = this._server;
		if (server === null) return;
		this._server = null;
		return new Promise((resolve$2, reject) => {
			server.close((err) => {
				if (err !== void 0 && err !== null) reject(err);
				else {
					logger$6.info("IngestionServer stopped");
					resolve$2();
				}
			});
		});
	}
	/**
	 * Return the 5 OTLP environment variables to inject into sub-agent processes.
	 *
	 * Fix: the endpoint advertises 127.0.0.1 explicitly instead of "localhost".
	 * The server is bound to 127.0.0.1 only, so on hosts where "localhost"
	 * resolves to ::1 first (IPv6-preferring resolvers, Node 17+ verbatim DNS
	 * ordering) sub-agents would otherwise fail to connect.
	 *
	 * @throws {TelemetryError} ERR_TELEMETRY_NOT_STARTED if the server is not started.
	 */
	getOtlpEnvVars() {
		const addr = this._server?.address();
		if (addr === null || addr === void 0 || typeof addr === "string") throw new TelemetryError("ERR_TELEMETRY_NOT_STARTED", "IngestionServer is not started — call start() before getOtlpEnvVars()");
		const endpoint = `http://127.0.0.1:${addr.port}`;
		return {
			CLAUDE_CODE_ENABLE_TELEMETRY: "1",
			OTEL_LOGS_EXPORTER: "otlp",
			OTEL_METRICS_EXPORTER: "otlp",
			OTEL_EXPORTER_OTLP_PROTOCOL: "http/json",
			OTEL_EXPORTER_OTLP_ENDPOINT: endpoint
		};
	}
	/**
	 * Buffer the request body, log it at trace level, and always ACK with
	 * 200 {} (OTLP/HTTP success shape). Read errors yield a 400.
	 * (Renamed `_req` → `req`: the parameter is used, so the unused-prefix
	 * convention was misleading.)
	 */
	_handleRequest(req, res) {
		const chunks = [];
		req.on("data", (chunk) => {
			chunks.push(chunk);
		});
		req.on("end", () => {
			const body = Buffer.concat(chunks).toString("utf-8");
			logger$6.trace({
				url: req.url,
				bodyLength: body.length
			}, "OTLP payload received");
			res.writeHead(200, { "Content-Type": "application/json" });
			res.end("{}");
		});
		req.on("error", (err) => {
			logger$6.warn({ err }, "Error reading OTLP request body");
			res.writeHead(400);
			res.end("Bad Request");
		});
	}
};
|
|
9654
|
+
|
|
9655
|
+
//#endregion
|
|
9656
|
+
//#region src/modules/telemetry/efficiency-scorer.ts
|
|
9657
|
+
var EfficiencyScorer = class {
	_logger;
	constructor(logger$26) {
		this._logger = logger$26;
	}
	/**
	 * Compute an efficiency score for a story given its turn analyses.
	 *
	 * Composite = cacheHit×0.4 + ioRatio×0.3 + contextManagement×0.3, rounded.
	 * Returns a zeroed `EfficiencyScore` immediately when `turns` is empty.
	 *
	 * @param storyKey - The story identifier (e.g. "27-6")
	 * @param turns - Turn analysis records from `TurnAnalyzer.analyze()`
	 */
	score(storyKey, turns) {
		if (turns.length === 0) return {
			storyKey,
			timestamp: Date.now(),
			compositeScore: 0,
			cacheHitSubScore: 0,
			ioRatioSubScore: 0,
			contextManagementSubScore: 0,
			avgCacheHitRate: 0,
			avgIoRatio: 0,
			contextSpikeCount: 0,
			totalTurns: 0,
			perModelBreakdown: [],
			perSourceBreakdown: []
		};
		const avgCacheHitRate = this._computeAvgCacheHitRate(turns);
		const avgIoRatio = this._computeAvgIoRatio(turns);
		const contextSpikeCount = turns.filter((t) => t.isContextSpike).length;
		const cacheHitSubScore = this._computeCacheHitSubScore(turns);
		const ioRatioSubScore = this._computeIoRatioSubScore(turns);
		const contextManagementSubScore = this._computeContextManagementSubScore(turns);
		// Weighted composite: cache locality dominates (40%), I/O shape and
		// context hygiene contribute 30% each.
		const compositeScore = Math.round(cacheHitSubScore * .4 + ioRatioSubScore * .3 + contextManagementSubScore * .3);
		const perModelBreakdown = this._buildPerModelBreakdown(turns);
		const perSourceBreakdown = this._buildPerSourceBreakdown(turns);
		this._logger.info({
			storyKey,
			compositeScore,
			contextSpikeCount
		}, "Computed efficiency score");
		return {
			storyKey,
			timestamp: Date.now(),
			compositeScore,
			cacheHitSubScore,
			ioRatioSubScore,
			contextManagementSubScore,
			avgCacheHitRate,
			avgIoRatio,
			contextSpikeCount,
			totalTurns: turns.length,
			perModelBreakdown,
			perSourceBreakdown
		};
	}
	/**
	 * Average cache hit rate across all turns, clamped to [0, 100].
	 * Formula: clamp(avgCacheHitRate × 100, 0, 100)
	 */
	_computeCacheHitSubScore(turns) {
		return this._clamp(this._computeAvgCacheHitRate(turns) * 100, 0, 100);
	}
	/**
	 * I/O ratio sub-score: lower ratio = better = higher score.
	 * Formula: clamp(100 - (avgIoRatio - 1) × 20, 0, 100)
	 *
	 * At avgIoRatio=1: score=80 (equal input/output tokens)
	 * At avgIoRatio=5: score=20
	 * At avgIoRatio≥6: clamped to 0
	 */
	_computeIoRatioSubScore(turns) {
		return this._clamp(100 - (this._computeAvgIoRatio(turns) - 1) * 20, 0, 100);
	}
	/**
	 * Context management sub-score: penalizes context spike frequency.
	 * Formula: clamp(100 - spikeRatio × 100, 0, 100)
	 * where spikeRatio = contextSpikeCount / max(totalTurns, 1)
	 */
	_computeContextManagementSubScore(turns) {
		const totalTurns = Math.max(turns.length, 1);
		const spikeCount = turns.filter((t) => t.isContextSpike).length;
		return this._clamp(100 - spikeCount / totalTurns * 100, 0, 100);
	}
	/** Mean of cacheHitRate over all turns; 0 for an empty list. */
	_computeAvgCacheHitRate(turns) {
		if (turns.length === 0) return 0;
		return turns.reduce((acc, t) => acc + t.cacheHitRate, 0) / turns.length;
	}
	/**
	 * Average I/O ratio: inputTokens / max(outputTokens, 1) per turn.
	 * 0 for an empty list.
	 */
	_computeAvgIoRatio(turns) {
		if (turns.length === 0) return 0;
		return turns.reduce((acc, t) => acc + t.inputTokens / Math.max(t.outputTokens, 1), 0) / turns.length;
	}
	/**
	 * Shared grouping helper (extracted: the model and source breakdown builders
	 * previously duplicated this Map-accumulation verbatim).
	 */
	_groupBy(turns, keyFn) {
		const groups = new Map();
		for (const turn of turns) {
			const key = keyFn(turn);
			const existing = groups.get(key);
			if (existing !== void 0) existing.push(turn);
			else groups.set(key, [turn]);
		}
		return groups;
	}
	/**
	 * Group turns by model, computing per-group efficiency metrics.
	 * Turns with null/undefined/empty model are grouped under "unknown".
	 */
	_buildPerModelBreakdown(turns) {
		const groups = this._groupBy(turns, (t) => t.model != null && t.model !== "" ? t.model : "unknown");
		const result = [];
		for (const [model, groupTurns] of groups) {
			const totalCostUsd = groupTurns.reduce((acc, t) => acc + t.costUsd, 0);
			const totalOutputTokens = groupTurns.reduce((acc, t) => acc + t.outputTokens, 0);
			result.push({
				model,
				cacheHitRate: this._computeAvgCacheHitRate(groupTurns),
				avgIoRatio: this._computeAvgIoRatio(groupTurns),
				costPer1KOutputTokens: totalCostUsd / Math.max(totalOutputTokens, 1) * 1e3
			});
		}
		return result;
	}
	/**
	 * Group turns by source, computing a per-group composite score using the
	 * same weights as the overall score. Sources with zero turns are excluded.
	 */
	_buildPerSourceBreakdown(turns) {
		const groups = this._groupBy(turns, (t) => t.source);
		const result = [];
		for (const [source, groupTurns] of groups) {
			if (groupTurns.length === 0) continue;
			const cacheHitSub = this._computeCacheHitSubScoreForGroup(groupTurns);
			const ioRatioSub = this._computeIoRatioSubScoreForGroup(groupTurns);
			const contextSub = this._computeContextManagementSubScoreForGroup(groupTurns);
			result.push({
				source,
				compositeScore: Math.round(cacheHitSub * .4 + ioRatioSub * .3 + contextSub * .3),
				turnCount: groupTurns.length
			});
		}
		return result;
	}
	// The *ForGroup variants previously re-implemented the sub-score formulas
	// verbatim; they now delegate, keeping the historical empty-group → 0
	// behavior (the general I/O formula would clamp an empty group to 100).
	_computeCacheHitSubScoreForGroup(turns) {
		return turns.length === 0 ? 0 : this._computeCacheHitSubScore(turns);
	}
	_computeIoRatioSubScoreForGroup(turns) {
		return turns.length === 0 ? 0 : this._computeIoRatioSubScore(turns);
	}
	_computeContextManagementSubScoreForGroup(turns) {
		return turns.length === 0 ? 0 : this._computeContextManagementSubScore(turns);
	}
	/** Clamp value to the inclusive [min, max] range. */
	_clamp(value, min, max) {
		return Math.max(min, Math.min(max, value));
	}
};
|
|
9834
|
+
|
|
9835
|
+
//#endregion
|
|
9836
|
+
//#region src/modules/telemetry/categorizer.ts
|
|
9837
|
+
/**
 * Tier-1 classification table: exact span operation name → semantic category.
 * Consulted first by Categorizer.classify(); a hit short-circuits the regex
 * and substring tiers.
 */
const EXACT_CATEGORY_MAP = new Map([
	["read_file", "file_reads"],
	["write_file", "tool_outputs"],
	["bash", "tool_outputs"],
	["tool_use", "tool_outputs"],
	["tool_result", "tool_outputs"],
	["system_prompt", "system_prompts"],
	["human_turn", "user_prompts"],
	["user_message", "user_prompts"],
	["assistant_turn", "conversation_history"],
	["assistant_message", "conversation_history"],
	["search_files", "file_reads"],
	["list_files", "file_reads"],
	["run_command", "tool_outputs"],
	["memory_read", "system_prompts"],
	["web_fetch", "tool_outputs"]
]);
/**
 * Tier-2 classification table: case-insensitive prefix regexes tried in array
 * order — the first matching pattern wins, so the more specific file-oriented
 * patterns are listed before the broad /^tool/ catch-all.
 */
const PREFIX_PATTERNS = [
	{
		pattern: /^(bash|exec|run|spawn)/i,
		category: "tool_outputs"
	},
	{
		pattern: /^(read|open|cat|head|tail).*file/i,
		category: "file_reads"
	},
	{
		pattern: /^(list|glob|find).*file/i,
		category: "file_reads"
	},
	{
		pattern: /^tool/i,
		category: "tool_outputs"
	},
	{
		pattern: /^system/i,
		category: "system_prompts"
	},
	{
		pattern: /^(human|user)/i,
		category: "user_prompts"
	},
	{
		pattern: /^(assistant|ai|model)/i,
		category: "conversation_history"
	}
];
/** All six semantic categories in a stable order for zero-fill initialisation. */
const ALL_CATEGORIES = [
	"tool_outputs",
	"file_reads",
	"system_prompts",
	"conversation_history",
	"user_prompts",
	"other"
];
|
|
9893
|
+
/**
 * Classifies spans into semantic token-consumption categories and computes
 * per-category statistics/trends. Relies on the module-level tables
 * EXACT_CATEGORY_MAP / PREFIX_PATTERNS / ALL_CATEGORIES and on the sibling
 * helpers extractToolNameFromSpan() and attributeSpanToTurnIndex().
 */
var Categorizer = class {
	// Injected structured logger (used at debug level only).
	_logger;
	constructor(logger$26) {
		this._logger = logger$26;
	}
	/**
	 * Classify an operation into a SemanticCategory using three-tier logic.
	 *
	 * @param operationName - Span operation name (e.g. 'read_file', 'bash')
	 * @param toolName - Optional tool name; non-empty value overrides fallback to tool_outputs
	 */
	classify(operationName, toolName) {
		// Tier 1: exact operation-name lookup.
		const exact = EXACT_CATEGORY_MAP.get(operationName);
		if (exact !== void 0) return exact;
		// Tier 2: ordered prefix regexes — first match wins.
		for (const { pattern, category } of PREFIX_PATTERNS) if (pattern.test(operationName)) return category;
		// Tier 3: substring heuristics on the lowercased name. Order matters:
		// "file"+"read/open" is checked before the broader "system"/"tool" tests.
		const lower = operationName.toLowerCase();
		if (lower.includes("file") && (lower.includes("read") || lower.includes("open"))) return "file_reads";
		if (lower.includes("system") || lower.includes("prompt")) return "system_prompts";
		if (lower.includes("bash") || lower.includes("exec") || lower.includes("tool")) return "tool_outputs";
		if (lower.includes("conversation") || lower.includes("history") || lower.includes("chat")) return "conversation_history";
		if (lower.includes("user") || lower.includes("human")) return "user_prompts";
		// Final fallback: a known, non-empty tool name implies a tool output.
		if (toolName !== void 0 && toolName.length > 0) return "tool_outputs";
		return "other";
	}
	/**
	 * Detect whether a category's token consumption is growing, stable, or shrinking
	 * by comparing first-half vs second-half turn attribution.
	 *
	 * @param categorySpans - Spans already classified into this category
	 * @param turns - Full turn sequence for the story
	 */
	computeTrend(categorySpans, turns) {
		// Fewer than two turns cannot show a direction.
		if (turns.length < 2) return "stable";
		// Map every turn span id (and its children's ids) to the turn index so
		// spans can be attributed directly; unmapped spans fall back to a
		// timestamp-based binary search below.
		const spanTurnMap = new Map();
		for (let i = 0; i < turns.length; i++) {
			const turn = turns[i];
			spanTurnMap.set(turn.spanId, i);
			for (const child of turn.childSpans) spanTurnMap.set(child.spanId, i);
		}
		const half = Math.floor(turns.length / 2);
		let firstHalfTokens = 0;
		let secondHalfTokens = 0;
		for (const span of categorySpans) {
			const turnIdx = spanTurnMap.has(span.spanId) ? spanTurnMap.get(span.spanId) : attributeSpanToTurnIndex(span.startTime, turns);
			const tokens = span.inputTokens + span.outputTokens;
			if (turnIdx < half) firstHalfTokens += tokens;
			else secondHalfTokens += tokens;
		}
		// No tokens at all → nothing to compare.
		if (firstHalfTokens === 0 && secondHalfTokens === 0) return "stable";
		// All tokens in the second half → unambiguously growing (avoids /0).
		if (firstHalfTokens === 0) return "growing";
		// ±20% hysteresis band: >1.2× is growing, <0.8× is shrinking.
		if (secondHalfTokens > 1.2 * firstHalfTokens) return "growing";
		if (secondHalfTokens < .8 * firstHalfTokens) return "shrinking";
		return "stable";
	}
	/**
	 * Compute per-category token statistics for a complete set of spans.
	 *
	 * All six SemanticCategory values are always present in the result (zero-token
	 * categories are included with totalTokens: 0). Results are sorted by
	 * totalTokens descending.
	 *
	 * @param spans - All NormalizedSpans for the story
	 * @param turns - TurnAnalysis sequence (may be empty)
	 */
	computeCategoryStats(spans, turns) {
		const grandTotal = spans.reduce((sum, s) => sum + s.inputTokens + s.outputTokens, 0);
		// Zero-fill one bucket per category so empty categories still appear.
		const buckets = new Map();
		for (const cat of ALL_CATEGORIES) buckets.set(cat, []);
		for (const span of spans) {
			const toolName = extractToolNameFromSpan(span);
			const cat = this.classify(span.operationName ?? span.name, toolName);
			buckets.get(cat).push(span);
		}
		const results = ALL_CATEGORIES.map((category) => {
			const catSpans = buckets.get(category);
			const totalTokens = catSpans.reduce((sum, s) => sum + s.inputTokens + s.outputTokens, 0);
			const eventCount = catSpans.length;
			// Percentage of grand total, rounded to 3 decimal places.
			const percentage = grandTotal > 0 ? Math.round(totalTokens / grandTotal * 100 * 1e3) / 1e3 : 0;
			const avgTokensPerEvent = eventCount > 0 ? totalTokens / eventCount : 0;
			const trend = this.computeTrend(catSpans, turns);
			return {
				category,
				totalTokens,
				percentage,
				eventCount,
				avgTokensPerEvent,
				trend
			};
		});
		this._logger.debug({
			categories: results.length,
			grandTotal
		}, "Computed category stats");
		return results.sort((a, b) => b.totalTokens - a.totalTokens);
	}
};
|
|
9989
|
+
/**
* Binary search over the (timestamp-ordered) turn list for the index of the
* last turn whose timestamp is ≤ spanStartTime.
* Falls back to index 0 when no turn precedes the span (or the list is empty).
*/
function attributeSpanToTurnIndex(spanStartTime, turns) {
	let low = 0;
	let high = turns.length - 1;
	let best = 0;
	while (low <= high) {
		const mid = Math.floor((low + high) / 2);
		if (turns[mid].timestamp > spanStartTime) {
			high = mid - 1;
		} else {
			best = mid;
			low = mid + 1;
		}
	}
	return best;
}
|
|
10006
|
+
/**
* Extract a tool name from a span's attributes, checking the known attribute
* keys in priority order: tool.name, llm.tool.name, claude.tool_name.
* Returns undefined when attributes are absent or no key holds a truthy value
* (empty strings are skipped, matching the original short-circuit behavior).
*/
function extractToolNameFromSpan(span) {
	const attrs = span.attributes;
	if (!attrs) return void 0;
	for (const key of ["tool.name", "llm.tool.name", "claude.tool_name"]) {
		const candidate = attrs[key];
		if (candidate) return candidate;
	}
	return void 0;
}
|
|
10016
|
+
|
|
10017
|
+
//#endregion
|
|
10018
|
+
//#region src/modules/telemetry/consumer-analyzer.ts
|
|
10019
|
+
/**
* Groups spans into "consumers" (operation + tool pairs), ranks them by total
* token usage, and reports per-consumer statistics.
*/
var ConsumerAnalyzer = class {
	_categorizer;
	_logger;
	constructor(categorizer, logger$26) {
		this._categorizer = categorizer;
		this._logger = logger$26;
	}
	/**
	 * Group spans by consumer key, rank by totalTokens descending, and return
	 * ConsumerStats for each non-zero-token group.
	 *
	 * @param spans - All NormalizedSpans for the story
	 */
	analyze(spans) {
		if (spans.length === 0) return [];
		let grandTotal = 0;
		for (const span of spans) grandTotal += span.inputTokens + span.outputTokens;
		const byConsumer = new Map();
		for (const span of spans) {
			const key = this._buildConsumerKey(span);
			const bucket = byConsumer.get(key);
			if (bucket === void 0) byConsumer.set(key, [span]);
			else bucket.push(span);
		}
		const stats = [];
		for (const [consumerKey, bucket] of byConsumer) {
			let totalTokens = 0;
			for (const s of bucket) totalTokens += s.inputTokens + s.outputTokens;
			// Zero-token consumers carry no signal — drop them.
			if (totalTokens === 0) continue;
			const percentage = grandTotal > 0 ? Math.round(totalTokens / grandTotal * 100 * 1e3) / 1e3 : 0;
			// Classify from the first span in the group (all share the same key).
			const representative = bucket[0];
			const category = this._categorizer.classify(representative.operationName ?? representative.name ?? "unknown", this._extractToolName(representative));
			// Top 20 invocations by combined token count, heaviest first.
			const ranked = [...bucket].sort((a, b) => b.inputTokens + b.outputTokens - (a.inputTokens + a.outputTokens));
			const topInvocations = ranked.slice(0, 20).map((s) => ({
				spanId: s.spanId,
				name: s.name,
				toolName: this._extractToolName(s),
				totalTokens: s.inputTokens + s.outputTokens,
				inputTokens: s.inputTokens,
				outputTokens: s.outputTokens
			}));
			stats.push({
				consumerKey,
				category,
				totalTokens,
				percentage,
				eventCount: bucket.length,
				topInvocations
			});
		}
		this._logger.debug({
			consumers: stats.length,
			grandTotal
		}, "Computed consumer stats");
		return stats.sort((a, b) => b.totalTokens - a.totalTokens);
	}
	/**
	 * Build a stable, collision-resistant consumer key from a span.
	 * Format: `operationName|toolName` (tool part is empty string if absent;
	 * parts are length-capped at 200/100 characters).
	 */
	_buildConsumerKey(span) {
		const operationPart = (span.operationName ?? span.name ?? "unknown").slice(0, 200);
		const toolPart = (this._extractToolName(span) ?? "").slice(0, 100);
		return `${operationPart}|${toolPart}`;
	}
	/**
	 * Extract a tool name from span attributes, checking three known attribute
	 * keys in priority order; empty strings fall through to the next key.
	 */
	_extractToolName(span) {
		const attrs = span.attributes;
		if (!attrs) return void 0;
		const resolved = attrs["tool.name"] || attrs["llm.tool.name"] || attrs["claude.tool_name"];
		return resolved || void 0;
	}
};
|
|
10096
|
+
|
|
8936
10097
|
//#endregion
|
|
8937
10098
|
//#region src/modules/implementation-orchestrator/orchestrator-impl.ts
|
|
8938
10099
|
function createPauseGate() {
|
|
@@ -8974,8 +10135,8 @@ function buildTargetedFilesContent(issueList) {
|
|
|
8974
10135
|
* @returns A fully-configured ImplementationOrchestrator ready to call run()
|
|
8975
10136
|
*/
|
|
8976
10137
|
function createImplementationOrchestrator(deps) {
|
|
8977
|
-
const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot, tokenCeilings, stateStore } = deps;
|
|
8978
|
-
const logger$
|
|
10138
|
+
const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot, tokenCeilings, stateStore, telemetryPersistence, ingestionServer } = deps;
|
|
10139
|
+
const logger$26 = createLogger("implementation-orchestrator");
|
|
8979
10140
|
let _state = "IDLE";
|
|
8980
10141
|
let _startedAt;
|
|
8981
10142
|
let _completedAt;
|
|
@@ -8995,6 +10156,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
8995
10156
|
const _storyDispatches = new Map();
|
|
8996
10157
|
let _maxConcurrentActual = 0;
|
|
8997
10158
|
let _contractMismatches;
|
|
10159
|
+
let _otlpEndpoint;
|
|
8998
10160
|
const _stateStoreCache = new Map();
|
|
8999
10161
|
const MEMORY_PRESSURE_BACKOFF_MS = [
|
|
9000
10162
|
3e4,
|
|
@@ -9020,7 +10182,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9020
10182
|
const nowMs = Date.now();
|
|
9021
10183
|
for (const [phase, startMs] of starts) {
|
|
9022
10184
|
const endMs = ends?.get(phase);
|
|
9023
|
-
if (endMs === void 0) logger$
|
|
10185
|
+
if (endMs === void 0) logger$26.warn({
|
|
9024
10186
|
storyKey,
|
|
9025
10187
|
phase
|
|
9026
10188
|
}, "Phase has no end time — story may have errored mid-phase. Duration capped to now() and may be inflated.");
|
|
@@ -9067,7 +10229,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9067
10229
|
recordedAt: completedAt,
|
|
9068
10230
|
timestamp: completedAt
|
|
9069
10231
|
}).catch((storeErr) => {
|
|
9070
|
-
logger$
|
|
10232
|
+
logger$26.warn({
|
|
9071
10233
|
err: storeErr,
|
|
9072
10234
|
storyKey
|
|
9073
10235
|
}, "Failed to record metric to StateStore (best-effort)");
|
|
@@ -9089,7 +10251,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9089
10251
|
rationale: `Story ${storyKey} completed with result=${result} in ${wallClockSeconds}s. Tokens: ${tokenAgg.input}+${tokenAgg.output}. Review cycles: ${reviewCycles}.`
|
|
9090
10252
|
});
|
|
9091
10253
|
} catch (decisionErr) {
|
|
9092
|
-
logger$
|
|
10254
|
+
logger$26.warn({
|
|
9093
10255
|
err: decisionErr,
|
|
9094
10256
|
storyKey
|
|
9095
10257
|
}, "Failed to write story-metrics decision (best-effort)");
|
|
@@ -9117,13 +10279,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
9117
10279
|
dispatches: _storyDispatches.get(storyKey) ?? 0
|
|
9118
10280
|
});
|
|
9119
10281
|
} catch (emitErr) {
|
|
9120
|
-
logger$
|
|
10282
|
+
logger$26.warn({
|
|
9121
10283
|
err: emitErr,
|
|
9122
10284
|
storyKey
|
|
9123
10285
|
}, "Failed to emit story:metrics event (best-effort)");
|
|
9124
10286
|
}
|
|
9125
10287
|
} catch (err) {
|
|
9126
|
-
logger$
|
|
10288
|
+
logger$26.warn({
|
|
9127
10289
|
err,
|
|
9128
10290
|
storyKey
|
|
9129
10291
|
}, "Failed to write story metrics (best-effort)");
|
|
@@ -9152,7 +10314,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9152
10314
|
rationale: `Story ${storyKey} ${outcome} after ${reviewCycles} review cycle(s).`
|
|
9153
10315
|
});
|
|
9154
10316
|
} catch (err) {
|
|
9155
|
-
logger$
|
|
10317
|
+
logger$26.warn({
|
|
9156
10318
|
err,
|
|
9157
10319
|
storyKey
|
|
9158
10320
|
}, "Failed to write story-outcome decision (best-effort)");
|
|
@@ -9178,7 +10340,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9178
10340
|
rationale: `Escalation diagnosis for ${payload.storyKey}: ${diagnosis.recommendedAction} — ${diagnosis.rationale}`
|
|
9179
10341
|
});
|
|
9180
10342
|
} catch (err) {
|
|
9181
|
-
logger$
|
|
10343
|
+
logger$26.warn({
|
|
9182
10344
|
err,
|
|
9183
10345
|
storyKey: payload.storyKey
|
|
9184
10346
|
}, "Failed to persist escalation diagnosis (best-effort)");
|
|
@@ -9227,7 +10389,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9227
10389
|
const existing = _stories.get(storyKey);
|
|
9228
10390
|
if (existing !== void 0) {
|
|
9229
10391
|
Object.assign(existing, updates);
|
|
9230
|
-
persistStoryState(storyKey, existing).catch((err) => logger$
|
|
10392
|
+
persistStoryState(storyKey, existing).catch((err) => logger$26.warn({
|
|
9231
10393
|
err,
|
|
9232
10394
|
storyKey
|
|
9233
10395
|
}, "StateStore write failed after updateStory"));
|
|
@@ -9236,12 +10398,12 @@ function createImplementationOrchestrator(deps) {
|
|
|
9236
10398
|
storyKey,
|
|
9237
10399
|
conflict: err
|
|
9238
10400
|
});
|
|
9239
|
-
else logger$
|
|
10401
|
+
else logger$26.warn({
|
|
9240
10402
|
err,
|
|
9241
10403
|
storyKey
|
|
9242
10404
|
}, "mergeStory failed");
|
|
9243
10405
|
});
|
|
9244
|
-
else if (updates.phase === "ESCALATED"
|
|
10406
|
+
else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$26.warn({
|
|
9245
10407
|
err,
|
|
9246
10408
|
storyKey
|
|
9247
10409
|
}, "rollbackStory failed — branch may persist"));
|
|
@@ -9268,7 +10430,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9268
10430
|
};
|
|
9269
10431
|
await stateStore.setStoryState(storyKey, record);
|
|
9270
10432
|
} catch (err) {
|
|
9271
|
-
logger$
|
|
10433
|
+
logger$26.warn({
|
|
9272
10434
|
err,
|
|
9273
10435
|
storyKey
|
|
9274
10436
|
}, "StateStore.setStoryState failed (best-effort)");
|
|
@@ -9284,7 +10446,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9284
10446
|
token_usage_json: serialized
|
|
9285
10447
|
});
|
|
9286
10448
|
} catch (err) {
|
|
9287
|
-
logger$
|
|
10449
|
+
logger$26.warn({ err }, "Failed to persist orchestrator state");
|
|
9288
10450
|
}
|
|
9289
10451
|
}
|
|
9290
10452
|
function recordProgress() {
|
|
@@ -9331,7 +10493,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9331
10493
|
}
|
|
9332
10494
|
if (childActive) {
|
|
9333
10495
|
_lastProgressTs = Date.now();
|
|
9334
|
-
logger$
|
|
10496
|
+
logger$26.debug({
|
|
9335
10497
|
storyKey: key,
|
|
9336
10498
|
phase: s.phase,
|
|
9337
10499
|
childPids
|
|
@@ -9340,7 +10502,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9340
10502
|
}
|
|
9341
10503
|
_stalledStories.add(key);
|
|
9342
10504
|
_storiesWithStall.add(key);
|
|
9343
|
-
logger$
|
|
10505
|
+
logger$26.warn({
|
|
9344
10506
|
storyKey: key,
|
|
9345
10507
|
phase: s.phase,
|
|
9346
10508
|
elapsedMs: elapsed,
|
|
@@ -9385,7 +10547,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9385
10547
|
for (let attempt = 0; attempt < MEMORY_PRESSURE_BACKOFF_MS.length; attempt++) {
|
|
9386
10548
|
const memState = dispatcher.getMemoryState();
|
|
9387
10549
|
if (!memState.isPressured) return true;
|
|
9388
|
-
logger$
|
|
10550
|
+
logger$26.warn({
|
|
9389
10551
|
storyKey,
|
|
9390
10552
|
freeMB: memState.freeMB,
|
|
9391
10553
|
thresholdMB: memState.thresholdMB,
|
|
@@ -9405,11 +10567,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
9405
10567
|
* exhausted retries the story is ESCALATED.
|
|
9406
10568
|
*/
|
|
9407
10569
|
async function processStory(storyKey) {
|
|
9408
|
-
logger$
|
|
10570
|
+
logger$26.info({ storyKey }, "Processing story");
|
|
9409
10571
|
{
|
|
9410
10572
|
const memoryOk = await checkMemoryPressure(storyKey);
|
|
9411
10573
|
if (!memoryOk) {
|
|
9412
|
-
logger$
|
|
10574
|
+
logger$26.warn({ storyKey }, "Memory pressure exhausted — escalating story without dispatch");
|
|
9413
10575
|
const memPressureState = {
|
|
9414
10576
|
phase: "ESCALATED",
|
|
9415
10577
|
reviewCycles: 0,
|
|
@@ -9418,7 +10580,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9418
10580
|
completedAt: new Date().toISOString()
|
|
9419
10581
|
};
|
|
9420
10582
|
_stories.set(storyKey, memPressureState);
|
|
9421
|
-
persistStoryState(storyKey, memPressureState).catch((err) => logger$
|
|
10583
|
+
persistStoryState(storyKey, memPressureState).catch((err) => logger$26.warn({
|
|
9422
10584
|
err,
|
|
9423
10585
|
storyKey
|
|
9424
10586
|
}, "StateStore write failed after memory-pressure escalation"));
|
|
@@ -9435,7 +10597,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9435
10597
|
}
|
|
9436
10598
|
await waitIfPaused();
|
|
9437
10599
|
if (_state !== "RUNNING") return;
|
|
9438
|
-
stateStore?.branchForStory(storyKey).catch((err) => logger$
|
|
10600
|
+
stateStore?.branchForStory(storyKey).catch((err) => logger$26.warn({
|
|
9439
10601
|
err,
|
|
9440
10602
|
storyKey
|
|
9441
10603
|
}, "branchForStory failed — continuing without branch isolation"));
|
|
@@ -9452,14 +10614,14 @@ function createImplementationOrchestrator(deps) {
|
|
|
9452
10614
|
if (match) {
|
|
9453
10615
|
const candidatePath = join$1(artifactsDir, match);
|
|
9454
10616
|
const validation = await isValidStoryFile(candidatePath);
|
|
9455
|
-
if (!validation.valid) logger$
|
|
10617
|
+
if (!validation.valid) logger$26.warn({
|
|
9456
10618
|
storyKey,
|
|
9457
10619
|
storyFilePath: candidatePath,
|
|
9458
10620
|
reason: validation.reason
|
|
9459
10621
|
}, `Existing story file for ${storyKey} is invalid (${validation.reason}) — re-creating`);
|
|
9460
10622
|
else {
|
|
9461
10623
|
storyFilePath = candidatePath;
|
|
9462
|
-
logger$
|
|
10624
|
+
logger$26.info({
|
|
9463
10625
|
storyKey,
|
|
9464
10626
|
storyFilePath
|
|
9465
10627
|
}, "Found existing story file — skipping create-story");
|
|
@@ -9485,7 +10647,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
9485
10647
|
contextCompiler,
|
|
9486
10648
|
dispatcher,
|
|
9487
10649
|
projectRoot,
|
|
9488
|
-
tokenCeilings
|
|
10650
|
+
tokenCeilings,
|
|
10651
|
+
otlpEndpoint: _otlpEndpoint
|
|
9489
10652
|
}, {
|
|
9490
10653
|
epicId: storyKey.split("-")[0] ?? storyKey,
|
|
9491
10654
|
storyKey,
|
|
@@ -9576,14 +10739,14 @@ function createImplementationOrchestrator(deps) {
|
|
|
9576
10739
|
...contract.transport !== void 0 ? { transport: contract.transport } : {}
|
|
9577
10740
|
})
|
|
9578
10741
|
});
|
|
9579
|
-
logger$
|
|
10742
|
+
logger$26.info({
|
|
9580
10743
|
storyKey,
|
|
9581
10744
|
contractCount: contracts.length,
|
|
9582
10745
|
contracts
|
|
9583
10746
|
}, "Stored interface contract declarations");
|
|
9584
10747
|
}
|
|
9585
10748
|
} catch (err) {
|
|
9586
|
-
logger$
|
|
10749
|
+
logger$26.warn({
|
|
9587
10750
|
storyKey,
|
|
9588
10751
|
error: err instanceof Error ? err.message : String(err)
|
|
9589
10752
|
}, "Failed to parse interface contracts — continuing without contract declarations");
|
|
@@ -9601,17 +10764,18 @@ function createImplementationOrchestrator(deps) {
|
|
|
9601
10764
|
contextCompiler,
|
|
9602
10765
|
dispatcher,
|
|
9603
10766
|
projectRoot,
|
|
9604
|
-
tokenCeilings
|
|
10767
|
+
tokenCeilings,
|
|
10768
|
+
otlpEndpoint: _otlpEndpoint
|
|
9605
10769
|
}, {
|
|
9606
10770
|
storyKey,
|
|
9607
10771
|
storyFilePath: storyFilePath ?? "",
|
|
9608
10772
|
pipelineRunId: config.pipelineRunId ?? ""
|
|
9609
10773
|
});
|
|
9610
10774
|
testPlanPhaseResult = testPlanResult.result;
|
|
9611
|
-
if (testPlanResult.result === "success") logger$
|
|
9612
|
-
else logger$
|
|
10775
|
+
if (testPlanResult.result === "success") logger$26.info({ storyKey }, "Test plan generated successfully");
|
|
10776
|
+
else logger$26.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
|
|
9613
10777
|
} catch (err) {
|
|
9614
|
-
logger$
|
|
10778
|
+
logger$26.warn({
|
|
9615
10779
|
storyKey,
|
|
9616
10780
|
err
|
|
9617
10781
|
}, "Test planning failed — proceeding to dev-story without test plan");
|
|
@@ -9635,7 +10799,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9635
10799
|
try {
|
|
9636
10800
|
storyContentForAnalysis = await readFile$1(storyFilePath ?? "", "utf-8");
|
|
9637
10801
|
} catch (err) {
|
|
9638
|
-
logger$
|
|
10802
|
+
logger$26.error({
|
|
9639
10803
|
storyKey,
|
|
9640
10804
|
storyFilePath,
|
|
9641
10805
|
error: err instanceof Error ? err.message : String(err)
|
|
@@ -9643,7 +10807,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9643
10807
|
}
|
|
9644
10808
|
const analysis = analyzeStoryComplexity(storyContentForAnalysis);
|
|
9645
10809
|
const batches = planTaskBatches(analysis);
|
|
9646
|
-
logger$
|
|
10810
|
+
logger$26.info({
|
|
9647
10811
|
storyKey,
|
|
9648
10812
|
estimatedScope: analysis.estimatedScope,
|
|
9649
10813
|
batchCount: batches.length,
|
|
@@ -9661,7 +10825,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9661
10825
|
if (_state !== "RUNNING") break;
|
|
9662
10826
|
const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
|
|
9663
10827
|
const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
|
|
9664
|
-
logger$
|
|
10828
|
+
logger$26.info({
|
|
9665
10829
|
storyKey,
|
|
9666
10830
|
batchIndex: batch.batchIndex,
|
|
9667
10831
|
taskCount: batch.taskIds.length
|
|
@@ -9676,7 +10840,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
9676
10840
|
contextCompiler,
|
|
9677
10841
|
dispatcher,
|
|
9678
10842
|
projectRoot,
|
|
9679
|
-
tokenCeilings
|
|
10843
|
+
tokenCeilings,
|
|
10844
|
+
otlpEndpoint: _otlpEndpoint
|
|
9680
10845
|
}, {
|
|
9681
10846
|
storyKey,
|
|
9682
10847
|
storyFilePath: storyFilePath ?? "",
|
|
@@ -9686,7 +10851,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9686
10851
|
});
|
|
9687
10852
|
} catch (batchErr) {
|
|
9688
10853
|
const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
|
|
9689
|
-
logger$
|
|
10854
|
+
logger$26.warn({
|
|
9690
10855
|
storyKey,
|
|
9691
10856
|
batchIndex: batch.batchIndex,
|
|
9692
10857
|
error: errMsg
|
|
@@ -9706,7 +10871,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9706
10871
|
filesModified: batchFilesModified,
|
|
9707
10872
|
result: batchResult.result === "success" ? "success" : "failed"
|
|
9708
10873
|
};
|
|
9709
|
-
logger$
|
|
10874
|
+
logger$26.info(batchMetrics, "Batch dev-story metrics");
|
|
9710
10875
|
for (const f of batchFilesModified) allFilesModified.add(f);
|
|
9711
10876
|
if (batchFilesModified.length > 0) batchFileGroups.push({
|
|
9712
10877
|
batchIndex: batch.batchIndex,
|
|
@@ -9728,13 +10893,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
9728
10893
|
})
|
|
9729
10894
|
});
|
|
9730
10895
|
} catch (tokenErr) {
|
|
9731
|
-
logger$
|
|
10896
|
+
logger$26.warn({
|
|
9732
10897
|
storyKey,
|
|
9733
10898
|
batchIndex: batch.batchIndex,
|
|
9734
10899
|
err: tokenErr
|
|
9735
10900
|
}, "Failed to record batch token usage");
|
|
9736
10901
|
}
|
|
9737
|
-
if (batchResult.result === "failed") logger$
|
|
10902
|
+
if (batchResult.result === "failed") logger$26.warn({
|
|
9738
10903
|
storyKey,
|
|
9739
10904
|
batchIndex: batch.batchIndex,
|
|
9740
10905
|
error: batchResult.error
|
|
@@ -9756,7 +10921,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
9756
10921
|
contextCompiler,
|
|
9757
10922
|
dispatcher,
|
|
9758
10923
|
projectRoot,
|
|
9759
|
-
tokenCeilings
|
|
10924
|
+
tokenCeilings,
|
|
10925
|
+
otlpEndpoint: _otlpEndpoint
|
|
9760
10926
|
}, {
|
|
9761
10927
|
storyKey,
|
|
9762
10928
|
storyFilePath: storyFilePath ?? "",
|
|
@@ -9770,7 +10936,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9770
10936
|
});
|
|
9771
10937
|
persistState();
|
|
9772
10938
|
if (devResult.result === "success") devStoryWasSuccess = true;
|
|
9773
|
-
else logger$
|
|
10939
|
+
else logger$26.warn({
|
|
9774
10940
|
storyKey,
|
|
9775
10941
|
error: devResult.error,
|
|
9776
10942
|
filesModified: devFilesModified.length
|
|
@@ -9798,7 +10964,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9798
10964
|
if (devStoryWasSuccess) {
|
|
9799
10965
|
gitDiffFiles = checkGitDiffFiles(projectRoot ?? process.cwd());
|
|
9800
10966
|
if (gitDiffFiles.length === 0) {
|
|
9801
|
-
logger$
|
|
10967
|
+
logger$26.warn({ storyKey }, "Zero-diff detected after COMPLETE dev-story — no file changes in git working tree");
|
|
9802
10968
|
eventBus.emit("orchestrator:zero-diff-escalation", {
|
|
9803
10969
|
storyKey,
|
|
9804
10970
|
reason: "zero-diff-on-complete"
|
|
@@ -9829,7 +10995,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9829
10995
|
});
|
|
9830
10996
|
if (buildVerifyResult.status === "passed") {
|
|
9831
10997
|
eventBus.emit("story:build-verification-passed", { storyKey });
|
|
9832
|
-
logger$
|
|
10998
|
+
logger$26.info({ storyKey }, "Build verification passed");
|
|
9833
10999
|
} else if (buildVerifyResult.status === "failed" || buildVerifyResult.status === "timeout") {
|
|
9834
11000
|
const truncatedOutput = (buildVerifyResult.output ?? "").slice(0, 2e3);
|
|
9835
11001
|
const reason = buildVerifyResult.reason ?? "build-verification-failed";
|
|
@@ -9838,7 +11004,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9838
11004
|
exitCode: buildVerifyResult.exitCode ?? 1,
|
|
9839
11005
|
output: truncatedOutput
|
|
9840
11006
|
});
|
|
9841
|
-
logger$
|
|
11007
|
+
logger$26.warn({
|
|
9842
11008
|
storyKey,
|
|
9843
11009
|
reason,
|
|
9844
11010
|
exitCode: buildVerifyResult.exitCode
|
|
@@ -9868,7 +11034,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9868
11034
|
storyKey
|
|
9869
11035
|
});
|
|
9870
11036
|
if (icResult.potentiallyAffectedTests.length > 0) {
|
|
9871
|
-
logger$
|
|
11037
|
+
logger$26.warn({
|
|
9872
11038
|
storyKey,
|
|
9873
11039
|
modifiedInterfaces: icResult.modifiedInterfaces,
|
|
9874
11040
|
potentiallyAffectedTests: icResult.potentiallyAffectedTests
|
|
@@ -9914,7 +11080,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9914
11080
|
"NEEDS_MAJOR_REWORK": 2
|
|
9915
11081
|
};
|
|
9916
11082
|
for (const group of batchFileGroups) {
|
|
9917
|
-
logger$
|
|
11083
|
+
logger$26.info({
|
|
9918
11084
|
storyKey,
|
|
9919
11085
|
batchIndex: group.batchIndex,
|
|
9920
11086
|
fileCount: group.files.length
|
|
@@ -9926,7 +11092,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
9926
11092
|
contextCompiler,
|
|
9927
11093
|
dispatcher,
|
|
9928
11094
|
projectRoot,
|
|
9929
|
-
tokenCeilings
|
|
11095
|
+
tokenCeilings,
|
|
11096
|
+
otlpEndpoint: _otlpEndpoint
|
|
9930
11097
|
}, {
|
|
9931
11098
|
storyKey,
|
|
9932
11099
|
storyFilePath: storyFilePath ?? "",
|
|
@@ -9952,7 +11119,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9952
11119
|
rawOutput: lastRawOutput,
|
|
9953
11120
|
tokenUsage: aggregateTokens
|
|
9954
11121
|
};
|
|
9955
|
-
logger$
|
|
11122
|
+
logger$26.info({
|
|
9956
11123
|
storyKey,
|
|
9957
11124
|
batchCount: batchFileGroups.length,
|
|
9958
11125
|
verdict: worstVerdict,
|
|
@@ -9966,7 +11133,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
9966
11133
|
contextCompiler,
|
|
9967
11134
|
dispatcher,
|
|
9968
11135
|
projectRoot,
|
|
9969
|
-
tokenCeilings
|
|
11136
|
+
tokenCeilings,
|
|
11137
|
+
otlpEndpoint: _otlpEndpoint
|
|
9970
11138
|
}, {
|
|
9971
11139
|
storyKey,
|
|
9972
11140
|
storyFilePath: storyFilePath ?? "",
|
|
@@ -9979,7 +11147,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9979
11147
|
const isPhantomReview = reviewResult.dispatchFailed === true || reviewResult.verdict !== "SHIP_IT" && reviewResult.verdict !== "LGTM_WITH_NOTES" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
|
|
9980
11148
|
if (isPhantomReview && !timeoutRetried) {
|
|
9981
11149
|
timeoutRetried = true;
|
|
9982
|
-
logger$
|
|
11150
|
+
logger$26.warn({
|
|
9983
11151
|
storyKey,
|
|
9984
11152
|
reviewCycles,
|
|
9985
11153
|
error: reviewResult.error
|
|
@@ -9989,7 +11157,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
9989
11157
|
verdict = reviewResult.verdict;
|
|
9990
11158
|
issueList = reviewResult.issue_list ?? [];
|
|
9991
11159
|
if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
|
|
9992
|
-
logger$
|
|
11160
|
+
logger$26.info({
|
|
9993
11161
|
storyKey,
|
|
9994
11162
|
originalVerdict: verdict,
|
|
9995
11163
|
issuesBefore: previousIssueList.length,
|
|
@@ -10025,7 +11193,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10025
11193
|
if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
|
|
10026
11194
|
parts.push(`${fileCount} files`);
|
|
10027
11195
|
parts.push(`${totalTokensK} tokens`);
|
|
10028
|
-
logger$
|
|
11196
|
+
logger$26.info({
|
|
10029
11197
|
storyKey,
|
|
10030
11198
|
verdict,
|
|
10031
11199
|
agentVerdict: reviewResult.agentVerdict
|
|
@@ -10074,13 +11242,58 @@ function createImplementationOrchestrator(deps) {
|
|
|
10074
11242
|
}),
|
|
10075
11243
|
rationale: `Advisory notes from LGTM_WITH_NOTES review of ${storyKey}`
|
|
10076
11244
|
});
|
|
10077
|
-
logger$
|
|
11245
|
+
logger$26.info({ storyKey }, "Advisory notes persisted to decision store");
|
|
10078
11246
|
} catch (advisoryErr) {
|
|
10079
|
-
logger$
|
|
11247
|
+
logger$26.warn({
|
|
10080
11248
|
storyKey,
|
|
10081
11249
|
error: advisoryErr instanceof Error ? advisoryErr.message : String(advisoryErr)
|
|
10082
11250
|
}, "Failed to persist advisory notes (best-effort)");
|
|
10083
11251
|
}
|
|
11252
|
+
if (telemetryPersistence !== void 0) try {
|
|
11253
|
+
const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
|
|
11254
|
+
if (turns.length > 0) {
|
|
11255
|
+
const scorer = new EfficiencyScorer(logger$26);
|
|
11256
|
+
const effScore = scorer.score(storyKey, turns);
|
|
11257
|
+
await telemetryPersistence.storeEfficiencyScore(effScore);
|
|
11258
|
+
logger$26.info({
|
|
11259
|
+
storyKey,
|
|
11260
|
+
compositeScore: effScore.compositeScore,
|
|
11261
|
+
modelCount: effScore.perModelBreakdown.length
|
|
11262
|
+
}, "Efficiency score computed and persisted");
|
|
11263
|
+
} else logger$26.debug({ storyKey }, "No turn analysis data available — skipping efficiency scoring");
|
|
11264
|
+
} catch (effErr) {
|
|
11265
|
+
logger$26.warn({
|
|
11266
|
+
storyKey,
|
|
11267
|
+
error: effErr instanceof Error ? effErr.message : String(effErr)
|
|
11268
|
+
}, "Efficiency scoring failed — story verdict unchanged");
|
|
11269
|
+
}
|
|
11270
|
+
if (telemetryPersistence !== void 0) try {
|
|
11271
|
+
const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
|
|
11272
|
+
const spans = [];
|
|
11273
|
+
if (spans.length === 0) logger$26.debug({ storyKey }, "No spans for telemetry categorization — skipping");
|
|
11274
|
+
else {
|
|
11275
|
+
const categorizer = new Categorizer(logger$26);
|
|
11276
|
+
const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$26);
|
|
11277
|
+
const categoryStats = categorizer.computeCategoryStats(spans, turns);
|
|
11278
|
+
const consumerStats = consumerAnalyzer.analyze(spans);
|
|
11279
|
+
await telemetryPersistence.storeCategoryStats(storyKey, categoryStats);
|
|
11280
|
+
await telemetryPersistence.storeConsumerStats(storyKey, consumerStats);
|
|
11281
|
+
const growingCount = categoryStats.filter((c) => c.trend === "growing").length;
|
|
11282
|
+
const topCategory = categoryStats[0]?.category ?? "none";
|
|
11283
|
+
const topConsumer = consumerStats[0]?.consumerKey ?? "none";
|
|
11284
|
+
logger$26.info({
|
|
11285
|
+
storyKey,
|
|
11286
|
+
topCategory,
|
|
11287
|
+
topConsumer,
|
|
11288
|
+
growingCount
|
|
11289
|
+
}, "Semantic categorization and consumer analysis complete");
|
|
11290
|
+
}
|
|
11291
|
+
} catch (catErr) {
|
|
11292
|
+
logger$26.warn({
|
|
11293
|
+
storyKey,
|
|
11294
|
+
error: catErr instanceof Error ? catErr.message : String(catErr)
|
|
11295
|
+
}, "Semantic categorization failed — story verdict unchanged");
|
|
11296
|
+
}
|
|
10084
11297
|
try {
|
|
10085
11298
|
const expansionResult = await runTestExpansion({
|
|
10086
11299
|
db,
|
|
@@ -10088,7 +11301,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
10088
11301
|
contextCompiler,
|
|
10089
11302
|
dispatcher,
|
|
10090
11303
|
projectRoot,
|
|
10091
|
-
tokenCeilings
|
|
11304
|
+
tokenCeilings,
|
|
11305
|
+
otlpEndpoint: _otlpEndpoint
|
|
10092
11306
|
}, {
|
|
10093
11307
|
storyKey,
|
|
10094
11308
|
storyFilePath: storyFilePath ?? "",
|
|
@@ -10096,7 +11310,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10096
11310
|
filesModified: devFilesModified,
|
|
10097
11311
|
workingDirectory: projectRoot
|
|
10098
11312
|
});
|
|
10099
|
-
logger$
|
|
11313
|
+
logger$26.debug({
|
|
10100
11314
|
storyKey,
|
|
10101
11315
|
expansion_priority: expansionResult.expansion_priority,
|
|
10102
11316
|
coverage_gaps: expansionResult.coverage_gaps.length
|
|
@@ -10109,7 +11323,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10109
11323
|
value: JSON.stringify(expansionResult)
|
|
10110
11324
|
});
|
|
10111
11325
|
} catch (expansionErr) {
|
|
10112
|
-
logger$
|
|
11326
|
+
logger$26.warn({
|
|
10113
11327
|
storyKey,
|
|
10114
11328
|
error: expansionErr instanceof Error ? expansionErr.message : String(expansionErr)
|
|
10115
11329
|
}, "Test expansion failed — story verdict unchanged");
|
|
@@ -10136,7 +11350,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10136
11350
|
persistState();
|
|
10137
11351
|
return;
|
|
10138
11352
|
}
|
|
10139
|
-
logger$
|
|
11353
|
+
logger$26.info({
|
|
10140
11354
|
storyKey,
|
|
10141
11355
|
reviewCycles: finalReviewCycles,
|
|
10142
11356
|
issueCount: issueList.length
|
|
@@ -10196,14 +11410,15 @@ function createImplementationOrchestrator(deps) {
|
|
|
10196
11410
|
fixPrompt = assembled.prompt;
|
|
10197
11411
|
} catch {
|
|
10198
11412
|
fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
|
|
10199
|
-
logger$
|
|
11413
|
+
logger$26.warn({ storyKey }, "Failed to assemble auto-approve fix prompt, using fallback");
|
|
10200
11414
|
}
|
|
10201
11415
|
const handle = dispatcher.dispatch({
|
|
10202
11416
|
prompt: fixPrompt,
|
|
10203
11417
|
agent: "claude-code",
|
|
10204
11418
|
taskType: "minor-fixes",
|
|
10205
11419
|
workingDirectory: projectRoot,
|
|
10206
|
-
...autoApproveMaxTurns !== void 0 ? { maxTurns: autoApproveMaxTurns } : {}
|
|
11420
|
+
...autoApproveMaxTurns !== void 0 ? { maxTurns: autoApproveMaxTurns } : {},
|
|
11421
|
+
..._otlpEndpoint !== void 0 ? { otlpEndpoint: _otlpEndpoint } : {}
|
|
10207
11422
|
});
|
|
10208
11423
|
const fixResult = await handle.result;
|
|
10209
11424
|
eventBus.emit("orchestrator:story-phase-complete", {
|
|
@@ -10214,9 +11429,9 @@ function createImplementationOrchestrator(deps) {
|
|
|
10214
11429
|
output: fixResult.tokenEstimate.output
|
|
10215
11430
|
} : void 0 }
|
|
10216
11431
|
});
|
|
10217
|
-
if (fixResult.status === "timeout") logger$
|
|
11432
|
+
if (fixResult.status === "timeout") logger$26.warn({ storyKey }, "Auto-approve fix timed out — approving anyway (issues were minor)");
|
|
10218
11433
|
} catch (err) {
|
|
10219
|
-
logger$
|
|
11434
|
+
logger$26.warn({
|
|
10220
11435
|
storyKey,
|
|
10221
11436
|
err
|
|
10222
11437
|
}, "Auto-approve fix dispatch failed — approving anyway (issues were minor)");
|
|
@@ -10333,7 +11548,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10333
11548
|
fixPrompt = assembled.prompt;
|
|
10334
11549
|
} catch {
|
|
10335
11550
|
fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
|
|
10336
|
-
logger$
|
|
11551
|
+
logger$26.warn({
|
|
10337
11552
|
storyKey,
|
|
10338
11553
|
taskType
|
|
10339
11554
|
}, "Failed to assemble fix prompt, using fallback");
|
|
@@ -10346,14 +11561,16 @@ function createImplementationOrchestrator(deps) {
|
|
|
10346
11561
|
...fixModel !== void 0 ? { model: fixModel } : {},
|
|
10347
11562
|
outputSchema: DevStoryResultSchema,
|
|
10348
11563
|
...fixMaxTurns !== void 0 ? { maxTurns: fixMaxTurns } : {},
|
|
10349
|
-
...projectRoot !== void 0 ? { workingDirectory: projectRoot } : {}
|
|
11564
|
+
...projectRoot !== void 0 ? { workingDirectory: projectRoot } : {},
|
|
11565
|
+
..._otlpEndpoint !== void 0 ? { otlpEndpoint: _otlpEndpoint } : {}
|
|
10350
11566
|
}) : dispatcher.dispatch({
|
|
10351
11567
|
prompt: fixPrompt,
|
|
10352
11568
|
agent: "claude-code",
|
|
10353
11569
|
taskType,
|
|
10354
11570
|
...fixModel !== void 0 ? { model: fixModel } : {},
|
|
10355
11571
|
...fixMaxTurns !== void 0 ? { maxTurns: fixMaxTurns } : {},
|
|
10356
|
-
...projectRoot !== void 0 ? { workingDirectory: projectRoot } : {}
|
|
11572
|
+
...projectRoot !== void 0 ? { workingDirectory: projectRoot } : {},
|
|
11573
|
+
..._otlpEndpoint !== void 0 ? { otlpEndpoint: _otlpEndpoint } : {}
|
|
10357
11574
|
});
|
|
10358
11575
|
const fixResult = await handle.result;
|
|
10359
11576
|
eventBus.emit("orchestrator:story-phase-complete", {
|
|
@@ -10365,7 +11582,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10365
11582
|
} : void 0 }
|
|
10366
11583
|
});
|
|
10367
11584
|
if (fixResult.status === "timeout") {
|
|
10368
|
-
logger$
|
|
11585
|
+
logger$26.warn({
|
|
10369
11586
|
storyKey,
|
|
10370
11587
|
taskType
|
|
10371
11588
|
}, "Fix dispatch timed out — escalating story");
|
|
@@ -10387,7 +11604,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10387
11604
|
}
|
|
10388
11605
|
if (fixResult.status === "failed") {
|
|
10389
11606
|
if (isMajorRework) {
|
|
10390
|
-
logger$
|
|
11607
|
+
logger$26.warn({
|
|
10391
11608
|
storyKey,
|
|
10392
11609
|
exitCode: fixResult.exitCode
|
|
10393
11610
|
}, "Major rework dispatch failed — escalating story");
|
|
@@ -10407,14 +11624,14 @@ function createImplementationOrchestrator(deps) {
|
|
|
10407
11624
|
persistState();
|
|
10408
11625
|
return;
|
|
10409
11626
|
}
|
|
10410
|
-
logger$
|
|
11627
|
+
logger$26.warn({
|
|
10411
11628
|
storyKey,
|
|
10412
11629
|
taskType,
|
|
10413
11630
|
exitCode: fixResult.exitCode
|
|
10414
11631
|
}, "Fix dispatch failed");
|
|
10415
11632
|
}
|
|
10416
11633
|
} catch (err) {
|
|
10417
|
-
logger$
|
|
11634
|
+
logger$26.warn({
|
|
10418
11635
|
storyKey,
|
|
10419
11636
|
taskType,
|
|
10420
11637
|
err
|
|
@@ -10477,11 +11694,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
10477
11694
|
}
|
|
10478
11695
|
async function run(storyKeys) {
|
|
10479
11696
|
if (_state === "RUNNING" || _state === "PAUSED") {
|
|
10480
|
-
logger$
|
|
11697
|
+
logger$26.warn({ state: _state }, "run() called while orchestrator is already running or paused — ignoring");
|
|
10481
11698
|
return getStatus();
|
|
10482
11699
|
}
|
|
10483
11700
|
if (_state === "COMPLETE") {
|
|
10484
|
-
logger$
|
|
11701
|
+
logger$26.warn({ state: _state }, "run() called on a COMPLETE orchestrator — ignoring");
|
|
10485
11702
|
return getStatus();
|
|
10486
11703
|
}
|
|
10487
11704
|
_state = "RUNNING";
|
|
@@ -10502,7 +11719,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10502
11719
|
if (config.enableHeartbeat) startHeartbeat();
|
|
10503
11720
|
if (projectRoot !== void 0) {
|
|
10504
11721
|
const seedResult = seedMethodologyContext(db, projectRoot);
|
|
10505
|
-
if (seedResult.decisionsCreated > 0) logger$
|
|
11722
|
+
if (seedResult.decisionsCreated > 0) logger$26.info({
|
|
10506
11723
|
decisionsCreated: seedResult.decisionsCreated,
|
|
10507
11724
|
skippedCategories: seedResult.skippedCategories
|
|
10508
11725
|
}, "Methodology context seeded from planning artifacts");
|
|
@@ -10512,7 +11729,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10512
11729
|
await stateStore.initialize();
|
|
10513
11730
|
for (const key of storyKeys) {
|
|
10514
11731
|
const pendingState = _stories.get(key);
|
|
10515
|
-
if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$
|
|
11732
|
+
if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$26.warn({
|
|
10516
11733
|
err,
|
|
10517
11734
|
storyKey: key
|
|
10518
11735
|
}, "StateStore write failed during PENDING init"));
|
|
@@ -10521,9 +11738,16 @@ function createImplementationOrchestrator(deps) {
|
|
|
10521
11738
|
const existingRecords = await stateStore.queryStories({});
|
|
10522
11739
|
for (const record of existingRecords) _stateStoreCache.set(record.storyKey, record);
|
|
10523
11740
|
} catch (err) {
|
|
10524
|
-
logger$
|
|
11741
|
+
logger$26.warn({ err }, "StateStore.queryStories() failed during init — status merge will be empty (best-effort)");
|
|
10525
11742
|
}
|
|
10526
11743
|
}
|
|
11744
|
+
if (ingestionServer !== void 0) {
|
|
11745
|
+
await ingestionServer.start().catch((err) => logger$26.warn({ err }, "IngestionServer.start() failed — continuing without telemetry (best-effort)"));
|
|
11746
|
+
try {
|
|
11747
|
+
_otlpEndpoint = ingestionServer.getOtlpEnvVars().OTEL_EXPORTER_OTLP_ENDPOINT;
|
|
11748
|
+
logger$26.info({ otlpEndpoint: _otlpEndpoint }, "OTLP telemetry ingestion active");
|
|
11749
|
+
} catch {}
|
|
11750
|
+
}
|
|
10527
11751
|
let contractDeclarations = [];
|
|
10528
11752
|
if (stateStore !== void 0) {
|
|
10529
11753
|
const allContractRecords = await stateStore.queryContracts();
|
|
@@ -10557,11 +11781,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
10557
11781
|
}).filter((d) => d !== null);
|
|
10558
11782
|
}
|
|
10559
11783
|
const { batches, edges: contractEdges } = detectConflictGroupsWithContracts(storyKeys, { moduleMap: pack.manifest.conflictGroups }, contractDeclarations);
|
|
10560
|
-
if (contractEdges.length > 0) logger$
|
|
11784
|
+
if (contractEdges.length > 0) logger$26.info({
|
|
10561
11785
|
contractEdges,
|
|
10562
11786
|
edgeCount: contractEdges.length
|
|
10563
11787
|
}, "Contract dependency edges detected — applying contract-aware dispatch ordering");
|
|
10564
|
-
logger$
|
|
11788
|
+
logger$26.info({
|
|
10565
11789
|
storyCount: storyKeys.length,
|
|
10566
11790
|
groupCount: batches.reduce((sum, b) => sum + b.length, 0),
|
|
10567
11791
|
batchCount: batches.length,
|
|
@@ -10581,7 +11805,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10581
11805
|
exitCode,
|
|
10582
11806
|
output: truncatedOutput
|
|
10583
11807
|
});
|
|
10584
|
-
logger$
|
|
11808
|
+
logger$26.error({
|
|
10585
11809
|
exitCode,
|
|
10586
11810
|
reason: preFlightResult.reason
|
|
10587
11811
|
}, "Pre-flight build check failed — aborting pipeline before any story dispatch");
|
|
@@ -10590,7 +11814,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10590
11814
|
persistState();
|
|
10591
11815
|
return getStatus();
|
|
10592
11816
|
}
|
|
10593
|
-
if (preFlightResult.status !== "skipped") logger$
|
|
11817
|
+
if (preFlightResult.status !== "skipped") logger$26.info("Pre-flight build check passed");
|
|
10594
11818
|
}
|
|
10595
11819
|
try {
|
|
10596
11820
|
for (const batchGroups of batches) await runWithConcurrency(batchGroups, config.maxConcurrency);
|
|
@@ -10599,7 +11823,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10599
11823
|
_state = "FAILED";
|
|
10600
11824
|
_completedAt = new Date().toISOString();
|
|
10601
11825
|
persistState();
|
|
10602
|
-
logger$
|
|
11826
|
+
logger$26.error({ err }, "Orchestrator failed with unhandled error");
|
|
10603
11827
|
return getStatus();
|
|
10604
11828
|
}
|
|
10605
11829
|
stopHeartbeat();
|
|
@@ -10615,11 +11839,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
10615
11839
|
contractName: mismatch.contractName,
|
|
10616
11840
|
mismatchDescription: mismatch.mismatchDescription
|
|
10617
11841
|
});
|
|
10618
|
-
logger$
|
|
11842
|
+
logger$26.warn({
|
|
10619
11843
|
mismatchCount: mismatches.length,
|
|
10620
11844
|
mismatches
|
|
10621
11845
|
}, "Post-sprint contract verification found mismatches — manual review required");
|
|
10622
|
-
} else logger$
|
|
11846
|
+
} else logger$26.info("Post-sprint contract verification passed — all declared contracts satisfied");
|
|
10623
11847
|
if (stateStore !== void 0) try {
|
|
10624
11848
|
const allContractsForVerification = await stateStore.queryContracts();
|
|
10625
11849
|
const verifiedAt = new Date().toISOString();
|
|
@@ -10648,12 +11872,12 @@ function createImplementationOrchestrator(deps) {
|
|
|
10648
11872
|
});
|
|
10649
11873
|
await stateStore.setContractVerification(sk, records);
|
|
10650
11874
|
}
|
|
10651
|
-
logger$
|
|
11875
|
+
logger$26.info({ storyCount: contractsByStory.size }, "Contract verification results persisted to StateStore");
|
|
10652
11876
|
} catch (persistErr) {
|
|
10653
|
-
logger$
|
|
11877
|
+
logger$26.warn({ err: persistErr }, "Failed to persist contract verification results to StateStore");
|
|
10654
11878
|
}
|
|
10655
11879
|
} catch (err) {
|
|
10656
|
-
logger$
|
|
11880
|
+
logger$26.error({ err }, "Post-sprint contract verification threw an error — skipping");
|
|
10657
11881
|
}
|
|
10658
11882
|
let completed = 0;
|
|
10659
11883
|
let escalated = 0;
|
|
@@ -10670,7 +11894,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
10670
11894
|
persistState();
|
|
10671
11895
|
return getStatus();
|
|
10672
11896
|
} finally {
|
|
10673
|
-
if (stateStore !== void 0) await stateStore.close().catch((err) => logger$
|
|
11897
|
+
if (stateStore !== void 0) await stateStore.close().catch((err) => logger$26.warn({ err }, "StateStore.close() failed (best-effort)"));
|
|
11898
|
+
if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$26.warn({ err }, "IngestionServer.stop() failed (best-effort)"));
|
|
10674
11899
|
}
|
|
10675
11900
|
}
|
|
10676
11901
|
function pause() {
|
|
@@ -10679,7 +11904,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10679
11904
|
_pauseGate = createPauseGate();
|
|
10680
11905
|
_state = "PAUSED";
|
|
10681
11906
|
eventBus.emit("orchestrator:paused", {});
|
|
10682
|
-
logger$
|
|
11907
|
+
logger$26.info("Orchestrator paused");
|
|
10683
11908
|
}
|
|
10684
11909
|
function resume() {
|
|
10685
11910
|
if (_state !== "PAUSED") return;
|
|
@@ -10690,7 +11915,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10690
11915
|
}
|
|
10691
11916
|
_state = "RUNNING";
|
|
10692
11917
|
eventBus.emit("orchestrator:resumed", {});
|
|
10693
|
-
logger$
|
|
11918
|
+
logger$26.info("Orchestrator resumed");
|
|
10694
11919
|
}
|
|
10695
11920
|
return {
|
|
10696
11921
|
run,
|
|
@@ -10733,7 +11958,11 @@ function resolveStoryKeys(db, projectRoot, opts) {
|
|
|
10733
11958
|
const allContent = shardRows.map((r) => r.value).join("\n");
|
|
10734
11959
|
if (allContent.length > 0) keys = parseStoryKeysFromEpics(allContent);
|
|
10735
11960
|
} catch {}
|
|
10736
|
-
if (keys.length === 0) keys = discoverPendingStoryKeys(projectRoot);
|
|
11961
|
+
if (keys.length === 0) keys = discoverPendingStoryKeys(projectRoot, opts?.epicNumber);
|
|
11962
|
+
if (opts?.epicNumber !== void 0 && keys.length > 0) {
|
|
11963
|
+
const prefix = `${opts.epicNumber}-`;
|
|
11964
|
+
keys = keys.filter((k) => k.startsWith(prefix));
|
|
11965
|
+
}
|
|
10737
11966
|
if (opts?.filterCompleted === true && keys.length > 0) {
|
|
10738
11967
|
const completedKeys = getCompletedStoryKeys(db);
|
|
10739
11968
|
keys = keys.filter((k) => !completedKeys.has(k));
|
|
@@ -10781,21 +12010,33 @@ function parseStoryKeysFromEpics(content) {
|
|
|
10781
12010
|
* @param projectRoot - Absolute path to the project root directory
|
|
10782
12011
|
* @returns Sorted array of pending story keys in "N-M" format
|
|
10783
12012
|
*/
|
|
10784
|
-
function discoverPendingStoryKeys(projectRoot) {
|
|
12013
|
+
function discoverPendingStoryKeys(projectRoot, epicNumber) {
|
|
10785
12014
|
let allKeys = [];
|
|
10786
|
-
|
|
10787
|
-
if (epicsPath !== void 0) try {
|
|
10788
|
-
const content = readFileSync$1(epicsPath, "utf-8");
|
|
10789
|
-
allKeys = parseStoryKeysFromEpics(content);
|
|
10790
|
-
} catch {}
|
|
10791
|
-
if (allKeys.length === 0) {
|
|
12015
|
+
if (epicNumber !== void 0) {
|
|
10792
12016
|
const epicFiles = findEpicFiles(projectRoot);
|
|
10793
|
-
|
|
12017
|
+
const targetPattern = new RegExp(`^epic-${epicNumber}[^0-9]`);
|
|
12018
|
+
const matched = epicFiles.filter((f) => targetPattern.test(f.split("/").pop()));
|
|
12019
|
+
for (const epicFile of matched) try {
|
|
10794
12020
|
const content = readFileSync$1(epicFile, "utf-8");
|
|
10795
12021
|
const keys = parseStoryKeysFromEpics(content);
|
|
10796
12022
|
allKeys.push(...keys);
|
|
10797
12023
|
} catch {}
|
|
10798
12024
|
allKeys = sortStoryKeys([...new Set(allKeys)]);
|
|
12025
|
+
} else {
|
|
12026
|
+
const epicsPath = findEpicsFile(projectRoot);
|
|
12027
|
+
if (epicsPath !== void 0) try {
|
|
12028
|
+
const content = readFileSync$1(epicsPath, "utf-8");
|
|
12029
|
+
allKeys = parseStoryKeysFromEpics(content);
|
|
12030
|
+
} catch {}
|
|
12031
|
+
if (allKeys.length === 0) {
|
|
12032
|
+
const epicFiles = findEpicFiles(projectRoot);
|
|
12033
|
+
for (const epicFile of epicFiles) try {
|
|
12034
|
+
const content = readFileSync$1(epicFile, "utf-8");
|
|
12035
|
+
const keys = parseStoryKeysFromEpics(content);
|
|
12036
|
+
allKeys.push(...keys);
|
|
12037
|
+
} catch {}
|
|
12038
|
+
allKeys = sortStoryKeys([...new Set(allKeys)]);
|
|
12039
|
+
}
|
|
10799
12040
|
}
|
|
10800
12041
|
if (allKeys.length === 0) return [];
|
|
10801
12042
|
const existingKeys = collectExistingStoryKeys(projectRoot);
|
|
@@ -14884,7 +16125,7 @@ function mapInternalPhaseToEventPhase(internalPhase) {
|
|
|
14884
16125
|
}
|
|
14885
16126
|
}
|
|
14886
16127
|
async function runRunAction(options) {
|
|
14887
|
-
const { pack: packName, from: startPhase, stopAfter, concept: conceptArg, conceptFile, stories: storiesArg, concurrency, outputFormat, projectRoot, events: eventsFlag, verbose: verboseFlag, tui: tuiFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, skipPreflight, registry: injectedRegistry } = options;
|
|
16128
|
+
const { pack: packName, from: startPhase, stopAfter, concept: conceptArg, conceptFile, stories: storiesArg, concurrency, outputFormat, projectRoot, events: eventsFlag, verbose: verboseFlag, tui: tuiFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, skipPreflight, epic: epicNumber, registry: injectedRegistry } = options;
|
|
14888
16129
|
if (startPhase !== void 0 && !VALID_PHASES.includes(startPhase)) {
|
|
14889
16130
|
const errorMsg = `Invalid phase '${startPhase}'. Valid phases: ${VALID_PHASES.join(", ")}`;
|
|
14890
16131
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
|
|
@@ -14948,12 +16189,19 @@ async function runRunAction(options) {
|
|
|
14948
16189
|
});
|
|
14949
16190
|
} catch {}
|
|
14950
16191
|
let tokenCeilings;
|
|
16192
|
+
let telemetryEnabled = false;
|
|
16193
|
+
let telemetryPort = 4318;
|
|
14951
16194
|
try {
|
|
14952
16195
|
const configSystem = createConfigSystem({ projectConfigDir: dbDir });
|
|
14953
16196
|
await configSystem.load();
|
|
14954
|
-
|
|
16197
|
+
const cfg = configSystem.getConfig();
|
|
16198
|
+
tokenCeilings = cfg.token_ceilings;
|
|
16199
|
+
if (cfg.telemetry?.enabled === true) {
|
|
16200
|
+
telemetryEnabled = true;
|
|
16201
|
+
telemetryPort = cfg.telemetry.port ?? 4318;
|
|
16202
|
+
}
|
|
14955
16203
|
} catch {
|
|
14956
|
-
logger.debug("Config loading skipped — using default token ceilings");
|
|
16204
|
+
logger.debug("Config loading skipped — using default token ceilings and telemetry settings");
|
|
14957
16205
|
}
|
|
14958
16206
|
let parsedStoryKeys = [];
|
|
14959
16207
|
if (storiesArg !== void 0 && storiesArg !== "") {
|
|
@@ -15008,6 +16256,7 @@ async function runRunAction(options) {
|
|
|
15008
16256
|
...researchFlag === true ? { research: true } : {},
|
|
15009
16257
|
...skipResearchFlag === true ? { skipResearch: true } : {},
|
|
15010
16258
|
...skipPreflight === true ? { skipPreflight: true } : {},
|
|
16259
|
+
...epicNumber !== void 0 ? { epic: epicNumber } : {},
|
|
15011
16260
|
...injectedRegistry !== void 0 ? { registry: injectedRegistry } : {}
|
|
15012
16261
|
});
|
|
15013
16262
|
let storyKeys = [...parsedStoryKeys];
|
|
@@ -15056,8 +16305,11 @@ async function runRunAction(options) {
|
|
|
15056
16305
|
storyKeys = storyKeys.filter((k) => !completedStoryKeys.has(k));
|
|
15057
16306
|
}
|
|
15058
16307
|
if (storyKeys.length === 0) {
|
|
15059
|
-
storyKeys = discoverPendingStoryKeys(projectRoot);
|
|
15060
|
-
if (storyKeys.length > 0)
|
|
16308
|
+
storyKeys = discoverPendingStoryKeys(projectRoot, epicNumber);
|
|
16309
|
+
if (storyKeys.length > 0) {
|
|
16310
|
+
const scopeLabel = epicNumber !== void 0 ? `epic ${epicNumber}` : "epics.md";
|
|
16311
|
+
process.stdout.write(`Discovered ${storyKeys.length} pending stories from ${scopeLabel}: ${storyKeys.join(", ")}\n`);
|
|
16312
|
+
}
|
|
15061
16313
|
}
|
|
15062
16314
|
if (storyKeys.length === 0) {
|
|
15063
16315
|
if (outputFormat === "human") process.stdout.write("No pending stories found in decision store.\n");
|
|
@@ -15407,6 +16659,7 @@ async function runRunAction(options) {
|
|
|
15407
16659
|
});
|
|
15408
16660
|
});
|
|
15409
16661
|
}
|
|
16662
|
+
const ingestionServer = telemetryEnabled ? new IngestionServer({ port: telemetryPort }) : void 0;
|
|
15410
16663
|
const orchestrator = createImplementationOrchestrator({
|
|
15411
16664
|
db,
|
|
15412
16665
|
pack,
|
|
@@ -15421,7 +16674,8 @@ async function runRunAction(options) {
|
|
|
15421
16674
|
skipPreflight: skipPreflight === true
|
|
15422
16675
|
},
|
|
15423
16676
|
projectRoot,
|
|
15424
|
-
tokenCeilings
|
|
16677
|
+
tokenCeilings,
|
|
16678
|
+
...ingestionServer !== void 0 ? { ingestionServer } : {}
|
|
15425
16679
|
});
|
|
15426
16680
|
if (outputFormat === "human" && progressRenderer === void 0 && ndjsonEmitter === void 0) {
|
|
15427
16681
|
process.stdout.write(`Starting pipeline: ${storyKeys.length} story/stories, concurrency=${concurrency}\n`);
|
|
@@ -15755,7 +17009,10 @@ async function runFullPipeline(options) {
|
|
|
15755
17009
|
process.stdout.write(` [ESCALATED] ${payload.storyKey}: ${payload.lastVerdict}\n`);
|
|
15756
17010
|
});
|
|
15757
17011
|
}
|
|
15758
|
-
const storyKeys = resolveStoryKeys(db, projectRoot, {
|
|
17012
|
+
const storyKeys = resolveStoryKeys(db, projectRoot, {
|
|
17013
|
+
explicit: explicitStories,
|
|
17014
|
+
epicNumber: options.epic
|
|
17015
|
+
});
|
|
15759
17016
|
if (storyKeys.length === 0 && outputFormat === "human") process.stdout.write("[IMPLEMENTATION] No stories found. Run solutioning first or pass --stories.\n");
|
|
15760
17017
|
if (outputFormat === "human") process.stdout.write(`[IMPLEMENTATION] Starting ${storyKeys.length} stories with concurrency=${concurrency}\n`);
|
|
15761
17018
|
await orchestrator.run(storyKeys);
|
|
@@ -15819,7 +17076,7 @@ async function runFullPipeline(options) {
|
|
|
15819
17076
|
}
|
|
15820
17077
|
}
|
|
15821
17078
|
function registerRunCommand(program, _version = "0.0.0", projectRoot = process.cwd(), registry) {
|
|
15822
|
-
program.command("run").description("Run the autonomous pipeline (use --from to start from a specific phase)").option("--pack <name>", "Methodology pack name", "bmad").option("--from <phase>", "Start from this phase: analysis, planning, solutioning, implementation").option("--stop-after <phase>", "Stop pipeline after this phase completes").option("--concept <text>", "Inline concept text (required when --from analysis)").option("--concept-file <path>", "Path to a file containing the concept text").option("--stories <keys>", "Comma-separated story keys (e.g., 10-1,10-2)").option("--concurrency <n>", "Maximum parallel conflict groups", (v) => parseInt(v, 10), 3).option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").option("--events", "Emit structured NDJSON events on stdout for programmatic consumption").option("--verbose", "Show detailed pino log output").option("--help-agent", "Print a machine-optimized prompt fragment for AI agents and exit").option("--tui", "Show TUI dashboard").option("--skip-ux", "Skip the UX design phase even if enabled in the pack manifest").option("--research", "Enable the research phase even if not set in the pack manifest").option("--skip-research", "Skip the research phase even if enabled in the pack manifest").option("--skip-preflight", "Skip the pre-flight build check (escape hatch for known-broken projects)").action(async (opts) => {
|
|
17079
|
+
program.command("run").description("Run the autonomous pipeline (use --from to start from a specific phase)").option("--pack <name>", "Methodology pack name", "bmad").option("--from <phase>", "Start from this phase: analysis, planning, solutioning, implementation").option("--stop-after <phase>", "Stop pipeline after this phase completes").option("--concept <text>", "Inline concept text (required when --from analysis)").option("--concept-file <path>", "Path to a file containing the concept text").option("--stories <keys>", "Comma-separated story keys (e.g., 10-1,10-2)").option("--epic <n>", "Scope story discovery to a single epic number (e.g., 27)", (v) => parseInt(v, 10)).option("--concurrency <n>", "Maximum parallel conflict groups", (v) => parseInt(v, 10), 3).option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").option("--events", "Emit structured NDJSON events on stdout for programmatic consumption").option("--verbose", "Show detailed pino log output").option("--help-agent", "Print a machine-optimized prompt fragment for AI agents and exit").option("--tui", "Show TUI dashboard").option("--skip-ux", "Skip the UX design phase even if enabled in the pack manifest").option("--research", "Enable the research phase even if not set in the pack manifest").option("--skip-research", "Skip the research phase even if enabled in the pack manifest").option("--skip-preflight", "Skip the pre-flight build check (escape hatch for known-broken projects)").action(async (opts) => {
|
|
15823
17080
|
if (opts.helpAgent) {
|
|
15824
17081
|
process.exitCode = await runHelpAgent();
|
|
15825
17082
|
return;
|
|
@@ -15843,6 +17100,7 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
|
|
|
15843
17100
|
concept: opts.concept,
|
|
15844
17101
|
conceptFile: opts.conceptFile,
|
|
15845
17102
|
stories: opts.stories,
|
|
17103
|
+
epic: opts.epic,
|
|
15846
17104
|
concurrency: opts.concurrency,
|
|
15847
17105
|
outputFormat,
|
|
15848
17106
|
projectRoot: opts.projectRoot,
|
|
@@ -15860,5 +17118,5 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
|
|
|
15860
17118
|
}
|
|
15861
17119
|
|
|
15862
17120
|
//#endregion
|
|
15863
|
-
export { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, DoltNotInstalled, FileStateStore, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDispatcher, createDoltClient, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initializeDolt, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runMigrations, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
|
|
15864
|
-
//# sourceMappingURL=run-
|
|
17121
|
+
export { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, DoltNotInstalled, FileStateStore, SUBSTRATE_OWNED_SETTINGS_KEYS, TelemetryPersistence, VALID_PHASES, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDispatcher, createDoltClient, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initializeDolt, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runMigrations, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
|
|
17122
|
+
//# sourceMappingURL=run-Fzhz3-mv.js.map
|