substrate-ai 0.3.2 → 0.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapter-registry-eyyVr21J.js +3 -0
- package/dist/cli/index.js +45 -25
- package/dist/{config-migrator-CQmBdKeG.js → config-migrator-DtZW1maj.js} +1 -1
- package/dist/{decisions-DxgMpQpz.js → decisions-CbysnTi5.js} +1 -1
- package/dist/{decisions-Dq4cAA2L.js → decisions-CdpiJIm5.js} +1 -1
- package/dist/{experimenter-Br1-vzYv.js → experimenter-jto3orYl.js} +4 -4
- package/dist/{git-utils-CtmrZrHS.js → git-utils-UbKLSGsD.js} +1 -1
- package/dist/{helpers-RL22dYtn.js → helpers-BihqWgVe.js} +1 -1
- package/dist/index.js +1 -1
- package/dist/{operational-Bovj4fS-.js → operational-DisxqtjC.js} +1 -1
- package/dist/{run-Fzhz3-mv.js → run-8ygA8hgY.js} +1783 -315
- package/dist/run-Dul2DU3D.js +8 -0
- package/dist/{upgrade-Ex1ukwsm.js → upgrade-BlJKjr6I.js} +3 -3
- package/dist/{upgrade-DO307rFf.js → upgrade-DTzeenA-.js} +2 -2
- package/dist/version-manager-impl-BsHqAeGT.js +4 -0
- package/dist/{version-manager-impl-33JYXsqa.js → version-manager-impl-zsJjBhak.js} +2 -2
- package/package.json +1 -1
- package/dist/run-BJ5z_b2J.js +0 -8
- package/dist/version-manager-impl-Dk3S31y6.js +0 -4
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
import { createLogger, deepMask } from "./logger-D2fS2ccL.js";
|
|
2
|
-
import { CURRENT_CONFIG_FORMAT_VERSION, PartialSubstrateConfigSchema, SUPPORTED_CONFIG_FORMAT_VERSIONS, SubstrateConfigSchema, defaultConfigMigrator } from "./config-migrator-
|
|
3
|
-
import { ConfigError, ConfigIncompatibleFormatError, createEventBus, createTuiApp, isTuiCapable, printNonTtyWarning, sleep } from "./helpers-
|
|
4
|
-
import { addTokenUsage, createDecision, createPipelineRun, createRequirement, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getTokenUsageSummary, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision } from "./decisions-
|
|
5
|
-
import { ADVISORY_NOTES, ESCALATION_DIAGNOSIS, OPERATIONAL_FINDING, STORY_METRICS, STORY_OUTCOME, TEST_EXPANSION_FINDING, TEST_PLAN, aggregateTokenUsageForRun, aggregateTokenUsageForStory, getStoryMetricsForRun, writeRunMetrics, writeStoryMetrics } from "./operational-
|
|
2
|
+
import { CURRENT_CONFIG_FORMAT_VERSION, PartialSubstrateConfigSchema, SUPPORTED_CONFIG_FORMAT_VERSIONS, SubstrateConfigSchema, defaultConfigMigrator } from "./config-migrator-DtZW1maj.js";
|
|
3
|
+
import { ConfigError, ConfigIncompatibleFormatError, createEventBus, createTuiApp, isTuiCapable, printNonTtyWarning, sleep } from "./helpers-BihqWgVe.js";
|
|
4
|
+
import { addTokenUsage, createDecision, createPipelineRun, createRequirement, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getTokenUsageSummary, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision } from "./decisions-CdpiJIm5.js";
|
|
5
|
+
import { ADVISORY_NOTES, ESCALATION_DIAGNOSIS, OPERATIONAL_FINDING, STORY_METRICS, STORY_OUTCOME, TEST_EXPANSION_FINDING, TEST_PLAN, aggregateTokenUsageForRun, aggregateTokenUsageForStory, getStoryMetricsForRun, writeRunMetrics, writeStoryMetrics } from "./operational-DisxqtjC.js";
|
|
6
6
|
import { createRequire } from "module";
|
|
7
7
|
import { dirname, join, resolve } from "path";
|
|
8
8
|
import { access, mkdir, readFile, readdir, stat, writeFile } from "fs/promises";
|
|
@@ -17,6 +17,7 @@ import { access as access$1, mkdir as mkdir$1, readFile as readFile$1, stat as s
|
|
|
17
17
|
import { fileURLToPath } from "node:url";
|
|
18
18
|
import { existsSync as existsSync$1, readFileSync as readFileSync$1, readdirSync as readdirSync$1 } from "node:fs";
|
|
19
19
|
import { homedir } from "os";
|
|
20
|
+
import { EventEmitter } from "node:events";
|
|
20
21
|
import { freemem, platform } from "node:os";
|
|
21
22
|
import { createHash, randomUUID } from "node:crypto";
|
|
22
23
|
import { createServer } from "node:http";
|
|
@@ -604,9 +605,110 @@ const migration010RunMetrics = {
|
|
|
604
605
|
}
|
|
605
606
|
};
|
|
606
607
|
|
|
608
|
+
//#endregion
|
|
609
|
+
//#region src/persistence/migrations/011-telemetry-schema.ts
|
|
610
|
+
const migration011TelemetrySchema = {
|
|
611
|
+
version: 11,
|
|
612
|
+
name: "telemetry-schema",
|
|
613
|
+
up(db) {
|
|
614
|
+
db.exec(`
|
|
615
|
+
CREATE TABLE IF NOT EXISTS turn_analysis (
|
|
616
|
+
story_key VARCHAR(64) NOT NULL,
|
|
617
|
+
span_id VARCHAR(128) NOT NULL,
|
|
618
|
+
turn_number INTEGER NOT NULL,
|
|
619
|
+
name VARCHAR(255) NOT NULL DEFAULT '',
|
|
620
|
+
timestamp BIGINT NOT NULL DEFAULT 0,
|
|
621
|
+
source VARCHAR(32) NOT NULL DEFAULT '',
|
|
622
|
+
model VARCHAR(64),
|
|
623
|
+
input_tokens INTEGER NOT NULL DEFAULT 0,
|
|
624
|
+
output_tokens INTEGER NOT NULL DEFAULT 0,
|
|
625
|
+
cache_read_tokens INTEGER NOT NULL DEFAULT 0,
|
|
626
|
+
fresh_tokens INTEGER NOT NULL DEFAULT 0,
|
|
627
|
+
cache_hit_rate DOUBLE NOT NULL DEFAULT 0,
|
|
628
|
+
cost_usd DOUBLE NOT NULL DEFAULT 0,
|
|
629
|
+
duration_ms INTEGER NOT NULL DEFAULT 0,
|
|
630
|
+
context_size INTEGER NOT NULL DEFAULT 0,
|
|
631
|
+
context_delta INTEGER NOT NULL DEFAULT 0,
|
|
632
|
+
tool_name VARCHAR(128),
|
|
633
|
+
is_context_spike BOOLEAN NOT NULL DEFAULT 0,
|
|
634
|
+
child_spans_json TEXT NOT NULL DEFAULT '[]',
|
|
635
|
+
PRIMARY KEY (story_key, span_id)
|
|
636
|
+
);
|
|
637
|
+
|
|
638
|
+
CREATE INDEX IF NOT EXISTS idx_turn_analysis_story
|
|
639
|
+
ON turn_analysis (story_key, turn_number);
|
|
640
|
+
|
|
641
|
+
CREATE TABLE IF NOT EXISTS efficiency_scores (
|
|
642
|
+
story_key VARCHAR(64) NOT NULL,
|
|
643
|
+
timestamp BIGINT NOT NULL,
|
|
644
|
+
composite_score INTEGER NOT NULL DEFAULT 0,
|
|
645
|
+
cache_hit_sub_score DOUBLE NOT NULL DEFAULT 0,
|
|
646
|
+
io_ratio_sub_score DOUBLE NOT NULL DEFAULT 0,
|
|
647
|
+
context_management_sub_score DOUBLE NOT NULL DEFAULT 0,
|
|
648
|
+
avg_cache_hit_rate DOUBLE NOT NULL DEFAULT 0,
|
|
649
|
+
avg_io_ratio DOUBLE NOT NULL DEFAULT 0,
|
|
650
|
+
context_spike_count INTEGER NOT NULL DEFAULT 0,
|
|
651
|
+
total_turns INTEGER NOT NULL DEFAULT 0,
|
|
652
|
+
per_model_json TEXT NOT NULL DEFAULT '[]',
|
|
653
|
+
per_source_json TEXT NOT NULL DEFAULT '[]',
|
|
654
|
+
PRIMARY KEY (story_key, timestamp)
|
|
655
|
+
);
|
|
656
|
+
|
|
657
|
+
CREATE INDEX IF NOT EXISTS idx_efficiency_story
|
|
658
|
+
ON efficiency_scores (story_key, timestamp DESC);
|
|
659
|
+
|
|
660
|
+
CREATE TABLE IF NOT EXISTS recommendations (
|
|
661
|
+
id VARCHAR(16) NOT NULL,
|
|
662
|
+
story_key VARCHAR(64) NOT NULL,
|
|
663
|
+
sprint_id VARCHAR(64),
|
|
664
|
+
rule_id VARCHAR(64) NOT NULL,
|
|
665
|
+
severity VARCHAR(16) NOT NULL,
|
|
666
|
+
title TEXT NOT NULL,
|
|
667
|
+
description TEXT NOT NULL,
|
|
668
|
+
potential_savings_tokens INTEGER,
|
|
669
|
+
potential_savings_usd DOUBLE,
|
|
670
|
+
action_target TEXT,
|
|
671
|
+
generated_at VARCHAR(32) NOT NULL,
|
|
672
|
+
PRIMARY KEY (id)
|
|
673
|
+
);
|
|
674
|
+
|
|
675
|
+
CREATE INDEX IF NOT EXISTS idx_recommendations_story
|
|
676
|
+
ON recommendations (story_key, severity);
|
|
677
|
+
|
|
678
|
+
CREATE TABLE IF NOT EXISTS category_stats (
|
|
679
|
+
story_key VARCHAR(100) NOT NULL,
|
|
680
|
+
category VARCHAR(30) NOT NULL,
|
|
681
|
+
total_tokens BIGINT NOT NULL DEFAULT 0,
|
|
682
|
+
percentage DECIMAL(6,3) NOT NULL DEFAULT 0,
|
|
683
|
+
event_count INTEGER NOT NULL DEFAULT 0,
|
|
684
|
+
avg_tokens_per_event DECIMAL(12,2) NOT NULL DEFAULT 0,
|
|
685
|
+
trend VARCHAR(10) NOT NULL DEFAULT 'stable',
|
|
686
|
+
PRIMARY KEY (story_key, category)
|
|
687
|
+
);
|
|
688
|
+
|
|
689
|
+
CREATE INDEX IF NOT EXISTS idx_category_stats_story
|
|
690
|
+
ON category_stats (story_key, total_tokens);
|
|
691
|
+
|
|
692
|
+
CREATE TABLE IF NOT EXISTS consumer_stats (
|
|
693
|
+
story_key VARCHAR(100) NOT NULL,
|
|
694
|
+
consumer_key VARCHAR(300) NOT NULL,
|
|
695
|
+
category VARCHAR(30) NOT NULL,
|
|
696
|
+
total_tokens BIGINT NOT NULL DEFAULT 0,
|
|
697
|
+
percentage DECIMAL(6,3) NOT NULL DEFAULT 0,
|
|
698
|
+
event_count INTEGER NOT NULL DEFAULT 0,
|
|
699
|
+
top_invocations_json TEXT,
|
|
700
|
+
PRIMARY KEY (story_key, consumer_key)
|
|
701
|
+
);
|
|
702
|
+
|
|
703
|
+
CREATE INDEX IF NOT EXISTS idx_consumer_stats_story
|
|
704
|
+
ON consumer_stats (story_key, total_tokens);
|
|
705
|
+
`);
|
|
706
|
+
}
|
|
707
|
+
};
|
|
708
|
+
|
|
607
709
|
//#endregion
|
|
608
710
|
//#region src/persistence/migrations/index.ts
|
|
609
|
-
const logger$
|
|
711
|
+
const logger$26 = createLogger("persistence:migrations");
|
|
610
712
|
const MIGRATIONS = [
|
|
611
713
|
initialSchemaMigration,
|
|
612
714
|
costTrackerSchemaMigration,
|
|
@@ -617,14 +719,15 @@ const MIGRATIONS = [
|
|
|
617
719
|
migration007DecisionStore,
|
|
618
720
|
migration008AmendmentSchema,
|
|
619
721
|
migration009TokenUsageMetadata,
|
|
620
|
-
migration010RunMetrics
|
|
722
|
+
migration010RunMetrics,
|
|
723
|
+
migration011TelemetrySchema
|
|
621
724
|
];
|
|
622
725
|
/**
|
|
623
726
|
* Ensure `schema_migrations` table exists and run any pending migrations.
|
|
624
727
|
* Safe to call multiple times — already-applied migrations are skipped.
|
|
625
728
|
*/
|
|
626
729
|
function runMigrations(db) {
|
|
627
|
-
logger$
|
|
730
|
+
logger$26.info("Starting migration runner");
|
|
628
731
|
db.exec(`
|
|
629
732
|
CREATE TABLE IF NOT EXISTS schema_migrations (
|
|
630
733
|
version INTEGER PRIMARY KEY,
|
|
@@ -635,12 +738,12 @@ function runMigrations(db) {
|
|
|
635
738
|
const appliedVersions = new Set(db.prepare("SELECT version FROM schema_migrations").all().map((row) => row.version));
|
|
636
739
|
const pending = MIGRATIONS.filter((m) => !appliedVersions.has(m.version)).sort((a, b) => a.version - b.version);
|
|
637
740
|
if (pending.length === 0) {
|
|
638
|
-
logger$
|
|
741
|
+
logger$26.info("No pending migrations");
|
|
639
742
|
return;
|
|
640
743
|
}
|
|
641
744
|
const insertMigration = db.prepare("INSERT INTO schema_migrations (version, name) VALUES (?, ?)");
|
|
642
745
|
for (const migration of pending) {
|
|
643
|
-
logger$
|
|
746
|
+
logger$26.info({
|
|
644
747
|
version: migration.version,
|
|
645
748
|
name: migration.name
|
|
646
749
|
}, "Applying migration");
|
|
@@ -654,14 +757,14 @@ function runMigrations(db) {
|
|
|
654
757
|
});
|
|
655
758
|
applyMigration();
|
|
656
759
|
}
|
|
657
|
-
logger$
|
|
760
|
+
logger$26.info({ version: migration.version }, "Migration applied successfully");
|
|
658
761
|
}
|
|
659
|
-
logger$
|
|
762
|
+
logger$26.info({ count: pending.length }, "All pending migrations applied");
|
|
660
763
|
}
|
|
661
764
|
|
|
662
765
|
//#endregion
|
|
663
766
|
//#region src/persistence/database.ts
|
|
664
|
-
const logger$
|
|
767
|
+
const logger$25 = createLogger("persistence:database");
|
|
665
768
|
/**
|
|
666
769
|
* Thin wrapper that opens a SQLite database, applies required PRAGMAs,
|
|
667
770
|
* and exposes the raw BetterSqlite3 instance.
|
|
@@ -678,14 +781,14 @@ var DatabaseWrapper = class {
|
|
|
678
781
|
*/
|
|
679
782
|
open() {
|
|
680
783
|
if (this._db !== null) return;
|
|
681
|
-
logger$
|
|
784
|
+
logger$25.info({ path: this._path }, "Opening SQLite database");
|
|
682
785
|
this._db = new Database(this._path);
|
|
683
786
|
const walResult = this._db.pragma("journal_mode = WAL");
|
|
684
|
-
if (walResult?.[0]?.journal_mode !== "wal") logger$
|
|
787
|
+
if (walResult?.[0]?.journal_mode !== "wal") logger$25.warn({ result: walResult?.[0]?.journal_mode }, "WAL pragma did not return expected \"wal\" — journal_mode may be \"memory\" or unsupported");
|
|
685
788
|
this._db.pragma("busy_timeout = 5000");
|
|
686
789
|
this._db.pragma("synchronous = NORMAL");
|
|
687
790
|
this._db.pragma("foreign_keys = ON");
|
|
688
|
-
logger$
|
|
791
|
+
logger$25.info({ path: this._path }, "SQLite database opened with WAL mode");
|
|
689
792
|
}
|
|
690
793
|
/**
|
|
691
794
|
* Close the database. Idempotent — calling close() when already closed is a no-op.
|
|
@@ -694,7 +797,7 @@ var DatabaseWrapper = class {
|
|
|
694
797
|
if (this._db === null) return;
|
|
695
798
|
this._db.close();
|
|
696
799
|
this._db = null;
|
|
697
|
-
logger$
|
|
800
|
+
logger$25.info({ path: this._path }, "SQLite database closed");
|
|
698
801
|
}
|
|
699
802
|
/**
|
|
700
803
|
* Return the raw BetterSqlite3 instance.
|
|
@@ -1611,7 +1714,7 @@ function formatUnsupportedVersionError(formatType, version, supported) {
|
|
|
1611
1714
|
|
|
1612
1715
|
//#endregion
|
|
1613
1716
|
//#region src/modules/config/config-system-impl.ts
|
|
1614
|
-
const logger$
|
|
1717
|
+
const logger$24 = createLogger("config");
|
|
1615
1718
|
function deepMerge(base, override) {
|
|
1616
1719
|
const result = { ...base };
|
|
1617
1720
|
for (const [key, val] of Object.entries(override)) if (val !== null && val !== void 0 && typeof val === "object" && !Array.isArray(val) && typeof result[key] === "object" && result[key] !== null && !Array.isArray(result[key])) result[key] = deepMerge(result[key], val);
|
|
@@ -1656,7 +1759,7 @@ function readEnvOverrides() {
|
|
|
1656
1759
|
}
|
|
1657
1760
|
const parsed = PartialSubstrateConfigSchema.safeParse(overrides);
|
|
1658
1761
|
if (!parsed.success) {
|
|
1659
|
-
logger$
|
|
1762
|
+
logger$24.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
|
|
1660
1763
|
return {};
|
|
1661
1764
|
}
|
|
1662
1765
|
return parsed.data;
|
|
@@ -1720,7 +1823,7 @@ var ConfigSystemImpl = class {
|
|
|
1720
1823
|
throw new ConfigError(`Configuration validation failed:\n${issues}`, { issues: result.error.issues });
|
|
1721
1824
|
}
|
|
1722
1825
|
this._config = result.data;
|
|
1723
|
-
logger$
|
|
1826
|
+
logger$24.debug("Configuration loaded successfully");
|
|
1724
1827
|
}
|
|
1725
1828
|
getConfig() {
|
|
1726
1829
|
if (this._config === null) throw new ConfigError("Configuration has not been loaded. Call load() before getConfig().", {});
|
|
@@ -1783,7 +1886,7 @@ var ConfigSystemImpl = class {
|
|
|
1783
1886
|
if (version !== void 0 && typeof version === "string" && !isVersionSupported(version, SUPPORTED_CONFIG_FORMAT_VERSIONS)) if (defaultConfigMigrator.canMigrate(version, CURRENT_CONFIG_FORMAT_VERSION)) {
|
|
1784
1887
|
const migrationOutput = defaultConfigMigrator.migrate(rawObj, version, CURRENT_CONFIG_FORMAT_VERSION, filePath);
|
|
1785
1888
|
if (migrationOutput.result.success) {
|
|
1786
|
-
logger$
|
|
1889
|
+
logger$24.info({
|
|
1787
1890
|
from: version,
|
|
1788
1891
|
to: CURRENT_CONFIG_FORMAT_VERSION,
|
|
1789
1892
|
backup: migrationOutput.result.backupPath
|
|
@@ -3192,7 +3295,7 @@ function truncateToTokens(text, maxTokens) {
|
|
|
3192
3295
|
|
|
3193
3296
|
//#endregion
|
|
3194
3297
|
//#region src/modules/context-compiler/context-compiler-impl.ts
|
|
3195
|
-
const logger$
|
|
3298
|
+
const logger$23 = createLogger("context-compiler");
|
|
3196
3299
|
/**
|
|
3197
3300
|
* Fraction of the original token budget that must remain (after required +
|
|
3198
3301
|
* important sections) before an optional section is included.
|
|
@@ -3284,7 +3387,7 @@ var ContextCompilerImpl = class {
|
|
|
3284
3387
|
includedParts.push(truncated);
|
|
3285
3388
|
remainingBudget -= truncatedTokens;
|
|
3286
3389
|
anyTruncated = true;
|
|
3287
|
-
logger$
|
|
3390
|
+
logger$23.warn({
|
|
3288
3391
|
section: section.name,
|
|
3289
3392
|
originalTokens: tokens,
|
|
3290
3393
|
budgetTokens: truncatedTokens
|
|
@@ -3298,7 +3401,7 @@ var ContextCompilerImpl = class {
|
|
|
3298
3401
|
});
|
|
3299
3402
|
} else {
|
|
3300
3403
|
anyTruncated = true;
|
|
3301
|
-
logger$
|
|
3404
|
+
logger$23.warn({
|
|
3302
3405
|
section: section.name,
|
|
3303
3406
|
tokens
|
|
3304
3407
|
}, "Context compiler: omitted \"important\" section — no budget remaining");
|
|
@@ -3325,7 +3428,7 @@ var ContextCompilerImpl = class {
|
|
|
3325
3428
|
} else {
|
|
3326
3429
|
if (tokens > 0) {
|
|
3327
3430
|
anyTruncated = true;
|
|
3328
|
-
logger$
|
|
3431
|
+
logger$23.warn({
|
|
3329
3432
|
section: section.name,
|
|
3330
3433
|
tokens,
|
|
3331
3434
|
budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
|
|
@@ -3610,7 +3713,7 @@ function parseYamlResult(yamlText, schema) {
|
|
|
3610
3713
|
|
|
3611
3714
|
//#endregion
|
|
3612
3715
|
//#region src/modules/agent-dispatch/dispatcher-impl.ts
|
|
3613
|
-
const logger$
|
|
3716
|
+
const logger$22 = createLogger("agent-dispatch");
|
|
3614
3717
|
const SHUTDOWN_GRACE_MS = 1e4;
|
|
3615
3718
|
const SHUTDOWN_MAX_WAIT_MS = 3e4;
|
|
3616
3719
|
const CHARS_PER_TOKEN = 4;
|
|
@@ -3655,7 +3758,7 @@ function getAvailableMemory() {
|
|
|
3655
3758
|
}).trim(), 10);
|
|
3656
3759
|
_lastKnownPressureLevel = pressureLevel;
|
|
3657
3760
|
if (pressureLevel >= 4) {
|
|
3658
|
-
logger$
|
|
3761
|
+
logger$22.warn({ pressureLevel }, "macOS kernel reports critical memory pressure");
|
|
3659
3762
|
return 0;
|
|
3660
3763
|
}
|
|
3661
3764
|
} catch {}
|
|
@@ -3670,7 +3773,7 @@ function getAvailableMemory() {
|
|
|
3670
3773
|
const speculative = parseInt(vmstat.match(/Pages speculative:\s+(\d+)/)?.[1] ?? "0", 10);
|
|
3671
3774
|
const available = (free + purgeable + speculative) * pageSize;
|
|
3672
3775
|
if (pressureLevel >= 2) {
|
|
3673
|
-
logger$
|
|
3776
|
+
logger$22.warn({
|
|
3674
3777
|
pressureLevel,
|
|
3675
3778
|
availableBeforeDiscount: available
|
|
3676
3779
|
}, "macOS kernel reports memory pressure — discounting estimate");
|
|
@@ -3750,7 +3853,7 @@ var DispatcherImpl = class {
|
|
|
3750
3853
|
resolve: typedResolve,
|
|
3751
3854
|
reject
|
|
3752
3855
|
});
|
|
3753
|
-
logger$
|
|
3856
|
+
logger$22.debug({
|
|
3754
3857
|
id,
|
|
3755
3858
|
queueLength: this._queue.length
|
|
3756
3859
|
}, "Dispatch queued");
|
|
@@ -3781,7 +3884,7 @@ var DispatcherImpl = class {
|
|
|
3781
3884
|
async shutdown() {
|
|
3782
3885
|
this._shuttingDown = true;
|
|
3783
3886
|
this._stopMemoryPressureTimer();
|
|
3784
|
-
logger$
|
|
3887
|
+
logger$22.info({
|
|
3785
3888
|
running: this._running.size,
|
|
3786
3889
|
queued: this._queue.length
|
|
3787
3890
|
}, "Dispatcher shutting down");
|
|
@@ -3814,13 +3917,13 @@ var DispatcherImpl = class {
|
|
|
3814
3917
|
}
|
|
3815
3918
|
}, 50);
|
|
3816
3919
|
});
|
|
3817
|
-
logger$
|
|
3920
|
+
logger$22.info("Dispatcher shutdown complete");
|
|
3818
3921
|
}
|
|
3819
3922
|
async _startDispatch(id, request, resolve$2) {
|
|
3820
3923
|
const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns, otlpEndpoint } = request;
|
|
3821
3924
|
const adapter = this._adapterRegistry.get(agent);
|
|
3822
3925
|
if (adapter === void 0) {
|
|
3823
|
-
logger$
|
|
3926
|
+
logger$22.warn({
|
|
3824
3927
|
id,
|
|
3825
3928
|
agent
|
|
3826
3929
|
}, "No adapter found for agent");
|
|
@@ -3867,7 +3970,7 @@ var DispatcherImpl = class {
|
|
|
3867
3970
|
});
|
|
3868
3971
|
const startedAt = Date.now();
|
|
3869
3972
|
proc.on("error", (err) => {
|
|
3870
|
-
logger$
|
|
3973
|
+
logger$22.error({
|
|
3871
3974
|
id,
|
|
3872
3975
|
binary: cmd.binary,
|
|
3873
3976
|
error: err.message
|
|
@@ -3875,7 +3978,7 @@ var DispatcherImpl = class {
|
|
|
3875
3978
|
});
|
|
3876
3979
|
if (proc.stdin !== null) {
|
|
3877
3980
|
proc.stdin.on("error", (err) => {
|
|
3878
|
-
if (err.code !== "EPIPE") logger$
|
|
3981
|
+
if (err.code !== "EPIPE") logger$22.warn({
|
|
3879
3982
|
id,
|
|
3880
3983
|
error: err.message
|
|
3881
3984
|
}, "stdin write error");
|
|
@@ -3917,7 +4020,7 @@ var DispatcherImpl = class {
|
|
|
3917
4020
|
agent,
|
|
3918
4021
|
taskType
|
|
3919
4022
|
});
|
|
3920
|
-
logger$
|
|
4023
|
+
logger$22.debug({
|
|
3921
4024
|
id,
|
|
3922
4025
|
agent,
|
|
3923
4026
|
taskType,
|
|
@@ -3934,7 +4037,7 @@ var DispatcherImpl = class {
|
|
|
3934
4037
|
dispatchId: id,
|
|
3935
4038
|
timeoutMs
|
|
3936
4039
|
});
|
|
3937
|
-
logger$
|
|
4040
|
+
logger$22.warn({
|
|
3938
4041
|
id,
|
|
3939
4042
|
agent,
|
|
3940
4043
|
taskType,
|
|
@@ -3988,7 +4091,7 @@ var DispatcherImpl = class {
|
|
|
3988
4091
|
exitCode: code,
|
|
3989
4092
|
output: stdout
|
|
3990
4093
|
});
|
|
3991
|
-
logger$
|
|
4094
|
+
logger$22.debug({
|
|
3992
4095
|
id,
|
|
3993
4096
|
agent,
|
|
3994
4097
|
taskType,
|
|
@@ -4014,7 +4117,7 @@ var DispatcherImpl = class {
|
|
|
4014
4117
|
error: stderr || `Process exited with code ${String(code)}`,
|
|
4015
4118
|
exitCode: code
|
|
4016
4119
|
});
|
|
4017
|
-
logger$
|
|
4120
|
+
logger$22.debug({
|
|
4018
4121
|
id,
|
|
4019
4122
|
agent,
|
|
4020
4123
|
taskType,
|
|
@@ -4073,7 +4176,7 @@ var DispatcherImpl = class {
|
|
|
4073
4176
|
const next = this._queue.shift();
|
|
4074
4177
|
if (next === void 0) return;
|
|
4075
4178
|
next.handle.status = "running";
|
|
4076
|
-
logger$
|
|
4179
|
+
logger$22.debug({
|
|
4077
4180
|
id: next.id,
|
|
4078
4181
|
queueLength: this._queue.length
|
|
4079
4182
|
}, "Dequeued dispatch");
|
|
@@ -4086,7 +4189,7 @@ var DispatcherImpl = class {
|
|
|
4086
4189
|
_isMemoryPressured() {
|
|
4087
4190
|
const free = getAvailableMemory();
|
|
4088
4191
|
if (free < MIN_FREE_MEMORY_BYTES) {
|
|
4089
|
-
logger$
|
|
4192
|
+
logger$22.warn({
|
|
4090
4193
|
freeMB: Math.round(free / 1024 / 1024),
|
|
4091
4194
|
thresholdMB: Math.round(MIN_FREE_MEMORY_BYTES / 1024 / 1024),
|
|
4092
4195
|
pressureLevel: _lastKnownPressureLevel
|
|
@@ -4202,7 +4305,7 @@ function runBuildVerification(options) {
|
|
|
4202
4305
|
let cmd;
|
|
4203
4306
|
if (verifyCommand === void 0) {
|
|
4204
4307
|
const detection = detectPackageManager(projectRoot);
|
|
4205
|
-
logger$
|
|
4308
|
+
logger$22.info({
|
|
4206
4309
|
packageManager: detection.packageManager,
|
|
4207
4310
|
lockfile: detection.lockfile,
|
|
4208
4311
|
resolvedCommand: detection.command
|
|
@@ -4401,7 +4504,7 @@ function pickRecommendation(distribution, profile, totalIssues, reviewCycles, la
|
|
|
4401
4504
|
|
|
4402
4505
|
//#endregion
|
|
4403
4506
|
//#region src/modules/compiled-workflows/prompt-assembler.ts
|
|
4404
|
-
const logger$
|
|
4507
|
+
const logger$21 = createLogger("compiled-workflows:prompt-assembler");
|
|
4405
4508
|
/**
|
|
4406
4509
|
* Assemble a final prompt from a template and sections map.
|
|
4407
4510
|
*
|
|
@@ -4426,7 +4529,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4426
4529
|
tokenCount,
|
|
4427
4530
|
truncated: false
|
|
4428
4531
|
};
|
|
4429
|
-
logger$
|
|
4532
|
+
logger$21.warn({
|
|
4430
4533
|
tokenCount,
|
|
4431
4534
|
ceiling: tokenCeiling
|
|
4432
4535
|
}, "Prompt exceeds token ceiling — truncating optional sections");
|
|
@@ -4442,10 +4545,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4442
4545
|
const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
|
|
4443
4546
|
if (targetSectionTokens === 0) {
|
|
4444
4547
|
contentMap[section.name] = "";
|
|
4445
|
-
logger$
|
|
4548
|
+
logger$21.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
|
|
4446
4549
|
} else {
|
|
4447
4550
|
contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
|
|
4448
|
-
logger$
|
|
4551
|
+
logger$21.warn({
|
|
4449
4552
|
sectionName: section.name,
|
|
4450
4553
|
targetSectionTokens
|
|
4451
4554
|
}, "Section truncated to fit token budget");
|
|
@@ -4456,7 +4559,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4456
4559
|
}
|
|
4457
4560
|
if (tokenCount <= tokenCeiling) break;
|
|
4458
4561
|
}
|
|
4459
|
-
if (tokenCount > tokenCeiling) logger$
|
|
4562
|
+
if (tokenCount > tokenCeiling) logger$21.warn({
|
|
4460
4563
|
tokenCount,
|
|
4461
4564
|
ceiling: tokenCeiling
|
|
4462
4565
|
}, "Required sections alone exceed token ceiling — returning over-budget prompt");
|
|
@@ -4754,7 +4857,7 @@ function getTokenCeiling(workflowType, tokenCeilings) {
|
|
|
4754
4857
|
|
|
4755
4858
|
//#endregion
|
|
4756
4859
|
//#region src/modules/compiled-workflows/create-story.ts
|
|
4757
|
-
const logger$
|
|
4860
|
+
const logger$20 = createLogger("compiled-workflows:create-story");
|
|
4758
4861
|
/**
|
|
4759
4862
|
* Execute the compiled create-story workflow.
|
|
4760
4863
|
*
|
|
@@ -4774,13 +4877,13 @@ const logger$19 = createLogger("compiled-workflows:create-story");
|
|
|
4774
4877
|
*/
|
|
4775
4878
|
async function runCreateStory(deps, params) {
|
|
4776
4879
|
const { epicId, storyKey, pipelineRunId } = params;
|
|
4777
|
-
logger$
|
|
4880
|
+
logger$20.debug({
|
|
4778
4881
|
epicId,
|
|
4779
4882
|
storyKey,
|
|
4780
4883
|
pipelineRunId
|
|
4781
4884
|
}, "Starting create-story workflow");
|
|
4782
4885
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("create-story", deps.tokenCeilings);
|
|
4783
|
-
logger$
|
|
4886
|
+
logger$20.info({
|
|
4784
4887
|
workflow: "create-story",
|
|
4785
4888
|
ceiling: TOKEN_CEILING,
|
|
4786
4889
|
source: tokenCeilingSource
|
|
@@ -4790,7 +4893,7 @@ async function runCreateStory(deps, params) {
|
|
|
4790
4893
|
template = await deps.pack.getPrompt("create-story");
|
|
4791
4894
|
} catch (err) {
|
|
4792
4895
|
const error = err instanceof Error ? err.message : String(err);
|
|
4793
|
-
logger$
|
|
4896
|
+
logger$20.error({ error }, "Failed to retrieve create-story prompt template");
|
|
4794
4897
|
return {
|
|
4795
4898
|
result: "failed",
|
|
4796
4899
|
error: `Failed to retrieve prompt template: ${error}`,
|
|
@@ -4832,7 +4935,7 @@ async function runCreateStory(deps, params) {
|
|
|
4832
4935
|
priority: "important"
|
|
4833
4936
|
}
|
|
4834
4937
|
], TOKEN_CEILING);
|
|
4835
|
-
logger$
|
|
4938
|
+
logger$20.debug({
|
|
4836
4939
|
tokenCount,
|
|
4837
4940
|
truncated,
|
|
4838
4941
|
tokenCeiling: TOKEN_CEILING
|
|
@@ -4850,7 +4953,7 @@ async function runCreateStory(deps, params) {
|
|
|
4850
4953
|
dispatchResult = await handle.result;
|
|
4851
4954
|
} catch (err) {
|
|
4852
4955
|
const error = err instanceof Error ? err.message : String(err);
|
|
4853
|
-
logger$
|
|
4956
|
+
logger$20.error({
|
|
4854
4957
|
epicId,
|
|
4855
4958
|
storyKey,
|
|
4856
4959
|
error
|
|
@@ -4871,7 +4974,7 @@ async function runCreateStory(deps, params) {
|
|
|
4871
4974
|
if (dispatchResult.status === "failed") {
|
|
4872
4975
|
const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
|
|
4873
4976
|
const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
|
|
4874
|
-
logger$
|
|
4977
|
+
logger$20.warn({
|
|
4875
4978
|
epicId,
|
|
4876
4979
|
storyKey,
|
|
4877
4980
|
exitCode: dispatchResult.exitCode
|
|
@@ -4883,7 +4986,7 @@ async function runCreateStory(deps, params) {
|
|
|
4883
4986
|
};
|
|
4884
4987
|
}
|
|
4885
4988
|
if (dispatchResult.status === "timeout") {
|
|
4886
|
-
logger$
|
|
4989
|
+
logger$20.warn({
|
|
4887
4990
|
epicId,
|
|
4888
4991
|
storyKey
|
|
4889
4992
|
}, "Create-story dispatch timed out");
|
|
@@ -4896,7 +4999,7 @@ async function runCreateStory(deps, params) {
|
|
|
4896
4999
|
if (dispatchResult.parsed === null) {
|
|
4897
5000
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
4898
5001
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
4899
|
-
logger$
|
|
5002
|
+
logger$20.warn({
|
|
4900
5003
|
epicId,
|
|
4901
5004
|
storyKey,
|
|
4902
5005
|
details,
|
|
@@ -4912,7 +5015,7 @@ async function runCreateStory(deps, params) {
|
|
|
4912
5015
|
const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
|
|
4913
5016
|
if (!parseResult.success) {
|
|
4914
5017
|
const details = parseResult.error.message;
|
|
4915
|
-
logger$
|
|
5018
|
+
logger$20.warn({
|
|
4916
5019
|
epicId,
|
|
4917
5020
|
storyKey,
|
|
4918
5021
|
details
|
|
@@ -4925,7 +5028,7 @@ async function runCreateStory(deps, params) {
|
|
|
4925
5028
|
};
|
|
4926
5029
|
}
|
|
4927
5030
|
const parsed = parseResult.data;
|
|
4928
|
-
logger$
|
|
5031
|
+
logger$20.info({
|
|
4929
5032
|
epicId,
|
|
4930
5033
|
storyKey,
|
|
4931
5034
|
storyFile: parsed.story_file,
|
|
@@ -4947,7 +5050,7 @@ function getImplementationDecisions(deps) {
|
|
|
4947
5050
|
try {
|
|
4948
5051
|
return getDecisionsByPhase(deps.db, "implementation");
|
|
4949
5052
|
} catch (err) {
|
|
4950
|
-
logger$
|
|
5053
|
+
logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
|
|
4951
5054
|
return [];
|
|
4952
5055
|
}
|
|
4953
5056
|
}
|
|
@@ -4990,13 +5093,13 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
4990
5093
|
if (storyKey) {
|
|
4991
5094
|
const storySection = extractStorySection(shardContent, storyKey);
|
|
4992
5095
|
if (storySection) {
|
|
4993
|
-
logger$
|
|
5096
|
+
logger$20.debug({
|
|
4994
5097
|
epicId,
|
|
4995
5098
|
storyKey
|
|
4996
5099
|
}, "Extracted per-story section from epic shard");
|
|
4997
5100
|
return storySection;
|
|
4998
5101
|
}
|
|
4999
|
-
logger$
|
|
5102
|
+
logger$20.debug({
|
|
5000
5103
|
epicId,
|
|
5001
5104
|
storyKey
|
|
5002
5105
|
}, "No matching story section found — using full epic shard");
|
|
@@ -5006,11 +5109,11 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
5006
5109
|
if (projectRoot) {
|
|
5007
5110
|
const fallback = readEpicShardFromFile(projectRoot, epicId);
|
|
5008
5111
|
if (fallback) {
|
|
5009
|
-
logger$
|
|
5112
|
+
logger$20.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
|
|
5010
5113
|
if (storyKey) {
|
|
5011
5114
|
const storySection = extractStorySection(fallback, storyKey);
|
|
5012
5115
|
if (storySection) {
|
|
5013
|
-
logger$
|
|
5116
|
+
logger$20.debug({
|
|
5014
5117
|
epicId,
|
|
5015
5118
|
storyKey
|
|
5016
5119
|
}, "Extracted per-story section from file-based epic shard");
|
|
@@ -5022,7 +5125,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
5022
5125
|
}
|
|
5023
5126
|
return "";
|
|
5024
5127
|
} catch (err) {
|
|
5025
|
-
logger$
|
|
5128
|
+
logger$20.warn({
|
|
5026
5129
|
epicId,
|
|
5027
5130
|
error: err instanceof Error ? err.message : String(err)
|
|
5028
5131
|
}, "Failed to retrieve epic shard");
|
|
@@ -5039,7 +5142,7 @@ function getPrevDevNotes(decisions, epicId) {
|
|
|
5039
5142
|
if (devNotes.length === 0) return "";
|
|
5040
5143
|
return devNotes[devNotes.length - 1].value;
|
|
5041
5144
|
} catch (err) {
|
|
5042
|
-
logger$
|
|
5145
|
+
logger$20.warn({
|
|
5043
5146
|
epicId,
|
|
5044
5147
|
error: err instanceof Error ? err.message : String(err)
|
|
5045
5148
|
}, "Failed to retrieve prev dev notes");
|
|
@@ -5059,13 +5162,13 @@ function getArchConstraints$3(deps) {
|
|
|
5059
5162
|
if (deps.projectRoot) {
|
|
5060
5163
|
const fallback = readArchConstraintsFromFile(deps.projectRoot);
|
|
5061
5164
|
if (fallback) {
|
|
5062
|
-
logger$
|
|
5165
|
+
logger$20.info("Using file-based fallback for architecture constraints (decisions table empty)");
|
|
5063
5166
|
return fallback;
|
|
5064
5167
|
}
|
|
5065
5168
|
}
|
|
5066
5169
|
return "";
|
|
5067
5170
|
} catch (err) {
|
|
5068
|
-
logger$
|
|
5171
|
+
logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
5069
5172
|
return "";
|
|
5070
5173
|
}
|
|
5071
5174
|
}
|
|
@@ -5085,7 +5188,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
|
|
|
5085
5188
|
const match = pattern.exec(content);
|
|
5086
5189
|
return match ? match[0].trim() : "";
|
|
5087
5190
|
} catch (err) {
|
|
5088
|
-
logger$
|
|
5191
|
+
logger$20.warn({
|
|
5089
5192
|
epicId,
|
|
5090
5193
|
error: err instanceof Error ? err.message : String(err)
|
|
5091
5194
|
}, "File-based epic shard fallback failed");
|
|
@@ -5108,7 +5211,7 @@ function readArchConstraintsFromFile(projectRoot) {
|
|
|
5108
5211
|
const content = readFileSync$1(archPath, "utf-8");
|
|
5109
5212
|
return content.slice(0, 1500);
|
|
5110
5213
|
} catch (err) {
|
|
5111
|
-
logger$
|
|
5214
|
+
logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
|
|
5112
5215
|
return "";
|
|
5113
5216
|
}
|
|
5114
5217
|
}
|
|
@@ -5121,7 +5224,7 @@ async function getStoryTemplate(deps) {
|
|
|
5121
5224
|
try {
|
|
5122
5225
|
return await deps.pack.getTemplate("story");
|
|
5123
5226
|
} catch (err) {
|
|
5124
|
-
logger$
|
|
5227
|
+
logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
|
|
5125
5228
|
return "";
|
|
5126
5229
|
}
|
|
5127
5230
|
}
|
|
@@ -5158,7 +5261,7 @@ async function isValidStoryFile(filePath) {
|
|
|
5158
5261
|
|
|
5159
5262
|
//#endregion
|
|
5160
5263
|
//#region src/modules/compiled-workflows/git-helpers.ts
|
|
5161
|
-
const logger$
|
|
5264
|
+
const logger$19 = createLogger("compiled-workflows:git-helpers");
|
|
5162
5265
|
/**
|
|
5163
5266
|
* Capture the full git diff for HEAD (working tree vs current commit).
|
|
5164
5267
|
*
|
|
@@ -5254,7 +5357,7 @@ async function stageIntentToAdd(files, workingDirectory) {
|
|
|
5254
5357
|
if (files.length === 0) return;
|
|
5255
5358
|
const existing = files.filter((f) => {
|
|
5256
5359
|
const exists = existsSync$1(f);
|
|
5257
|
-
if (!exists) logger$
|
|
5360
|
+
if (!exists) logger$19.debug({ file: f }, "Skipping nonexistent file in stageIntentToAdd");
|
|
5258
5361
|
return exists;
|
|
5259
5362
|
});
|
|
5260
5363
|
if (existing.length === 0) return;
|
|
@@ -5288,7 +5391,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5288
5391
|
stderr += chunk.toString("utf-8");
|
|
5289
5392
|
});
|
|
5290
5393
|
proc.on("error", (err) => {
|
|
5291
|
-
logger$
|
|
5394
|
+
logger$19.warn({
|
|
5292
5395
|
label: logLabel,
|
|
5293
5396
|
cwd,
|
|
5294
5397
|
error: err.message
|
|
@@ -5297,7 +5400,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5297
5400
|
});
|
|
5298
5401
|
proc.on("close", (code) => {
|
|
5299
5402
|
if (code !== 0) {
|
|
5300
|
-
logger$
|
|
5403
|
+
logger$19.warn({
|
|
5301
5404
|
label: logLabel,
|
|
5302
5405
|
cwd,
|
|
5303
5406
|
code,
|
|
@@ -5313,7 +5416,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5313
5416
|
|
|
5314
5417
|
//#endregion
|
|
5315
5418
|
//#region src/modules/implementation-orchestrator/project-findings.ts
|
|
5316
|
-
const logger$
|
|
5419
|
+
const logger$18 = createLogger("project-findings");
|
|
5317
5420
|
/** Maximum character length for the findings summary */
|
|
5318
5421
|
const MAX_CHARS = 2e3;
|
|
5319
5422
|
/**
|
|
@@ -5379,7 +5482,7 @@ function getProjectFindings(db) {
|
|
|
5379
5482
|
if (summary.length > MAX_CHARS) summary = summary.slice(0, MAX_CHARS - 3) + "...";
|
|
5380
5483
|
return summary;
|
|
5381
5484
|
} catch (err) {
|
|
5382
|
-
logger$
|
|
5485
|
+
logger$18.warn({ err }, "Failed to query project findings (graceful fallback)");
|
|
5383
5486
|
return "";
|
|
5384
5487
|
}
|
|
5385
5488
|
}
|
|
@@ -5402,7 +5505,7 @@ function extractRecurringPatterns(outcomes) {
|
|
|
5402
5505
|
|
|
5403
5506
|
//#endregion
|
|
5404
5507
|
//#region src/modules/compiled-workflows/story-complexity.ts
|
|
5405
|
-
const logger$
|
|
5508
|
+
const logger$17 = createLogger("compiled-workflows:story-complexity");
|
|
5406
5509
|
/**
|
|
5407
5510
|
* Compute a complexity score from story markdown content.
|
|
5408
5511
|
*
|
|
@@ -5454,7 +5557,7 @@ function resolveFixStoryMaxTurns(complexityScore) {
|
|
|
5454
5557
|
* @param resolvedMaxTurns - Turn limit resolved for this dispatch
|
|
5455
5558
|
*/
|
|
5456
5559
|
function logComplexityResult(storyKey, complexity, resolvedMaxTurns) {
|
|
5457
|
-
logger$
|
|
5560
|
+
logger$17.info({
|
|
5458
5561
|
storyKey,
|
|
5459
5562
|
taskCount: complexity.taskCount,
|
|
5460
5563
|
subtaskCount: complexity.subtaskCount,
|
|
@@ -5512,7 +5615,7 @@ function countFilesInLayout(content) {
|
|
|
5512
5615
|
|
|
5513
5616
|
//#endregion
|
|
5514
5617
|
//#region src/modules/compiled-workflows/dev-story.ts
|
|
5515
|
-
const logger$
|
|
5618
|
+
const logger$16 = createLogger("compiled-workflows:dev-story");
|
|
5516
5619
|
/** Default timeout for dev-story dispatches in milliseconds (30 min) */
|
|
5517
5620
|
const DEFAULT_TIMEOUT_MS$1 = 18e5;
|
|
5518
5621
|
/** Default Vitest test patterns injected when no test-pattern decisions exist */
|
|
@@ -5535,12 +5638,12 @@ const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
|
|
|
5535
5638
|
*/
|
|
5536
5639
|
async function runDevStory(deps, params) {
|
|
5537
5640
|
const { storyKey, storyFilePath, taskScope, priorFiles } = params;
|
|
5538
|
-
logger$
|
|
5641
|
+
logger$16.info({
|
|
5539
5642
|
storyKey,
|
|
5540
5643
|
storyFilePath
|
|
5541
5644
|
}, "Starting compiled dev-story workflow");
|
|
5542
5645
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("dev-story", deps.tokenCeilings);
|
|
5543
|
-
logger$
|
|
5646
|
+
logger$16.info({
|
|
5544
5647
|
workflow: "dev-story",
|
|
5545
5648
|
ceiling: TOKEN_CEILING,
|
|
5546
5649
|
source: tokenCeilingSource
|
|
@@ -5583,10 +5686,10 @@ async function runDevStory(deps, params) {
|
|
|
5583
5686
|
let template;
|
|
5584
5687
|
try {
|
|
5585
5688
|
template = await deps.pack.getPrompt("dev-story");
|
|
5586
|
-
logger$
|
|
5689
|
+
logger$16.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
|
|
5587
5690
|
} catch (err) {
|
|
5588
5691
|
const error = err instanceof Error ? err.message : String(err);
|
|
5589
|
-
logger$
|
|
5692
|
+
logger$16.error({
|
|
5590
5693
|
storyKey,
|
|
5591
5694
|
error
|
|
5592
5695
|
}, "Failed to retrieve dev-story prompt template");
|
|
@@ -5597,14 +5700,14 @@ async function runDevStory(deps, params) {
|
|
|
5597
5700
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
5598
5701
|
} catch (err) {
|
|
5599
5702
|
if (err.code === "ENOENT") {
|
|
5600
|
-
logger$
|
|
5703
|
+
logger$16.error({
|
|
5601
5704
|
storyKey,
|
|
5602
5705
|
storyFilePath
|
|
5603
5706
|
}, "Story file not found");
|
|
5604
5707
|
return makeFailureResult("story_file_not_found");
|
|
5605
5708
|
}
|
|
5606
5709
|
const error = err instanceof Error ? err.message : String(err);
|
|
5607
|
-
logger$
|
|
5710
|
+
logger$16.error({
|
|
5608
5711
|
storyKey,
|
|
5609
5712
|
storyFilePath,
|
|
5610
5713
|
error
|
|
@@ -5612,7 +5715,7 @@ async function runDevStory(deps, params) {
|
|
|
5612
5715
|
return makeFailureResult(`story_file_read_error: ${error}`);
|
|
5613
5716
|
}
|
|
5614
5717
|
if (storyContent.trim().length === 0) {
|
|
5615
|
-
logger$
|
|
5718
|
+
logger$16.error({
|
|
5616
5719
|
storyKey,
|
|
5617
5720
|
storyFilePath
|
|
5618
5721
|
}, "Story file is empty");
|
|
@@ -5627,17 +5730,17 @@ async function runDevStory(deps, params) {
|
|
|
5627
5730
|
const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
|
|
5628
5731
|
if (testPatternDecisions.length > 0) {
|
|
5629
5732
|
testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
|
|
5630
|
-
logger$
|
|
5733
|
+
logger$16.debug({
|
|
5631
5734
|
storyKey,
|
|
5632
5735
|
count: testPatternDecisions.length
|
|
5633
5736
|
}, "Loaded test patterns from decision store");
|
|
5634
5737
|
} else {
|
|
5635
5738
|
testPatternsContent = DEFAULT_VITEST_PATTERNS;
|
|
5636
|
-
logger$
|
|
5739
|
+
logger$16.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
|
|
5637
5740
|
}
|
|
5638
5741
|
} catch (err) {
|
|
5639
5742
|
const error = err instanceof Error ? err.message : String(err);
|
|
5640
|
-
logger$
|
|
5743
|
+
logger$16.warn({
|
|
5641
5744
|
storyKey,
|
|
5642
5745
|
error
|
|
5643
5746
|
}, "Failed to load test patterns — using defaults");
|
|
@@ -5652,7 +5755,7 @@ async function runDevStory(deps, params) {
|
|
|
5652
5755
|
const findings = getProjectFindings(deps.db);
|
|
5653
5756
|
if (findings.length > 0) {
|
|
5654
5757
|
priorFindingsContent = "Previous pipeline runs encountered these issues — avoid repeating them:\n\n" + findings;
|
|
5655
|
-
logger$
|
|
5758
|
+
logger$16.debug({
|
|
5656
5759
|
storyKey,
|
|
5657
5760
|
findingsLen: findings.length
|
|
5658
5761
|
}, "Injecting prior findings into dev-story prompt");
|
|
@@ -5672,7 +5775,7 @@ async function runDevStory(deps, params) {
|
|
|
5672
5775
|
if (plan.test_categories && plan.test_categories.length > 0) parts.push(`\n### Categories: ${plan.test_categories.join(", ")}`);
|
|
5673
5776
|
if (plan.coverage_notes) parts.push(`\n### Coverage Notes\n${plan.coverage_notes}`);
|
|
5674
5777
|
testPlanContent = parts.join("\n");
|
|
5675
|
-
logger$
|
|
5778
|
+
logger$16.debug({ storyKey }, "Injecting test plan into dev-story prompt");
|
|
5676
5779
|
}
|
|
5677
5780
|
} catch {}
|
|
5678
5781
|
const sections = [
|
|
@@ -5718,7 +5821,7 @@ async function runDevStory(deps, params) {
|
|
|
5718
5821
|
}
|
|
5719
5822
|
];
|
|
5720
5823
|
const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING);
|
|
5721
|
-
logger$
|
|
5824
|
+
logger$16.info({
|
|
5722
5825
|
storyKey,
|
|
5723
5826
|
tokenCount,
|
|
5724
5827
|
ceiling: TOKEN_CEILING,
|
|
@@ -5739,7 +5842,7 @@ async function runDevStory(deps, params) {
|
|
|
5739
5842
|
dispatchResult = await handle.result;
|
|
5740
5843
|
} catch (err) {
|
|
5741
5844
|
const error = err instanceof Error ? err.message : String(err);
|
|
5742
|
-
logger$
|
|
5845
|
+
logger$16.error({
|
|
5743
5846
|
storyKey,
|
|
5744
5847
|
error
|
|
5745
5848
|
}, "Dispatch threw an unexpected error");
|
|
@@ -5750,11 +5853,11 @@ async function runDevStory(deps, params) {
|
|
|
5750
5853
|
output: dispatchResult.tokenEstimate.output
|
|
5751
5854
|
};
|
|
5752
5855
|
if (dispatchResult.status === "timeout") {
|
|
5753
|
-
logger$
|
|
5856
|
+
logger$16.error({
|
|
5754
5857
|
storyKey,
|
|
5755
5858
|
durationMs: dispatchResult.durationMs
|
|
5756
5859
|
}, "Dev-story dispatch timed out");
|
|
5757
|
-
if (dispatchResult.output.length > 0) logger$
|
|
5860
|
+
if (dispatchResult.output.length > 0) logger$16.info({
|
|
5758
5861
|
storyKey,
|
|
5759
5862
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
5760
5863
|
}, "Partial output before timeout");
|
|
@@ -5764,12 +5867,12 @@ async function runDevStory(deps, params) {
|
|
|
5764
5867
|
};
|
|
5765
5868
|
}
|
|
5766
5869
|
if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
|
|
5767
|
-
logger$
|
|
5870
|
+
logger$16.error({
|
|
5768
5871
|
storyKey,
|
|
5769
5872
|
exitCode: dispatchResult.exitCode,
|
|
5770
5873
|
status: dispatchResult.status
|
|
5771
5874
|
}, "Dev-story dispatch failed");
|
|
5772
|
-
if (dispatchResult.output.length > 0) logger$
|
|
5875
|
+
if (dispatchResult.output.length > 0) logger$16.info({
|
|
5773
5876
|
storyKey,
|
|
5774
5877
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
5775
5878
|
}, "Partial output from failed dispatch");
|
|
@@ -5781,7 +5884,7 @@ async function runDevStory(deps, params) {
|
|
|
5781
5884
|
if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
|
|
5782
5885
|
const details = dispatchResult.parseError ?? "parsed result was null";
|
|
5783
5886
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
5784
|
-
logger$
|
|
5887
|
+
logger$16.error({
|
|
5785
5888
|
storyKey,
|
|
5786
5889
|
parseError: details,
|
|
5787
5890
|
rawOutputSnippet: rawSnippet
|
|
@@ -5789,12 +5892,12 @@ async function runDevStory(deps, params) {
|
|
|
5789
5892
|
let filesModified = [];
|
|
5790
5893
|
try {
|
|
5791
5894
|
filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
|
|
5792
|
-
if (filesModified.length > 0) logger$
|
|
5895
|
+
if (filesModified.length > 0) logger$16.info({
|
|
5793
5896
|
storyKey,
|
|
5794
5897
|
fileCount: filesModified.length
|
|
5795
5898
|
}, "Recovered files_modified from git status (YAML fallback)");
|
|
5796
5899
|
} catch (err) {
|
|
5797
|
-
logger$
|
|
5900
|
+
logger$16.warn({
|
|
5798
5901
|
storyKey,
|
|
5799
5902
|
error: err instanceof Error ? err.message : String(err)
|
|
5800
5903
|
}, "Failed to recover files_modified from git");
|
|
@@ -5811,7 +5914,7 @@ async function runDevStory(deps, params) {
|
|
|
5811
5914
|
};
|
|
5812
5915
|
}
|
|
5813
5916
|
const parsed = dispatchResult.parsed;
|
|
5814
|
-
logger$
|
|
5917
|
+
logger$16.info({
|
|
5815
5918
|
storyKey,
|
|
5816
5919
|
result: parsed.result,
|
|
5817
5920
|
acMet: parsed.ac_met.length
|
|
@@ -5950,7 +6053,7 @@ function extractFilesInScope(storyContent) {
|
|
|
5950
6053
|
|
|
5951
6054
|
//#endregion
|
|
5952
6055
|
//#region src/modules/compiled-workflows/code-review.ts
|
|
5953
|
-
const logger$
|
|
6056
|
+
const logger$15 = createLogger("compiled-workflows:code-review");
|
|
5954
6057
|
/**
|
|
5955
6058
|
* Default fallback result when dispatch fails or times out.
|
|
5956
6059
|
* Uses NEEDS_MINOR_FIXES (not NEEDS_MAJOR_REWORK) so a parse/schema failure
|
|
@@ -5988,14 +6091,14 @@ function defaultFailResult(error, tokenUsage) {
|
|
|
5988
6091
|
async function runCodeReview(deps, params) {
|
|
5989
6092
|
const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
|
|
5990
6093
|
const cwd = workingDirectory ?? process.cwd();
|
|
5991
|
-
logger$
|
|
6094
|
+
logger$15.debug({
|
|
5992
6095
|
storyKey,
|
|
5993
6096
|
storyFilePath,
|
|
5994
6097
|
cwd,
|
|
5995
6098
|
pipelineRunId
|
|
5996
6099
|
}, "Starting code-review workflow");
|
|
5997
6100
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("code-review", deps.tokenCeilings);
|
|
5998
|
-
logger$
|
|
6101
|
+
logger$15.info({
|
|
5999
6102
|
workflow: "code-review",
|
|
6000
6103
|
ceiling: TOKEN_CEILING,
|
|
6001
6104
|
source: tokenCeilingSource
|
|
@@ -6005,7 +6108,7 @@ async function runCodeReview(deps, params) {
|
|
|
6005
6108
|
template = await deps.pack.getPrompt("code-review");
|
|
6006
6109
|
} catch (err) {
|
|
6007
6110
|
const error = err instanceof Error ? err.message : String(err);
|
|
6008
|
-
logger$
|
|
6111
|
+
logger$15.error({ error }, "Failed to retrieve code-review prompt template");
|
|
6009
6112
|
return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
|
|
6010
6113
|
input: 0,
|
|
6011
6114
|
output: 0
|
|
@@ -6016,7 +6119,7 @@ async function runCodeReview(deps, params) {
|
|
|
6016
6119
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
6017
6120
|
} catch (err) {
|
|
6018
6121
|
const error = err instanceof Error ? err.message : String(err);
|
|
6019
|
-
logger$
|
|
6122
|
+
logger$15.error({
|
|
6020
6123
|
storyFilePath,
|
|
6021
6124
|
error
|
|
6022
6125
|
}, "Failed to read story file");
|
|
@@ -6036,12 +6139,12 @@ async function runCodeReview(deps, params) {
|
|
|
6036
6139
|
const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
|
|
6037
6140
|
if (scopedTotal <= TOKEN_CEILING) {
|
|
6038
6141
|
gitDiffContent = scopedDiff;
|
|
6039
|
-
logger$
|
|
6142
|
+
logger$15.debug({
|
|
6040
6143
|
fileCount: filesModified.length,
|
|
6041
6144
|
tokenCount: scopedTotal
|
|
6042
6145
|
}, "Using scoped file diff");
|
|
6043
6146
|
} else {
|
|
6044
|
-
logger$
|
|
6147
|
+
logger$15.warn({
|
|
6045
6148
|
estimatedTotal: scopedTotal,
|
|
6046
6149
|
ceiling: TOKEN_CEILING,
|
|
6047
6150
|
fileCount: filesModified.length
|
|
@@ -6055,7 +6158,7 @@ async function runCodeReview(deps, params) {
|
|
|
6055
6158
|
const fullTotal = nonDiffTokens + countTokens(fullDiff);
|
|
6056
6159
|
if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
|
|
6057
6160
|
else {
|
|
6058
|
-
logger$
|
|
6161
|
+
logger$15.warn({
|
|
6059
6162
|
estimatedTotal: fullTotal,
|
|
6060
6163
|
ceiling: TOKEN_CEILING
|
|
6061
6164
|
}, "Full git diff would exceed token ceiling — using stat-only summary");
|
|
@@ -6063,7 +6166,7 @@ async function runCodeReview(deps, params) {
|
|
|
6063
6166
|
}
|
|
6064
6167
|
}
|
|
6065
6168
|
if (gitDiffContent.trim().length === 0) {
|
|
6066
|
-
logger$
|
|
6169
|
+
logger$15.info({ storyKey }, "Empty git diff — skipping review with SHIP_IT");
|
|
6067
6170
|
return {
|
|
6068
6171
|
verdict: "SHIP_IT",
|
|
6069
6172
|
issues: 0,
|
|
@@ -6088,7 +6191,7 @@ async function runCodeReview(deps, params) {
|
|
|
6088
6191
|
const findings = getProjectFindings(deps.db);
|
|
6089
6192
|
if (findings.length > 0) {
|
|
6090
6193
|
priorFindingsContent = "Previous reviews found these recurring patterns — pay special attention:\n\n" + findings;
|
|
6091
|
-
logger$
|
|
6194
|
+
logger$15.debug({
|
|
6092
6195
|
storyKey,
|
|
6093
6196
|
findingsLen: findings.length
|
|
6094
6197
|
}, "Injecting prior findings into code-review prompt");
|
|
@@ -6122,11 +6225,11 @@ async function runCodeReview(deps, params) {
|
|
|
6122
6225
|
}
|
|
6123
6226
|
];
|
|
6124
6227
|
const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
|
|
6125
|
-
if (assembleResult.truncated) logger$
|
|
6228
|
+
if (assembleResult.truncated) logger$15.warn({
|
|
6126
6229
|
storyKey,
|
|
6127
6230
|
tokenCount: assembleResult.tokenCount
|
|
6128
6231
|
}, "Code-review prompt truncated to fit token ceiling");
|
|
6129
|
-
logger$
|
|
6232
|
+
logger$15.debug({
|
|
6130
6233
|
storyKey,
|
|
6131
6234
|
tokenCount: assembleResult.tokenCount,
|
|
6132
6235
|
truncated: assembleResult.truncated
|
|
@@ -6145,7 +6248,7 @@ async function runCodeReview(deps, params) {
|
|
|
6145
6248
|
dispatchResult = await handle.result;
|
|
6146
6249
|
} catch (err) {
|
|
6147
6250
|
const error = err instanceof Error ? err.message : String(err);
|
|
6148
|
-
logger$
|
|
6251
|
+
logger$15.error({
|
|
6149
6252
|
storyKey,
|
|
6150
6253
|
error
|
|
6151
6254
|
}, "Code-review dispatch threw unexpected error");
|
|
@@ -6161,7 +6264,7 @@ async function runCodeReview(deps, params) {
|
|
|
6161
6264
|
const rawOutput = dispatchResult.output ?? void 0;
|
|
6162
6265
|
if (dispatchResult.status === "failed") {
|
|
6163
6266
|
const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
|
|
6164
|
-
logger$
|
|
6267
|
+
logger$15.warn({
|
|
6165
6268
|
storyKey,
|
|
6166
6269
|
exitCode: dispatchResult.exitCode
|
|
6167
6270
|
}, "Code-review dispatch failed");
|
|
@@ -6171,7 +6274,7 @@ async function runCodeReview(deps, params) {
|
|
|
6171
6274
|
};
|
|
6172
6275
|
}
|
|
6173
6276
|
if (dispatchResult.status === "timeout") {
|
|
6174
|
-
logger$
|
|
6277
|
+
logger$15.warn({ storyKey }, "Code-review dispatch timed out");
|
|
6175
6278
|
return {
|
|
6176
6279
|
...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
|
|
6177
6280
|
rawOutput
|
|
@@ -6179,7 +6282,7 @@ async function runCodeReview(deps, params) {
|
|
|
6179
6282
|
}
|
|
6180
6283
|
if (dispatchResult.parsed === null) {
|
|
6181
6284
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
6182
|
-
logger$
|
|
6285
|
+
logger$15.warn({
|
|
6183
6286
|
storyKey,
|
|
6184
6287
|
details
|
|
6185
6288
|
}, "Code-review output schema validation failed");
|
|
@@ -6196,7 +6299,7 @@ async function runCodeReview(deps, params) {
|
|
|
6196
6299
|
const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
|
|
6197
6300
|
if (!parseResult.success) {
|
|
6198
6301
|
const details = parseResult.error.message;
|
|
6199
|
-
logger$
|
|
6302
|
+
logger$15.warn({
|
|
6200
6303
|
storyKey,
|
|
6201
6304
|
details
|
|
6202
6305
|
}, "Code-review output failed schema validation");
|
|
@@ -6211,13 +6314,13 @@ async function runCodeReview(deps, params) {
|
|
|
6211
6314
|
};
|
|
6212
6315
|
}
|
|
6213
6316
|
const parsed = parseResult.data;
|
|
6214
|
-
if (parsed.agentVerdict !== parsed.verdict) logger$
|
|
6317
|
+
if (parsed.agentVerdict !== parsed.verdict) logger$15.info({
|
|
6215
6318
|
storyKey,
|
|
6216
6319
|
agentVerdict: parsed.agentVerdict,
|
|
6217
6320
|
pipelineVerdict: parsed.verdict,
|
|
6218
6321
|
issues: parsed.issues
|
|
6219
6322
|
}, "Pipeline overrode agent verdict based on issue severities");
|
|
6220
|
-
logger$
|
|
6323
|
+
logger$15.info({
|
|
6221
6324
|
storyKey,
|
|
6222
6325
|
verdict: parsed.verdict,
|
|
6223
6326
|
issues: parsed.issues
|
|
@@ -6242,14 +6345,14 @@ function getArchConstraints$2(deps) {
|
|
|
6242
6345
|
if (constraints.length === 0) return "";
|
|
6243
6346
|
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
6244
6347
|
} catch (err) {
|
|
6245
|
-
logger$
|
|
6348
|
+
logger$15.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
6246
6349
|
return "";
|
|
6247
6350
|
}
|
|
6248
6351
|
}
|
|
6249
6352
|
|
|
6250
6353
|
//#endregion
|
|
6251
6354
|
//#region src/modules/compiled-workflows/test-plan.ts
|
|
6252
|
-
const logger$
|
|
6355
|
+
const logger$14 = createLogger("compiled-workflows:test-plan");
|
|
6253
6356
|
/** Default timeout for test-plan dispatches in milliseconds (5 min — lightweight call) */
|
|
6254
6357
|
const DEFAULT_TIMEOUT_MS = 3e5;
|
|
6255
6358
|
/**
|
|
@@ -6261,12 +6364,12 @@ const DEFAULT_TIMEOUT_MS = 3e5;
|
|
|
6261
6364
|
*/
|
|
6262
6365
|
async function runTestPlan(deps, params) {
|
|
6263
6366
|
const { storyKey, storyFilePath, pipelineRunId } = params;
|
|
6264
|
-
logger$
|
|
6367
|
+
logger$14.info({
|
|
6265
6368
|
storyKey,
|
|
6266
6369
|
storyFilePath
|
|
6267
6370
|
}, "Starting compiled test-plan workflow");
|
|
6268
6371
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-plan", deps.tokenCeilings);
|
|
6269
|
-
logger$
|
|
6372
|
+
logger$14.info({
|
|
6270
6373
|
workflow: "test-plan",
|
|
6271
6374
|
ceiling: TOKEN_CEILING,
|
|
6272
6375
|
source: tokenCeilingSource
|
|
@@ -6274,10 +6377,10 @@ async function runTestPlan(deps, params) {
|
|
|
6274
6377
|
let template;
|
|
6275
6378
|
try {
|
|
6276
6379
|
template = await deps.pack.getPrompt("test-plan");
|
|
6277
|
-
logger$
|
|
6380
|
+
logger$14.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
|
|
6278
6381
|
} catch (err) {
|
|
6279
6382
|
const error = err instanceof Error ? err.message : String(err);
|
|
6280
|
-
logger$
|
|
6383
|
+
logger$14.warn({
|
|
6281
6384
|
storyKey,
|
|
6282
6385
|
error
|
|
6283
6386
|
}, "Failed to retrieve test-plan prompt template");
|
|
@@ -6288,14 +6391,14 @@ async function runTestPlan(deps, params) {
|
|
|
6288
6391
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
6289
6392
|
} catch (err) {
|
|
6290
6393
|
if (err.code === "ENOENT") {
|
|
6291
|
-
logger$
|
|
6394
|
+
logger$14.warn({
|
|
6292
6395
|
storyKey,
|
|
6293
6396
|
storyFilePath
|
|
6294
6397
|
}, "Story file not found for test planning");
|
|
6295
6398
|
return makeTestPlanFailureResult("story_file_not_found");
|
|
6296
6399
|
}
|
|
6297
6400
|
const error = err instanceof Error ? err.message : String(err);
|
|
6298
|
-
logger$
|
|
6401
|
+
logger$14.warn({
|
|
6299
6402
|
storyKey,
|
|
6300
6403
|
storyFilePath,
|
|
6301
6404
|
error
|
|
@@ -6312,7 +6415,7 @@ async function runTestPlan(deps, params) {
|
|
|
6312
6415
|
content: archConstraintsContent,
|
|
6313
6416
|
priority: "optional"
|
|
6314
6417
|
}], TOKEN_CEILING);
|
|
6315
|
-
logger$
|
|
6418
|
+
logger$14.info({
|
|
6316
6419
|
storyKey,
|
|
6317
6420
|
tokenCount,
|
|
6318
6421
|
ceiling: TOKEN_CEILING,
|
|
@@ -6332,7 +6435,7 @@ async function runTestPlan(deps, params) {
|
|
|
6332
6435
|
dispatchResult = await handle.result;
|
|
6333
6436
|
} catch (err) {
|
|
6334
6437
|
const error = err instanceof Error ? err.message : String(err);
|
|
6335
|
-
logger$
|
|
6438
|
+
logger$14.warn({
|
|
6336
6439
|
storyKey,
|
|
6337
6440
|
error
|
|
6338
6441
|
}, "Test-plan dispatch threw an unexpected error");
|
|
@@ -6343,7 +6446,7 @@ async function runTestPlan(deps, params) {
|
|
|
6343
6446
|
output: dispatchResult.tokenEstimate.output
|
|
6344
6447
|
};
|
|
6345
6448
|
if (dispatchResult.status === "timeout") {
|
|
6346
|
-
logger$
|
|
6449
|
+
logger$14.warn({
|
|
6347
6450
|
storyKey,
|
|
6348
6451
|
durationMs: dispatchResult.durationMs
|
|
6349
6452
|
}, "Test-plan dispatch timed out");
|
|
@@ -6353,7 +6456,7 @@ async function runTestPlan(deps, params) {
|
|
|
6353
6456
|
};
|
|
6354
6457
|
}
|
|
6355
6458
|
if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
|
|
6356
|
-
logger$
|
|
6459
|
+
logger$14.warn({
|
|
6357
6460
|
storyKey,
|
|
6358
6461
|
exitCode: dispatchResult.exitCode,
|
|
6359
6462
|
status: dispatchResult.status
|
|
@@ -6365,7 +6468,7 @@ async function runTestPlan(deps, params) {
|
|
|
6365
6468
|
}
|
|
6366
6469
|
if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
|
|
6367
6470
|
const details = dispatchResult.parseError ?? "parsed result was null";
|
|
6368
|
-
logger$
|
|
6471
|
+
logger$14.warn({
|
|
6369
6472
|
storyKey,
|
|
6370
6473
|
parseError: details
|
|
6371
6474
|
}, "Test-plan YAML schema validation failed");
|
|
@@ -6388,19 +6491,19 @@ async function runTestPlan(deps, params) {
|
|
|
6388
6491
|
}),
|
|
6389
6492
|
rationale: `Test plan for ${storyKey}: ${parsed.test_files.length} test files, categories: ${parsed.test_categories.join(", ")}`
|
|
6390
6493
|
});
|
|
6391
|
-
logger$
|
|
6494
|
+
logger$14.info({
|
|
6392
6495
|
storyKey,
|
|
6393
6496
|
fileCount: parsed.test_files.length,
|
|
6394
6497
|
categories: parsed.test_categories
|
|
6395
6498
|
}, "Test plan stored in decision store");
|
|
6396
6499
|
} catch (err) {
|
|
6397
6500
|
const error = err instanceof Error ? err.message : String(err);
|
|
6398
|
-
logger$
|
|
6501
|
+
logger$14.warn({
|
|
6399
6502
|
storyKey,
|
|
6400
6503
|
error
|
|
6401
6504
|
}, "Failed to store test plan in decision store — proceeding anyway");
|
|
6402
6505
|
}
|
|
6403
|
-
logger$
|
|
6506
|
+
logger$14.info({
|
|
6404
6507
|
storyKey,
|
|
6405
6508
|
result: parsed.result
|
|
6406
6509
|
}, "Test-plan workflow completed");
|
|
@@ -6440,14 +6543,14 @@ function getArchConstraints$1(deps) {
|
|
|
6440
6543
|
if (constraints.length === 0) return "";
|
|
6441
6544
|
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
6442
6545
|
} catch (err) {
|
|
6443
|
-
logger$
|
|
6546
|
+
logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints for test-plan — proceeding without them");
|
|
6444
6547
|
return "";
|
|
6445
6548
|
}
|
|
6446
6549
|
}
|
|
6447
6550
|
|
|
6448
6551
|
//#endregion
|
|
6449
6552
|
//#region src/modules/compiled-workflows/test-expansion.ts
|
|
6450
|
-
const logger$
|
|
6553
|
+
const logger$13 = createLogger("compiled-workflows:test-expansion");
|
|
6451
6554
|
function defaultFallbackResult(error, tokenUsage) {
|
|
6452
6555
|
return {
|
|
6453
6556
|
expansion_priority: "low",
|
|
@@ -6477,14 +6580,14 @@ function defaultFallbackResult(error, tokenUsage) {
|
|
|
6477
6580
|
async function runTestExpansion(deps, params) {
|
|
6478
6581
|
const { storyKey, storyFilePath, pipelineRunId, filesModified, workingDirectory } = params;
|
|
6479
6582
|
const cwd = workingDirectory ?? process.cwd();
|
|
6480
|
-
logger$
|
|
6583
|
+
logger$13.debug({
|
|
6481
6584
|
storyKey,
|
|
6482
6585
|
storyFilePath,
|
|
6483
6586
|
cwd,
|
|
6484
6587
|
pipelineRunId
|
|
6485
6588
|
}, "Starting test-expansion workflow");
|
|
6486
6589
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-expansion", deps.tokenCeilings);
|
|
6487
|
-
logger$
|
|
6590
|
+
logger$13.info({
|
|
6488
6591
|
workflow: "test-expansion",
|
|
6489
6592
|
ceiling: TOKEN_CEILING,
|
|
6490
6593
|
source: tokenCeilingSource
|
|
@@ -6494,7 +6597,7 @@ async function runTestExpansion(deps, params) {
|
|
|
6494
6597
|
template = await deps.pack.getPrompt("test-expansion");
|
|
6495
6598
|
} catch (err) {
|
|
6496
6599
|
const error = err instanceof Error ? err.message : String(err);
|
|
6497
|
-
logger$
|
|
6600
|
+
logger$13.warn({ error }, "Failed to retrieve test-expansion prompt template");
|
|
6498
6601
|
return defaultFallbackResult(`Failed to retrieve prompt template: ${error}`, {
|
|
6499
6602
|
input: 0,
|
|
6500
6603
|
output: 0
|
|
@@ -6505,7 +6608,7 @@ async function runTestExpansion(deps, params) {
|
|
|
6505
6608
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
6506
6609
|
} catch (err) {
|
|
6507
6610
|
const error = err instanceof Error ? err.message : String(err);
|
|
6508
|
-
logger$
|
|
6611
|
+
logger$13.warn({
|
|
6509
6612
|
storyFilePath,
|
|
6510
6613
|
error
|
|
6511
6614
|
}, "Failed to read story file");
|
|
@@ -6525,12 +6628,12 @@ async function runTestExpansion(deps, params) {
|
|
|
6525
6628
|
const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
|
|
6526
6629
|
if (scopedTotal <= TOKEN_CEILING) {
|
|
6527
6630
|
gitDiffContent = scopedDiff;
|
|
6528
|
-
logger$
|
|
6631
|
+
logger$13.debug({
|
|
6529
6632
|
fileCount: filesModified.length,
|
|
6530
6633
|
tokenCount: scopedTotal
|
|
6531
6634
|
}, "Using scoped file diff");
|
|
6532
6635
|
} else {
|
|
6533
|
-
logger$
|
|
6636
|
+
logger$13.warn({
|
|
6534
6637
|
estimatedTotal: scopedTotal,
|
|
6535
6638
|
ceiling: TOKEN_CEILING,
|
|
6536
6639
|
fileCount: filesModified.length
|
|
@@ -6538,7 +6641,7 @@ async function runTestExpansion(deps, params) {
|
|
|
6538
6641
|
gitDiffContent = await getGitDiffStatSummary(cwd);
|
|
6539
6642
|
}
|
|
6540
6643
|
} catch (err) {
|
|
6541
|
-
logger$
|
|
6644
|
+
logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
|
|
6542
6645
|
}
|
|
6543
6646
|
const sections = [
|
|
6544
6647
|
{
|
|
@@ -6558,11 +6661,11 @@ async function runTestExpansion(deps, params) {
|
|
|
6558
6661
|
}
|
|
6559
6662
|
];
|
|
6560
6663
|
const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
|
|
6561
|
-
if (assembleResult.truncated) logger$
|
|
6664
|
+
if (assembleResult.truncated) logger$13.warn({
|
|
6562
6665
|
storyKey,
|
|
6563
6666
|
tokenCount: assembleResult.tokenCount
|
|
6564
6667
|
}, "Test-expansion prompt truncated to fit token ceiling");
|
|
6565
|
-
logger$
|
|
6668
|
+
logger$13.debug({
|
|
6566
6669
|
storyKey,
|
|
6567
6670
|
tokenCount: assembleResult.tokenCount,
|
|
6568
6671
|
truncated: assembleResult.truncated
|
|
@@ -6581,7 +6684,7 @@ async function runTestExpansion(deps, params) {
|
|
|
6581
6684
|
dispatchResult = await handle.result;
|
|
6582
6685
|
} catch (err) {
|
|
6583
6686
|
const error = err instanceof Error ? err.message : String(err);
|
|
6584
|
-
logger$
|
|
6687
|
+
logger$13.warn({
|
|
6585
6688
|
storyKey,
|
|
6586
6689
|
error
|
|
6587
6690
|
}, "Test-expansion dispatch threw unexpected error");
|
|
@@ -6596,19 +6699,19 @@ async function runTestExpansion(deps, params) {
|
|
|
6596
6699
|
};
|
|
6597
6700
|
if (dispatchResult.status === "failed") {
|
|
6598
6701
|
const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""}`.trim();
|
|
6599
|
-
logger$
|
|
6702
|
+
logger$13.warn({
|
|
6600
6703
|
storyKey,
|
|
6601
6704
|
exitCode: dispatchResult.exitCode
|
|
6602
6705
|
}, "Test-expansion dispatch failed");
|
|
6603
6706
|
return defaultFallbackResult(errorMsg, tokenUsage);
|
|
6604
6707
|
}
|
|
6605
6708
|
if (dispatchResult.status === "timeout") {
|
|
6606
|
-
logger$
|
|
6709
|
+
logger$13.warn({ storyKey }, "Test-expansion dispatch timed out");
|
|
6607
6710
|
return defaultFallbackResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage);
|
|
6608
6711
|
}
|
|
6609
6712
|
if (dispatchResult.parsed === null) {
|
|
6610
6713
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
6611
|
-
logger$
|
|
6714
|
+
logger$13.warn({
|
|
6612
6715
|
storyKey,
|
|
6613
6716
|
details
|
|
6614
6717
|
}, "Test-expansion output has no parseable YAML");
|
|
@@ -6617,14 +6720,14 @@ async function runTestExpansion(deps, params) {
|
|
|
6617
6720
|
const parseResult = TestExpansionResultSchema.safeParse(dispatchResult.parsed);
|
|
6618
6721
|
if (!parseResult.success) {
|
|
6619
6722
|
const details = parseResult.error.message;
|
|
6620
|
-
logger$
|
|
6723
|
+
logger$13.warn({
|
|
6621
6724
|
storyKey,
|
|
6622
6725
|
details
|
|
6623
6726
|
}, "Test-expansion output failed schema validation");
|
|
6624
6727
|
return defaultFallbackResult(`schema_validation_failed: ${details}`, tokenUsage);
|
|
6625
6728
|
}
|
|
6626
6729
|
const parsed = parseResult.data;
|
|
6627
|
-
logger$
|
|
6730
|
+
logger$13.info({
|
|
6628
6731
|
storyKey,
|
|
6629
6732
|
expansion_priority: parsed.expansion_priority,
|
|
6630
6733
|
coverage_gaps: parsed.coverage_gaps.length,
|
|
@@ -6649,7 +6752,7 @@ function getArchConstraints(deps) {
|
|
|
6649
6752
|
if (constraints.length === 0) return "";
|
|
6650
6753
|
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
6651
6754
|
} catch (err) {
|
|
6652
|
-
logger$
|
|
6755
|
+
logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
6653
6756
|
return "";
|
|
6654
6757
|
}
|
|
6655
6758
|
}
|
|
@@ -7937,7 +8040,7 @@ function createDoltClient(options) {
|
|
|
7937
8040
|
|
|
7938
8041
|
//#endregion
|
|
7939
8042
|
//#region src/modules/state/index.ts
|
|
7940
|
-
const logger$
|
|
8043
|
+
const logger$12 = createLogger("state:factory");
|
|
7941
8044
|
/**
|
|
7942
8045
|
* Synchronously check whether Dolt is available and a Dolt repo exists at the
|
|
7943
8046
|
* canonical state path under `basePath`.
|
|
@@ -7984,14 +8087,14 @@ function createStateStore(config = {}) {
|
|
|
7984
8087
|
const repoPath = config.basePath ?? process.cwd();
|
|
7985
8088
|
const detection = detectDoltAvailableSync(repoPath);
|
|
7986
8089
|
if (detection.available) {
|
|
7987
|
-
logger$
|
|
8090
|
+
logger$12.debug(`Dolt detected, using DoltStateStore (state path: ${join$1(repoPath, ".substrate", "state")})`);
|
|
7988
8091
|
const client = new DoltClient({ repoPath });
|
|
7989
8092
|
return new DoltStateStore({
|
|
7990
8093
|
repoPath,
|
|
7991
8094
|
client
|
|
7992
8095
|
});
|
|
7993
8096
|
} else {
|
|
7994
|
-
logger$
|
|
8097
|
+
logger$12.debug(`Dolt not found, using FileStateStore (reason: ${detection.reason})`);
|
|
7995
8098
|
return new FileStateStore({ basePath: config.basePath });
|
|
7996
8099
|
}
|
|
7997
8100
|
}
|
|
@@ -8000,7 +8103,7 @@ function createStateStore(config = {}) {
|
|
|
8000
8103
|
|
|
8001
8104
|
//#endregion
|
|
8002
8105
|
//#region src/cli/commands/health.ts
|
|
8003
|
-
const logger$
|
|
8106
|
+
const logger$11 = createLogger("health-cmd");
|
|
8004
8107
|
/** Default stall threshold in seconds — also used by supervisor default */
|
|
8005
8108
|
const DEFAULT_STALL_THRESHOLD_SECONDS = 600;
|
|
8006
8109
|
/**
|
|
@@ -8155,6 +8258,8 @@ async function getAutoHealthData(options) {
|
|
|
8155
8258
|
const initialized = existsSync$1(doltDirPath);
|
|
8156
8259
|
let responsive = false;
|
|
8157
8260
|
let version;
|
|
8261
|
+
let branches;
|
|
8262
|
+
let currentBranch;
|
|
8158
8263
|
try {
|
|
8159
8264
|
await stateStore.getHistory(1);
|
|
8160
8265
|
responsive = true;
|
|
@@ -8166,13 +8271,30 @@ async function getAutoHealthData(options) {
|
|
|
8166
8271
|
const match = stdout.match(/dolt version (\S+)/);
|
|
8167
8272
|
if (match) version = match[1];
|
|
8168
8273
|
} catch {}
|
|
8274
|
+
try {
|
|
8275
|
+
const { execFile: ef } = await import("node:child_process");
|
|
8276
|
+
const { promisify: p } = await import("node:util");
|
|
8277
|
+
const execFileAsync = p(ef);
|
|
8278
|
+
const { stdout } = await execFileAsync("dolt", ["branch", "--list"], { cwd: repoPath });
|
|
8279
|
+
const lines = stdout.split("\n").filter((l) => l.trim().length > 0);
|
|
8280
|
+
branches = lines.map((l) => {
|
|
8281
|
+
const trimmed = l.trim();
|
|
8282
|
+
if (trimmed.startsWith("* ")) {
|
|
8283
|
+
currentBranch = trimmed.slice(2).trim();
|
|
8284
|
+
return currentBranch;
|
|
8285
|
+
}
|
|
8286
|
+
return trimmed;
|
|
8287
|
+
});
|
|
8288
|
+
} catch {}
|
|
8169
8289
|
} catch {
|
|
8170
8290
|
responsive = false;
|
|
8171
8291
|
}
|
|
8172
8292
|
doltStateInfo = {
|
|
8173
8293
|
initialized,
|
|
8174
8294
|
responsive,
|
|
8175
|
-
...version !== void 0 ? { version } : {}
|
|
8295
|
+
...version !== void 0 ? { version } : {},
|
|
8296
|
+
...branches !== void 0 ? { branches } : {},
|
|
8297
|
+
...currentBranch !== void 0 ? { current_branch: currentBranch } : {}
|
|
8176
8298
|
};
|
|
8177
8299
|
}
|
|
8178
8300
|
const NO_PIPELINE = {
|
|
@@ -8236,6 +8358,7 @@ async function getAutoHealthData(options) {
|
|
|
8236
8358
|
else if (processInfo.orchestrator_pid !== null && processInfo.child_pids.length > 0 && stalenessSeconds > DEFAULT_STALL_THRESHOLD_SECONDS) verdict = "HEALTHY";
|
|
8237
8359
|
else if (stalenessSeconds > DEFAULT_STALL_THRESHOLD_SECONDS) verdict = "STALLED";
|
|
8238
8360
|
else if (processInfo.orchestrator_pid !== null && processInfo.child_pids.length === 0 && active > 0) verdict = "STALLED";
|
|
8361
|
+
else if (processInfo.orchestrator_pid === null && active > 0) verdict = "STALLED";
|
|
8239
8362
|
else verdict = "HEALTHY";
|
|
8240
8363
|
else if (run.status === "completed" || run.status === "failed" || run.status === "stopped") verdict = "NO_PIPELINE_RUNNING";
|
|
8241
8364
|
const healthOutput = {
|
|
@@ -8302,7 +8425,7 @@ async function runHealthAction(options) {
|
|
|
8302
8425
|
const msg = err instanceof Error ? err.message : String(err);
|
|
8303
8426
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
|
|
8304
8427
|
else process.stderr.write(`Error: ${msg}\n`);
|
|
8305
|
-
logger$
|
|
8428
|
+
logger$11.error({ err }, "health action failed");
|
|
8306
8429
|
return 1;
|
|
8307
8430
|
}
|
|
8308
8431
|
}
|
|
@@ -8349,7 +8472,7 @@ function registerHealthCommand(program, _version = "0.0.0", projectRoot = proces
|
|
|
8349
8472
|
|
|
8350
8473
|
//#endregion
|
|
8351
8474
|
//#region src/modules/implementation-orchestrator/seed-methodology-context.ts
|
|
8352
|
-
const logger$
|
|
8475
|
+
const logger$10 = createLogger("implementation-orchestrator:seed");
|
|
8353
8476
|
/** Max chars for the architecture summary seeded into decisions */
|
|
8354
8477
|
const MAX_ARCH_CHARS = 6e3;
|
|
8355
8478
|
/** Max chars per epic shard (fallback when per-story extraction returns null) */
|
|
@@ -8383,12 +8506,12 @@ function seedMethodologyContext(db, projectRoot) {
|
|
|
8383
8506
|
const testCount = seedTestPatterns(db, projectRoot);
|
|
8384
8507
|
if (testCount === -1) result.skippedCategories.push("test-patterns");
|
|
8385
8508
|
else result.decisionsCreated += testCount;
|
|
8386
|
-
logger$
|
|
8509
|
+
logger$10.info({
|
|
8387
8510
|
decisionsCreated: result.decisionsCreated,
|
|
8388
8511
|
skippedCategories: result.skippedCategories
|
|
8389
8512
|
}, "Methodology context seeding complete");
|
|
8390
8513
|
} catch (err) {
|
|
8391
|
-
logger$
|
|
8514
|
+
logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
|
|
8392
8515
|
}
|
|
8393
8516
|
return result;
|
|
8394
8517
|
}
|
|
@@ -8432,7 +8555,7 @@ function seedArchitecture(db, projectRoot) {
|
|
|
8432
8555
|
});
|
|
8433
8556
|
count = 1;
|
|
8434
8557
|
}
|
|
8435
|
-
logger$
|
|
8558
|
+
logger$10.debug({ count }, "Seeded architecture decisions");
|
|
8436
8559
|
return count;
|
|
8437
8560
|
}
|
|
8438
8561
|
/**
|
|
@@ -8456,11 +8579,11 @@ function seedEpicShards(db, projectRoot) {
|
|
|
8456
8579
|
const storedHashDecision = implementationDecisions.find((d) => d.category === "epic-shard-hash" && d.key === "epics-file");
|
|
8457
8580
|
const storedHash = storedHashDecision?.value;
|
|
8458
8581
|
if (storedHash === currentHash) {
|
|
8459
|
-
logger$
|
|
8582
|
+
logger$10.debug({ hash: currentHash }, "Epic shards up-to-date (hash unchanged) — skipping re-seed");
|
|
8460
8583
|
return -1;
|
|
8461
8584
|
}
|
|
8462
8585
|
if (implementationDecisions.some((d) => d.category === "epic-shard")) {
|
|
8463
|
-
logger$
|
|
8586
|
+
logger$10.debug({
|
|
8464
8587
|
storedHash,
|
|
8465
8588
|
currentHash
|
|
8466
8589
|
}, "Epics file changed — deleting stale epic-shard decisions");
|
|
@@ -8488,7 +8611,7 @@ function seedEpicShards(db, projectRoot) {
|
|
|
8488
8611
|
value: currentHash,
|
|
8489
8612
|
rationale: "SHA-256 hash of epics file content for change detection"
|
|
8490
8613
|
});
|
|
8491
|
-
logger$
|
|
8614
|
+
logger$10.debug({
|
|
8492
8615
|
count,
|
|
8493
8616
|
hash: currentHash
|
|
8494
8617
|
}, "Seeded epic shard decisions");
|
|
@@ -8512,7 +8635,7 @@ function seedTestPatterns(db, projectRoot) {
|
|
|
8512
8635
|
value: patterns.slice(0, MAX_TEST_PATTERNS_CHARS),
|
|
8513
8636
|
rationale: "Detected from project configuration at orchestrator startup"
|
|
8514
8637
|
});
|
|
8515
|
-
logger$
|
|
8638
|
+
logger$10.debug("Seeded test patterns decision");
|
|
8516
8639
|
return 1;
|
|
8517
8640
|
}
|
|
8518
8641
|
/**
|
|
@@ -8685,7 +8808,7 @@ function findArtifact(projectRoot, candidates) {
|
|
|
8685
8808
|
|
|
8686
8809
|
//#endregion
|
|
8687
8810
|
//#region src/modules/agent-dispatch/interface-change-detector.ts
|
|
8688
|
-
const logger$
|
|
8811
|
+
const logger$9 = createLogger("interface-change-detector");
|
|
8689
8812
|
/**
|
|
8690
8813
|
* Extract exported interface and type names from TypeScript source content.
|
|
8691
8814
|
*
|
|
@@ -8732,7 +8855,7 @@ function detectInterfaceChanges(options) {
|
|
|
8732
8855
|
for (const name of names) allNames.add(name);
|
|
8733
8856
|
sourceDirs.push(dirname$1(relPath));
|
|
8734
8857
|
} catch {
|
|
8735
|
-
logger$
|
|
8858
|
+
logger$9.debug({
|
|
8736
8859
|
absPath,
|
|
8737
8860
|
storyKey
|
|
8738
8861
|
}, "Could not read modified file for interface extraction");
|
|
@@ -8773,7 +8896,7 @@ function detectInterfaceChanges(options) {
|
|
|
8773
8896
|
potentiallyAffectedTests: Array.from(affectedTests)
|
|
8774
8897
|
};
|
|
8775
8898
|
} catch (err) {
|
|
8776
|
-
logger$
|
|
8899
|
+
logger$9.warn({
|
|
8777
8900
|
err,
|
|
8778
8901
|
storyKey: options.storyKey
|
|
8779
8902
|
}, "Interface change detection failed — skipping");
|
|
@@ -9064,7 +9187,7 @@ const RecommendationSchema = z.object({
|
|
|
9064
9187
|
|
|
9065
9188
|
//#endregion
|
|
9066
9189
|
//#region src/modules/telemetry/persistence.ts
|
|
9067
|
-
const logger$
|
|
9190
|
+
const logger$8 = createLogger("telemetry:persistence");
|
|
9068
9191
|
/**
|
|
9069
9192
|
* Concrete SQLite-backed telemetry persistence.
|
|
9070
9193
|
*
|
|
@@ -9298,7 +9421,7 @@ var TelemetryPersistence = class {
|
|
|
9298
9421
|
for (const turn of rows) this._insertTurnAnalysis.run(storyKey, turn.spanId, turn.turnNumber, turn.name, turn.timestamp, turn.source, turn.model ?? null, turn.inputTokens, turn.outputTokens, turn.cacheReadTokens, turn.freshTokens, turn.cacheHitRate, turn.costUsd, turn.durationMs, turn.contextSize, turn.contextDelta, turn.toolName ?? null, turn.isContextSpike ? 1 : 0, JSON.stringify(turn.childSpans));
|
|
9299
9422
|
});
|
|
9300
9423
|
insertAll(turns);
|
|
9301
|
-
logger$
|
|
9424
|
+
logger$8.debug({
|
|
9302
9425
|
storyKey,
|
|
9303
9426
|
count: turns.length
|
|
9304
9427
|
}, "Stored turn analysis");
|
|
@@ -9332,7 +9455,7 @@ var TelemetryPersistence = class {
|
|
|
9332
9455
|
}
|
|
9333
9456
|
async storeEfficiencyScore(score) {
|
|
9334
9457
|
this._insertEfficiencyScore.run(score.storyKey, score.timestamp, score.compositeScore, score.cacheHitSubScore, score.ioRatioSubScore, score.contextManagementSubScore, score.avgCacheHitRate, score.avgIoRatio, score.contextSpikeCount, score.totalTurns, JSON.stringify(score.perModelBreakdown), JSON.stringify(score.perSourceBreakdown));
|
|
9335
|
-
logger$
|
|
9458
|
+
logger$8.debug({
|
|
9336
9459
|
storyKey: score.storyKey,
|
|
9337
9460
|
compositeScore: score.compositeScore
|
|
9338
9461
|
}, "Stored efficiency score");
|
|
@@ -9391,7 +9514,7 @@ var TelemetryPersistence = class {
|
|
|
9391
9514
|
for (const rec of rows) this._insertRecommendation.run(rec.id, rec.storyKey, rec.sprintId ?? null, rec.ruleId, rec.severity, rec.title, rec.description, rec.potentialSavingsTokens ?? null, rec.potentialSavingsUsd ?? null, rec.actionTarget ?? null, rec.generatedAt);
|
|
9392
9515
|
});
|
|
9393
9516
|
insertAll(recs);
|
|
9394
|
-
logger$
|
|
9517
|
+
logger$8.debug({
|
|
9395
9518
|
storyKey,
|
|
9396
9519
|
count: recs.length
|
|
9397
9520
|
}, "Saved recommendations");
|
|
@@ -9455,7 +9578,7 @@ var TelemetryPersistence = class {
|
|
|
9455
9578
|
for (const stat$2 of rows) this._insertCategoryStats.run(storyKey, stat$2.category, stat$2.totalTokens, stat$2.percentage, stat$2.eventCount, stat$2.avgTokensPerEvent, stat$2.trend);
|
|
9456
9579
|
});
|
|
9457
9580
|
insertAll(stats);
|
|
9458
|
-
logger$
|
|
9581
|
+
logger$8.debug({
|
|
9459
9582
|
storyKey,
|
|
9460
9583
|
count: stats.length
|
|
9461
9584
|
}, "Stored category stats");
|
|
@@ -9491,7 +9614,7 @@ var TelemetryPersistence = class {
|
|
|
9491
9614
|
for (const consumer of rows) this._insertConsumerStats.run(storyKey, consumer.consumerKey, consumer.category, consumer.totalTokens, consumer.percentage, consumer.eventCount, JSON.stringify(consumer.topInvocations));
|
|
9492
9615
|
});
|
|
9493
9616
|
insertAll(consumers);
|
|
9494
|
-
logger$
|
|
9617
|
+
logger$8.debug({
|
|
9495
9618
|
storyKey,
|
|
9496
9619
|
count: consumers.length
|
|
9497
9620
|
}, "Stored consumer stats");
|
|
@@ -9547,9 +9670,120 @@ var AppError = class extends Error {
|
|
|
9547
9670
|
}
|
|
9548
9671
|
};
|
|
9549
9672
|
|
|
9673
|
+
//#endregion
|
|
9674
|
+
//#region src/modules/telemetry/batch-buffer.ts
|
|
9675
|
+
var BatchBuffer = class extends EventEmitter {
|
|
9676
|
+
_items = [];
|
|
9677
|
+
_timer = null;
|
|
9678
|
+
_batchSize;
|
|
9679
|
+
_flushIntervalMs;
|
|
9680
|
+
constructor(options = {}) {
|
|
9681
|
+
super();
|
|
9682
|
+
this._batchSize = options.batchSize ?? 100;
|
|
9683
|
+
this._flushIntervalMs = options.flushIntervalMs ?? 5e3;
|
|
9684
|
+
}
|
|
9685
|
+
/**
|
|
9686
|
+
* Add an item to the buffer.
|
|
9687
|
+
* Triggers a flush immediately when the buffer reaches `batchSize`.
|
|
9688
|
+
*/
|
|
9689
|
+
push(item) {
|
|
9690
|
+
this._items.push(item);
|
|
9691
|
+
if (this._items.length >= this._batchSize) this._flush();
|
|
9692
|
+
}
|
|
9693
|
+
/**
|
|
9694
|
+
* Start the interval timer that flushes items on a schedule.
|
|
9695
|
+
* Safe to call multiple times — subsequent calls are ignored.
|
|
9696
|
+
*/
|
|
9697
|
+
start() {
|
|
9698
|
+
if (this._timer !== null) return;
|
|
9699
|
+
this._timer = setInterval(() => this._flush(), this._flushIntervalMs);
|
|
9700
|
+
if (typeof this._timer.unref === "function") this._timer.unref();
|
|
9701
|
+
}
|
|
9702
|
+
/**
|
|
9703
|
+
* Stop the interval timer and flush any remaining items.
|
|
9704
|
+
* Safe to call multiple times — subsequent calls are ignored.
|
|
9705
|
+
*/
|
|
9706
|
+
stop() {
|
|
9707
|
+
if (this._timer !== null) {
|
|
9708
|
+
clearInterval(this._timer);
|
|
9709
|
+
this._timer = null;
|
|
9710
|
+
}
|
|
9711
|
+
this._flush();
|
|
9712
|
+
}
|
|
9713
|
+
_flush() {
|
|
9714
|
+
if (this._items.length === 0) return;
|
|
9715
|
+
const items = this._items.splice(0);
|
|
9716
|
+
this.emit("flush", items);
|
|
9717
|
+
}
|
|
9718
|
+
};
|
|
9719
|
+
|
|
9720
|
+
//#endregion
|
|
9721
|
+
//#region src/modules/telemetry/source-detector.ts
|
|
9722
|
+
const SOURCE_DETECTION_TABLE = [
|
|
9723
|
+
{
|
|
9724
|
+
pattern: /claude[\s-]?code/i,
|
|
9725
|
+
source: "claude-code"
|
|
9726
|
+
},
|
|
9727
|
+
{
|
|
9728
|
+
pattern: /claude/i,
|
|
9729
|
+
source: "claude-code"
|
|
9730
|
+
},
|
|
9731
|
+
{
|
|
9732
|
+
pattern: /codex/i,
|
|
9733
|
+
source: "codex"
|
|
9734
|
+
},
|
|
9735
|
+
{
|
|
9736
|
+
pattern: /openai/i,
|
|
9737
|
+
source: "codex"
|
|
9738
|
+
},
|
|
9739
|
+
{
|
|
9740
|
+
pattern: /ollama|llama|local/i,
|
|
9741
|
+
source: "local-llm"
|
|
9742
|
+
}
|
|
9743
|
+
];
|
|
9744
|
+
/**
|
|
9745
|
+
* Extract string values for service.name and telemetry.sdk.name from
|
|
9746
|
+
* raw OTLP resource attributes, supporting both resourceSpans and resourceLogs.
|
|
9747
|
+
*/
|
|
9748
|
+
function extractAttributes(body) {
|
|
9749
|
+
if (!body || typeof body !== "object") return [];
|
|
9750
|
+
const values = [];
|
|
9751
|
+
const keysOfInterest = ["service.name", "telemetry.sdk.name"];
|
|
9752
|
+
const extractFromResources = (resources) => {
|
|
9753
|
+
if (!Array.isArray(resources)) return;
|
|
9754
|
+
for (const entry of resources) {
|
|
9755
|
+
if (!entry?.resource?.attributes) continue;
|
|
9756
|
+
for (const attr of entry.resource.attributes) {
|
|
9757
|
+
if (!attr?.key || !keysOfInterest.includes(attr.key)) continue;
|
|
9758
|
+
const v = attr.value;
|
|
9759
|
+
if (!v) continue;
|
|
9760
|
+
const str = v.stringValue ?? (v.intValue !== void 0 ? String(v.intValue) : void 0) ?? (v.doubleValue !== void 0 ? String(v.doubleValue) : void 0);
|
|
9761
|
+
if (str !== void 0) values.push(str);
|
|
9762
|
+
}
|
|
9763
|
+
}
|
|
9764
|
+
};
|
|
9765
|
+
const payload = body;
|
|
9766
|
+
extractFromResources(payload.resourceSpans);
|
|
9767
|
+
extractFromResources(payload.resourceLogs);
|
|
9768
|
+
return values;
|
|
9769
|
+
}
|
|
9770
|
+
/**
|
|
9771
|
+
* Detect the OTLP source from a raw payload.
|
|
9772
|
+
*
|
|
9773
|
+
* Inspects `service.name` and `telemetry.sdk.name` from resource attributes
|
|
9774
|
+
* in both `resourceSpans` and `resourceLogs` envelope formats.
|
|
9775
|
+
*
|
|
9776
|
+
* Returns 'unknown' when no match is found or input is malformed.
|
|
9777
|
+
*/
|
|
9778
|
+
function detectSource(body) {
|
|
9779
|
+
const values = extractAttributes(body);
|
|
9780
|
+
for (const value of values) for (const { pattern, source } of SOURCE_DETECTION_TABLE) if (pattern.test(value)) return source;
|
|
9781
|
+
return "unknown";
|
|
9782
|
+
}
|
|
9783
|
+
|
|
9550
9784
|
//#endregion
|
|
9551
9785
|
//#region src/modules/telemetry/ingestion-server.ts
|
|
9552
|
-
const logger$
|
|
9786
|
+
const logger$7 = createLogger("telemetry:ingestion-server");
|
|
9553
9787
|
/**
|
|
9554
9788
|
* Error thrown by IngestionServer for server lifecycle violations.
|
|
9555
9789
|
* Extends AppError to align with the project-standard error-handling pattern
|
|
@@ -9569,8 +9803,41 @@ var TelemetryError = class extends AppError {
|
|
|
9569
9803
|
var IngestionServer = class {
|
|
9570
9804
|
_server = null;
|
|
9571
9805
|
_port;
|
|
9806
|
+
_batchSize;
|
|
9807
|
+
_flushIntervalMs;
|
|
9808
|
+
_buffer;
|
|
9809
|
+
_pendingBatches = new Set();
|
|
9572
9810
|
constructor(options = {}) {
|
|
9573
9811
|
this._port = options.port ?? 4318;
|
|
9812
|
+
this._batchSize = options.batchSize ?? 100;
|
|
9813
|
+
this._flushIntervalMs = options.flushIntervalMs ?? 5e3;
|
|
9814
|
+
if (options.pipeline !== void 0) this._initPipeline(options.pipeline);
|
|
9815
|
+
}
|
|
9816
|
+
/**
|
|
9817
|
+
* Wire a TelemetryPipeline before the server is started.
|
|
9818
|
+
* Must be called before start() — has no effect if called after start().
|
|
9819
|
+
*/
|
|
9820
|
+
setPipeline(pipeline) {
|
|
9821
|
+
if (this._server !== null) {
|
|
9822
|
+
logger$7.warn("IngestionServer.setPipeline() called after start() — ignoring");
|
|
9823
|
+
return;
|
|
9824
|
+
}
|
|
9825
|
+
this._initPipeline(pipeline);
|
|
9826
|
+
}
|
|
9827
|
+
_initPipeline(pipeline) {
|
|
9828
|
+
this._buffer = new BatchBuffer({
|
|
9829
|
+
batchSize: this._batchSize,
|
|
9830
|
+
flushIntervalMs: this._flushIntervalMs
|
|
9831
|
+
});
|
|
9832
|
+
this._buffer.on("flush", (items) => {
|
|
9833
|
+
const pending = pipeline.processBatch(items).catch((err) => {
|
|
9834
|
+
logger$7.warn({ err }, "TelemetryPipeline.processBatch failed (batch flush)");
|
|
9835
|
+
});
|
|
9836
|
+
this._pendingBatches.add(pending);
|
|
9837
|
+
pending.then(() => {
|
|
9838
|
+
this._pendingBatches.delete(pending);
|
|
9839
|
+
});
|
|
9840
|
+
});
|
|
9574
9841
|
}
|
|
9575
9842
|
/**
|
|
9576
9843
|
* Start the HTTP ingestion server.
|
|
@@ -9578,36 +9845,40 @@ var IngestionServer = class {
|
|
|
9578
9845
|
*/
|
|
9579
9846
|
async start() {
|
|
9580
9847
|
if (this._server !== null) {
|
|
9581
|
-
logger$
|
|
9848
|
+
logger$7.warn("IngestionServer.start() called while already started — ignoring");
|
|
9582
9849
|
return;
|
|
9583
9850
|
}
|
|
9584
9851
|
return new Promise((resolve$2, reject) => {
|
|
9585
9852
|
const server = createServer(this._handleRequest.bind(this));
|
|
9586
9853
|
server.on("error", (err) => {
|
|
9587
|
-
logger$
|
|
9854
|
+
logger$7.error({ err }, "IngestionServer failed to start");
|
|
9588
9855
|
reject(err);
|
|
9589
9856
|
});
|
|
9590
9857
|
server.listen(this._port, "127.0.0.1", () => {
|
|
9591
9858
|
this._server = server;
|
|
9592
9859
|
const addr = server.address();
|
|
9593
|
-
logger$
|
|
9860
|
+
logger$7.info({ port: addr.port }, "IngestionServer listening");
|
|
9861
|
+
this._buffer?.start();
|
|
9594
9862
|
resolve$2();
|
|
9595
9863
|
});
|
|
9596
9864
|
});
|
|
9597
9865
|
}
|
|
9598
9866
|
/**
|
|
9599
9867
|
* Stop the HTTP ingestion server.
|
|
9868
|
+
* Drains the batch buffer before closing the HTTP server.
|
|
9600
9869
|
* Resolves when the server has closed all connections.
|
|
9601
9870
|
*/
|
|
9602
9871
|
async stop() {
|
|
9603
9872
|
const server = this._server;
|
|
9604
9873
|
if (server === null) return;
|
|
9605
9874
|
this._server = null;
|
|
9875
|
+
this._buffer?.stop();
|
|
9876
|
+
if (this._pendingBatches.size > 0) await Promise.all([...this._pendingBatches]);
|
|
9606
9877
|
return new Promise((resolve$2, reject) => {
|
|
9607
9878
|
server.close((err) => {
|
|
9608
9879
|
if (err !== void 0 && err !== null) reject(err);
|
|
9609
9880
|
else {
|
|
9610
|
-
logger$
|
|
9881
|
+
logger$7.info("IngestionServer stopped");
|
|
9611
9882
|
resolve$2();
|
|
9612
9883
|
}
|
|
9613
9884
|
});
|
|
@@ -9630,22 +9901,42 @@ var IngestionServer = class {
|
|
|
9630
9901
|
OTEL_EXPORTER_OTLP_ENDPOINT: endpoint
|
|
9631
9902
|
};
|
|
9632
9903
|
}
|
|
9633
|
-
_handleRequest(
|
|
9904
|
+
_handleRequest(req, res) {
|
|
9905
|
+
if (req.url === "/health" && req.method === "GET") {
|
|
9906
|
+
res.writeHead(200, { "Content-Type": "application/json" });
|
|
9907
|
+
res.end(JSON.stringify({ status: "ok" }));
|
|
9908
|
+
return;
|
|
9909
|
+
}
|
|
9634
9910
|
const chunks = [];
|
|
9635
|
-
|
|
9911
|
+
req.on("data", (chunk) => {
|
|
9636
9912
|
chunks.push(chunk);
|
|
9637
9913
|
});
|
|
9638
|
-
|
|
9639
|
-
const
|
|
9640
|
-
logger$
|
|
9641
|
-
url:
|
|
9642
|
-
bodyLength:
|
|
9914
|
+
req.on("end", () => {
|
|
9915
|
+
const bodyStr = Buffer.concat(chunks).toString("utf-8");
|
|
9916
|
+
logger$7.trace({
|
|
9917
|
+
url: req.url,
|
|
9918
|
+
bodyLength: bodyStr.length
|
|
9643
9919
|
}, "OTLP payload received");
|
|
9920
|
+
if (this._buffer !== void 0) try {
|
|
9921
|
+
const body = JSON.parse(bodyStr);
|
|
9922
|
+
const source = detectSource(body);
|
|
9923
|
+
const payload = {
|
|
9924
|
+
body,
|
|
9925
|
+
source,
|
|
9926
|
+
receivedAt: Date.now()
|
|
9927
|
+
};
|
|
9928
|
+
this._buffer.push(payload);
|
|
9929
|
+
} catch (err) {
|
|
9930
|
+
logger$7.warn({
|
|
9931
|
+
err,
|
|
9932
|
+
url: req.url
|
|
9933
|
+
}, "Failed to parse OTLP payload JSON — discarding");
|
|
9934
|
+
}
|
|
9644
9935
|
res.writeHead(200, { "Content-Type": "application/json" });
|
|
9645
9936
|
res.end("{}");
|
|
9646
9937
|
});
|
|
9647
|
-
|
|
9648
|
-
logger$
|
|
9938
|
+
req.on("error", (err) => {
|
|
9939
|
+
logger$7.warn({ err }, "Error reading OTLP request body");
|
|
9649
9940
|
res.writeHead(400);
|
|
9650
9941
|
res.end("Bad Request");
|
|
9651
9942
|
});
|
|
@@ -9656,8 +9947,8 @@ var IngestionServer = class {
|
|
|
9656
9947
|
//#region src/modules/telemetry/efficiency-scorer.ts
|
|
9657
9948
|
var EfficiencyScorer = class {
|
|
9658
9949
|
_logger;
|
|
9659
|
-
constructor(logger$
|
|
9660
|
-
this._logger = logger$
|
|
9950
|
+
constructor(logger$27) {
|
|
9951
|
+
this._logger = logger$27;
|
|
9661
9952
|
}
|
|
9662
9953
|
/**
|
|
9663
9954
|
* Compute an efficiency score for a story given its turn analyses.
|
|
@@ -9892,8 +10183,8 @@ const ALL_CATEGORIES = [
|
|
|
9892
10183
|
];
|
|
9893
10184
|
var Categorizer = class {
|
|
9894
10185
|
_logger;
|
|
9895
|
-
constructor(logger$
|
|
9896
|
-
this._logger = logger$
|
|
10186
|
+
constructor(logger$27) {
|
|
10187
|
+
this._logger = logger$27;
|
|
9897
10188
|
}
|
|
9898
10189
|
/**
|
|
9899
10190
|
* Classify an operation into a SemanticCategory using three-tier logic.
|
|
@@ -10019,9 +10310,9 @@ function extractToolNameFromSpan(span) {
|
|
|
10019
10310
|
var ConsumerAnalyzer = class {
|
|
10020
10311
|
_categorizer;
|
|
10021
10312
|
_logger;
|
|
10022
|
-
constructor(categorizer, logger$
|
|
10313
|
+
constructor(categorizer, logger$27) {
|
|
10023
10314
|
this._categorizer = categorizer;
|
|
10024
|
-
this._logger = logger$
|
|
10315
|
+
this._logger = logger$27;
|
|
10025
10316
|
}
|
|
10026
10317
|
/**
|
|
10027
10318
|
* Group spans by consumer key, rank by totalTokens descending, and return
|
|
@@ -10095,24 +10386,1092 @@ var ConsumerAnalyzer = class {
|
|
|
10095
10386
|
};
|
|
10096
10387
|
|
|
10097
10388
|
//#endregion
|
|
10098
|
-
//#region src/modules/
|
|
10099
|
-
|
|
10100
|
-
|
|
10101
|
-
|
|
10102
|
-
|
|
10103
|
-
}
|
|
10104
|
-
|
|
10105
|
-
|
|
10106
|
-
|
|
10107
|
-
|
|
10108
|
-
|
|
10109
|
-
|
|
10110
|
-
|
|
10111
|
-
|
|
10112
|
-
|
|
10113
|
-
|
|
10114
|
-
|
|
10115
|
-
|
|
10389
|
+
//#region src/modules/telemetry/recommender.ts
|
|
10390
|
+
var Recommender = class {
|
|
10391
|
+
_logger;
|
|
10392
|
+
constructor(logger$27) {
|
|
10393
|
+
this._logger = logger$27;
|
|
10394
|
+
}
|
|
10395
|
+
/**
|
|
10396
|
+
* Run all 8 rules against the given context and return sorted recommendations.
|
|
10397
|
+
* Output is sorted: critical → warning → info, then by potentialSavingsTokens descending.
|
|
10398
|
+
* No Date.now() or Math.random() is called — generatedAt comes from context.
|
|
10399
|
+
*/
|
|
10400
|
+
analyze(context) {
|
|
10401
|
+
const allRecs = [
|
|
10402
|
+
...this._runBiggestConsumers(context),
|
|
10403
|
+
...this._runExpensiveBash(context),
|
|
10404
|
+
...this._runLargeFileReads(context),
|
|
10405
|
+
...this._runRepeatedToolCalls(context),
|
|
10406
|
+
...this._runContextGrowthSpikes(context),
|
|
10407
|
+
...this._runGrowingCategories(context),
|
|
10408
|
+
...this._runCacheEfficiency(context),
|
|
10409
|
+
...this._runModelComparison(context)
|
|
10410
|
+
];
|
|
10411
|
+
const severityOrder = {
|
|
10412
|
+
critical: 0,
|
|
10413
|
+
warning: 1,
|
|
10414
|
+
info: 2
|
|
10415
|
+
};
|
|
10416
|
+
return allRecs.sort((a, b) => {
|
|
10417
|
+
const sA = severityOrder[a.severity];
|
|
10418
|
+
const sB = severityOrder[b.severity];
|
|
10419
|
+
if (sA !== sB) return sA - sB;
|
|
10420
|
+
const savA = a.potentialSavingsTokens ?? 0;
|
|
10421
|
+
const savB = b.potentialSavingsTokens ?? 0;
|
|
10422
|
+
return savB - savA;
|
|
10423
|
+
});
|
|
10424
|
+
}
|
|
10425
|
+
/**
|
|
10426
|
+
* Generate a 16-char hex ID from the sha256 of `ruleId:storyKey:actionTarget:index`.
|
|
10427
|
+
*/
|
|
10428
|
+
_makeId(ruleId, storyKey, actionTarget, index) {
|
|
10429
|
+
return createHash("sha256").update(`${ruleId}:${storyKey}:${actionTarget}:${index}`).digest("hex").slice(0, 16);
|
|
10430
|
+
}
|
|
10431
|
+
/**
|
|
10432
|
+
* Map a token percentage to a severity level.
|
|
10433
|
+
* >25% → critical, >10% → warning, ≤10% → info.
|
|
10434
|
+
*/
|
|
10435
|
+
_assignSeverity(tokenPercent) {
|
|
10436
|
+
if (tokenPercent > 25) return "critical";
|
|
10437
|
+
if (tokenPercent > 10) return "warning";
|
|
10438
|
+
return "info";
|
|
10439
|
+
}
|
|
10440
|
+
/**
|
|
10441
|
+
* Compute total tokens across all spans. Guards against empty arrays.
|
|
10442
|
+
*/
|
|
10443
|
+
_totalSpanTokens(spans) {
|
|
10444
|
+
return spans.reduce((sum, s) => sum + s.inputTokens + s.outputTokens, 0);
|
|
10445
|
+
}
|
|
10446
|
+
/**
|
|
10447
|
+
* Identify top 3 token consumers (by inputTokens + outputTokens) where pct >5%.
|
|
10448
|
+
* Severity based on the consumer's percentage of total tokens.
|
|
10449
|
+
*/
|
|
10450
|
+
_runBiggestConsumers(ctx) {
|
|
10451
|
+
const { consumers, storyKey, sprintId, generatedAt } = ctx;
|
|
10452
|
+
if (consumers.length === 0) return [];
|
|
10453
|
+
const grandTotal = consumers.reduce((sum, c) => sum + c.totalTokens, 0);
|
|
10454
|
+
if (grandTotal === 0) return [];
|
|
10455
|
+
const sorted = [...consumers].sort((a, b) => b.totalTokens - a.totalTokens);
|
|
10456
|
+
const top3 = sorted.slice(0, 3).filter((c) => {
|
|
10457
|
+
return c.percentage > 5;
|
|
10458
|
+
});
|
|
10459
|
+
return top3.map((consumer, index) => {
|
|
10460
|
+
const pct = consumer.percentage;
|
|
10461
|
+
const severity = this._assignSeverity(pct);
|
|
10462
|
+
const actionTarget = consumer.consumerKey;
|
|
10463
|
+
const id = this._makeId("biggest_consumers", storyKey, actionTarget, index);
|
|
10464
|
+
return {
|
|
10465
|
+
id,
|
|
10466
|
+
storyKey,
|
|
10467
|
+
sprintId,
|
|
10468
|
+
ruleId: "biggest_consumers",
|
|
10469
|
+
severity,
|
|
10470
|
+
title: `High token consumer: ${consumer.consumerKey}`,
|
|
10471
|
+
description: `"${consumer.consumerKey}" consumed ${consumer.totalTokens.toLocaleString()} tokens (${pct.toFixed(1)}% of total). Consider reducing the frequency or size of this operation.`,
|
|
10472
|
+
potentialSavingsTokens: Math.round(consumer.totalTokens * .3),
|
|
10473
|
+
actionTarget,
|
|
10474
|
+
generatedAt
|
|
10475
|
+
};
|
|
10476
|
+
});
|
|
10477
|
+
}
|
|
10478
|
+
/**
|
|
10479
|
+
* Flag file-read spans with inputTokens > 3000.
|
|
10480
|
+
* Suggest using line ranges to reduce token count.
|
|
10481
|
+
*/
|
|
10482
|
+
_runLargeFileReads(ctx) {
|
|
10483
|
+
const { allSpans, storyKey, sprintId, generatedAt } = ctx;
|
|
10484
|
+
if (allSpans.length === 0) return [];
|
|
10485
|
+
const grandTotal = this._totalSpanTokens(allSpans);
|
|
10486
|
+
const largReads = allSpans.filter((s) => s.operationName === "file_read" && s.inputTokens > 3e3);
|
|
10487
|
+
return largReads.map((span, index) => {
|
|
10488
|
+
const pct = grandTotal > 0 ? (span.inputTokens + span.outputTokens) / grandTotal * 100 : 0;
|
|
10489
|
+
const severity = this._assignSeverity(pct);
|
|
10490
|
+
const actionTarget = span.attributes?.["file.path"] ?? span.name;
|
|
10491
|
+
const id = this._makeId("large_file_reads", storyKey, actionTarget, index);
|
|
10492
|
+
return {
|
|
10493
|
+
id,
|
|
10494
|
+
storyKey,
|
|
10495
|
+
sprintId,
|
|
10496
|
+
ruleId: "large_file_reads",
|
|
10497
|
+
severity,
|
|
10498
|
+
title: `Large file read: ${actionTarget}`,
|
|
10499
|
+
description: `File read of "${actionTarget}" consumed ${span.inputTokens.toLocaleString()} input tokens. Consider specifying line ranges (e.g., offset/limit) to reduce context size.`,
|
|
10500
|
+
potentialSavingsTokens: Math.round(span.inputTokens * .5),
|
|
10501
|
+
actionTarget,
|
|
10502
|
+
generatedAt
|
|
10503
|
+
};
|
|
10504
|
+
});
|
|
10505
|
+
}
|
|
10506
|
+
/**
|
|
10507
|
+
* Flag bash/execute_command spans with outputTokens > 3000.
|
|
10508
|
+
* Suggest filtering or truncating command output.
|
|
10509
|
+
*/
|
|
10510
|
+
_runExpensiveBash(ctx) {
|
|
10511
|
+
const { allSpans, storyKey, sprintId, generatedAt } = ctx;
|
|
10512
|
+
if (allSpans.length === 0) return [];
|
|
10513
|
+
const grandTotal = this._totalSpanTokens(allSpans);
|
|
10514
|
+
const expensiveBash = allSpans.filter((s) => (s.attributes?.["tool.name"] === "bash" || s.attributes?.["tool.name"] === "execute_command" || s.name === "bash" || s.name === "execute_command" || s.operationName !== void 0 && (s.operationName === "bash" || s.operationName === "execute_command")) && s.outputTokens > 3e3);
|
|
10515
|
+
return expensiveBash.map((span, index) => {
|
|
10516
|
+
const pct = grandTotal > 0 ? (span.inputTokens + span.outputTokens) / grandTotal * 100 : 0;
|
|
10517
|
+
const severity = this._assignSeverity(pct);
|
|
10518
|
+
const actionTarget = span.attributes?.["bash.command"] ?? span.name ?? "bash";
|
|
10519
|
+
const id = this._makeId("expensive_bash", storyKey, actionTarget, index);
|
|
10520
|
+
return {
|
|
10521
|
+
id,
|
|
10522
|
+
storyKey,
|
|
10523
|
+
sprintId,
|
|
10524
|
+
ruleId: "expensive_bash",
|
|
10525
|
+
severity,
|
|
10526
|
+
title: `Expensive bash output: ${actionTarget}`,
|
|
10527
|
+
description: `Bash command "${actionTarget}" produced ${span.outputTokens.toLocaleString()} output tokens. Consider filtering output (e.g., piping to head/grep) to reduce token consumption.`,
|
|
10528
|
+
potentialSavingsTokens: Math.round(span.outputTokens * .5),
|
|
10529
|
+
actionTarget,
|
|
10530
|
+
generatedAt
|
|
10531
|
+
};
|
|
10532
|
+
});
|
|
10533
|
+
}
|
|
10534
|
+
/**
|
|
10535
|
+
* Detect tool calls with the same actionTarget appearing more than once.
|
|
10536
|
+
* Suggests caching the result to avoid redundant token consumption.
|
|
10537
|
+
*/
|
|
10538
|
+
_runRepeatedToolCalls(ctx) {
|
|
10539
|
+
const { turns, storyKey, sprintId, generatedAt, allSpans } = ctx;
|
|
10540
|
+
const allChildSpans = [];
|
|
10541
|
+
for (const turn of turns) for (const child of turn.childSpans) allChildSpans.push({
|
|
10542
|
+
toolName: child.toolName,
|
|
10543
|
+
name: child.name
|
|
10544
|
+
});
|
|
10545
|
+
if (allChildSpans.length === 0 && allSpans.length > 0) for (const span of allSpans) allChildSpans.push({
|
|
10546
|
+
toolName: span.attributes?.["tool.name"],
|
|
10547
|
+
name: span.name,
|
|
10548
|
+
actionTarget: span.attributes?.["file.path"]
|
|
10549
|
+
});
|
|
10550
|
+
const groups = new Map();
|
|
10551
|
+
for (const span of allChildSpans) {
|
|
10552
|
+
const key = `${span.toolName ?? ""}:${span.actionTarget ?? span.name}`;
|
|
10553
|
+
groups.set(key, (groups.get(key) ?? 0) + 1);
|
|
10554
|
+
}
|
|
10555
|
+
const recommendations = [];
|
|
10556
|
+
let index = 0;
|
|
10557
|
+
for (const [key, count] of groups) if (count > 1) {
|
|
10558
|
+
const id = this._makeId("repeated_tool_calls", storyKey, key, index);
|
|
10559
|
+
recommendations.push({
|
|
10560
|
+
id,
|
|
10561
|
+
storyKey,
|
|
10562
|
+
sprintId,
|
|
10563
|
+
ruleId: "repeated_tool_calls",
|
|
10564
|
+
severity: "warning",
|
|
10565
|
+
title: `Repeated tool call: ${key}`,
|
|
10566
|
+
description: `"${key}" was invoked ${count} times. Consider caching the result after the first call to avoid redundant token consumption.`,
|
|
10567
|
+
actionTarget: key,
|
|
10568
|
+
generatedAt
|
|
10569
|
+
});
|
|
10570
|
+
index++;
|
|
10571
|
+
}
|
|
10572
|
+
return recommendations;
|
|
10573
|
+
}
|
|
10574
|
+
/**
|
|
10575
|
+
* Flag turns where isContextSpike is true.
|
|
10576
|
+
* Severity is always at least 'warning'.
|
|
10577
|
+
*/
|
|
10578
|
+
_runContextGrowthSpikes(ctx) {
|
|
10579
|
+
const { turns, storyKey, sprintId, generatedAt, allSpans } = ctx;
|
|
10580
|
+
if (turns.length === 0) return [];
|
|
10581
|
+
const grandTotal = this._totalSpanTokens(allSpans);
|
|
10582
|
+
const spiketurns = turns.filter((t) => t.isContextSpike);
|
|
10583
|
+
return spiketurns.map((turn, index) => {
|
|
10584
|
+
const pct = grandTotal > 0 ? (turn.inputTokens + turn.outputTokens) / grandTotal * 100 : 0;
|
|
10585
|
+
const baseSeverity = this._assignSeverity(pct);
|
|
10586
|
+
const severity = baseSeverity === "info" ? "warning" : baseSeverity;
|
|
10587
|
+
const topContributors = [...turn.childSpans].sort((a, b) => b.inputTokens + b.outputTokens - (a.inputTokens + a.outputTokens)).slice(0, 3).map((c) => c.name);
|
|
10588
|
+
const actionTarget = `turn:${turn.turnNumber}`;
|
|
10589
|
+
const id = this._makeId("context_growth_spike", storyKey, actionTarget, index);
|
|
10590
|
+
return {
|
|
10591
|
+
id,
|
|
10592
|
+
storyKey,
|
|
10593
|
+
sprintId,
|
|
10594
|
+
ruleId: "context_growth_spike",
|
|
10595
|
+
severity,
|
|
10596
|
+
title: `Context spike at turn ${turn.turnNumber}`,
|
|
10597
|
+
description: `Turn ${turn.turnNumber} had a context spike with ${turn.inputTokens.toLocaleString()} input tokens. Top contributors: ${topContributors.length > 0 ? topContributors.join(", ") : "none identified"}. Consider compressing or evicting context before this turn.`,
|
|
10598
|
+
potentialSavingsTokens: Math.round(turn.contextDelta * .3),
|
|
10599
|
+
actionTarget,
|
|
10600
|
+
generatedAt
|
|
10601
|
+
};
|
|
10602
|
+
});
|
|
10603
|
+
}
|
|
10604
|
+
/**
|
|
10605
|
+
* Flag semantic categories with trend === 'growing'.
|
|
10606
|
+
* Severity is 'info' by default; 'warning' if percentage > 25%.
|
|
10607
|
+
*/
|
|
10608
|
+
_runGrowingCategories(ctx) {
|
|
10609
|
+
const { categories, storyKey, sprintId, generatedAt } = ctx;
|
|
10610
|
+
if (categories.length === 0) return [];
|
|
10611
|
+
const growing = categories.filter((c) => c.trend === "growing");
|
|
10612
|
+
return growing.map((cat, index) => {
|
|
10613
|
+
const severity = cat.percentage > 25 ? "warning" : "info";
|
|
10614
|
+
const actionTarget = cat.category;
|
|
10615
|
+
const id = this._makeId("growing_categories", storyKey, actionTarget, index);
|
|
10616
|
+
return {
|
|
10617
|
+
id,
|
|
10618
|
+
storyKey,
|
|
10619
|
+
sprintId,
|
|
10620
|
+
ruleId: "growing_categories",
|
|
10621
|
+
severity,
|
|
10622
|
+
title: `Growing category: ${cat.category}`,
|
|
10623
|
+
description: `The "${cat.category}" category is growing across turns, currently at ${cat.percentage.toFixed(1)}% of total tokens (${cat.totalTokens.toLocaleString()} tokens). This trend suggests increasing context pressure from this source.`,
|
|
10624
|
+
potentialSavingsTokens: Math.round(cat.totalTokens * .2),
|
|
10625
|
+
actionTarget,
|
|
10626
|
+
generatedAt
|
|
10627
|
+
};
|
|
10628
|
+
});
|
|
10629
|
+
}
|
|
10630
|
+
/**
|
|
10631
|
+
* If cache hit rate < 30%, flag the worst-performing operations and compute
|
|
10632
|
+
* potential savings as totalCacheMissTokens * 0.5.
|
|
10633
|
+
*/
|
|
10634
|
+
_runCacheEfficiency(ctx) {
|
|
10635
|
+
const { efficiencyScore, allSpans, storyKey, sprintId, generatedAt } = ctx;
|
|
10636
|
+
const cacheHitRate = isNaN(efficiencyScore.avgCacheHitRate) ? 0 : efficiencyScore.avgCacheHitRate;
|
|
10637
|
+
if (cacheHitRate >= .3) return [];
|
|
10638
|
+
if (allSpans.length === 0) return [];
|
|
10639
|
+
const totalCacheMissTokens = allSpans.reduce((sum, s) => {
|
|
10640
|
+
const missTokens = s.inputTokens - s.cacheReadTokens;
|
|
10641
|
+
return sum + Math.max(0, missTokens);
|
|
10642
|
+
}, 0);
|
|
10643
|
+
if (totalCacheMissTokens === 0) return [];
|
|
10644
|
+
const potentialSavingsTokens = Math.round(totalCacheMissTokens * .5);
|
|
10645
|
+
const spansWithRate = allSpans.filter((s) => s.inputTokens > 0).map((s) => ({
|
|
10646
|
+
span: s,
|
|
10647
|
+
hitRate: s.cacheReadTokens / s.inputTokens
|
|
10648
|
+
})).sort((a, b) => a.hitRate - b.hitRate).slice(0, 3);
|
|
10649
|
+
const worstOps = spansWithRate.map((e) => e.span.name).join(", ");
|
|
10650
|
+
const actionTarget = worstOps || "unknown";
|
|
10651
|
+
const id = this._makeId("cache_efficiency", storyKey, actionTarget, 0);
|
|
10652
|
+
this._logger.debug({
|
|
10653
|
+
storyKey,
|
|
10654
|
+
cacheHitRate,
|
|
10655
|
+
potentialSavingsTokens
|
|
10656
|
+
}, "cache_efficiency recommendation generated");
|
|
10657
|
+
return [{
|
|
10658
|
+
id,
|
|
10659
|
+
storyKey,
|
|
10660
|
+
sprintId,
|
|
10661
|
+
ruleId: "cache_efficiency",
|
|
10662
|
+
severity: "warning",
|
|
10663
|
+
title: "Low cache hit rate",
|
|
10664
|
+
description: `Overall cache hit rate is ${(cacheHitRate * 100).toFixed(1)}% (below 30% threshold). Worst performing operations: ${worstOps || "none identified"}. Potential savings if hit rate reached 50%: ${potentialSavingsTokens.toLocaleString()} tokens.`,
|
|
10665
|
+
potentialSavingsTokens,
|
|
10666
|
+
actionTarget,
|
|
10667
|
+
generatedAt
|
|
10668
|
+
}];
|
|
10669
|
+
}
|
|
10670
|
+
/**
|
|
10671
|
+
* If more than one model is present, flag the underperforming model.
|
|
10672
|
+
* Severity is 'info' by default; 'warning' if cache efficiency gap > 20pp.
|
|
10673
|
+
*/
|
|
10674
|
+
_runModelComparison(ctx) {
|
|
10675
|
+
const { efficiencyScore, storyKey, sprintId, generatedAt } = ctx;
|
|
10676
|
+
const models = efficiencyScore.perModelBreakdown;
|
|
10677
|
+
if (models.length <= 1) return [];
|
|
10678
|
+
const sorted = [...models].sort((a, b) => b.cacheHitRate - a.cacheHitRate);
|
|
10679
|
+
const best = sorted[0];
|
|
10680
|
+
const worst = sorted[sorted.length - 1];
|
|
10681
|
+
if (best.model === worst.model) return [];
|
|
10682
|
+
const gapPP = (best.cacheHitRate - worst.cacheHitRate) * 100;
|
|
10683
|
+
const severity = gapPP > 20 ? "warning" : "info";
|
|
10684
|
+
const actionTarget = worst.model;
|
|
10685
|
+
const id = this._makeId("per_model_comparison", storyKey, actionTarget, 0);
|
|
10686
|
+
return [{
|
|
10687
|
+
id,
|
|
10688
|
+
storyKey,
|
|
10689
|
+
sprintId,
|
|
10690
|
+
ruleId: "per_model_comparison",
|
|
10691
|
+
severity,
|
|
10692
|
+
title: `Underperforming model: ${worst.model}`,
|
|
10693
|
+
description: `Model "${worst.model}" has a cache hit rate of ${(worst.cacheHitRate * 100).toFixed(1)}% vs. "${best.model}" at ${(best.cacheHitRate * 100).toFixed(1)}% (gap: ${gapPP.toFixed(1)} percentage points). Consider routing tasks to the higher-performing model.`,
|
|
10694
|
+
actionTarget,
|
|
10695
|
+
generatedAt
|
|
10696
|
+
}];
|
|
10697
|
+
}
|
|
10698
|
+
_isToolNameMatch(span) {
|
|
10699
|
+
const toolName = span.attributes?.["tool.name"];
|
|
10700
|
+
return toolName === "bash" || toolName === "execute_command" || span.name === "bash" || span.name === "execute_command" || span.operationName === "bash" || span.operationName === "execute_command";
|
|
10701
|
+
}
|
|
10702
|
+
};
|
|
10703
|
+
|
|
10704
|
+
//#endregion
|
|
10705
|
+
//#region src/modules/telemetry/turn-analyzer.ts
|
|
10706
|
+
var TurnAnalyzer = class {
  _logger;
  constructor(logger$27) {
    this._logger = logger$27;
  }
  /**
   * Analyze a list of NormalizedSpan records and produce TurnAnalysis[].
   *
   * Returns an empty array immediately when spans is empty.
   *
   * @param spans - All spans for a story (root and child spans mixed)
   */
  analyze(spans) {
    if (spans.length === 0) return [];
    const knownIds = new Set(spans.map((s) => s.spanId));
    // Roots: spans with no parent, or whose parent is outside this batch.
    const roots = spans
      .filter((s) => !s.parentSpanId || !knownIds.has(s.parentSpanId))
      .sort((a, b) => a.startTime - b.startTime);
    // Index children by parent span id for O(1) lookup per root.
    const childrenOf = new Map();
    for (const span of spans) {
      if (!span.parentSpanId || !knownIds.has(span.parentSpanId)) continue;
      if (!childrenOf.has(span.parentSpanId)) childrenOf.set(span.parentSpanId, []);
      childrenOf.get(span.parentSpanId).push(span);
    }
    // Context size accumulates root input tokens across turns, in time order.
    let contextSoFar = 0;
    const turns = roots.map((span, idx) => {
      const before = contextSoFar;
      contextSoFar += span.inputTokens;
      const childSummaries = (childrenOf.get(span.spanId) ?? []).map((child) => ({
        spanId: child.spanId,
        name: child.name,
        toolName: child.attributes?.["tool.name"],
        inputTokens: child.inputTokens,
        outputTokens: child.outputTokens,
        durationMs: child.durationMs
      }));
      return {
        spanId: span.spanId,
        turnNumber: idx + 1,
        name: span.name,
        timestamp: span.startTime,
        source: span.source,
        model: span.model,
        inputTokens: span.inputTokens,
        outputTokens: span.outputTokens,
        cacheReadTokens: span.cacheReadTokens,
        freshTokens: span.inputTokens - span.cacheReadTokens,
        cacheHitRate: span.inputTokens > 0 ? span.cacheReadTokens / span.inputTokens : 0,
        costUsd: span.costUsd,
        durationMs: span.durationMs,
        contextSize: contextSoFar,
        contextDelta: contextSoFar - before,
        toolName: span.attributes?.["tool.name"],
        isContextSpike: false,
        childSpans: childSummaries
      };
    });
    // Second pass: a turn is a spike when its input exceeds twice the mean.
    const avgInput = turns.reduce((sum, t) => sum + t.inputTokens, 0) / turns.length;
    for (const turn of turns) turn.isContextSpike = avgInput > 0 && turn.inputTokens > 2 * avgInput;
    this._logger.debug({
      turnCount: turns.length,
      avg: avgInput
    }, "TurnAnalyzer.analyze complete");
    return turns;
  }
};
|
|
10773
|
+
|
|
10774
|
+
//#endregion
|
|
10775
|
+
//#region src/modules/telemetry/cost-table.ts
|
|
10776
|
+
/**
 * Per-million-token pricing for known LLM models.
 * All prices are in USD.
 *
 * Fields per entry:
 * - inputPerMToken: cost per 1M uncached input (prompt) tokens
 * - outputPerMToken: cost per 1M output (completion) tokens
 * - cacheReadPerMToken: cost per 1M tokens read from the prompt cache
 * - cacheCreationPerMToken: cost per 1M tokens written to the prompt cache
 *
 * NOTE(review): these values are a point-in-time snapshot — verify against the
 * providers' current pricing pages before relying on absolute costs.
 */
const COST_TABLE = {
	"claude-3-opus-20240229": {
		inputPerMToken: 15,
		outputPerMToken: 75,
		cacheReadPerMToken: 1.5,
		cacheCreationPerMToken: 18.75
	},
	"claude-3-5-sonnet-20241022": {
		inputPerMToken: 3,
		outputPerMToken: 15,
		cacheReadPerMToken: .3,
		cacheCreationPerMToken: 3.75
	},
	"claude-3-5-haiku-20241022": {
		inputPerMToken: .8,
		outputPerMToken: 4,
		cacheReadPerMToken: .08,
		cacheCreationPerMToken: 1
	},
	"claude-3-haiku-20240307": {
		inputPerMToken: .25,
		outputPerMToken: 1.25,
		cacheReadPerMToken: .03,
		cacheCreationPerMToken: .3
	},
	"claude-3-sonnet-20240229": {
		inputPerMToken: 3,
		outputPerMToken: 15,
		cacheReadPerMToken: .3,
		cacheCreationPerMToken: 3.75
	},
	"gpt-4": {
		inputPerMToken: 30,
		outputPerMToken: 60,
		cacheReadPerMToken: 3,
		cacheCreationPerMToken: 30
	},
	"gpt-4-turbo": {
		inputPerMToken: 10,
		outputPerMToken: 30,
		cacheReadPerMToken: 1,
		cacheCreationPerMToken: 10
	},
	"gpt-3.5-turbo": {
		inputPerMToken: .5,
		outputPerMToken: 1.5,
		cacheReadPerMToken: .05,
		cacheCreationPerMToken: .5
	}
};
|
|
10830
|
+
/**
 * Resolve a model string to a key in COST_TABLE.
 * Returns the matched key, or undefined if not found.
 *
 * Matching order: exact key, then case-insensitive exact, then
 * case-insensitive substring match in either direction.
 */
function resolveModel$1(model) {
  if (model in COST_TABLE) return model;
  const needle = model.toLowerCase();
  const keys = Object.keys(COST_TABLE);
  // Pass 1: case-insensitive exact match.
  const exact = keys.find((key) => key.toLowerCase() === needle);
  if (exact !== void 0) return exact;
  // Pass 2: substring match in either direction (find yields undefined on miss).
  return keys.find((key) => {
    const candidate = key.toLowerCase();
    return needle.includes(candidate) || candidate.includes(needle);
  });
}
|
|
10843
|
+
/**
 * Estimate the cost in USD for a set of token counts and a model identifier.
 *
 * - Uses `cacheReadPerMToken` from the table directly (already discounted).
 * - Returns 0 for unknown models without throwing.
 *
 * @param model - Model identifier string (exact or fuzzy match against COST_TABLE)
 * @param tokens - Token counts object
 * @returns Estimated cost in USD
 */
function estimateCost(model, tokens) {
  const key = resolveModel$1(model);
  if (key === void 0) return 0;
  const pricing = COST_TABLE[key];
  // Prices are quoted per million tokens.
  const perM = 1e6;
  let total = 0;
  total += tokens.input / perM * pricing.inputPerMToken;
  total += tokens.output / perM * pricing.outputPerMToken;
  total += tokens.cacheRead / perM * pricing.cacheReadPerMToken;
  total += tokens.cacheCreation / perM * pricing.cacheCreationPerMToken;
  return total;
}
|
|
10864
|
+
|
|
10865
|
+
//#endregion
|
|
10866
|
+
//#region src/modules/telemetry/timestamp-normalizer.ts
|
|
10867
|
+
/**
|
|
10868
|
+
* Timestamp normalization for OTLP telemetry payloads.
|
|
10869
|
+
*
|
|
10870
|
+
* OTLP payloads use nanosecond integers for timestamps (e.g. `startTimeUnixNano`).
|
|
10871
|
+
* Claude Code and other providers may emit timestamps in various formats.
|
|
10872
|
+
*
|
|
10873
|
+
* `normalizeTimestamp()` accepts any unknown value and returns a Unix millisecond
|
|
10874
|
+
* number. Null/undefined/unparseable inputs fall back to `Date.now()`.
|
|
10875
|
+
*
|
|
10876
|
+
* Detection order (after ISO string check):
|
|
10877
|
+
* 1. Nanoseconds (>= 1e18)
|
|
10878
|
+
* 2. Microseconds (>= 1e15)
|
|
10879
|
+
* 3. Milliseconds (>= 1e12)
|
|
10880
|
+
* 4. Seconds (< 1e12)
|
|
10881
|
+
*/
|
|
10882
|
+
/**
 * Normalize any timestamp value to Unix milliseconds.
 *
 * Handles:
 * - ISO 8601 strings (e.g. "2024-03-08T12:00:00Z")
 * - Nanosecond integers or numeric strings (>= 1e18)
 * - Microsecond integers or numeric strings (>= 1e15)
 * - Millisecond integers or numeric strings (>= 1e12)
 * - Second integers or numeric strings (< 1e12)
 * - BigInt string values from OTLP `startTimeUnixNano` (e.g. "1709900000000000000")
 * - null / undefined / unparseable → falls back to Date.now()
 *
 * @param value - Raw timestamp value of unknown type
 * @returns Unix millisecond timestamp
 */
function normalizeTimestamp(value) {
  // `== null` deliberately matches both null and undefined.
  if (value == null) return Date.now();
  if (typeof value === "bigint") return bigIntToMillis(value);
  if (typeof value === "number") {
    return isFinite(value) && !isNaN(value) ? numericToMillis(value) : Date.now();
  }
  if (typeof value !== "string") return Date.now();
  // ISO-looking strings are parsed by the Date machinery first.
  if (isIsoDateString(value)) {
    const parsedMs = Date.parse(value);
    if (!isNaN(parsedMs)) return parsedMs;
  }
  // Pure-digit strings may exceed Number precision (OTLP nanos) → BigInt.
  const digits = value.trim();
  if (/^\d+$/.test(digits)) {
    try {
      return bigIntToMillis(BigInt(digits));
    } catch {
      // Unparseable despite the digit check — fall through to Date.now().
    }
  }
  return Date.now();
}
|
|
10918
|
+
/**
 * Returns true if the string looks like an ISO 8601 date string
 * (leading YYYY-MM-DD, a "T" separator, a trailing Z, or a UTC offset).
 */
function isIsoDateString(value) {
  if (value.includes("T")) return true;
  if (/^\d{4}-\d{2}-\d{2}/.test(value)) return true;
  return /Z$/.test(value) || /[+-]\d{2}:\d{2}$/.test(value);
}
|
|
10925
|
+
/**
 * Convert a BigInt nanosecond/microsecond/millisecond/second value to
 * milliseconds, choosing the unit by magnitude.
 */
function bigIntToMillis(value) {
  const nanoFloor = 1000000000000000000n;
  const microFloor = 1000000000000000n;
  const milliFloor = 1000000000000n;
  if (value >= nanoFloor) return Number(value / 1000000n);
  if (value >= microFloor) return Number(value / 1000n);
  if (value >= milliFloor) return Number(value);
  // Below the millisecond threshold the value is interpreted as seconds.
  return Number(value) * 1e3;
}
|
|
10937
|
+
/**
 * Convert a numeric value to milliseconds based on magnitude
 * (nanoseconds >= 1e18, microseconds >= 1e15, milliseconds >= 1e12,
 * otherwise seconds).
 */
function numericToMillis(value) {
  if (value >= 1e18) return Math.floor(value / 1e6);
  if (value >= 1e15) return Math.floor(value / 1e3);
  if (value >= 1e12) return Math.floor(value);
  return Math.floor(value * 1e3);
}
|
|
10946
|
+
|
|
10947
|
+
//#endregion
|
|
10948
|
+
//#region src/modules/telemetry/token-extractor.ts
|
|
10949
|
+
/**
 * Patterns for matching attribute keys to token fields.
 * Each pattern is checked case-insensitively via substring match.
 *
 * IMPORTANT: more-specific patterns (cacheRead, cacheCreation) MUST come first
 * so that keys like `cache_read_input_tokens` match `cache_read` before `input_token`.
 * (Object.entries preserves this insertion order, which the extractors rely on.)
 */
const TOKEN_PATTERNS = {
	cacheRead: ["cache_read"],
	cacheCreation: ["cache_creation", "cache_write"],
	input: ["input_token", "prompt_token"],
	output: ["output_token", "completion_token"]
};
|
|
10962
|
+
/**
 * Extract token counts from an OTLP attributes array.
 *
 * Matches attribute keys case-insensitively against known patterns.
 * The first matching value for each field wins; each attribute key is
 * consumed by at most one field per pass.
 *
 * @param attributes - Array of OTLP attribute entries
 * @returns Partial token counts (only fields found in attributes)
 */
function extractTokensFromAttributes(attributes) {
  if (!Array.isArray(attributes) || attributes.length === 0) return {};
  const found = {};
  for (const attr of attributes) {
    if (!attr?.key || !attr?.value) continue;
    const loweredKey = attr.key.toLowerCase();
    const numeric = resolveAttrValue(attr.value);
    if (numeric === void 0) continue;
    let consumed = false;
    // TOKEN_PATTERNS lists specific patterns (cache_*) before generic ones.
    for (const [field, patterns] of Object.entries(TOKEN_PATTERNS)) {
      if (consumed) break;
      // Already-filled fields are skipped; the key may still match a later field.
      if (found[field] !== void 0) continue;
      if (patterns.some((pattern) => loweredKey.includes(pattern))) {
        found[field] = numeric;
        consumed = true;
      }
    }
  }
  return found;
}
|
|
10992
|
+
/**
 * Extract token counts from a JSON body string via recursive search.
 *
 * Parses the body as JSON and recursively walks the object tree up to
 * depth 4, looking for keys matching token patterns.
 *
 * @param body - Raw body string (may be JSON)
 * @returns Partial token counts found in body
 */
function extractTokensFromBody(body) {
  if (!body || typeof body !== "string") return {};
  // Only JSON.parse failures are swallowed; the tree walk runs outside the try.
  let tree;
  try {
    tree = JSON.parse(body);
  } catch {
    return {};
  }
  return searchObjectForTokens(tree, 0);
}
|
|
11011
|
+
/**
 * Merge attribute-derived and body-derived token counts.
 *
 * Attributes take priority over body for each field.
 * Missing fields default to 0.
 *
 * @param fromAttributes - Token counts from attributes (higher priority)
 * @param fromBody - Token counts from body JSON (lower priority)
 * @returns Complete TokenCounts with all fields
 */
function mergeTokenCounts(fromAttributes, fromBody) {
  // `??` keeps explicit zeros from the attribute layer (only null/undefined fall through).
  const pick = (field) => fromAttributes[field] ?? fromBody[field] ?? 0;
  return {
    input: pick("input"),
    output: pick("output"),
    cacheRead: pick("cacheRead"),
    cacheCreation: pick("cacheCreation")
  };
}
|
|
11029
|
+
/**
 * Resolve an OTLP attribute value to a number.
 * OTLP integer values arrive as strings (e.g. `"intValue": "2048"`).
 * Returns undefined when no finite numeric value can be extracted.
 */
function resolveAttrValue(value) {
  if (value.intValue !== void 0) {
    const parsed = Number(value.intValue);
    return Number.isFinite(parsed) ? parsed : void 0;
  }
  if (value.doubleValue !== void 0) {
    const parsed = Number(value.doubleValue);
    return Number.isFinite(parsed) ? parsed : void 0;
  }
  if (value.stringValue !== void 0) {
    const parsed = Number(value.stringValue);
    if (Number.isFinite(parsed)) return parsed;
  }
  return void 0;
}
|
|
11048
|
+
/**
 * Recursively search an object for token count fields up to maxDepth (4).
 * Arrays are walked element-by-element; objects are walked key-by-key, and
 * nested objects are searched after their own key has been considered.
 */
function searchObjectForTokens(obj, depth) {
  if (depth >= 4 || obj === null || typeof obj !== "object") return {};
  const found = {};
  if (Array.isArray(obj)) {
    for (const element of obj) {
      mergePartialInto(found, searchObjectForTokens(element, depth + 1));
    }
    return found;
  }
  for (const [key, val] of Object.entries(obj)) {
    const loweredKey = key.toLowerCase();
    // Each key is consumed by at most one field (hence the labeled break),
    // even when its value is not numeric — preserving first-match semantics.
    fields: for (const [field, patterns] of Object.entries(TOKEN_PATTERNS)) {
      if (found[field] !== void 0) continue;
      for (const pattern of patterns) {
        if (!loweredKey.includes(pattern)) continue;
        const numeric = typeof val === "number" ? val : typeof val === "string" ? Number(val) : NaN;
        if (Number.isFinite(numeric)) found[field] = numeric;
        break fields;
      }
    }
    if (val !== null && typeof val === "object") {
      mergePartialInto(found, searchObjectForTokens(val, depth + 1));
    }
  }
  return found;
}
|
|
11082
|
+
/**
 * Merge source into target in place, only filling fields that are still
 * undefined on the target (earlier finds win).
 */
function mergePartialInto(target, source) {
  for (const field of ["input", "output", "cacheRead", "cacheCreation"]) {
    if (target[field] === void 0 && source[field] !== void 0) {
      target[field] = source[field];
    }
  }
}
|
|
11091
|
+
|
|
11092
|
+
//#endregion
|
|
11093
|
+
//#region src/modules/telemetry/normalizer.ts
|
|
11094
|
+
/**
 * Extract a string value from an OTLP attribute array by key.
 * Numeric values (intValue/doubleValue) are stringified; returns undefined
 * when the key is absent or carries no usable value.
 */
function getAttrString(attrs, key) {
  if (!Array.isArray(attrs)) return void 0;
  const value = attrs.find((a) => a?.key === key)?.value;
  if (!value) return void 0;
  // stringValue wins; null is treated like "absent" (mirrors ?? semantics).
  if (value.stringValue !== void 0 && value.stringValue !== null) return value.stringValue;
  if (value.intValue !== void 0) return String(value.intValue);
  if (value.doubleValue !== void 0) return String(value.doubleValue);
  return void 0;
}
|
|
11103
|
+
/**
 * Determine source from resource attributes service.name.
 * Known substrings map to canonical source names; anything else passes
 * through unchanged, and a missing service.name yields "unknown".
 */
function resolveSource(resourceAttrs) {
  const serviceName = getAttrString(resourceAttrs, "service.name");
  if (!serviceName) return "unknown";
  const normalized = serviceName.toLowerCase();
  const rules = [
    [["claude"], "claude-code"],
    [["codex", "openai"], "codex"],
    [["local"], "local-llm"]
  ];
  for (const [needles, source] of rules) {
    if (needles.some((needle) => normalized.includes(needle))) return source;
  }
  return serviceName;
}
|
|
11115
|
+
/**
 * Resolve model from span attributes, trying known keys in priority order.
 * Returns the first non-empty value, or undefined when none is present.
 */
function resolveModel(attrs) {
  const candidateKeys = [
    "gen_ai.request.model",
    "gen_ai.response.model",
    "llm.request.model",
    "anthropic.model",
    "openai.model",
    "model"
  ];
  for (const candidate of candidateKeys) {
    const resolved = getAttrString(attrs, candidate);
    if (resolved) return resolved;
  }
  return void 0;
}
|
|
11133
|
+
/**
 * Resolve provider from span attributes, falling back to a source-based
 * mapping when no explicit `gen_ai.system` attribute is present.
 */
function resolveProvider(attrs, source) {
  const explicit = getAttrString(attrs, "gen_ai.system");
  if (explicit) return explicit;
  // Map.get returns undefined for unknown sources, matching the original.
  const providerBySource = new Map([["claude-code", "anthropic"], ["codex", "openai"]]);
  return providerBySource.get(source);
}
|
|
11143
|
+
/**
 * Extract the body string from a log record body field.
 * Accepts a raw string or an object with a truthy `stringValue`;
 * anything else (including empty strings) yields undefined.
 */
function extractBodyString(body) {
  if (typeof body === "string") return body || void 0;
  if (body && typeof body === "object" && body.stringValue) return body.stringValue;
  return void 0;
}
|
|
11152
|
+
/**
 * Generate a unique log record ID of the form `log-<epochMs>-<counter>`.
 * The module-level counter disambiguates IDs created in the same millisecond.
 */
let _logIdCounter = 0;
function generateLogId() {
  _logIdCounter += 1;
  return ["log", Date.now(), _logIdCounter].join("-");
}
|
|
11159
|
+
/**
* Transforms raw OTLP payloads into normalized telemetry models.
*
* Inject an `ILogger` (pino-compatible) for structured logging.
* All public methods return empty arrays on any error — never throw.
*/
var TelemetryNormalizer = class {
	// Injected pino-compatible logger; the only dependency of this class.
	_logger;
	constructor(logger$27) {
		this._logger = logger$27;
	}
	/**
	* Normalize a raw OTLP trace payload into an array of `NormalizedSpan`.
	*
	* @param raw - Raw OTLP trace payload (resourceSpans structure)
	* @returns Array of normalized spans; empty on error or empty input
	*/
	normalizeSpan(raw) {
		try {
			return this._normalizeSpanInternal(raw);
		} catch (err) {
			// Contract: public methods never throw — log and return empty.
			this._logger.warn({ err }, "TelemetryNormalizer.normalizeSpan: unexpected error");
			return [];
		}
	}
	// Walks resourceSpans -> scopeSpans -> spans; per-span failures are
	// logged and skipped so one bad span cannot drop the whole batch.
	_normalizeSpanInternal(raw) {
		// Reject non-object payloads and payloads without a resourceSpans array.
		if (!raw || typeof raw !== "object") return [];
		const payload = raw;
		if (!Array.isArray(payload.resourceSpans)) return [];
		const results = [];
		for (const resourceSpan of payload.resourceSpans) {
			if (!resourceSpan) continue;
			// Resource-level attributes apply to every span in this group;
			// source (e.g. claude-code/codex) is resolved once per resource.
			const resourceAttrs = resourceSpan.resource?.attributes;
			const source = resolveSource(resourceAttrs);
			if (!Array.isArray(resourceSpan.scopeSpans)) continue;
			for (const scopeSpan of resourceSpan.scopeSpans) {
				if (!Array.isArray(scopeSpan?.spans)) continue;
				for (const span of scopeSpan.spans) {
					if (!span) continue;
					try {
						const normalized = this._normalizeOneSpan(span, resourceAttrs, source);
						results.push(normalized);
					} catch (err) {
						this._logger.warn({
							err,
							spanId: span.spanId
						}, "Failed to normalize span — skipping");
					}
				}
			}
		}
		return results;
	}
	// Normalize a single OTLP span. Span-level attributes win over
	// resource-level ones for model/storyKey lookups (?? fallback chains).
	_normalizeOneSpan(span, resourceAttrs, source) {
		const spanId = span.spanId ?? "";
		const traceId = span.traceId ?? "";
		const name = span.name ?? "";
		const model = resolveModel(span.attributes) ?? resolveModel(resourceAttrs);
		const provider = resolveProvider(span.attributes, source);
		const operationName = getAttrString(span.attributes, "gen_ai.operation.name") ?? name;
		// normalizeTimestamp presumably converts unix-nano to ms — defined elsewhere; TODO confirm units.
		const startTime = normalizeTimestamp(span.startTimeUnixNano);
		const endTime = span.endTimeUnixNano ? normalizeTimestamp(span.endTimeUnixNano) : void 0;
		// Spans still in flight (no end time) report a 0 duration.
		const durationMs = endTime !== void 0 ? endTime - startTime : 0;
		// Token counts may live in attributes or inside a serialized response
		// body; both sources are extracted then merged.
		const fromAttrs = extractTokensFromAttributes(span.attributes);
		const bodyStr = getAttrString(span.attributes, "llm.response.body") ?? getAttrString(span.attributes, "gen_ai.response.body");
		const fromBody = extractTokensFromBody(bodyStr);
		const tokens = mergeTokenCounts(fromAttrs, fromBody);
		const storyKey = getAttrString(span.attributes, "substrate.story_key") ?? getAttrString(resourceAttrs, "substrate.story_key");
		// Cost can only be estimated when the model is known.
		const costUsd = model ? estimateCost(model, tokens) : 0;
		// Flatten OTLP key/value attribute pairs into a plain record,
		// preferring stringValue, then int/double/bool variants.
		const attributesRecord = {};
		if (Array.isArray(span.attributes)) {
			for (const attr of span.attributes) if (attr?.key) attributesRecord[attr.key] = attr.value?.stringValue ?? attr.value?.intValue ?? attr.value?.doubleValue ?? attr.value?.boolValue;
		}
		return {
			spanId,
			traceId,
			parentSpanId: span.parentSpanId,
			name,
			source,
			model,
			provider,
			operationName,
			storyKey,
			inputTokens: tokens.input,
			outputTokens: tokens.output,
			cacheReadTokens: tokens.cacheRead,
			cacheCreationTokens: tokens.cacheCreation,
			costUsd,
			durationMs,
			startTime,
			endTime,
			attributes: attributesRecord,
			events: span.events
		};
	}
	/**
	* Normalize a raw OTLP log payload into an array of `NormalizedLog`.
	*
	* @param raw - Raw OTLP log payload (resourceLogs structure)
	* @returns Array of normalized logs; empty on error or empty input
	*/
	normalizeLog(raw) {
		try {
			return this._normalizeLogInternal(raw);
		} catch (err) {
			// Contract: public methods never throw — log and return empty.
			this._logger.warn({ err }, "TelemetryNormalizer.normalizeLog: unexpected error");
			return [];
		}
	}
	// Mirrors _normalizeSpanInternal for the resourceLogs -> scopeLogs ->
	// logRecords structure; per-record failures are logged and skipped.
	_normalizeLogInternal(raw) {
		if (!raw || typeof raw !== "object") return [];
		const payload = raw;
		if (!Array.isArray(payload.resourceLogs)) return [];
		const results = [];
		for (const resourceLog of payload.resourceLogs) {
			if (!resourceLog) continue;
			const resourceAttrs = resourceLog.resource?.attributes;
			if (!Array.isArray(resourceLog.scopeLogs)) continue;
			for (const scopeLog of resourceLog.scopeLogs) {
				if (!Array.isArray(scopeLog?.logRecords)) continue;
				for (const record of scopeLog.logRecords) {
					if (!record) continue;
					try {
						const normalized = this._normalizeOneLog(record, resourceAttrs);
						results.push(normalized);
					} catch (err) {
						this._logger.warn({ err }, "Failed to normalize log record — skipping");
					}
				}
			}
		}
		return results;
	}
	// Normalize a single OTLP log record. Several attribute-key spellings are
	// probed for event/session/tool metadata because emitters differ.
	_normalizeOneLog(record, resourceAttrs) {
		// Fall back to a synthesized ID when the record carries none.
		const logId = record.logRecordId ?? generateLogId();
		const timestamp = normalizeTimestamp(record.timeUnixNano);
		const bodyStr = extractBodyString(record.body);
		// Token counts merged from attributes and the (optional) body text.
		const fromAttrs = extractTokensFromAttributes(record.attributes);
		const fromBody = extractTokensFromBody(bodyStr);
		const tokens = mergeTokenCounts(fromAttrs, fromBody);
		const eventName = getAttrString(record.attributes, "event.name") ?? getAttrString(record.attributes, "gen_ai.event.name") ?? getAttrString(record.attributes, "event_name");
		const sessionId = getAttrString(record.attributes, "session.id") ?? getAttrString(record.attributes, "gen_ai.session.id") ?? getAttrString(resourceAttrs, "session.id");
		const toolName = getAttrString(record.attributes, "tool.name") ?? getAttrString(record.attributes, "gen_ai.tool.name") ?? getAttrString(record.attributes, "tool_name");
		const model = resolveModel(record.attributes) ?? resolveModel(resourceAttrs);
		const storyKey = getAttrString(record.attributes, "substrate.story_key") ?? getAttrString(resourceAttrs, "substrate.story_key");
		// Cost is only estimable when a model was identified.
		const costUsd = model ? estimateCost(model, tokens) : 0;
		return {
			logId,
			traceId: record.traceId,
			spanId: record.spanId,
			timestamp,
			severity: record.severityText,
			body: bodyStr,
			eventName,
			sessionId,
			toolName,
			inputTokens: tokens.input,
			outputTokens: tokens.output,
			cacheReadTokens: tokens.cacheRead,
			costUsd,
			model,
			storyKey
		};
	}
};
|
|
11324
|
+
|
|
11325
|
+
//#endregion
|
|
11326
|
+
//#region src/modules/telemetry/telemetry-pipeline.ts
|
|
11327
|
+
// Module-scoped structured logger for the telemetry pipeline region.
const logger$6 = createLogger("telemetry:pipeline");
|
|
11328
|
+
/**
* Wires together the full OTLP analysis and persistence pipeline.
*
* Usage:
*   const pipeline = new TelemetryPipeline(deps)
*   await pipeline.processBatch(items)
*/
var TelemetryPipeline = class {
	// Collaborators injected via the deps object; none are created here.
	_normalizer;
	_turnAnalyzer;
	_categorizer;
	_consumerAnalyzer;
	_efficiencyScorer;
	_recommender;
	_persistence;
	constructor(deps) {
		this._normalizer = deps.normalizer;
		this._turnAnalyzer = deps.turnAnalyzer;
		this._categorizer = deps.categorizer;
		this._consumerAnalyzer = deps.consumerAnalyzer;
		this._efficiencyScorer = deps.efficiencyScorer;
		this._recommender = deps.recommender;
		this._persistence = deps.persistence;
	}
	/**
	* Process a batch of raw OTLP payloads through the full analysis pipeline.
	*
	* Each payload is normalized independently. Spans are then grouped by storyKey
	* for per-story analysis. Items that fail normalization are skipped with a warning.
	*/
	async processBatch(items) {
		if (items.length === 0) return;
		logger$6.debug({ count: items.length }, "TelemetryPipeline.processBatch start");
		const allSpans = [];
		const allLogs = [];
		// Each payload is run through BOTH normalizers — a single OTLP body may
		// carry spans, logs, or neither; failures are isolated per payload.
		for (const item of items) {
			try {
				const spans = this._normalizer.normalizeSpan(item.body);
				allSpans.push(...spans);
			} catch (err) {
				logger$6.warn({ err }, "TelemetryPipeline: normalizeSpan failed — skipping payload");
			}
			try {
				const logs = this._normalizer.normalizeLog(item.body);
				allLogs.push(...logs);
			} catch (err) {
				logger$6.warn({ err }, "TelemetryPipeline: normalizeLog failed — skipping payload");
			}
		}
		logger$6.debug({
			spans: allSpans.length,
			logs: allLogs.length
		}, "TelemetryPipeline: normalized batch");
		// NOTE(review): analysis below is span-driven; normalized logs are
		// collected but not used further in this method.
		if (allSpans.length === 0) {
			logger$6.debug("TelemetryPipeline: no spans normalized from batch");
			return;
		}
		// Group spans by storyKey; spans without one land in a sentinel bucket
		// that is skipped for analysis.
		const spansByStory = new Map();
		const unknownStoryKey = "__unknown__";
		for (const span of allSpans) {
			const key = span.storyKey ?? unknownStoryKey;
			const existing = spansByStory.get(key);
			if (existing !== void 0) existing.push(span);
			else spansByStory.set(key, [span]);
		}
		// Stories are processed sequentially; one failed story does not stop
		// the rest of the batch.
		for (const [storyKey, spans] of spansByStory) {
			if (storyKey === unknownStoryKey) {
				logger$6.debug({ spanCount: spans.length }, "TelemetryPipeline: spans without storyKey — skipping analysis");
				continue;
			}
			try {
				await this._processStory(storyKey, spans);
			} catch (err) {
				logger$6.warn({
					err,
					storyKey
				}, "TelemetryPipeline: story processing failed — skipping");
			}
		}
		logger$6.debug({ storyCount: spansByStory.size }, "TelemetryPipeline.processBatch complete");
	}
	// Run the full analysis chain for one story's spans, then persist every
	// result best-effort: each store call catches its own errors so partial
	// persistence failures never abort the others.
	async _processStory(storyKey, spans) {
		// Analysis stages: turns -> categories -> consumers -> efficiency score.
		const turns = this._turnAnalyzer.analyze(spans);
		const categories = this._categorizer.computeCategoryStats(spans, turns);
		const consumers = this._consumerAnalyzer.analyze(spans);
		const efficiencyScore = this._efficiencyScorer.score(storyKey, turns);
		const generatedAt = new Date().toISOString();
		// Recommendation context bundles every intermediate result.
		const context = {
			storyKey,
			generatedAt,
			turns,
			categories,
			consumers,
			efficiencyScore,
			allSpans: spans
		};
		const recommendations = this._recommender.analyze(context);
		// Persist everything in parallel; empty result sets are skipped,
		// except the efficiency score which is always stored.
		await Promise.all([
			turns.length > 0 ? this._persistence.storeTurnAnalysis(storyKey, turns).catch((err) => logger$6.warn({
				err,
				storyKey
			}, "Failed to store turn analysis")) : Promise.resolve(),
			categories.length > 0 ? this._persistence.storeCategoryStats(storyKey, categories).catch((err) => logger$6.warn({
				err,
				storyKey
			}, "Failed to store category stats")) : Promise.resolve(),
			consumers.length > 0 ? this._persistence.storeConsumerStats(storyKey, consumers).catch((err) => logger$6.warn({
				err,
				storyKey
			}, "Failed to store consumer stats")) : Promise.resolve(),
			this._persistence.storeEfficiencyScore(efficiencyScore).catch((err) => logger$6.warn({
				err,
				storyKey
			}, "Failed to store efficiency score")),
			recommendations.length > 0 ? this._persistence.saveRecommendations(storyKey, recommendations).catch((err) => logger$6.warn({
				err,
				storyKey
			}, "Failed to save recommendations")) : Promise.resolve()
		]);
		logger$6.info({
			storyKey,
			turns: turns.length,
			compositeScore: efficiencyScore.compositeScore,
			recommendations: recommendations.length
		}, "TelemetryPipeline: story analysis complete");
	}
};
|
|
11455
|
+
|
|
11456
|
+
//#endregion
|
|
11457
|
+
//#region src/modules/implementation-orchestrator/orchestrator-impl.ts
|
|
11458
|
+
/**
 * Create a one-shot pause gate: a promise plus the function that releases it.
 *
 * Consumers await `promise`; whoever owns the gate calls `resolve(value)`
 * to open it, settling the promise with that value.
 */
function createPauseGate() {
	let release;
	const promise = new Promise((settle) => {
		release = settle;
	});
	return {
		promise,
		resolve: release
	};
}
|
|
11468
|
+
/**
|
|
11469
|
+
* Build the targeted_files content string from a code-review issue list.
|
|
11470
|
+
* Deduplicates file paths and includes line numbers where available.
|
|
11471
|
+
* Returns empty string when no issues have file references.
|
|
11472
|
+
*/
|
|
11473
|
+
function buildTargetedFilesContent(issueList) {
|
|
11474
|
+
const seen = new Map();
|
|
10116
11475
|
for (const issue of issueList) {
|
|
10117
11476
|
const iss = issue;
|
|
10118
11477
|
if (!iss.file) continue;
|
|
@@ -10136,7 +11495,7 @@ function buildTargetedFilesContent(issueList) {
|
|
|
10136
11495
|
*/
|
|
10137
11496
|
function createImplementationOrchestrator(deps) {
|
|
10138
11497
|
const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot, tokenCeilings, stateStore, telemetryPersistence, ingestionServer } = deps;
|
|
10139
|
-
const logger$
|
|
11498
|
+
const logger$27 = createLogger("implementation-orchestrator");
|
|
10140
11499
|
let _state = "IDLE";
|
|
10141
11500
|
let _startedAt;
|
|
10142
11501
|
let _completedAt;
|
|
@@ -10182,7 +11541,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10182
11541
|
const nowMs = Date.now();
|
|
10183
11542
|
for (const [phase, startMs] of starts) {
|
|
10184
11543
|
const endMs = ends?.get(phase);
|
|
10185
|
-
if (endMs === void 0) logger$
|
|
11544
|
+
if (endMs === void 0) logger$27.warn({
|
|
10186
11545
|
storyKey,
|
|
10187
11546
|
phase
|
|
10188
11547
|
}, "Phase has no end time — story may have errored mid-phase. Duration capped to now() and may be inflated.");
|
|
@@ -10229,7 +11588,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10229
11588
|
recordedAt: completedAt,
|
|
10230
11589
|
timestamp: completedAt
|
|
10231
11590
|
}).catch((storeErr) => {
|
|
10232
|
-
logger$
|
|
11591
|
+
logger$27.warn({
|
|
10233
11592
|
err: storeErr,
|
|
10234
11593
|
storyKey
|
|
10235
11594
|
}, "Failed to record metric to StateStore (best-effort)");
|
|
@@ -10251,7 +11610,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10251
11610
|
rationale: `Story ${storyKey} completed with result=${result} in ${wallClockSeconds}s. Tokens: ${tokenAgg.input}+${tokenAgg.output}. Review cycles: ${reviewCycles}.`
|
|
10252
11611
|
});
|
|
10253
11612
|
} catch (decisionErr) {
|
|
10254
|
-
logger$
|
|
11613
|
+
logger$27.warn({
|
|
10255
11614
|
err: decisionErr,
|
|
10256
11615
|
storyKey
|
|
10257
11616
|
}, "Failed to write story-metrics decision (best-effort)");
|
|
@@ -10279,13 +11638,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
10279
11638
|
dispatches: _storyDispatches.get(storyKey) ?? 0
|
|
10280
11639
|
});
|
|
10281
11640
|
} catch (emitErr) {
|
|
10282
|
-
logger$
|
|
11641
|
+
logger$27.warn({
|
|
10283
11642
|
err: emitErr,
|
|
10284
11643
|
storyKey
|
|
10285
11644
|
}, "Failed to emit story:metrics event (best-effort)");
|
|
10286
11645
|
}
|
|
10287
11646
|
} catch (err) {
|
|
10288
|
-
logger$
|
|
11647
|
+
logger$27.warn({
|
|
10289
11648
|
err,
|
|
10290
11649
|
storyKey
|
|
10291
11650
|
}, "Failed to write story metrics (best-effort)");
|
|
@@ -10314,7 +11673,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10314
11673
|
rationale: `Story ${storyKey} ${outcome} after ${reviewCycles} review cycle(s).`
|
|
10315
11674
|
});
|
|
10316
11675
|
} catch (err) {
|
|
10317
|
-
logger$
|
|
11676
|
+
logger$27.warn({
|
|
10318
11677
|
err,
|
|
10319
11678
|
storyKey
|
|
10320
11679
|
}, "Failed to write story-outcome decision (best-effort)");
|
|
@@ -10340,7 +11699,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10340
11699
|
rationale: `Escalation diagnosis for ${payload.storyKey}: ${diagnosis.recommendedAction} — ${diagnosis.rationale}`
|
|
10341
11700
|
});
|
|
10342
11701
|
} catch (err) {
|
|
10343
|
-
logger$
|
|
11702
|
+
logger$27.warn({
|
|
10344
11703
|
err,
|
|
10345
11704
|
storyKey: payload.storyKey
|
|
10346
11705
|
}, "Failed to persist escalation diagnosis (best-effort)");
|
|
@@ -10389,7 +11748,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10389
11748
|
const existing = _stories.get(storyKey);
|
|
10390
11749
|
if (existing !== void 0) {
|
|
10391
11750
|
Object.assign(existing, updates);
|
|
10392
|
-
persistStoryState(storyKey, existing).catch((err) => logger$
|
|
11751
|
+
persistStoryState(storyKey, existing).catch((err) => logger$27.warn({
|
|
10393
11752
|
err,
|
|
10394
11753
|
storyKey
|
|
10395
11754
|
}, "StateStore write failed after updateStory"));
|
|
@@ -10398,12 +11757,12 @@ function createImplementationOrchestrator(deps) {
|
|
|
10398
11757
|
storyKey,
|
|
10399
11758
|
conflict: err
|
|
10400
11759
|
});
|
|
10401
|
-
else logger$
|
|
11760
|
+
else logger$27.warn({
|
|
10402
11761
|
err,
|
|
10403
11762
|
storyKey
|
|
10404
11763
|
}, "mergeStory failed");
|
|
10405
11764
|
});
|
|
10406
|
-
else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$
|
|
11765
|
+
else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$27.warn({
|
|
10407
11766
|
err,
|
|
10408
11767
|
storyKey
|
|
10409
11768
|
}, "rollbackStory failed — branch may persist"));
|
|
@@ -10430,7 +11789,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10430
11789
|
};
|
|
10431
11790
|
await stateStore.setStoryState(storyKey, record);
|
|
10432
11791
|
} catch (err) {
|
|
10433
|
-
logger$
|
|
11792
|
+
logger$27.warn({
|
|
10434
11793
|
err,
|
|
10435
11794
|
storyKey
|
|
10436
11795
|
}, "StateStore.setStoryState failed (best-effort)");
|
|
@@ -10446,7 +11805,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10446
11805
|
token_usage_json: serialized
|
|
10447
11806
|
});
|
|
10448
11807
|
} catch (err) {
|
|
10449
|
-
logger$
|
|
11808
|
+
logger$27.warn({ err }, "Failed to persist orchestrator state");
|
|
10450
11809
|
}
|
|
10451
11810
|
}
|
|
10452
11811
|
function recordProgress() {
|
|
@@ -10493,7 +11852,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10493
11852
|
}
|
|
10494
11853
|
if (childActive) {
|
|
10495
11854
|
_lastProgressTs = Date.now();
|
|
10496
|
-
logger$
|
|
11855
|
+
logger$27.debug({
|
|
10497
11856
|
storyKey: key,
|
|
10498
11857
|
phase: s.phase,
|
|
10499
11858
|
childPids
|
|
@@ -10502,7 +11861,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10502
11861
|
}
|
|
10503
11862
|
_stalledStories.add(key);
|
|
10504
11863
|
_storiesWithStall.add(key);
|
|
10505
|
-
logger$
|
|
11864
|
+
logger$27.warn({
|
|
10506
11865
|
storyKey: key,
|
|
10507
11866
|
phase: s.phase,
|
|
10508
11867
|
elapsedMs: elapsed,
|
|
@@ -10547,7 +11906,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10547
11906
|
for (let attempt = 0; attempt < MEMORY_PRESSURE_BACKOFF_MS.length; attempt++) {
|
|
10548
11907
|
const memState = dispatcher.getMemoryState();
|
|
10549
11908
|
if (!memState.isPressured) return true;
|
|
10550
|
-
logger$
|
|
11909
|
+
logger$27.warn({
|
|
10551
11910
|
storyKey,
|
|
10552
11911
|
freeMB: memState.freeMB,
|
|
10553
11912
|
thresholdMB: memState.thresholdMB,
|
|
@@ -10567,11 +11926,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
10567
11926
|
* exhausted retries the story is ESCALATED.
|
|
10568
11927
|
*/
|
|
10569
11928
|
async function processStory(storyKey) {
|
|
10570
|
-
logger$
|
|
11929
|
+
logger$27.info({ storyKey }, "Processing story");
|
|
10571
11930
|
{
|
|
10572
11931
|
const memoryOk = await checkMemoryPressure(storyKey);
|
|
10573
11932
|
if (!memoryOk) {
|
|
10574
|
-
logger$
|
|
11933
|
+
logger$27.warn({ storyKey }, "Memory pressure exhausted — escalating story without dispatch");
|
|
10575
11934
|
const memPressureState = {
|
|
10576
11935
|
phase: "ESCALATED",
|
|
10577
11936
|
reviewCycles: 0,
|
|
@@ -10580,7 +11939,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10580
11939
|
completedAt: new Date().toISOString()
|
|
10581
11940
|
};
|
|
10582
11941
|
_stories.set(storyKey, memPressureState);
|
|
10583
|
-
persistStoryState(storyKey, memPressureState).catch((err) => logger$
|
|
11942
|
+
persistStoryState(storyKey, memPressureState).catch((err) => logger$27.warn({
|
|
10584
11943
|
err,
|
|
10585
11944
|
storyKey
|
|
10586
11945
|
}, "StateStore write failed after memory-pressure escalation"));
|
|
@@ -10597,7 +11956,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10597
11956
|
}
|
|
10598
11957
|
await waitIfPaused();
|
|
10599
11958
|
if (_state !== "RUNNING") return;
|
|
10600
|
-
stateStore?.branchForStory(storyKey).catch((err) => logger$
|
|
11959
|
+
stateStore?.branchForStory(storyKey).catch((err) => logger$27.warn({
|
|
10601
11960
|
err,
|
|
10602
11961
|
storyKey
|
|
10603
11962
|
}, "branchForStory failed — continuing without branch isolation"));
|
|
@@ -10614,14 +11973,14 @@ function createImplementationOrchestrator(deps) {
|
|
|
10614
11973
|
if (match) {
|
|
10615
11974
|
const candidatePath = join$1(artifactsDir, match);
|
|
10616
11975
|
const validation = await isValidStoryFile(candidatePath);
|
|
10617
|
-
if (!validation.valid) logger$
|
|
11976
|
+
if (!validation.valid) logger$27.warn({
|
|
10618
11977
|
storyKey,
|
|
10619
11978
|
storyFilePath: candidatePath,
|
|
10620
11979
|
reason: validation.reason
|
|
10621
11980
|
}, `Existing story file for ${storyKey} is invalid (${validation.reason}) — re-creating`);
|
|
10622
11981
|
else {
|
|
10623
11982
|
storyFilePath = candidatePath;
|
|
10624
|
-
logger$
|
|
11983
|
+
logger$27.info({
|
|
10625
11984
|
storyKey,
|
|
10626
11985
|
storyFilePath
|
|
10627
11986
|
}, "Found existing story file — skipping create-story");
|
|
@@ -10660,6 +12019,21 @@ function createImplementationOrchestrator(deps) {
|
|
|
10660
12019
|
phase: "IN_STORY_CREATION",
|
|
10661
12020
|
result: createResult
|
|
10662
12021
|
});
|
|
12022
|
+
if (config.pipelineRunId !== void 0 && createResult.tokenUsage !== void 0) try {
|
|
12023
|
+
addTokenUsage(db, config.pipelineRunId, {
|
|
12024
|
+
phase: "create-story",
|
|
12025
|
+
agent: "create-story",
|
|
12026
|
+
input_tokens: createResult.tokenUsage.input,
|
|
12027
|
+
output_tokens: createResult.tokenUsage.output,
|
|
12028
|
+
cost_usd: 0,
|
|
12029
|
+
metadata: JSON.stringify({ storyKey })
|
|
12030
|
+
});
|
|
12031
|
+
} catch (tokenErr) {
|
|
12032
|
+
logger$27.warn({
|
|
12033
|
+
storyKey,
|
|
12034
|
+
err: tokenErr
|
|
12035
|
+
}, "Failed to record create-story token usage");
|
|
12036
|
+
}
|
|
10663
12037
|
persistState();
|
|
10664
12038
|
if (createResult.result === "failed") {
|
|
10665
12039
|
const errMsg = createResult.error ?? "create-story failed";
|
|
@@ -10739,14 +12113,14 @@ function createImplementationOrchestrator(deps) {
|
|
|
10739
12113
|
...contract.transport !== void 0 ? { transport: contract.transport } : {}
|
|
10740
12114
|
})
|
|
10741
12115
|
});
|
|
10742
|
-
logger$
|
|
12116
|
+
logger$27.info({
|
|
10743
12117
|
storyKey,
|
|
10744
12118
|
contractCount: contracts.length,
|
|
10745
12119
|
contracts
|
|
10746
12120
|
}, "Stored interface contract declarations");
|
|
10747
12121
|
}
|
|
10748
12122
|
} catch (err) {
|
|
10749
|
-
logger$
|
|
12123
|
+
logger$27.warn({
|
|
10750
12124
|
storyKey,
|
|
10751
12125
|
error: err instanceof Error ? err.message : String(err)
|
|
10752
12126
|
}, "Failed to parse interface contracts — continuing without contract declarations");
|
|
@@ -10757,6 +12131,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10757
12131
|
updateStory(storyKey, { phase: "IN_TEST_PLANNING" });
|
|
10758
12132
|
persistState();
|
|
10759
12133
|
let testPlanPhaseResult = "failed";
|
|
12134
|
+
let testPlanTokenUsage;
|
|
10760
12135
|
try {
|
|
10761
12136
|
const testPlanResult = await runTestPlan({
|
|
10762
12137
|
db,
|
|
@@ -10772,15 +12147,31 @@ function createImplementationOrchestrator(deps) {
|
|
|
10772
12147
|
pipelineRunId: config.pipelineRunId ?? ""
|
|
10773
12148
|
});
|
|
10774
12149
|
testPlanPhaseResult = testPlanResult.result;
|
|
10775
|
-
|
|
10776
|
-
|
|
12150
|
+
testPlanTokenUsage = testPlanResult.tokenUsage;
|
|
12151
|
+
if (testPlanResult.result === "success") logger$27.info({ storyKey }, "Test plan generated successfully");
|
|
12152
|
+
else logger$27.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
|
|
10777
12153
|
} catch (err) {
|
|
10778
|
-
logger$
|
|
12154
|
+
logger$27.warn({
|
|
10779
12155
|
storyKey,
|
|
10780
12156
|
err
|
|
10781
12157
|
}, "Test planning failed — proceeding to dev-story without test plan");
|
|
10782
12158
|
}
|
|
10783
12159
|
endPhase(storyKey, "test-plan");
|
|
12160
|
+
if (config.pipelineRunId !== void 0 && testPlanTokenUsage !== void 0) try {
|
|
12161
|
+
addTokenUsage(db, config.pipelineRunId, {
|
|
12162
|
+
phase: "test-plan",
|
|
12163
|
+
agent: "test-plan",
|
|
12164
|
+
input_tokens: testPlanTokenUsage.input,
|
|
12165
|
+
output_tokens: testPlanTokenUsage.output,
|
|
12166
|
+
cost_usd: 0,
|
|
12167
|
+
metadata: JSON.stringify({ storyKey })
|
|
12168
|
+
});
|
|
12169
|
+
} catch (tokenErr) {
|
|
12170
|
+
logger$27.warn({
|
|
12171
|
+
storyKey,
|
|
12172
|
+
err: tokenErr
|
|
12173
|
+
}, "Failed to record test-plan token usage");
|
|
12174
|
+
}
|
|
10784
12175
|
eventBus.emit("orchestrator:story-phase-complete", {
|
|
10785
12176
|
storyKey,
|
|
10786
12177
|
phase: "IN_TEST_PLANNING",
|
|
@@ -10799,7 +12190,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10799
12190
|
try {
|
|
10800
12191
|
storyContentForAnalysis = await readFile$1(storyFilePath ?? "", "utf-8");
|
|
10801
12192
|
} catch (err) {
|
|
10802
|
-
logger$
|
|
12193
|
+
logger$27.error({
|
|
10803
12194
|
storyKey,
|
|
10804
12195
|
storyFilePath,
|
|
10805
12196
|
error: err instanceof Error ? err.message : String(err)
|
|
@@ -10807,7 +12198,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10807
12198
|
}
|
|
10808
12199
|
const analysis = analyzeStoryComplexity(storyContentForAnalysis);
|
|
10809
12200
|
const batches = planTaskBatches(analysis);
|
|
10810
|
-
logger$
|
|
12201
|
+
logger$27.info({
|
|
10811
12202
|
storyKey,
|
|
10812
12203
|
estimatedScope: analysis.estimatedScope,
|
|
10813
12204
|
batchCount: batches.length,
|
|
@@ -10825,7 +12216,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10825
12216
|
if (_state !== "RUNNING") break;
|
|
10826
12217
|
const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
|
|
10827
12218
|
const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
|
|
10828
|
-
logger$
|
|
12219
|
+
logger$27.info({
|
|
10829
12220
|
storyKey,
|
|
10830
12221
|
batchIndex: batch.batchIndex,
|
|
10831
12222
|
taskCount: batch.taskIds.length
|
|
@@ -10851,7 +12242,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10851
12242
|
});
|
|
10852
12243
|
} catch (batchErr) {
|
|
10853
12244
|
const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
|
|
10854
|
-
logger$
|
|
12245
|
+
logger$27.warn({
|
|
10855
12246
|
storyKey,
|
|
10856
12247
|
batchIndex: batch.batchIndex,
|
|
10857
12248
|
error: errMsg
|
|
@@ -10871,7 +12262,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10871
12262
|
filesModified: batchFilesModified,
|
|
10872
12263
|
result: batchResult.result === "success" ? "success" : "failed"
|
|
10873
12264
|
};
|
|
10874
|
-
logger$
|
|
12265
|
+
logger$27.info(batchMetrics, "Batch dev-story metrics");
|
|
10875
12266
|
for (const f of batchFilesModified) allFilesModified.add(f);
|
|
10876
12267
|
if (batchFilesModified.length > 0) batchFileGroups.push({
|
|
10877
12268
|
batchIndex: batch.batchIndex,
|
|
@@ -10893,13 +12284,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
10893
12284
|
})
|
|
10894
12285
|
});
|
|
10895
12286
|
} catch (tokenErr) {
|
|
10896
|
-
logger$
|
|
12287
|
+
logger$27.warn({
|
|
10897
12288
|
storyKey,
|
|
10898
12289
|
batchIndex: batch.batchIndex,
|
|
10899
12290
|
err: tokenErr
|
|
10900
12291
|
}, "Failed to record batch token usage");
|
|
10901
12292
|
}
|
|
10902
|
-
if (batchResult.result === "failed") logger$
|
|
12293
|
+
if (batchResult.result === "failed") logger$27.warn({
|
|
10903
12294
|
storyKey,
|
|
10904
12295
|
batchIndex: batch.batchIndex,
|
|
10905
12296
|
error: batchResult.error
|
|
@@ -10929,6 +12320,21 @@ function createImplementationOrchestrator(deps) {
|
|
|
10929
12320
|
pipelineRunId: config.pipelineRunId
|
|
10930
12321
|
});
|
|
10931
12322
|
devFilesModified = devResult.files_modified ?? [];
|
|
12323
|
+
if (config.pipelineRunId !== void 0 && devResult.tokenUsage !== void 0) try {
|
|
12324
|
+
addTokenUsage(db, config.pipelineRunId, {
|
|
12325
|
+
phase: "dev-story",
|
|
12326
|
+
agent: "dev-story",
|
|
12327
|
+
input_tokens: devResult.tokenUsage.input,
|
|
12328
|
+
output_tokens: devResult.tokenUsage.output,
|
|
12329
|
+
cost_usd: 0,
|
|
12330
|
+
metadata: JSON.stringify({ storyKey })
|
|
12331
|
+
});
|
|
12332
|
+
} catch (tokenErr) {
|
|
12333
|
+
logger$27.warn({
|
|
12334
|
+
storyKey,
|
|
12335
|
+
err: tokenErr
|
|
12336
|
+
}, "Failed to record dev-story token usage");
|
|
12337
|
+
}
|
|
10932
12338
|
eventBus.emit("orchestrator:story-phase-complete", {
|
|
10933
12339
|
storyKey,
|
|
10934
12340
|
phase: "IN_DEV",
|
|
@@ -10936,7 +12342,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10936
12342
|
});
|
|
10937
12343
|
persistState();
|
|
10938
12344
|
if (devResult.result === "success") devStoryWasSuccess = true;
|
|
10939
|
-
else logger$
|
|
12345
|
+
else logger$27.warn({
|
|
10940
12346
|
storyKey,
|
|
10941
12347
|
error: devResult.error,
|
|
10942
12348
|
filesModified: devFilesModified.length
|
|
@@ -10964,7 +12370,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10964
12370
|
if (devStoryWasSuccess) {
|
|
10965
12371
|
gitDiffFiles = checkGitDiffFiles(projectRoot ?? process.cwd());
|
|
10966
12372
|
if (gitDiffFiles.length === 0) {
|
|
10967
|
-
logger$
|
|
12373
|
+
logger$27.warn({ storyKey }, "Zero-diff detected after COMPLETE dev-story — no file changes in git working tree");
|
|
10968
12374
|
eventBus.emit("orchestrator:zero-diff-escalation", {
|
|
10969
12375
|
storyKey,
|
|
10970
12376
|
reason: "zero-diff-on-complete"
|
|
@@ -10995,7 +12401,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10995
12401
|
});
|
|
10996
12402
|
if (buildVerifyResult.status === "passed") {
|
|
10997
12403
|
eventBus.emit("story:build-verification-passed", { storyKey });
|
|
10998
|
-
logger$
|
|
12404
|
+
logger$27.info({ storyKey }, "Build verification passed");
|
|
10999
12405
|
} else if (buildVerifyResult.status === "failed" || buildVerifyResult.status === "timeout") {
|
|
11000
12406
|
const truncatedOutput = (buildVerifyResult.output ?? "").slice(0, 2e3);
|
|
11001
12407
|
const reason = buildVerifyResult.reason ?? "build-verification-failed";
|
|
@@ -11004,7 +12410,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11004
12410
|
exitCode: buildVerifyResult.exitCode ?? 1,
|
|
11005
12411
|
output: truncatedOutput
|
|
11006
12412
|
});
|
|
11007
|
-
logger$
|
|
12413
|
+
logger$27.warn({
|
|
11008
12414
|
storyKey,
|
|
11009
12415
|
reason,
|
|
11010
12416
|
exitCode: buildVerifyResult.exitCode
|
|
@@ -11034,7 +12440,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11034
12440
|
storyKey
|
|
11035
12441
|
});
|
|
11036
12442
|
if (icResult.potentiallyAffectedTests.length > 0) {
|
|
11037
|
-
logger$
|
|
12443
|
+
logger$27.warn({
|
|
11038
12444
|
storyKey,
|
|
11039
12445
|
modifiedInterfaces: icResult.modifiedInterfaces,
|
|
11040
12446
|
potentiallyAffectedTests: icResult.potentiallyAffectedTests
|
|
@@ -11080,7 +12486,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11080
12486
|
"NEEDS_MAJOR_REWORK": 2
|
|
11081
12487
|
};
|
|
11082
12488
|
for (const group of batchFileGroups) {
|
|
11083
|
-
logger$
|
|
12489
|
+
logger$27.info({
|
|
11084
12490
|
storyKey,
|
|
11085
12491
|
batchIndex: group.batchIndex,
|
|
11086
12492
|
fileCount: group.files.length
|
|
@@ -11119,7 +12525,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11119
12525
|
rawOutput: lastRawOutput,
|
|
11120
12526
|
tokenUsage: aggregateTokens
|
|
11121
12527
|
};
|
|
11122
|
-
logger$
|
|
12528
|
+
logger$27.info({
|
|
11123
12529
|
storyKey,
|
|
11124
12530
|
batchCount: batchFileGroups.length,
|
|
11125
12531
|
verdict: worstVerdict,
|
|
@@ -11144,10 +12550,28 @@ function createImplementationOrchestrator(deps) {
|
|
|
11144
12550
|
...previousIssueList.length > 0 ? { previousIssues: previousIssueList } : {}
|
|
11145
12551
|
});
|
|
11146
12552
|
}
|
|
12553
|
+
if (config.pipelineRunId !== void 0 && reviewResult.tokenUsage !== void 0) try {
|
|
12554
|
+
addTokenUsage(db, config.pipelineRunId, {
|
|
12555
|
+
phase: "code-review",
|
|
12556
|
+
agent: useBatchedReview ? "code-review-batched" : "code-review",
|
|
12557
|
+
input_tokens: reviewResult.tokenUsage.input,
|
|
12558
|
+
output_tokens: reviewResult.tokenUsage.output,
|
|
12559
|
+
cost_usd: 0,
|
|
12560
|
+
metadata: JSON.stringify({
|
|
12561
|
+
storyKey,
|
|
12562
|
+
reviewCycle: reviewCycles
|
|
12563
|
+
})
|
|
12564
|
+
});
|
|
12565
|
+
} catch (tokenErr) {
|
|
12566
|
+
logger$27.warn({
|
|
12567
|
+
storyKey,
|
|
12568
|
+
err: tokenErr
|
|
12569
|
+
}, "Failed to record code-review token usage");
|
|
12570
|
+
}
|
|
11147
12571
|
const isPhantomReview = reviewResult.dispatchFailed === true || reviewResult.verdict !== "SHIP_IT" && reviewResult.verdict !== "LGTM_WITH_NOTES" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
|
|
11148
12572
|
if (isPhantomReview && !timeoutRetried) {
|
|
11149
12573
|
timeoutRetried = true;
|
|
11150
|
-
logger$
|
|
12574
|
+
logger$27.warn({
|
|
11151
12575
|
storyKey,
|
|
11152
12576
|
reviewCycles,
|
|
11153
12577
|
error: reviewResult.error
|
|
@@ -11157,7 +12581,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11157
12581
|
verdict = reviewResult.verdict;
|
|
11158
12582
|
issueList = reviewResult.issue_list ?? [];
|
|
11159
12583
|
if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
|
|
11160
|
-
logger$
|
|
12584
|
+
logger$27.info({
|
|
11161
12585
|
storyKey,
|
|
11162
12586
|
originalVerdict: verdict,
|
|
11163
12587
|
issuesBefore: previousIssueList.length,
|
|
@@ -11193,7 +12617,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11193
12617
|
if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
|
|
11194
12618
|
parts.push(`${fileCount} files`);
|
|
11195
12619
|
parts.push(`${totalTokensK} tokens`);
|
|
11196
|
-
logger$
|
|
12620
|
+
logger$27.info({
|
|
11197
12621
|
storyKey,
|
|
11198
12622
|
verdict,
|
|
11199
12623
|
agentVerdict: reviewResult.agentVerdict
|
|
@@ -11242,9 +12666,9 @@ function createImplementationOrchestrator(deps) {
|
|
|
11242
12666
|
}),
|
|
11243
12667
|
rationale: `Advisory notes from LGTM_WITH_NOTES review of ${storyKey}`
|
|
11244
12668
|
});
|
|
11245
|
-
logger$
|
|
12669
|
+
logger$27.info({ storyKey }, "Advisory notes persisted to decision store");
|
|
11246
12670
|
} catch (advisoryErr) {
|
|
11247
|
-
logger$
|
|
12671
|
+
logger$27.warn({
|
|
11248
12672
|
storyKey,
|
|
11249
12673
|
error: advisoryErr instanceof Error ? advisoryErr.message : String(advisoryErr)
|
|
11250
12674
|
}, "Failed to persist advisory notes (best-effort)");
|
|
@@ -11252,17 +12676,17 @@ function createImplementationOrchestrator(deps) {
|
|
|
11252
12676
|
if (telemetryPersistence !== void 0) try {
|
|
11253
12677
|
const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
|
|
11254
12678
|
if (turns.length > 0) {
|
|
11255
|
-
const scorer = new EfficiencyScorer(logger$
|
|
12679
|
+
const scorer = new EfficiencyScorer(logger$27);
|
|
11256
12680
|
const effScore = scorer.score(storyKey, turns);
|
|
11257
12681
|
await telemetryPersistence.storeEfficiencyScore(effScore);
|
|
11258
|
-
logger$
|
|
12682
|
+
logger$27.info({
|
|
11259
12683
|
storyKey,
|
|
11260
12684
|
compositeScore: effScore.compositeScore,
|
|
11261
12685
|
modelCount: effScore.perModelBreakdown.length
|
|
11262
12686
|
}, "Efficiency score computed and persisted");
|
|
11263
|
-
} else logger$
|
|
12687
|
+
} else logger$27.debug({ storyKey }, "No turn analysis data available — skipping efficiency scoring");
|
|
11264
12688
|
} catch (effErr) {
|
|
11265
|
-
logger$
|
|
12689
|
+
logger$27.warn({
|
|
11266
12690
|
storyKey,
|
|
11267
12691
|
error: effErr instanceof Error ? effErr.message : String(effErr)
|
|
11268
12692
|
}, "Efficiency scoring failed — story verdict unchanged");
|
|
@@ -11270,10 +12694,10 @@ function createImplementationOrchestrator(deps) {
|
|
|
11270
12694
|
if (telemetryPersistence !== void 0) try {
|
|
11271
12695
|
const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
|
|
11272
12696
|
const spans = [];
|
|
11273
|
-
if (spans.length === 0) logger$
|
|
12697
|
+
if (spans.length === 0) logger$27.debug({ storyKey }, "No spans for telemetry categorization — skipping");
|
|
11274
12698
|
else {
|
|
11275
|
-
const categorizer = new Categorizer(logger$
|
|
11276
|
-
const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$
|
|
12699
|
+
const categorizer = new Categorizer(logger$27);
|
|
12700
|
+
const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$27);
|
|
11277
12701
|
const categoryStats = categorizer.computeCategoryStats(spans, turns);
|
|
11278
12702
|
const consumerStats = consumerAnalyzer.analyze(spans);
|
|
11279
12703
|
await telemetryPersistence.storeCategoryStats(storyKey, categoryStats);
|
|
@@ -11281,7 +12705,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11281
12705
|
const growingCount = categoryStats.filter((c) => c.trend === "growing").length;
|
|
11282
12706
|
const topCategory = categoryStats[0]?.category ?? "none";
|
|
11283
12707
|
const topConsumer = consumerStats[0]?.consumerKey ?? "none";
|
|
11284
|
-
logger$
|
|
12708
|
+
logger$27.info({
|
|
11285
12709
|
storyKey,
|
|
11286
12710
|
topCategory,
|
|
11287
12711
|
topConsumer,
|
|
@@ -11289,7 +12713,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11289
12713
|
}, "Semantic categorization and consumer analysis complete");
|
|
11290
12714
|
}
|
|
11291
12715
|
} catch (catErr) {
|
|
11292
|
-
logger$
|
|
12716
|
+
logger$27.warn({
|
|
11293
12717
|
storyKey,
|
|
11294
12718
|
error: catErr instanceof Error ? catErr.message : String(catErr)
|
|
11295
12719
|
}, "Semantic categorization failed — story verdict unchanged");
|
|
@@ -11310,7 +12734,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11310
12734
|
filesModified: devFilesModified,
|
|
11311
12735
|
workingDirectory: projectRoot
|
|
11312
12736
|
});
|
|
11313
|
-
logger$
|
|
12737
|
+
logger$27.debug({
|
|
11314
12738
|
storyKey,
|
|
11315
12739
|
expansion_priority: expansionResult.expansion_priority,
|
|
11316
12740
|
coverage_gaps: expansionResult.coverage_gaps.length
|
|
@@ -11323,7 +12747,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11323
12747
|
value: JSON.stringify(expansionResult)
|
|
11324
12748
|
});
|
|
11325
12749
|
} catch (expansionErr) {
|
|
11326
|
-
logger$
|
|
12750
|
+
logger$27.warn({
|
|
11327
12751
|
storyKey,
|
|
11328
12752
|
error: expansionErr instanceof Error ? expansionErr.message : String(expansionErr)
|
|
11329
12753
|
}, "Test expansion failed — story verdict unchanged");
|
|
@@ -11350,7 +12774,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11350
12774
|
persistState();
|
|
11351
12775
|
return;
|
|
11352
12776
|
}
|
|
11353
|
-
logger$
|
|
12777
|
+
logger$27.info({
|
|
11354
12778
|
storyKey,
|
|
11355
12779
|
reviewCycles: finalReviewCycles,
|
|
11356
12780
|
issueCount: issueList.length
|
|
@@ -11410,7 +12834,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11410
12834
|
fixPrompt = assembled.prompt;
|
|
11411
12835
|
} catch {
|
|
11412
12836
|
fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
|
|
11413
|
-
logger$
|
|
12837
|
+
logger$27.warn({ storyKey }, "Failed to assemble auto-approve fix prompt, using fallback");
|
|
11414
12838
|
}
|
|
11415
12839
|
const handle = dispatcher.dispatch({
|
|
11416
12840
|
prompt: fixPrompt,
|
|
@@ -11429,9 +12853,9 @@ function createImplementationOrchestrator(deps) {
|
|
|
11429
12853
|
output: fixResult.tokenEstimate.output
|
|
11430
12854
|
} : void 0 }
|
|
11431
12855
|
});
|
|
11432
|
-
if (fixResult.status === "timeout") logger$
|
|
12856
|
+
if (fixResult.status === "timeout") logger$27.warn({ storyKey }, "Auto-approve fix timed out — approving anyway (issues were minor)");
|
|
11433
12857
|
} catch (err) {
|
|
11434
|
-
logger$
|
|
12858
|
+
logger$27.warn({
|
|
11435
12859
|
storyKey,
|
|
11436
12860
|
err
|
|
11437
12861
|
}, "Auto-approve fix dispatch failed — approving anyway (issues were minor)");
|
|
@@ -11548,7 +12972,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11548
12972
|
fixPrompt = assembled.prompt;
|
|
11549
12973
|
} catch {
|
|
11550
12974
|
fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
|
|
11551
|
-
logger$
|
|
12975
|
+
logger$27.warn({
|
|
11552
12976
|
storyKey,
|
|
11553
12977
|
taskType
|
|
11554
12978
|
}, "Failed to assemble fix prompt, using fallback");
|
|
@@ -11582,7 +13006,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11582
13006
|
} : void 0 }
|
|
11583
13007
|
});
|
|
11584
13008
|
if (fixResult.status === "timeout") {
|
|
11585
|
-
logger$
|
|
13009
|
+
logger$27.warn({
|
|
11586
13010
|
storyKey,
|
|
11587
13011
|
taskType
|
|
11588
13012
|
}, "Fix dispatch timed out — escalating story");
|
|
@@ -11604,7 +13028,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11604
13028
|
}
|
|
11605
13029
|
if (fixResult.status === "failed") {
|
|
11606
13030
|
if (isMajorRework) {
|
|
11607
|
-
logger$
|
|
13031
|
+
logger$27.warn({
|
|
11608
13032
|
storyKey,
|
|
11609
13033
|
exitCode: fixResult.exitCode
|
|
11610
13034
|
}, "Major rework dispatch failed — escalating story");
|
|
@@ -11624,14 +13048,14 @@ function createImplementationOrchestrator(deps) {
|
|
|
11624
13048
|
persistState();
|
|
11625
13049
|
return;
|
|
11626
13050
|
}
|
|
11627
|
-
logger$
|
|
13051
|
+
logger$27.warn({
|
|
11628
13052
|
storyKey,
|
|
11629
13053
|
taskType,
|
|
11630
13054
|
exitCode: fixResult.exitCode
|
|
11631
13055
|
}, "Fix dispatch failed");
|
|
11632
13056
|
}
|
|
11633
13057
|
} catch (err) {
|
|
11634
|
-
logger$
|
|
13058
|
+
logger$27.warn({
|
|
11635
13059
|
storyKey,
|
|
11636
13060
|
taskType,
|
|
11637
13061
|
err
|
|
@@ -11694,11 +13118,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
11694
13118
|
}
|
|
11695
13119
|
async function run(storyKeys) {
|
|
11696
13120
|
if (_state === "RUNNING" || _state === "PAUSED") {
|
|
11697
|
-
logger$
|
|
13121
|
+
logger$27.warn({ state: _state }, "run() called while orchestrator is already running or paused — ignoring");
|
|
11698
13122
|
return getStatus();
|
|
11699
13123
|
}
|
|
11700
13124
|
if (_state === "COMPLETE") {
|
|
11701
|
-
logger$
|
|
13125
|
+
logger$27.warn({ state: _state }, "run() called on a COMPLETE orchestrator — ignoring");
|
|
11702
13126
|
return getStatus();
|
|
11703
13127
|
}
|
|
11704
13128
|
_state = "RUNNING";
|
|
@@ -11717,40 +13141,66 @@ function createImplementationOrchestrator(deps) {
|
|
|
11717
13141
|
persistState();
|
|
11718
13142
|
recordProgress();
|
|
11719
13143
|
if (config.enableHeartbeat) startHeartbeat();
|
|
13144
|
+
const _startupTimings = {};
|
|
11720
13145
|
if (projectRoot !== void 0) {
|
|
13146
|
+
const seedStart = Date.now();
|
|
11721
13147
|
const seedResult = seedMethodologyContext(db, projectRoot);
|
|
11722
|
-
|
|
13148
|
+
_startupTimings.seedMethodologyMs = Date.now() - seedStart;
|
|
13149
|
+
if (seedResult.decisionsCreated > 0) logger$27.info({
|
|
11723
13150
|
decisionsCreated: seedResult.decisionsCreated,
|
|
11724
|
-
skippedCategories: seedResult.skippedCategories
|
|
13151
|
+
skippedCategories: seedResult.skippedCategories,
|
|
13152
|
+
durationMs: _startupTimings.seedMethodologyMs
|
|
11725
13153
|
}, "Methodology context seeded from planning artifacts");
|
|
11726
13154
|
}
|
|
11727
13155
|
try {
|
|
11728
13156
|
if (stateStore !== void 0) {
|
|
13157
|
+
const stateStoreInitStart = Date.now();
|
|
11729
13158
|
await stateStore.initialize();
|
|
13159
|
+
_startupTimings.stateStoreInitMs = Date.now() - stateStoreInitStart;
|
|
11730
13160
|
for (const key of storyKeys) {
|
|
11731
13161
|
const pendingState = _stories.get(key);
|
|
11732
|
-
if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$
|
|
13162
|
+
if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$27.warn({
|
|
11733
13163
|
err,
|
|
11734
13164
|
storyKey: key
|
|
11735
13165
|
}, "StateStore write failed during PENDING init"));
|
|
11736
13166
|
}
|
|
11737
13167
|
try {
|
|
13168
|
+
const queryStoriesStart = Date.now();
|
|
11738
13169
|
const existingRecords = await stateStore.queryStories({});
|
|
13170
|
+
_startupTimings.queryStoriesMs = Date.now() - queryStoriesStart;
|
|
11739
13171
|
for (const record of existingRecords) _stateStoreCache.set(record.storyKey, record);
|
|
11740
13172
|
} catch (err) {
|
|
11741
|
-
logger$
|
|
13173
|
+
logger$27.warn({ err }, "StateStore.queryStories() failed during init — status merge will be empty (best-effort)");
|
|
11742
13174
|
}
|
|
11743
13175
|
}
|
|
11744
13176
|
if (ingestionServer !== void 0) {
|
|
11745
|
-
|
|
13177
|
+
if (telemetryPersistence !== void 0) try {
|
|
13178
|
+
const pipelineLogger = logger$27;
|
|
13179
|
+
const telemetryPipeline = new TelemetryPipeline({
|
|
13180
|
+
normalizer: new TelemetryNormalizer(pipelineLogger),
|
|
13181
|
+
turnAnalyzer: new TurnAnalyzer(pipelineLogger),
|
|
13182
|
+
categorizer: new Categorizer(pipelineLogger),
|
|
13183
|
+
consumerAnalyzer: new ConsumerAnalyzer(new Categorizer(pipelineLogger), pipelineLogger),
|
|
13184
|
+
efficiencyScorer: new EfficiencyScorer(pipelineLogger),
|
|
13185
|
+
recommender: new Recommender(pipelineLogger),
|
|
13186
|
+
persistence: telemetryPersistence
|
|
13187
|
+
});
|
|
13188
|
+
ingestionServer.setPipeline(telemetryPipeline);
|
|
13189
|
+
logger$27.info("TelemetryPipeline wired to IngestionServer");
|
|
13190
|
+
} catch (pipelineErr) {
|
|
13191
|
+
logger$27.warn({ err: pipelineErr }, "Failed to create TelemetryPipeline — continuing without analysis pipeline");
|
|
13192
|
+
}
|
|
13193
|
+
await ingestionServer.start().catch((err) => logger$27.warn({ err }, "IngestionServer.start() failed — continuing without telemetry (best-effort)"));
|
|
11746
13194
|
try {
|
|
11747
13195
|
_otlpEndpoint = ingestionServer.getOtlpEnvVars().OTEL_EXPORTER_OTLP_ENDPOINT;
|
|
11748
|
-
logger$
|
|
13196
|
+
logger$27.info({ otlpEndpoint: _otlpEndpoint }, "OTLP telemetry ingestion active");
|
|
11749
13197
|
} catch {}
|
|
11750
13198
|
}
|
|
11751
13199
|
let contractDeclarations = [];
|
|
11752
13200
|
if (stateStore !== void 0) {
|
|
13201
|
+
const queryContractsStart = Date.now();
|
|
11753
13202
|
const allContractRecords = await stateStore.queryContracts();
|
|
13203
|
+
_startupTimings.queryContractsMs = Date.now() - queryContractsStart;
|
|
11754
13204
|
contractDeclarations = allContractRecords.map((r) => ({
|
|
11755
13205
|
storyKey: r.storyKey,
|
|
11756
13206
|
contractName: r.contractName,
|
|
@@ -11780,23 +13230,27 @@ function createImplementationOrchestrator(deps) {
|
|
|
11780
13230
|
}
|
|
11781
13231
|
}).filter((d) => d !== null);
|
|
11782
13232
|
}
|
|
13233
|
+
const conflictDetectStart = Date.now();
|
|
11783
13234
|
const { batches, edges: contractEdges } = detectConflictGroupsWithContracts(storyKeys, { moduleMap: pack.manifest.conflictGroups }, contractDeclarations);
|
|
11784
|
-
|
|
13235
|
+
_startupTimings.conflictDetectMs = Date.now() - conflictDetectStart;
|
|
13236
|
+
if (contractEdges.length > 0) logger$27.info({
|
|
11785
13237
|
contractEdges,
|
|
11786
13238
|
edgeCount: contractEdges.length
|
|
11787
13239
|
}, "Contract dependency edges detected — applying contract-aware dispatch ordering");
|
|
11788
|
-
logger$
|
|
13240
|
+
logger$27.info({
|
|
11789
13241
|
storyCount: storyKeys.length,
|
|
11790
13242
|
groupCount: batches.reduce((sum, b) => sum + b.length, 0),
|
|
11791
13243
|
batchCount: batches.length,
|
|
11792
13244
|
maxConcurrency: config.maxConcurrency
|
|
11793
13245
|
}, "Orchestrator starting");
|
|
11794
13246
|
if (config.skipPreflight !== true) {
|
|
13247
|
+
const preflightStart = Date.now();
|
|
11795
13248
|
const preFlightResult = runBuildVerification({
|
|
11796
13249
|
verifyCommand: pack.manifest.verifyCommand,
|
|
11797
13250
|
verifyTimeoutMs: pack.manifest.verifyTimeoutMs,
|
|
11798
13251
|
projectRoot: projectRoot ?? process.cwd()
|
|
11799
13252
|
});
|
|
13253
|
+
_startupTimings.preflightMs = Date.now() - preflightStart;
|
|
11800
13254
|
if (preFlightResult.status === "failed" || preFlightResult.status === "timeout") {
|
|
11801
13255
|
stopHeartbeat();
|
|
11802
13256
|
const truncatedOutput = (preFlightResult.output ?? "").slice(0, 2e3);
|
|
@@ -11805,7 +13259,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11805
13259
|
exitCode,
|
|
11806
13260
|
output: truncatedOutput
|
|
11807
13261
|
});
|
|
11808
|
-
logger$
|
|
13262
|
+
logger$27.error({
|
|
11809
13263
|
exitCode,
|
|
11810
13264
|
reason: preFlightResult.reason
|
|
11811
13265
|
}, "Pre-flight build check failed — aborting pipeline before any story dispatch");
|
|
@@ -11814,8 +13268,9 @@ function createImplementationOrchestrator(deps) {
|
|
|
11814
13268
|
persistState();
|
|
11815
13269
|
return getStatus();
|
|
11816
13270
|
}
|
|
11817
|
-
if (preFlightResult.status !== "skipped") logger$
|
|
13271
|
+
if (preFlightResult.status !== "skipped") logger$27.info("Pre-flight build check passed");
|
|
11818
13272
|
}
|
|
13273
|
+
logger$27.info(_startupTimings, "Orchestrator startup timings (ms)");
|
|
11819
13274
|
try {
|
|
11820
13275
|
for (const batchGroups of batches) await runWithConcurrency(batchGroups, config.maxConcurrency);
|
|
11821
13276
|
} catch (err) {
|
|
@@ -11823,7 +13278,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11823
13278
|
_state = "FAILED";
|
|
11824
13279
|
_completedAt = new Date().toISOString();
|
|
11825
13280
|
persistState();
|
|
11826
|
-
logger$
|
|
13281
|
+
logger$27.error({ err }, "Orchestrator failed with unhandled error");
|
|
11827
13282
|
return getStatus();
|
|
11828
13283
|
}
|
|
11829
13284
|
stopHeartbeat();
|
|
@@ -11839,11 +13294,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
11839
13294
|
contractName: mismatch.contractName,
|
|
11840
13295
|
mismatchDescription: mismatch.mismatchDescription
|
|
11841
13296
|
});
|
|
11842
|
-
logger$
|
|
13297
|
+
logger$27.warn({
|
|
11843
13298
|
mismatchCount: mismatches.length,
|
|
11844
13299
|
mismatches
|
|
11845
13300
|
}, "Post-sprint contract verification found mismatches — manual review required");
|
|
11846
|
-
} else logger$
|
|
13301
|
+
} else logger$27.info("Post-sprint contract verification passed — all declared contracts satisfied");
|
|
11847
13302
|
if (stateStore !== void 0) try {
|
|
11848
13303
|
const allContractsForVerification = await stateStore.queryContracts();
|
|
11849
13304
|
const verifiedAt = new Date().toISOString();
|
|
@@ -11872,12 +13327,12 @@ function createImplementationOrchestrator(deps) {
|
|
|
11872
13327
|
});
|
|
11873
13328
|
await stateStore.setContractVerification(sk, records);
|
|
11874
13329
|
}
|
|
11875
|
-
logger$
|
|
13330
|
+
logger$27.info({ storyCount: contractsByStory.size }, "Contract verification results persisted to StateStore");
|
|
11876
13331
|
} catch (persistErr) {
|
|
11877
|
-
logger$
|
|
13332
|
+
logger$27.warn({ err: persistErr }, "Failed to persist contract verification results to StateStore");
|
|
11878
13333
|
}
|
|
11879
13334
|
} catch (err) {
|
|
11880
|
-
logger$
|
|
13335
|
+
logger$27.error({ err }, "Post-sprint contract verification threw an error — skipping");
|
|
11881
13336
|
}
|
|
11882
13337
|
let completed = 0;
|
|
11883
13338
|
let escalated = 0;
|
|
@@ -11894,8 +13349,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
11894
13349
|
persistState();
|
|
11895
13350
|
return getStatus();
|
|
11896
13351
|
} finally {
|
|
11897
|
-
if (stateStore !== void 0) await stateStore.close().catch((err) => logger$
|
|
11898
|
-
if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$
|
|
13352
|
+
if (stateStore !== void 0) await stateStore.close().catch((err) => logger$27.warn({ err }, "StateStore.close() failed (best-effort)"));
|
|
13353
|
+
if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$27.warn({ err }, "IngestionServer.stop() failed (best-effort)"));
|
|
11899
13354
|
}
|
|
11900
13355
|
}
|
|
11901
13356
|
function pause() {
|
|
@@ -11904,7 +13359,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11904
13359
|
_pauseGate = createPauseGate();
|
|
11905
13360
|
_state = "PAUSED";
|
|
11906
13361
|
eventBus.emit("orchestrator:paused", {});
|
|
11907
|
-
logger$
|
|
13362
|
+
logger$27.info("Orchestrator paused");
|
|
11908
13363
|
}
|
|
11909
13364
|
function resume() {
|
|
11910
13365
|
if (_state !== "PAUSED") return;
|
|
@@ -11915,7 +13370,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11915
13370
|
}
|
|
11916
13371
|
_state = "RUNNING";
|
|
11917
13372
|
eventBus.emit("orchestrator:resumed", {});
|
|
11918
|
-
logger$
|
|
13373
|
+
logger$27.info("Orchestrator resumed");
|
|
11919
13374
|
}
|
|
11920
13375
|
return {
|
|
11921
13376
|
run,
|
|
@@ -12162,14 +13617,17 @@ const PHASE_ARTIFACTS = [
|
|
|
12162
13617
|
* 4. The first required phase WITHOUT an artifact is where we start
|
|
12163
13618
|
* 5. If nothing exists → analysis (needs concept)
|
|
12164
13619
|
*/
|
|
12165
|
-
function detectStartPhase(db, projectRoot) {
|
|
13620
|
+
function detectStartPhase(db, projectRoot, epicNumber) {
|
|
12166
13621
|
try {
|
|
12167
|
-
const storyKeys = resolveStoryKeys(db, projectRoot);
|
|
12168
|
-
if (storyKeys.length > 0)
|
|
12169
|
-
|
|
12170
|
-
|
|
12171
|
-
|
|
12172
|
-
|
|
13622
|
+
const storyKeys = resolveStoryKeys(db, projectRoot, { epicNumber });
|
|
13623
|
+
if (storyKeys.length > 0) {
|
|
13624
|
+
const scopeLabel = epicNumber !== void 0 ? ` (epic ${epicNumber})` : "";
|
|
13625
|
+
return {
|
|
13626
|
+
phase: "implementation",
|
|
13627
|
+
reason: `${storyKeys.length} stories ready for implementation${scopeLabel}`,
|
|
13628
|
+
needsConcept: false
|
|
13629
|
+
};
|
|
13630
|
+
}
|
|
12173
13631
|
} catch {}
|
|
12174
13632
|
let lastCompletedPhase;
|
|
12175
13633
|
try {
|
|
@@ -16221,7 +17679,7 @@ async function runRunAction(options) {
|
|
|
16221
17679
|
try {
|
|
16222
17680
|
detectDb.open();
|
|
16223
17681
|
runMigrations(detectDb.db);
|
|
16224
|
-
const detection = detectStartPhase(detectDb.db, projectRoot);
|
|
17682
|
+
const detection = detectStartPhase(detectDb.db, projectRoot, epicNumber);
|
|
16225
17683
|
if (detection.phase !== "implementation") {
|
|
16226
17684
|
effectiveStartPhase = detection.phase;
|
|
16227
17685
|
if (outputFormat === "human") process.stdout.write(`[AUTO-DETECT] ${detection.reason}\n`);
|
|
@@ -16257,7 +17715,11 @@ async function runRunAction(options) {
|
|
|
16257
17715
|
...skipResearchFlag === true ? { skipResearch: true } : {},
|
|
16258
17716
|
...skipPreflight === true ? { skipPreflight: true } : {},
|
|
16259
17717
|
...epicNumber !== void 0 ? { epic: epicNumber } : {},
|
|
16260
|
-
...injectedRegistry !== void 0 ? { registry: injectedRegistry } : {}
|
|
17718
|
+
...injectedRegistry !== void 0 ? { registry: injectedRegistry } : {},
|
|
17719
|
+
...telemetryEnabled ? {
|
|
17720
|
+
telemetryEnabled: true,
|
|
17721
|
+
telemetryPort
|
|
17722
|
+
} : {}
|
|
16261
17723
|
});
|
|
16262
17724
|
let storyKeys = [...parsedStoryKeys];
|
|
16263
17725
|
if (!existsSync(dbDir)) mkdirSync(dbDir, { recursive: true });
|
|
@@ -16660,6 +18122,7 @@ async function runRunAction(options) {
|
|
|
16660
18122
|
});
|
|
16661
18123
|
}
|
|
16662
18124
|
const ingestionServer = telemetryEnabled ? new IngestionServer({ port: telemetryPort }) : void 0;
|
|
18125
|
+
const telemetryPersistence = telemetryEnabled ? new TelemetryPersistence(db) : void 0;
|
|
16663
18126
|
const orchestrator = createImplementationOrchestrator({
|
|
16664
18127
|
db,
|
|
16665
18128
|
pack,
|
|
@@ -16675,7 +18138,8 @@ async function runRunAction(options) {
|
|
|
16675
18138
|
},
|
|
16676
18139
|
projectRoot,
|
|
16677
18140
|
tokenCeilings,
|
|
16678
|
-
...ingestionServer !== void 0 ? { ingestionServer } : {}
|
|
18141
|
+
...ingestionServer !== void 0 ? { ingestionServer } : {},
|
|
18142
|
+
...telemetryPersistence !== void 0 ? { telemetryPersistence } : {}
|
|
16679
18143
|
});
|
|
16680
18144
|
if (outputFormat === "human" && progressRenderer === void 0 && ndjsonEmitter === void 0) {
|
|
16681
18145
|
process.stdout.write(`Starting pipeline: ${storyKeys.length} story/stories, concurrency=${concurrency}\n`);
|
|
@@ -16700,7 +18164,7 @@ async function runRunAction(options) {
|
|
|
16700
18164
|
writeRunMetrics(db, {
|
|
16701
18165
|
run_id: pipelineRun.id,
|
|
16702
18166
|
methodology: pack.manifest.name,
|
|
16703
|
-
status: failedKeys.length > 0
|
|
18167
|
+
status: failedKeys.length > 0 ? "failed" : escalatedKeys.length > 0 ? "completed_with_escalations" : "completed",
|
|
16704
18168
|
started_at: pipelineRun.created_at ?? "",
|
|
16705
18169
|
completed_at: new Date().toISOString(),
|
|
16706
18170
|
wall_clock_seconds: Math.round((runEndMs - runStartMs) / 1e3),
|
|
@@ -16771,7 +18235,7 @@ async function runRunAction(options) {
|
|
|
16771
18235
|
}
|
|
16772
18236
|
}
|
|
16773
18237
|
async function runFullPipeline(options) {
|
|
16774
|
-
const { packName, packPath, dbDir, dbPath, startPhase, stopAfter, concept, concurrency, outputFormat, projectRoot, events: eventsFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, skipPreflight, registry: injectedRegistry, tokenCeilings, stories: explicitStories } = options;
|
|
18238
|
+
const { packName, packPath, dbDir, dbPath, startPhase, stopAfter, concept, concurrency, outputFormat, projectRoot, events: eventsFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, skipPreflight, registry: injectedRegistry, tokenCeilings, stories: explicitStories, telemetryEnabled: fullTelemetryEnabled, telemetryPort: fullTelemetryPort } = options;
|
|
16775
18239
|
if (!existsSync(dbDir)) mkdirSync(dbDir, { recursive: true });
|
|
16776
18240
|
const dbWrapper = new DatabaseWrapper(dbPath);
|
|
16777
18241
|
try {
|
|
@@ -16967,6 +18431,8 @@ async function runFullPipeline(options) {
|
|
|
16967
18431
|
process.stdout.write(` Tokens: ${result.tokenUsage.input.toLocaleString()} input / ${result.tokenUsage.output.toLocaleString()} output\n`);
|
|
16968
18432
|
}
|
|
16969
18433
|
} else if (currentPhase === "implementation") {
|
|
18434
|
+
const fpIngestionServer = fullTelemetryEnabled ? new IngestionServer({ port: fullTelemetryPort ?? 4318 }) : void 0;
|
|
18435
|
+
const fpTelemetryPersistence = fullTelemetryEnabled ? new TelemetryPersistence(db) : void 0;
|
|
16970
18436
|
const orchestrator = createImplementationOrchestrator({
|
|
16971
18437
|
db,
|
|
16972
18438
|
pack,
|
|
@@ -16980,7 +18446,9 @@ async function runFullPipeline(options) {
|
|
|
16980
18446
|
skipPreflight: skipPreflight === true
|
|
16981
18447
|
},
|
|
16982
18448
|
projectRoot,
|
|
16983
|
-
tokenCeilings
|
|
18449
|
+
tokenCeilings,
|
|
18450
|
+
...fpIngestionServer !== void 0 ? { ingestionServer: fpIngestionServer } : {},
|
|
18451
|
+
...fpTelemetryPersistence !== void 0 ? { telemetryPersistence: fpTelemetryPersistence } : {}
|
|
16984
18452
|
});
|
|
16985
18453
|
eventBus.on("orchestrator:story-phase-complete", (payload) => {
|
|
16986
18454
|
try {
|
|
@@ -17119,4 +18587,4 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
|
|
|
17119
18587
|
|
|
17120
18588
|
//#endregion
|
|
17121
18589
|
export { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, DoltNotInstalled, FileStateStore, SUBSTRATE_OWNED_SETTINGS_KEYS, TelemetryPersistence, VALID_PHASES, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDispatcher, createDoltClient, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initializeDolt, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runMigrations, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
|
|
17122
|
-
//# sourceMappingURL=run-
|
|
18590
|
+
//# sourceMappingURL=run-8ygA8hgY.js.map
|