substrate-ai 0.3.3 → 0.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapter-registry-BBn0Rmqj.js +3 -0
- package/dist/{adapter-registry-DHl0W-YB.js → adapter-registry-a2WX0qo_.js} +4 -1
- package/dist/cli/index.js +34 -18
- package/dist/{config-migrator-CQmBdKeG.js → config-migrator-DtZW1maj.js} +1 -1
- package/dist/{decisions-DxgMpQpz.js → decisions-CbysnTi5.js} +1 -1
- package/dist/{decisions-Dq4cAA2L.js → decisions-CdpiJIm5.js} +1 -1
- package/dist/{experimenter-Br1-vzYv.js → experimenter-jto3orYl.js} +4 -4
- package/dist/{git-utils-CtmrZrHS.js → git-utils-UbKLSGsD.js} +1 -1
- package/dist/{helpers-RL22dYtn.js → helpers-BihqWgVe.js} +1 -1
- package/dist/index.d.ts +36 -2
- package/dist/index.js +2 -2
- package/dist/{operational-Bovj4fS-.js → operational-DisxqtjC.js} +1 -1
- package/dist/run-D3rMGI6c.js +8 -0
- package/dist/{run-DI9s014E.js → run-DzzmgEOd.js} +1733 -304
- package/dist/{upgrade-Ex1ukwsm.js → upgrade-BlJKjr6I.js} +3 -3
- package/dist/{upgrade-DO307rFf.js → upgrade-DTzeenA-.js} +2 -2
- package/dist/version-manager-impl-BsHqAeGT.js +4 -0
- package/dist/{version-manager-impl-33JYXsqa.js → version-manager-impl-zsJjBhak.js} +2 -2
- package/package.json +1 -1
- package/dist/run-33J0SBp1.js +0 -8
- package/dist/version-manager-impl-Dk3S31y6.js +0 -4
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
import { createLogger, deepMask } from "./logger-D2fS2ccL.js";
|
|
2
|
-
import { CURRENT_CONFIG_FORMAT_VERSION, PartialSubstrateConfigSchema, SUPPORTED_CONFIG_FORMAT_VERSIONS, SubstrateConfigSchema, defaultConfigMigrator } from "./config-migrator-
|
|
3
|
-
import { ConfigError, ConfigIncompatibleFormatError, createEventBus, createTuiApp, isTuiCapable, printNonTtyWarning, sleep } from "./helpers-
|
|
4
|
-
import { addTokenUsage, createDecision, createPipelineRun, createRequirement, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getTokenUsageSummary, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision } from "./decisions-
|
|
5
|
-
import { ADVISORY_NOTES, ESCALATION_DIAGNOSIS, OPERATIONAL_FINDING, STORY_METRICS, STORY_OUTCOME, TEST_EXPANSION_FINDING, TEST_PLAN, aggregateTokenUsageForRun, aggregateTokenUsageForStory, getStoryMetricsForRun, writeRunMetrics, writeStoryMetrics } from "./operational-
|
|
2
|
+
import { CURRENT_CONFIG_FORMAT_VERSION, PartialSubstrateConfigSchema, SUPPORTED_CONFIG_FORMAT_VERSIONS, SubstrateConfigSchema, defaultConfigMigrator } from "./config-migrator-DtZW1maj.js";
|
|
3
|
+
import { ConfigError, ConfigIncompatibleFormatError, createEventBus, createTuiApp, isTuiCapable, printNonTtyWarning, sleep } from "./helpers-BihqWgVe.js";
|
|
4
|
+
import { addTokenUsage, createDecision, createPipelineRun, createRequirement, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getTokenUsageSummary, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision } from "./decisions-CdpiJIm5.js";
|
|
5
|
+
import { ADVISORY_NOTES, ESCALATION_DIAGNOSIS, OPERATIONAL_FINDING, STORY_METRICS, STORY_OUTCOME, TEST_EXPANSION_FINDING, TEST_PLAN, aggregateTokenUsageForRun, aggregateTokenUsageForStory, getStoryMetricsForRun, writeRunMetrics, writeStoryMetrics } from "./operational-DisxqtjC.js";
|
|
6
6
|
import { createRequire } from "module";
|
|
7
7
|
import { dirname, join, resolve } from "path";
|
|
8
8
|
import { access, mkdir, readFile, readdir, stat, writeFile } from "fs/promises";
|
|
@@ -17,6 +17,7 @@ import { access as access$1, mkdir as mkdir$1, readFile as readFile$1, stat as s
|
|
|
17
17
|
import { fileURLToPath } from "node:url";
|
|
18
18
|
import { existsSync as existsSync$1, readFileSync as readFileSync$1, readdirSync as readdirSync$1 } from "node:fs";
|
|
19
19
|
import { homedir } from "os";
|
|
20
|
+
import { EventEmitter } from "node:events";
|
|
20
21
|
import { freemem, platform } from "node:os";
|
|
21
22
|
import { createHash, randomUUID } from "node:crypto";
|
|
22
23
|
import { createServer } from "node:http";
|
|
@@ -604,9 +605,110 @@ const migration010RunMetrics = {
|
|
|
604
605
|
}
|
|
605
606
|
};
|
|
606
607
|
|
|
608
|
+
//#endregion
|
|
609
|
+
//#region src/persistence/migrations/011-telemetry-schema.ts
|
|
610
|
+
const migration011TelemetrySchema = {
|
|
611
|
+
version: 11,
|
|
612
|
+
name: "telemetry-schema",
|
|
613
|
+
up(db) {
|
|
614
|
+
db.exec(`
|
|
615
|
+
CREATE TABLE IF NOT EXISTS turn_analysis (
|
|
616
|
+
story_key VARCHAR(64) NOT NULL,
|
|
617
|
+
span_id VARCHAR(128) NOT NULL,
|
|
618
|
+
turn_number INTEGER NOT NULL,
|
|
619
|
+
name VARCHAR(255) NOT NULL DEFAULT '',
|
|
620
|
+
timestamp BIGINT NOT NULL DEFAULT 0,
|
|
621
|
+
source VARCHAR(32) NOT NULL DEFAULT '',
|
|
622
|
+
model VARCHAR(64),
|
|
623
|
+
input_tokens INTEGER NOT NULL DEFAULT 0,
|
|
624
|
+
output_tokens INTEGER NOT NULL DEFAULT 0,
|
|
625
|
+
cache_read_tokens INTEGER NOT NULL DEFAULT 0,
|
|
626
|
+
fresh_tokens INTEGER NOT NULL DEFAULT 0,
|
|
627
|
+
cache_hit_rate DOUBLE NOT NULL DEFAULT 0,
|
|
628
|
+
cost_usd DOUBLE NOT NULL DEFAULT 0,
|
|
629
|
+
duration_ms INTEGER NOT NULL DEFAULT 0,
|
|
630
|
+
context_size INTEGER NOT NULL DEFAULT 0,
|
|
631
|
+
context_delta INTEGER NOT NULL DEFAULT 0,
|
|
632
|
+
tool_name VARCHAR(128),
|
|
633
|
+
is_context_spike BOOLEAN NOT NULL DEFAULT 0,
|
|
634
|
+
child_spans_json TEXT NOT NULL DEFAULT '[]',
|
|
635
|
+
PRIMARY KEY (story_key, span_id)
|
|
636
|
+
);
|
|
637
|
+
|
|
638
|
+
CREATE INDEX IF NOT EXISTS idx_turn_analysis_story
|
|
639
|
+
ON turn_analysis (story_key, turn_number);
|
|
640
|
+
|
|
641
|
+
CREATE TABLE IF NOT EXISTS efficiency_scores (
|
|
642
|
+
story_key VARCHAR(64) NOT NULL,
|
|
643
|
+
timestamp BIGINT NOT NULL,
|
|
644
|
+
composite_score INTEGER NOT NULL DEFAULT 0,
|
|
645
|
+
cache_hit_sub_score DOUBLE NOT NULL DEFAULT 0,
|
|
646
|
+
io_ratio_sub_score DOUBLE NOT NULL DEFAULT 0,
|
|
647
|
+
context_management_sub_score DOUBLE NOT NULL DEFAULT 0,
|
|
648
|
+
avg_cache_hit_rate DOUBLE NOT NULL DEFAULT 0,
|
|
649
|
+
avg_io_ratio DOUBLE NOT NULL DEFAULT 0,
|
|
650
|
+
context_spike_count INTEGER NOT NULL DEFAULT 0,
|
|
651
|
+
total_turns INTEGER NOT NULL DEFAULT 0,
|
|
652
|
+
per_model_json TEXT NOT NULL DEFAULT '[]',
|
|
653
|
+
per_source_json TEXT NOT NULL DEFAULT '[]',
|
|
654
|
+
PRIMARY KEY (story_key, timestamp)
|
|
655
|
+
);
|
|
656
|
+
|
|
657
|
+
CREATE INDEX IF NOT EXISTS idx_efficiency_story
|
|
658
|
+
ON efficiency_scores (story_key, timestamp DESC);
|
|
659
|
+
|
|
660
|
+
CREATE TABLE IF NOT EXISTS recommendations (
|
|
661
|
+
id VARCHAR(16) NOT NULL,
|
|
662
|
+
story_key VARCHAR(64) NOT NULL,
|
|
663
|
+
sprint_id VARCHAR(64),
|
|
664
|
+
rule_id VARCHAR(64) NOT NULL,
|
|
665
|
+
severity VARCHAR(16) NOT NULL,
|
|
666
|
+
title TEXT NOT NULL,
|
|
667
|
+
description TEXT NOT NULL,
|
|
668
|
+
potential_savings_tokens INTEGER,
|
|
669
|
+
potential_savings_usd DOUBLE,
|
|
670
|
+
action_target TEXT,
|
|
671
|
+
generated_at VARCHAR(32) NOT NULL,
|
|
672
|
+
PRIMARY KEY (id)
|
|
673
|
+
);
|
|
674
|
+
|
|
675
|
+
CREATE INDEX IF NOT EXISTS idx_recommendations_story
|
|
676
|
+
ON recommendations (story_key, severity);
|
|
677
|
+
|
|
678
|
+
CREATE TABLE IF NOT EXISTS category_stats (
|
|
679
|
+
story_key VARCHAR(100) NOT NULL,
|
|
680
|
+
category VARCHAR(30) NOT NULL,
|
|
681
|
+
total_tokens BIGINT NOT NULL DEFAULT 0,
|
|
682
|
+
percentage DECIMAL(6,3) NOT NULL DEFAULT 0,
|
|
683
|
+
event_count INTEGER NOT NULL DEFAULT 0,
|
|
684
|
+
avg_tokens_per_event DECIMAL(12,2) NOT NULL DEFAULT 0,
|
|
685
|
+
trend VARCHAR(10) NOT NULL DEFAULT 'stable',
|
|
686
|
+
PRIMARY KEY (story_key, category)
|
|
687
|
+
);
|
|
688
|
+
|
|
689
|
+
CREATE INDEX IF NOT EXISTS idx_category_stats_story
|
|
690
|
+
ON category_stats (story_key, total_tokens);
|
|
691
|
+
|
|
692
|
+
CREATE TABLE IF NOT EXISTS consumer_stats (
|
|
693
|
+
story_key VARCHAR(100) NOT NULL,
|
|
694
|
+
consumer_key VARCHAR(300) NOT NULL,
|
|
695
|
+
category VARCHAR(30) NOT NULL,
|
|
696
|
+
total_tokens BIGINT NOT NULL DEFAULT 0,
|
|
697
|
+
percentage DECIMAL(6,3) NOT NULL DEFAULT 0,
|
|
698
|
+
event_count INTEGER NOT NULL DEFAULT 0,
|
|
699
|
+
top_invocations_json TEXT,
|
|
700
|
+
PRIMARY KEY (story_key, consumer_key)
|
|
701
|
+
);
|
|
702
|
+
|
|
703
|
+
CREATE INDEX IF NOT EXISTS idx_consumer_stats_story
|
|
704
|
+
ON consumer_stats (story_key, total_tokens);
|
|
705
|
+
`);
|
|
706
|
+
}
|
|
707
|
+
};
|
|
708
|
+
|
|
607
709
|
//#endregion
|
|
608
710
|
//#region src/persistence/migrations/index.ts
|
|
609
|
-
const logger$
|
|
711
|
+
const logger$26 = createLogger("persistence:migrations");
|
|
610
712
|
const MIGRATIONS = [
|
|
611
713
|
initialSchemaMigration,
|
|
612
714
|
costTrackerSchemaMigration,
|
|
@@ -617,14 +719,15 @@ const MIGRATIONS = [
|
|
|
617
719
|
migration007DecisionStore,
|
|
618
720
|
migration008AmendmentSchema,
|
|
619
721
|
migration009TokenUsageMetadata,
|
|
620
|
-
migration010RunMetrics
|
|
722
|
+
migration010RunMetrics,
|
|
723
|
+
migration011TelemetrySchema
|
|
621
724
|
];
|
|
622
725
|
/**
|
|
623
726
|
* Ensure `schema_migrations` table exists and run any pending migrations.
|
|
624
727
|
* Safe to call multiple times — already-applied migrations are skipped.
|
|
625
728
|
*/
|
|
626
729
|
function runMigrations(db) {
|
|
627
|
-
logger$
|
|
730
|
+
logger$26.info("Starting migration runner");
|
|
628
731
|
db.exec(`
|
|
629
732
|
CREATE TABLE IF NOT EXISTS schema_migrations (
|
|
630
733
|
version INTEGER PRIMARY KEY,
|
|
@@ -635,12 +738,12 @@ function runMigrations(db) {
|
|
|
635
738
|
const appliedVersions = new Set(db.prepare("SELECT version FROM schema_migrations").all().map((row) => row.version));
|
|
636
739
|
const pending = MIGRATIONS.filter((m) => !appliedVersions.has(m.version)).sort((a, b) => a.version - b.version);
|
|
637
740
|
if (pending.length === 0) {
|
|
638
|
-
logger$
|
|
741
|
+
logger$26.info("No pending migrations");
|
|
639
742
|
return;
|
|
640
743
|
}
|
|
641
744
|
const insertMigration = db.prepare("INSERT INTO schema_migrations (version, name) VALUES (?, ?)");
|
|
642
745
|
for (const migration of pending) {
|
|
643
|
-
logger$
|
|
746
|
+
logger$26.info({
|
|
644
747
|
version: migration.version,
|
|
645
748
|
name: migration.name
|
|
646
749
|
}, "Applying migration");
|
|
@@ -654,14 +757,14 @@ function runMigrations(db) {
|
|
|
654
757
|
});
|
|
655
758
|
applyMigration();
|
|
656
759
|
}
|
|
657
|
-
logger$
|
|
760
|
+
logger$26.info({ version: migration.version }, "Migration applied successfully");
|
|
658
761
|
}
|
|
659
|
-
logger$
|
|
762
|
+
logger$26.info({ count: pending.length }, "All pending migrations applied");
|
|
660
763
|
}
|
|
661
764
|
|
|
662
765
|
//#endregion
|
|
663
766
|
//#region src/persistence/database.ts
|
|
664
|
-
const logger$
|
|
767
|
+
const logger$25 = createLogger("persistence:database");
|
|
665
768
|
/**
|
|
666
769
|
* Thin wrapper that opens a SQLite database, applies required PRAGMAs,
|
|
667
770
|
* and exposes the raw BetterSqlite3 instance.
|
|
@@ -678,14 +781,14 @@ var DatabaseWrapper = class {
|
|
|
678
781
|
*/
|
|
679
782
|
open() {
|
|
680
783
|
if (this._db !== null) return;
|
|
681
|
-
logger$
|
|
784
|
+
logger$25.info({ path: this._path }, "Opening SQLite database");
|
|
682
785
|
this._db = new Database(this._path);
|
|
683
786
|
const walResult = this._db.pragma("journal_mode = WAL");
|
|
684
|
-
if (walResult?.[0]?.journal_mode !== "wal") logger$
|
|
787
|
+
if (walResult?.[0]?.journal_mode !== "wal") logger$25.warn({ result: walResult?.[0]?.journal_mode }, "WAL pragma did not return expected \"wal\" — journal_mode may be \"memory\" or unsupported");
|
|
685
788
|
this._db.pragma("busy_timeout = 5000");
|
|
686
789
|
this._db.pragma("synchronous = NORMAL");
|
|
687
790
|
this._db.pragma("foreign_keys = ON");
|
|
688
|
-
logger$
|
|
791
|
+
logger$25.info({ path: this._path }, "SQLite database opened with WAL mode");
|
|
689
792
|
}
|
|
690
793
|
/**
|
|
691
794
|
* Close the database. Idempotent — calling close() when already closed is a no-op.
|
|
@@ -694,7 +797,7 @@ var DatabaseWrapper = class {
|
|
|
694
797
|
if (this._db === null) return;
|
|
695
798
|
this._db.close();
|
|
696
799
|
this._db = null;
|
|
697
|
-
logger$
|
|
800
|
+
logger$25.info({ path: this._path }, "SQLite database closed");
|
|
698
801
|
}
|
|
699
802
|
/**
|
|
700
803
|
* Return the raw BetterSqlite3 instance.
|
|
@@ -1611,7 +1714,7 @@ function formatUnsupportedVersionError(formatType, version, supported) {
|
|
|
1611
1714
|
|
|
1612
1715
|
//#endregion
|
|
1613
1716
|
//#region src/modules/config/config-system-impl.ts
|
|
1614
|
-
const logger$
|
|
1717
|
+
const logger$24 = createLogger("config");
|
|
1615
1718
|
function deepMerge(base, override) {
|
|
1616
1719
|
const result = { ...base };
|
|
1617
1720
|
for (const [key, val] of Object.entries(override)) if (val !== null && val !== void 0 && typeof val === "object" && !Array.isArray(val) && typeof result[key] === "object" && result[key] !== null && !Array.isArray(result[key])) result[key] = deepMerge(result[key], val);
|
|
@@ -1656,7 +1759,7 @@ function readEnvOverrides() {
|
|
|
1656
1759
|
}
|
|
1657
1760
|
const parsed = PartialSubstrateConfigSchema.safeParse(overrides);
|
|
1658
1761
|
if (!parsed.success) {
|
|
1659
|
-
logger$
|
|
1762
|
+
logger$24.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
|
|
1660
1763
|
return {};
|
|
1661
1764
|
}
|
|
1662
1765
|
return parsed.data;
|
|
@@ -1720,7 +1823,7 @@ var ConfigSystemImpl = class {
|
|
|
1720
1823
|
throw new ConfigError(`Configuration validation failed:\n${issues}`, { issues: result.error.issues });
|
|
1721
1824
|
}
|
|
1722
1825
|
this._config = result.data;
|
|
1723
|
-
logger$
|
|
1826
|
+
logger$24.debug("Configuration loaded successfully");
|
|
1724
1827
|
}
|
|
1725
1828
|
getConfig() {
|
|
1726
1829
|
if (this._config === null) throw new ConfigError("Configuration has not been loaded. Call load() before getConfig().", {});
|
|
@@ -1783,7 +1886,7 @@ var ConfigSystemImpl = class {
|
|
|
1783
1886
|
if (version !== void 0 && typeof version === "string" && !isVersionSupported(version, SUPPORTED_CONFIG_FORMAT_VERSIONS)) if (defaultConfigMigrator.canMigrate(version, CURRENT_CONFIG_FORMAT_VERSION)) {
|
|
1784
1887
|
const migrationOutput = defaultConfigMigrator.migrate(rawObj, version, CURRENT_CONFIG_FORMAT_VERSION, filePath);
|
|
1785
1888
|
if (migrationOutput.result.success) {
|
|
1786
|
-
logger$
|
|
1889
|
+
logger$24.info({
|
|
1787
1890
|
from: version,
|
|
1788
1891
|
to: CURRENT_CONFIG_FORMAT_VERSION,
|
|
1789
1892
|
backup: migrationOutput.result.backupPath
|
|
@@ -2863,6 +2966,38 @@ const PIPELINE_EVENT_METADATA = [
|
|
|
2863
2966
|
description: "Mismatch details (missing file, type error)."
|
|
2864
2967
|
}
|
|
2865
2968
|
]
|
|
2969
|
+
},
|
|
2970
|
+
{
|
|
2971
|
+
type: "pipeline:contract-verification-summary",
|
|
2972
|
+
description: "Contract verification summary. Consolidates pass/fail into a single event.",
|
|
2973
|
+
when: "After all stories complete, before pipeline:complete. Emitted once per verification pass.",
|
|
2974
|
+
fields: [
|
|
2975
|
+
{
|
|
2976
|
+
name: "ts",
|
|
2977
|
+
type: "string",
|
|
2978
|
+
description: "Timestamp."
|
|
2979
|
+
},
|
|
2980
|
+
{
|
|
2981
|
+
name: "verified",
|
|
2982
|
+
type: "number",
|
|
2983
|
+
description: "Declarations verified (current sprint)."
|
|
2984
|
+
},
|
|
2985
|
+
{
|
|
2986
|
+
name: "stalePruned",
|
|
2987
|
+
type: "number",
|
|
2988
|
+
description: "Stale declarations pruned (previous epics)."
|
|
2989
|
+
},
|
|
2990
|
+
{
|
|
2991
|
+
name: "mismatches",
|
|
2992
|
+
type: "number",
|
|
2993
|
+
description: "Real mismatches found."
|
|
2994
|
+
},
|
|
2995
|
+
{
|
|
2996
|
+
name: "verdict",
|
|
2997
|
+
type: "pass|fail",
|
|
2998
|
+
description: "Overall verification result."
|
|
2999
|
+
}
|
|
3000
|
+
]
|
|
2866
3001
|
}
|
|
2867
3002
|
];
|
|
2868
3003
|
/**
|
|
@@ -3192,7 +3327,7 @@ function truncateToTokens(text, maxTokens) {
|
|
|
3192
3327
|
|
|
3193
3328
|
//#endregion
|
|
3194
3329
|
//#region src/modules/context-compiler/context-compiler-impl.ts
|
|
3195
|
-
const logger$
|
|
3330
|
+
const logger$23 = createLogger("context-compiler");
|
|
3196
3331
|
/**
|
|
3197
3332
|
* Fraction of the original token budget that must remain (after required +
|
|
3198
3333
|
* important sections) before an optional section is included.
|
|
@@ -3284,7 +3419,7 @@ var ContextCompilerImpl = class {
|
|
|
3284
3419
|
includedParts.push(truncated);
|
|
3285
3420
|
remainingBudget -= truncatedTokens;
|
|
3286
3421
|
anyTruncated = true;
|
|
3287
|
-
logger$
|
|
3422
|
+
logger$23.warn({
|
|
3288
3423
|
section: section.name,
|
|
3289
3424
|
originalTokens: tokens,
|
|
3290
3425
|
budgetTokens: truncatedTokens
|
|
@@ -3298,7 +3433,7 @@ var ContextCompilerImpl = class {
|
|
|
3298
3433
|
});
|
|
3299
3434
|
} else {
|
|
3300
3435
|
anyTruncated = true;
|
|
3301
|
-
logger$
|
|
3436
|
+
logger$23.warn({
|
|
3302
3437
|
section: section.name,
|
|
3303
3438
|
tokens
|
|
3304
3439
|
}, "Context compiler: omitted \"important\" section — no budget remaining");
|
|
@@ -3325,7 +3460,7 @@ var ContextCompilerImpl = class {
|
|
|
3325
3460
|
} else {
|
|
3326
3461
|
if (tokens > 0) {
|
|
3327
3462
|
anyTruncated = true;
|
|
3328
|
-
logger$
|
|
3463
|
+
logger$23.warn({
|
|
3329
3464
|
section: section.name,
|
|
3330
3465
|
tokens,
|
|
3331
3466
|
budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
|
|
@@ -3610,7 +3745,7 @@ function parseYamlResult(yamlText, schema) {
|
|
|
3610
3745
|
|
|
3611
3746
|
//#endregion
|
|
3612
3747
|
//#region src/modules/agent-dispatch/dispatcher-impl.ts
|
|
3613
|
-
const logger$
|
|
3748
|
+
const logger$22 = createLogger("agent-dispatch");
|
|
3614
3749
|
const SHUTDOWN_GRACE_MS = 1e4;
|
|
3615
3750
|
const SHUTDOWN_MAX_WAIT_MS = 3e4;
|
|
3616
3751
|
const CHARS_PER_TOKEN = 4;
|
|
@@ -3655,7 +3790,7 @@ function getAvailableMemory() {
|
|
|
3655
3790
|
}).trim(), 10);
|
|
3656
3791
|
_lastKnownPressureLevel = pressureLevel;
|
|
3657
3792
|
if (pressureLevel >= 4) {
|
|
3658
|
-
logger$
|
|
3793
|
+
logger$22.warn({ pressureLevel }, "macOS kernel reports critical memory pressure");
|
|
3659
3794
|
return 0;
|
|
3660
3795
|
}
|
|
3661
3796
|
} catch {}
|
|
@@ -3670,7 +3805,7 @@ function getAvailableMemory() {
|
|
|
3670
3805
|
const speculative = parseInt(vmstat.match(/Pages speculative:\s+(\d+)/)?.[1] ?? "0", 10);
|
|
3671
3806
|
const available = (free + purgeable + speculative) * pageSize;
|
|
3672
3807
|
if (pressureLevel >= 2) {
|
|
3673
|
-
logger$
|
|
3808
|
+
logger$22.warn({
|
|
3674
3809
|
pressureLevel,
|
|
3675
3810
|
availableBeforeDiscount: available
|
|
3676
3811
|
}, "macOS kernel reports memory pressure — discounting estimate");
|
|
@@ -3750,7 +3885,7 @@ var DispatcherImpl = class {
|
|
|
3750
3885
|
resolve: typedResolve,
|
|
3751
3886
|
reject
|
|
3752
3887
|
});
|
|
3753
|
-
logger$
|
|
3888
|
+
logger$22.debug({
|
|
3754
3889
|
id,
|
|
3755
3890
|
queueLength: this._queue.length
|
|
3756
3891
|
}, "Dispatch queued");
|
|
@@ -3781,7 +3916,7 @@ var DispatcherImpl = class {
|
|
|
3781
3916
|
async shutdown() {
|
|
3782
3917
|
this._shuttingDown = true;
|
|
3783
3918
|
this._stopMemoryPressureTimer();
|
|
3784
|
-
logger$
|
|
3919
|
+
logger$22.info({
|
|
3785
3920
|
running: this._running.size,
|
|
3786
3921
|
queued: this._queue.length
|
|
3787
3922
|
}, "Dispatcher shutting down");
|
|
@@ -3814,13 +3949,13 @@ var DispatcherImpl = class {
|
|
|
3814
3949
|
}
|
|
3815
3950
|
}, 50);
|
|
3816
3951
|
});
|
|
3817
|
-
logger$
|
|
3952
|
+
logger$22.info("Dispatcher shutdown complete");
|
|
3818
3953
|
}
|
|
3819
3954
|
async _startDispatch(id, request, resolve$2) {
|
|
3820
|
-
const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns, otlpEndpoint } = request;
|
|
3955
|
+
const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns, otlpEndpoint, storyKey } = request;
|
|
3821
3956
|
const adapter = this._adapterRegistry.get(agent);
|
|
3822
3957
|
if (adapter === void 0) {
|
|
3823
|
-
logger$
|
|
3958
|
+
logger$22.warn({
|
|
3824
3959
|
id,
|
|
3825
3960
|
agent
|
|
3826
3961
|
}, "No adapter found for agent");
|
|
@@ -3848,7 +3983,8 @@ var DispatcherImpl = class {
|
|
|
3848
3983
|
billingMode: "subscription",
|
|
3849
3984
|
...model !== void 0 ? { model } : {},
|
|
3850
3985
|
...resolvedMaxTurns !== void 0 ? { maxTurns: resolvedMaxTurns } : {},
|
|
3851
|
-
...otlpEndpoint !== void 0 ? { otlpEndpoint } : {}
|
|
3986
|
+
...otlpEndpoint !== void 0 ? { otlpEndpoint } : {},
|
|
3987
|
+
...storyKey !== void 0 ? { storyKey } : {}
|
|
3852
3988
|
});
|
|
3853
3989
|
const timeoutMs = timeout ?? this._config.defaultTimeouts[taskType] ?? DEFAULT_TIMEOUTS[taskType] ?? 3e5;
|
|
3854
3990
|
const env = { ...process.env };
|
|
@@ -3867,7 +4003,7 @@ var DispatcherImpl = class {
|
|
|
3867
4003
|
});
|
|
3868
4004
|
const startedAt = Date.now();
|
|
3869
4005
|
proc.on("error", (err) => {
|
|
3870
|
-
logger$
|
|
4006
|
+
logger$22.error({
|
|
3871
4007
|
id,
|
|
3872
4008
|
binary: cmd.binary,
|
|
3873
4009
|
error: err.message
|
|
@@ -3875,7 +4011,7 @@ var DispatcherImpl = class {
|
|
|
3875
4011
|
});
|
|
3876
4012
|
if (proc.stdin !== null) {
|
|
3877
4013
|
proc.stdin.on("error", (err) => {
|
|
3878
|
-
if (err.code !== "EPIPE") logger$
|
|
4014
|
+
if (err.code !== "EPIPE") logger$22.warn({
|
|
3879
4015
|
id,
|
|
3880
4016
|
error: err.message
|
|
3881
4017
|
}, "stdin write error");
|
|
@@ -3917,7 +4053,7 @@ var DispatcherImpl = class {
|
|
|
3917
4053
|
agent,
|
|
3918
4054
|
taskType
|
|
3919
4055
|
});
|
|
3920
|
-
logger$
|
|
4056
|
+
logger$22.debug({
|
|
3921
4057
|
id,
|
|
3922
4058
|
agent,
|
|
3923
4059
|
taskType,
|
|
@@ -3934,7 +4070,7 @@ var DispatcherImpl = class {
|
|
|
3934
4070
|
dispatchId: id,
|
|
3935
4071
|
timeoutMs
|
|
3936
4072
|
});
|
|
3937
|
-
logger$
|
|
4073
|
+
logger$22.warn({
|
|
3938
4074
|
id,
|
|
3939
4075
|
agent,
|
|
3940
4076
|
taskType,
|
|
@@ -3988,7 +4124,7 @@ var DispatcherImpl = class {
|
|
|
3988
4124
|
exitCode: code,
|
|
3989
4125
|
output: stdout
|
|
3990
4126
|
});
|
|
3991
|
-
logger$
|
|
4127
|
+
logger$22.debug({
|
|
3992
4128
|
id,
|
|
3993
4129
|
agent,
|
|
3994
4130
|
taskType,
|
|
@@ -4014,7 +4150,7 @@ var DispatcherImpl = class {
|
|
|
4014
4150
|
error: stderr || `Process exited with code ${String(code)}`,
|
|
4015
4151
|
exitCode: code
|
|
4016
4152
|
});
|
|
4017
|
-
logger$
|
|
4153
|
+
logger$22.debug({
|
|
4018
4154
|
id,
|
|
4019
4155
|
agent,
|
|
4020
4156
|
taskType,
|
|
@@ -4073,7 +4209,7 @@ var DispatcherImpl = class {
|
|
|
4073
4209
|
const next = this._queue.shift();
|
|
4074
4210
|
if (next === void 0) return;
|
|
4075
4211
|
next.handle.status = "running";
|
|
4076
|
-
logger$
|
|
4212
|
+
logger$22.debug({
|
|
4077
4213
|
id: next.id,
|
|
4078
4214
|
queueLength: this._queue.length
|
|
4079
4215
|
}, "Dequeued dispatch");
|
|
@@ -4086,7 +4222,7 @@ var DispatcherImpl = class {
|
|
|
4086
4222
|
_isMemoryPressured() {
|
|
4087
4223
|
const free = getAvailableMemory();
|
|
4088
4224
|
if (free < MIN_FREE_MEMORY_BYTES) {
|
|
4089
|
-
logger$
|
|
4225
|
+
logger$22.warn({
|
|
4090
4226
|
freeMB: Math.round(free / 1024 / 1024),
|
|
4091
4227
|
thresholdMB: Math.round(MIN_FREE_MEMORY_BYTES / 1024 / 1024),
|
|
4092
4228
|
pressureLevel: _lastKnownPressureLevel
|
|
@@ -4202,7 +4338,7 @@ function runBuildVerification(options) {
|
|
|
4202
4338
|
let cmd;
|
|
4203
4339
|
if (verifyCommand === void 0) {
|
|
4204
4340
|
const detection = detectPackageManager(projectRoot);
|
|
4205
|
-
logger$
|
|
4341
|
+
logger$22.info({
|
|
4206
4342
|
packageManager: detection.packageManager,
|
|
4207
4343
|
lockfile: detection.lockfile,
|
|
4208
4344
|
resolvedCommand: detection.command
|
|
@@ -4401,7 +4537,7 @@ function pickRecommendation(distribution, profile, totalIssues, reviewCycles, la
|
|
|
4401
4537
|
|
|
4402
4538
|
//#endregion
|
|
4403
4539
|
//#region src/modules/compiled-workflows/prompt-assembler.ts
|
|
4404
|
-
const logger$
|
|
4540
|
+
const logger$21 = createLogger("compiled-workflows:prompt-assembler");
|
|
4405
4541
|
/**
|
|
4406
4542
|
* Assemble a final prompt from a template and sections map.
|
|
4407
4543
|
*
|
|
@@ -4426,7 +4562,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4426
4562
|
tokenCount,
|
|
4427
4563
|
truncated: false
|
|
4428
4564
|
};
|
|
4429
|
-
logger$
|
|
4565
|
+
logger$21.warn({
|
|
4430
4566
|
tokenCount,
|
|
4431
4567
|
ceiling: tokenCeiling
|
|
4432
4568
|
}, "Prompt exceeds token ceiling — truncating optional sections");
|
|
@@ -4442,10 +4578,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4442
4578
|
const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
|
|
4443
4579
|
if (targetSectionTokens === 0) {
|
|
4444
4580
|
contentMap[section.name] = "";
|
|
4445
|
-
logger$
|
|
4581
|
+
logger$21.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
|
|
4446
4582
|
} else {
|
|
4447
4583
|
contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
|
|
4448
|
-
logger$
|
|
4584
|
+
logger$21.warn({
|
|
4449
4585
|
sectionName: section.name,
|
|
4450
4586
|
targetSectionTokens
|
|
4451
4587
|
}, "Section truncated to fit token budget");
|
|
@@ -4456,7 +4592,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
|
|
|
4456
4592
|
}
|
|
4457
4593
|
if (tokenCount <= tokenCeiling) break;
|
|
4458
4594
|
}
|
|
4459
|
-
if (tokenCount > tokenCeiling) logger$
|
|
4595
|
+
if (tokenCount > tokenCeiling) logger$21.warn({
|
|
4460
4596
|
tokenCount,
|
|
4461
4597
|
ceiling: tokenCeiling
|
|
4462
4598
|
}, "Required sections alone exceed token ceiling — returning over-budget prompt");
|
|
@@ -4754,7 +4890,7 @@ function getTokenCeiling(workflowType, tokenCeilings) {
|
|
|
4754
4890
|
|
|
4755
4891
|
//#endregion
|
|
4756
4892
|
//#region src/modules/compiled-workflows/create-story.ts
|
|
4757
|
-
const logger$
|
|
4893
|
+
const logger$20 = createLogger("compiled-workflows:create-story");
|
|
4758
4894
|
/**
|
|
4759
4895
|
* Execute the compiled create-story workflow.
|
|
4760
4896
|
*
|
|
@@ -4774,13 +4910,13 @@ const logger$19 = createLogger("compiled-workflows:create-story");
|
|
|
4774
4910
|
*/
|
|
4775
4911
|
async function runCreateStory(deps, params) {
|
|
4776
4912
|
const { epicId, storyKey, pipelineRunId } = params;
|
|
4777
|
-
logger$
|
|
4913
|
+
logger$20.debug({
|
|
4778
4914
|
epicId,
|
|
4779
4915
|
storyKey,
|
|
4780
4916
|
pipelineRunId
|
|
4781
4917
|
}, "Starting create-story workflow");
|
|
4782
4918
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("create-story", deps.tokenCeilings);
|
|
4783
|
-
logger$
|
|
4919
|
+
logger$20.info({
|
|
4784
4920
|
workflow: "create-story",
|
|
4785
4921
|
ceiling: TOKEN_CEILING,
|
|
4786
4922
|
source: tokenCeilingSource
|
|
@@ -4790,7 +4926,7 @@ async function runCreateStory(deps, params) {
|
|
|
4790
4926
|
template = await deps.pack.getPrompt("create-story");
|
|
4791
4927
|
} catch (err) {
|
|
4792
4928
|
const error = err instanceof Error ? err.message : String(err);
|
|
4793
|
-
logger$
|
|
4929
|
+
logger$20.error({ error }, "Failed to retrieve create-story prompt template");
|
|
4794
4930
|
return {
|
|
4795
4931
|
result: "failed",
|
|
4796
4932
|
error: `Failed to retrieve prompt template: ${error}`,
|
|
@@ -4832,7 +4968,7 @@ async function runCreateStory(deps, params) {
|
|
|
4832
4968
|
priority: "important"
|
|
4833
4969
|
}
|
|
4834
4970
|
], TOKEN_CEILING);
|
|
4835
|
-
logger$
|
|
4971
|
+
logger$20.debug({
|
|
4836
4972
|
tokenCount,
|
|
4837
4973
|
truncated,
|
|
4838
4974
|
tokenCeiling: TOKEN_CEILING
|
|
@@ -4843,14 +4979,15 @@ async function runCreateStory(deps, params) {
|
|
|
4843
4979
|
taskType: "create-story",
|
|
4844
4980
|
outputSchema: CreateStoryResultSchema,
|
|
4845
4981
|
...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {},
|
|
4846
|
-
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
|
|
4982
|
+
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {},
|
|
4983
|
+
storyKey
|
|
4847
4984
|
});
|
|
4848
4985
|
let dispatchResult;
|
|
4849
4986
|
try {
|
|
4850
4987
|
dispatchResult = await handle.result;
|
|
4851
4988
|
} catch (err) {
|
|
4852
4989
|
const error = err instanceof Error ? err.message : String(err);
|
|
4853
|
-
logger$
|
|
4990
|
+
logger$20.error({
|
|
4854
4991
|
epicId,
|
|
4855
4992
|
storyKey,
|
|
4856
4993
|
error
|
|
@@ -4871,7 +5008,7 @@ async function runCreateStory(deps, params) {
|
|
|
4871
5008
|
if (dispatchResult.status === "failed") {
|
|
4872
5009
|
const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
|
|
4873
5010
|
const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
|
|
4874
|
-
logger$
|
|
5011
|
+
logger$20.warn({
|
|
4875
5012
|
epicId,
|
|
4876
5013
|
storyKey,
|
|
4877
5014
|
exitCode: dispatchResult.exitCode
|
|
@@ -4883,7 +5020,7 @@ async function runCreateStory(deps, params) {
|
|
|
4883
5020
|
};
|
|
4884
5021
|
}
|
|
4885
5022
|
if (dispatchResult.status === "timeout") {
|
|
4886
|
-
logger$
|
|
5023
|
+
logger$20.warn({
|
|
4887
5024
|
epicId,
|
|
4888
5025
|
storyKey
|
|
4889
5026
|
}, "Create-story dispatch timed out");
|
|
@@ -4896,7 +5033,7 @@ async function runCreateStory(deps, params) {
|
|
|
4896
5033
|
if (dispatchResult.parsed === null) {
|
|
4897
5034
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
4898
5035
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
4899
|
-
logger$
|
|
5036
|
+
logger$20.warn({
|
|
4900
5037
|
epicId,
|
|
4901
5038
|
storyKey,
|
|
4902
5039
|
details,
|
|
@@ -4912,7 +5049,7 @@ async function runCreateStory(deps, params) {
|
|
|
4912
5049
|
const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
|
|
4913
5050
|
if (!parseResult.success) {
|
|
4914
5051
|
const details = parseResult.error.message;
|
|
4915
|
-
logger$
|
|
5052
|
+
logger$20.warn({
|
|
4916
5053
|
epicId,
|
|
4917
5054
|
storyKey,
|
|
4918
5055
|
details
|
|
@@ -4925,7 +5062,7 @@ async function runCreateStory(deps, params) {
|
|
|
4925
5062
|
};
|
|
4926
5063
|
}
|
|
4927
5064
|
const parsed = parseResult.data;
|
|
4928
|
-
logger$
|
|
5065
|
+
logger$20.info({
|
|
4929
5066
|
epicId,
|
|
4930
5067
|
storyKey,
|
|
4931
5068
|
storyFile: parsed.story_file,
|
|
@@ -4947,7 +5084,7 @@ function getImplementationDecisions(deps) {
|
|
|
4947
5084
|
try {
|
|
4948
5085
|
return getDecisionsByPhase(deps.db, "implementation");
|
|
4949
5086
|
} catch (err) {
|
|
4950
|
-
logger$
|
|
5087
|
+
logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
|
|
4951
5088
|
return [];
|
|
4952
5089
|
}
|
|
4953
5090
|
}
|
|
@@ -4990,13 +5127,13 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
4990
5127
|
if (storyKey) {
|
|
4991
5128
|
const storySection = extractStorySection(shardContent, storyKey);
|
|
4992
5129
|
if (storySection) {
|
|
4993
|
-
logger$
|
|
5130
|
+
logger$20.debug({
|
|
4994
5131
|
epicId,
|
|
4995
5132
|
storyKey
|
|
4996
5133
|
}, "Extracted per-story section from epic shard");
|
|
4997
5134
|
return storySection;
|
|
4998
5135
|
}
|
|
4999
|
-
logger$
|
|
5136
|
+
logger$20.debug({
|
|
5000
5137
|
epicId,
|
|
5001
5138
|
storyKey
|
|
5002
5139
|
}, "No matching story section found — using full epic shard");
|
|
@@ -5006,11 +5143,11 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
5006
5143
|
if (projectRoot) {
|
|
5007
5144
|
const fallback = readEpicShardFromFile(projectRoot, epicId);
|
|
5008
5145
|
if (fallback) {
|
|
5009
|
-
logger$
|
|
5146
|
+
logger$20.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
|
|
5010
5147
|
if (storyKey) {
|
|
5011
5148
|
const storySection = extractStorySection(fallback, storyKey);
|
|
5012
5149
|
if (storySection) {
|
|
5013
|
-
logger$
|
|
5150
|
+
logger$20.debug({
|
|
5014
5151
|
epicId,
|
|
5015
5152
|
storyKey
|
|
5016
5153
|
}, "Extracted per-story section from file-based epic shard");
|
|
@@ -5022,7 +5159,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
5022
5159
|
}
|
|
5023
5160
|
return "";
|
|
5024
5161
|
} catch (err) {
|
|
5025
|
-
logger$
|
|
5162
|
+
logger$20.warn({
|
|
5026
5163
|
epicId,
|
|
5027
5164
|
error: err instanceof Error ? err.message : String(err)
|
|
5028
5165
|
}, "Failed to retrieve epic shard");
|
|
@@ -5039,7 +5176,7 @@ function getPrevDevNotes(decisions, epicId) {
|
|
|
5039
5176
|
if (devNotes.length === 0) return "";
|
|
5040
5177
|
return devNotes[devNotes.length - 1].value;
|
|
5041
5178
|
} catch (err) {
|
|
5042
|
-
logger$
|
|
5179
|
+
logger$20.warn({
|
|
5043
5180
|
epicId,
|
|
5044
5181
|
error: err instanceof Error ? err.message : String(err)
|
|
5045
5182
|
}, "Failed to retrieve prev dev notes");
|
|
@@ -5059,13 +5196,13 @@ function getArchConstraints$3(deps) {
|
|
|
5059
5196
|
if (deps.projectRoot) {
|
|
5060
5197
|
const fallback = readArchConstraintsFromFile(deps.projectRoot);
|
|
5061
5198
|
if (fallback) {
|
|
5062
|
-
logger$
|
|
5199
|
+
logger$20.info("Using file-based fallback for architecture constraints (decisions table empty)");
|
|
5063
5200
|
return fallback;
|
|
5064
5201
|
}
|
|
5065
5202
|
}
|
|
5066
5203
|
return "";
|
|
5067
5204
|
} catch (err) {
|
|
5068
|
-
logger$
|
|
5205
|
+
logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
5069
5206
|
return "";
|
|
5070
5207
|
}
|
|
5071
5208
|
}
|
|
@@ -5085,7 +5222,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
|
|
|
5085
5222
|
const match = pattern.exec(content);
|
|
5086
5223
|
return match ? match[0].trim() : "";
|
|
5087
5224
|
} catch (err) {
|
|
5088
|
-
logger$
|
|
5225
|
+
logger$20.warn({
|
|
5089
5226
|
epicId,
|
|
5090
5227
|
error: err instanceof Error ? err.message : String(err)
|
|
5091
5228
|
}, "File-based epic shard fallback failed");
|
|
@@ -5108,7 +5245,7 @@ function readArchConstraintsFromFile(projectRoot) {
|
|
|
5108
5245
|
const content = readFileSync$1(archPath, "utf-8");
|
|
5109
5246
|
return content.slice(0, 1500);
|
|
5110
5247
|
} catch (err) {
|
|
5111
|
-
logger$
|
|
5248
|
+
logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
|
|
5112
5249
|
return "";
|
|
5113
5250
|
}
|
|
5114
5251
|
}
|
|
@@ -5121,7 +5258,7 @@ async function getStoryTemplate(deps) {
|
|
|
5121
5258
|
try {
|
|
5122
5259
|
return await deps.pack.getTemplate("story");
|
|
5123
5260
|
} catch (err) {
|
|
5124
|
-
logger$
|
|
5261
|
+
logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
|
|
5125
5262
|
return "";
|
|
5126
5263
|
}
|
|
5127
5264
|
}
|
|
@@ -5158,7 +5295,7 @@ async function isValidStoryFile(filePath) {
|
|
|
5158
5295
|
|
|
5159
5296
|
//#endregion
|
|
5160
5297
|
//#region src/modules/compiled-workflows/git-helpers.ts
|
|
5161
|
-
const logger$
|
|
5298
|
+
const logger$19 = createLogger("compiled-workflows:git-helpers");
|
|
5162
5299
|
/**
|
|
5163
5300
|
* Capture the full git diff for HEAD (working tree vs current commit).
|
|
5164
5301
|
*
|
|
@@ -5254,7 +5391,7 @@ async function stageIntentToAdd(files, workingDirectory) {
|
|
|
5254
5391
|
if (files.length === 0) return;
|
|
5255
5392
|
const existing = files.filter((f) => {
|
|
5256
5393
|
const exists = existsSync$1(f);
|
|
5257
|
-
if (!exists) logger$
|
|
5394
|
+
if (!exists) logger$19.debug({ file: f }, "Skipping nonexistent file in stageIntentToAdd");
|
|
5258
5395
|
return exists;
|
|
5259
5396
|
});
|
|
5260
5397
|
if (existing.length === 0) return;
|
|
@@ -5288,7 +5425,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5288
5425
|
stderr += chunk.toString("utf-8");
|
|
5289
5426
|
});
|
|
5290
5427
|
proc.on("error", (err) => {
|
|
5291
|
-
logger$
|
|
5428
|
+
logger$19.warn({
|
|
5292
5429
|
label: logLabel,
|
|
5293
5430
|
cwd,
|
|
5294
5431
|
error: err.message
|
|
@@ -5297,7 +5434,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5297
5434
|
});
|
|
5298
5435
|
proc.on("close", (code) => {
|
|
5299
5436
|
if (code !== 0) {
|
|
5300
|
-
logger$
|
|
5437
|
+
logger$19.warn({
|
|
5301
5438
|
label: logLabel,
|
|
5302
5439
|
cwd,
|
|
5303
5440
|
code,
|
|
@@ -5313,7 +5450,7 @@ async function runGitCommand(args, cwd, logLabel) {
|
|
|
5313
5450
|
|
|
5314
5451
|
//#endregion
|
|
5315
5452
|
//#region src/modules/implementation-orchestrator/project-findings.ts
|
|
5316
|
-
const logger$
|
|
5453
|
+
const logger$18 = createLogger("project-findings");
|
|
5317
5454
|
/** Maximum character length for the findings summary */
|
|
5318
5455
|
const MAX_CHARS = 2e3;
|
|
5319
5456
|
/**
|
|
@@ -5379,7 +5516,7 @@ function getProjectFindings(db) {
|
|
|
5379
5516
|
if (summary.length > MAX_CHARS) summary = summary.slice(0, MAX_CHARS - 3) + "...";
|
|
5380
5517
|
return summary;
|
|
5381
5518
|
} catch (err) {
|
|
5382
|
-
logger$
|
|
5519
|
+
logger$18.warn({ err }, "Failed to query project findings (graceful fallback)");
|
|
5383
5520
|
return "";
|
|
5384
5521
|
}
|
|
5385
5522
|
}
|
|
@@ -5402,7 +5539,7 @@ function extractRecurringPatterns(outcomes) {
|
|
|
5402
5539
|
|
|
5403
5540
|
//#endregion
|
|
5404
5541
|
//#region src/modules/compiled-workflows/story-complexity.ts
|
|
5405
|
-
const logger$
|
|
5542
|
+
const logger$17 = createLogger("compiled-workflows:story-complexity");
|
|
5406
5543
|
/**
|
|
5407
5544
|
* Compute a complexity score from story markdown content.
|
|
5408
5545
|
*
|
|
@@ -5454,7 +5591,7 @@ function resolveFixStoryMaxTurns(complexityScore) {
|
|
|
5454
5591
|
* @param resolvedMaxTurns - Turn limit resolved for this dispatch
|
|
5455
5592
|
*/
|
|
5456
5593
|
function logComplexityResult(storyKey, complexity, resolvedMaxTurns) {
|
|
5457
|
-
logger$
|
|
5594
|
+
logger$17.info({
|
|
5458
5595
|
storyKey,
|
|
5459
5596
|
taskCount: complexity.taskCount,
|
|
5460
5597
|
subtaskCount: complexity.subtaskCount,
|
|
@@ -5512,7 +5649,7 @@ function countFilesInLayout(content) {
|
|
|
5512
5649
|
|
|
5513
5650
|
//#endregion
|
|
5514
5651
|
//#region src/modules/compiled-workflows/dev-story.ts
|
|
5515
|
-
const logger$
|
|
5652
|
+
const logger$16 = createLogger("compiled-workflows:dev-story");
|
|
5516
5653
|
/** Default timeout for dev-story dispatches in milliseconds (30 min) */
|
|
5517
5654
|
const DEFAULT_TIMEOUT_MS$1 = 18e5;
|
|
5518
5655
|
/** Default Vitest test patterns injected when no test-pattern decisions exist */
|
|
@@ -5535,12 +5672,12 @@ const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
|
|
|
5535
5672
|
*/
|
|
5536
5673
|
async function runDevStory(deps, params) {
|
|
5537
5674
|
const { storyKey, storyFilePath, taskScope, priorFiles } = params;
|
|
5538
|
-
logger$
|
|
5675
|
+
logger$16.info({
|
|
5539
5676
|
storyKey,
|
|
5540
5677
|
storyFilePath
|
|
5541
5678
|
}, "Starting compiled dev-story workflow");
|
|
5542
5679
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("dev-story", deps.tokenCeilings);
|
|
5543
|
-
logger$
|
|
5680
|
+
logger$16.info({
|
|
5544
5681
|
workflow: "dev-story",
|
|
5545
5682
|
ceiling: TOKEN_CEILING,
|
|
5546
5683
|
source: tokenCeilingSource
|
|
@@ -5583,10 +5720,10 @@ async function runDevStory(deps, params) {
|
|
|
5583
5720
|
let template;
|
|
5584
5721
|
try {
|
|
5585
5722
|
template = await deps.pack.getPrompt("dev-story");
|
|
5586
|
-
logger$
|
|
5723
|
+
logger$16.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
|
|
5587
5724
|
} catch (err) {
|
|
5588
5725
|
const error = err instanceof Error ? err.message : String(err);
|
|
5589
|
-
logger$
|
|
5726
|
+
logger$16.error({
|
|
5590
5727
|
storyKey,
|
|
5591
5728
|
error
|
|
5592
5729
|
}, "Failed to retrieve dev-story prompt template");
|
|
@@ -5597,14 +5734,14 @@ async function runDevStory(deps, params) {
|
|
|
5597
5734
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
5598
5735
|
} catch (err) {
|
|
5599
5736
|
if (err.code === "ENOENT") {
|
|
5600
|
-
logger$
|
|
5737
|
+
logger$16.error({
|
|
5601
5738
|
storyKey,
|
|
5602
5739
|
storyFilePath
|
|
5603
5740
|
}, "Story file not found");
|
|
5604
5741
|
return makeFailureResult("story_file_not_found");
|
|
5605
5742
|
}
|
|
5606
5743
|
const error = err instanceof Error ? err.message : String(err);
|
|
5607
|
-
logger$
|
|
5744
|
+
logger$16.error({
|
|
5608
5745
|
storyKey,
|
|
5609
5746
|
storyFilePath,
|
|
5610
5747
|
error
|
|
@@ -5612,7 +5749,7 @@ async function runDevStory(deps, params) {
|
|
|
5612
5749
|
return makeFailureResult(`story_file_read_error: ${error}`);
|
|
5613
5750
|
}
|
|
5614
5751
|
if (storyContent.trim().length === 0) {
|
|
5615
|
-
logger$
|
|
5752
|
+
logger$16.error({
|
|
5616
5753
|
storyKey,
|
|
5617
5754
|
storyFilePath
|
|
5618
5755
|
}, "Story file is empty");
|
|
@@ -5627,17 +5764,17 @@ async function runDevStory(deps, params) {
|
|
|
5627
5764
|
const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
|
|
5628
5765
|
if (testPatternDecisions.length > 0) {
|
|
5629
5766
|
testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
|
|
5630
|
-
logger$
|
|
5767
|
+
logger$16.debug({
|
|
5631
5768
|
storyKey,
|
|
5632
5769
|
count: testPatternDecisions.length
|
|
5633
5770
|
}, "Loaded test patterns from decision store");
|
|
5634
5771
|
} else {
|
|
5635
5772
|
testPatternsContent = DEFAULT_VITEST_PATTERNS;
|
|
5636
|
-
logger$
|
|
5773
|
+
logger$16.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
|
|
5637
5774
|
}
|
|
5638
5775
|
} catch (err) {
|
|
5639
5776
|
const error = err instanceof Error ? err.message : String(err);
|
|
5640
|
-
logger$
|
|
5777
|
+
logger$16.warn({
|
|
5641
5778
|
storyKey,
|
|
5642
5779
|
error
|
|
5643
5780
|
}, "Failed to load test patterns — using defaults");
|
|
@@ -5652,7 +5789,7 @@ async function runDevStory(deps, params) {
|
|
|
5652
5789
|
const findings = getProjectFindings(deps.db);
|
|
5653
5790
|
if (findings.length > 0) {
|
|
5654
5791
|
priorFindingsContent = "Previous pipeline runs encountered these issues — avoid repeating them:\n\n" + findings;
|
|
5655
|
-
logger$
|
|
5792
|
+
logger$16.debug({
|
|
5656
5793
|
storyKey,
|
|
5657
5794
|
findingsLen: findings.length
|
|
5658
5795
|
}, "Injecting prior findings into dev-story prompt");
|
|
@@ -5672,7 +5809,7 @@ async function runDevStory(deps, params) {
|
|
|
5672
5809
|
if (plan.test_categories && plan.test_categories.length > 0) parts.push(`\n### Categories: ${plan.test_categories.join(", ")}`);
|
|
5673
5810
|
if (plan.coverage_notes) parts.push(`\n### Coverage Notes\n${plan.coverage_notes}`);
|
|
5674
5811
|
testPlanContent = parts.join("\n");
|
|
5675
|
-
logger$
|
|
5812
|
+
logger$16.debug({ storyKey }, "Injecting test plan into dev-story prompt");
|
|
5676
5813
|
}
|
|
5677
5814
|
} catch {}
|
|
5678
5815
|
const sections = [
|
|
@@ -5718,7 +5855,7 @@ async function runDevStory(deps, params) {
|
|
|
5718
5855
|
}
|
|
5719
5856
|
];
|
|
5720
5857
|
const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING);
|
|
5721
|
-
logger$
|
|
5858
|
+
logger$16.info({
|
|
5722
5859
|
storyKey,
|
|
5723
5860
|
tokenCount,
|
|
5724
5861
|
ceiling: TOKEN_CEILING,
|
|
@@ -5734,12 +5871,13 @@ async function runDevStory(deps, params) {
|
|
|
5734
5871
|
outputSchema: DevStoryResultSchema,
|
|
5735
5872
|
maxTurns: resolvedMaxTurns,
|
|
5736
5873
|
...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {},
|
|
5737
|
-
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
|
|
5874
|
+
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {},
|
|
5875
|
+
storyKey
|
|
5738
5876
|
});
|
|
5739
5877
|
dispatchResult = await handle.result;
|
|
5740
5878
|
} catch (err) {
|
|
5741
5879
|
const error = err instanceof Error ? err.message : String(err);
|
|
5742
|
-
logger$
|
|
5880
|
+
logger$16.error({
|
|
5743
5881
|
storyKey,
|
|
5744
5882
|
error
|
|
5745
5883
|
}, "Dispatch threw an unexpected error");
|
|
@@ -5750,11 +5888,11 @@ async function runDevStory(deps, params) {
|
|
|
5750
5888
|
output: dispatchResult.tokenEstimate.output
|
|
5751
5889
|
};
|
|
5752
5890
|
if (dispatchResult.status === "timeout") {
|
|
5753
|
-
logger$
|
|
5891
|
+
logger$16.error({
|
|
5754
5892
|
storyKey,
|
|
5755
5893
|
durationMs: dispatchResult.durationMs
|
|
5756
5894
|
}, "Dev-story dispatch timed out");
|
|
5757
|
-
if (dispatchResult.output.length > 0) logger$
|
|
5895
|
+
if (dispatchResult.output.length > 0) logger$16.info({
|
|
5758
5896
|
storyKey,
|
|
5759
5897
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
5760
5898
|
}, "Partial output before timeout");
|
|
@@ -5764,12 +5902,12 @@ async function runDevStory(deps, params) {
|
|
|
5764
5902
|
};
|
|
5765
5903
|
}
|
|
5766
5904
|
if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
|
|
5767
|
-
logger$
|
|
5905
|
+
logger$16.error({
|
|
5768
5906
|
storyKey,
|
|
5769
5907
|
exitCode: dispatchResult.exitCode,
|
|
5770
5908
|
status: dispatchResult.status
|
|
5771
5909
|
}, "Dev-story dispatch failed");
|
|
5772
|
-
if (dispatchResult.output.length > 0) logger$
|
|
5910
|
+
if (dispatchResult.output.length > 0) logger$16.info({
|
|
5773
5911
|
storyKey,
|
|
5774
5912
|
partialOutput: dispatchResult.output.slice(0, 500)
|
|
5775
5913
|
}, "Partial output from failed dispatch");
|
|
@@ -5781,7 +5919,7 @@ async function runDevStory(deps, params) {
|
|
|
5781
5919
|
if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
|
|
5782
5920
|
const details = dispatchResult.parseError ?? "parsed result was null";
|
|
5783
5921
|
const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
|
|
5784
|
-
logger$
|
|
5922
|
+
logger$16.error({
|
|
5785
5923
|
storyKey,
|
|
5786
5924
|
parseError: details,
|
|
5787
5925
|
rawOutputSnippet: rawSnippet
|
|
@@ -5789,12 +5927,12 @@ async function runDevStory(deps, params) {
|
|
|
5789
5927
|
let filesModified = [];
|
|
5790
5928
|
try {
|
|
5791
5929
|
filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
|
|
5792
|
-
if (filesModified.length > 0) logger$
|
|
5930
|
+
if (filesModified.length > 0) logger$16.info({
|
|
5793
5931
|
storyKey,
|
|
5794
5932
|
fileCount: filesModified.length
|
|
5795
5933
|
}, "Recovered files_modified from git status (YAML fallback)");
|
|
5796
5934
|
} catch (err) {
|
|
5797
|
-
logger$
|
|
5935
|
+
logger$16.warn({
|
|
5798
5936
|
storyKey,
|
|
5799
5937
|
error: err instanceof Error ? err.message : String(err)
|
|
5800
5938
|
}, "Failed to recover files_modified from git");
|
|
@@ -5811,7 +5949,7 @@ async function runDevStory(deps, params) {
|
|
|
5811
5949
|
};
|
|
5812
5950
|
}
|
|
5813
5951
|
const parsed = dispatchResult.parsed;
|
|
5814
|
-
logger$
|
|
5952
|
+
logger$16.info({
|
|
5815
5953
|
storyKey,
|
|
5816
5954
|
result: parsed.result,
|
|
5817
5955
|
acMet: parsed.ac_met.length
|
|
@@ -5950,7 +6088,7 @@ function extractFilesInScope(storyContent) {
|
|
|
5950
6088
|
|
|
5951
6089
|
//#endregion
|
|
5952
6090
|
//#region src/modules/compiled-workflows/code-review.ts
|
|
5953
|
-
const logger$
|
|
6091
|
+
const logger$15 = createLogger("compiled-workflows:code-review");
|
|
5954
6092
|
/**
|
|
5955
6093
|
* Default fallback result when dispatch fails or times out.
|
|
5956
6094
|
* Uses NEEDS_MINOR_FIXES (not NEEDS_MAJOR_REWORK) so a parse/schema failure
|
|
@@ -5988,14 +6126,14 @@ function defaultFailResult(error, tokenUsage) {
|
|
|
5988
6126
|
async function runCodeReview(deps, params) {
|
|
5989
6127
|
const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
|
|
5990
6128
|
const cwd = workingDirectory ?? process.cwd();
|
|
5991
|
-
logger$
|
|
6129
|
+
logger$15.debug({
|
|
5992
6130
|
storyKey,
|
|
5993
6131
|
storyFilePath,
|
|
5994
6132
|
cwd,
|
|
5995
6133
|
pipelineRunId
|
|
5996
6134
|
}, "Starting code-review workflow");
|
|
5997
6135
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("code-review", deps.tokenCeilings);
|
|
5998
|
-
logger$
|
|
6136
|
+
logger$15.info({
|
|
5999
6137
|
workflow: "code-review",
|
|
6000
6138
|
ceiling: TOKEN_CEILING,
|
|
6001
6139
|
source: tokenCeilingSource
|
|
@@ -6005,7 +6143,7 @@ async function runCodeReview(deps, params) {
|
|
|
6005
6143
|
template = await deps.pack.getPrompt("code-review");
|
|
6006
6144
|
} catch (err) {
|
|
6007
6145
|
const error = err instanceof Error ? err.message : String(err);
|
|
6008
|
-
logger$
|
|
6146
|
+
logger$15.error({ error }, "Failed to retrieve code-review prompt template");
|
|
6009
6147
|
return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
|
|
6010
6148
|
input: 0,
|
|
6011
6149
|
output: 0
|
|
@@ -6016,7 +6154,7 @@ async function runCodeReview(deps, params) {
|
|
|
6016
6154
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
6017
6155
|
} catch (err) {
|
|
6018
6156
|
const error = err instanceof Error ? err.message : String(err);
|
|
6019
|
-
logger$
|
|
6157
|
+
logger$15.error({
|
|
6020
6158
|
storyFilePath,
|
|
6021
6159
|
error
|
|
6022
6160
|
}, "Failed to read story file");
|
|
@@ -6036,12 +6174,12 @@ async function runCodeReview(deps, params) {
|
|
|
6036
6174
|
const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
|
|
6037
6175
|
if (scopedTotal <= TOKEN_CEILING) {
|
|
6038
6176
|
gitDiffContent = scopedDiff;
|
|
6039
|
-
logger$
|
|
6177
|
+
logger$15.debug({
|
|
6040
6178
|
fileCount: filesModified.length,
|
|
6041
6179
|
tokenCount: scopedTotal
|
|
6042
6180
|
}, "Using scoped file diff");
|
|
6043
6181
|
} else {
|
|
6044
|
-
logger$
|
|
6182
|
+
logger$15.warn({
|
|
6045
6183
|
estimatedTotal: scopedTotal,
|
|
6046
6184
|
ceiling: TOKEN_CEILING,
|
|
6047
6185
|
fileCount: filesModified.length
|
|
@@ -6055,7 +6193,7 @@ async function runCodeReview(deps, params) {
|
|
|
6055
6193
|
const fullTotal = nonDiffTokens + countTokens(fullDiff);
|
|
6056
6194
|
if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
|
|
6057
6195
|
else {
|
|
6058
|
-
logger$
|
|
6196
|
+
logger$15.warn({
|
|
6059
6197
|
estimatedTotal: fullTotal,
|
|
6060
6198
|
ceiling: TOKEN_CEILING
|
|
6061
6199
|
}, "Full git diff would exceed token ceiling — using stat-only summary");
|
|
@@ -6063,7 +6201,7 @@ async function runCodeReview(deps, params) {
|
|
|
6063
6201
|
}
|
|
6064
6202
|
}
|
|
6065
6203
|
if (gitDiffContent.trim().length === 0) {
|
|
6066
|
-
logger$
|
|
6204
|
+
logger$15.info({ storyKey }, "Empty git diff — skipping review with SHIP_IT");
|
|
6067
6205
|
return {
|
|
6068
6206
|
verdict: "SHIP_IT",
|
|
6069
6207
|
issues: 0,
|
|
@@ -6088,7 +6226,7 @@ async function runCodeReview(deps, params) {
|
|
|
6088
6226
|
const findings = getProjectFindings(deps.db);
|
|
6089
6227
|
if (findings.length > 0) {
|
|
6090
6228
|
priorFindingsContent = "Previous reviews found these recurring patterns — pay special attention:\n\n" + findings;
|
|
6091
|
-
logger$
|
|
6229
|
+
logger$15.debug({
|
|
6092
6230
|
storyKey,
|
|
6093
6231
|
findingsLen: findings.length
|
|
6094
6232
|
}, "Injecting prior findings into code-review prompt");
|
|
@@ -6122,11 +6260,11 @@ async function runCodeReview(deps, params) {
|
|
|
6122
6260
|
}
|
|
6123
6261
|
];
|
|
6124
6262
|
const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
|
|
6125
|
-
if (assembleResult.truncated) logger$
|
|
6263
|
+
if (assembleResult.truncated) logger$15.warn({
|
|
6126
6264
|
storyKey,
|
|
6127
6265
|
tokenCount: assembleResult.tokenCount
|
|
6128
6266
|
}, "Code-review prompt truncated to fit token ceiling");
|
|
6129
|
-
logger$
|
|
6267
|
+
logger$15.debug({
|
|
6130
6268
|
storyKey,
|
|
6131
6269
|
tokenCount: assembleResult.tokenCount,
|
|
6132
6270
|
truncated: assembleResult.truncated
|
|
@@ -6138,14 +6276,15 @@ async function runCodeReview(deps, params) {
|
|
|
6138
6276
|
taskType: "code-review",
|
|
6139
6277
|
outputSchema: CodeReviewResultSchema,
|
|
6140
6278
|
workingDirectory: deps.projectRoot,
|
|
6141
|
-
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
|
|
6279
|
+
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {},
|
|
6280
|
+
storyKey
|
|
6142
6281
|
});
|
|
6143
6282
|
let dispatchResult;
|
|
6144
6283
|
try {
|
|
6145
6284
|
dispatchResult = await handle.result;
|
|
6146
6285
|
} catch (err) {
|
|
6147
6286
|
const error = err instanceof Error ? err.message : String(err);
|
|
6148
|
-
logger$
|
|
6287
|
+
logger$15.error({
|
|
6149
6288
|
storyKey,
|
|
6150
6289
|
error
|
|
6151
6290
|
}, "Code-review dispatch threw unexpected error");
|
|
@@ -6161,7 +6300,7 @@ async function runCodeReview(deps, params) {
|
|
|
6161
6300
|
const rawOutput = dispatchResult.output ?? void 0;
|
|
6162
6301
|
if (dispatchResult.status === "failed") {
|
|
6163
6302
|
const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
|
|
6164
|
-
logger$
|
|
6303
|
+
logger$15.warn({
|
|
6165
6304
|
storyKey,
|
|
6166
6305
|
exitCode: dispatchResult.exitCode
|
|
6167
6306
|
}, "Code-review dispatch failed");
|
|
@@ -6171,7 +6310,7 @@ async function runCodeReview(deps, params) {
|
|
|
6171
6310
|
};
|
|
6172
6311
|
}
|
|
6173
6312
|
if (dispatchResult.status === "timeout") {
|
|
6174
|
-
logger$
|
|
6313
|
+
logger$15.warn({ storyKey }, "Code-review dispatch timed out");
|
|
6175
6314
|
return {
|
|
6176
6315
|
...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
|
|
6177
6316
|
rawOutput
|
|
@@ -6179,7 +6318,7 @@ async function runCodeReview(deps, params) {
|
|
|
6179
6318
|
}
|
|
6180
6319
|
if (dispatchResult.parsed === null) {
|
|
6181
6320
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
6182
|
-
logger$
|
|
6321
|
+
logger$15.warn({
|
|
6183
6322
|
storyKey,
|
|
6184
6323
|
details
|
|
6185
6324
|
}, "Code-review output schema validation failed");
|
|
@@ -6196,7 +6335,7 @@ async function runCodeReview(deps, params) {
|
|
|
6196
6335
|
const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
|
|
6197
6336
|
if (!parseResult.success) {
|
|
6198
6337
|
const details = parseResult.error.message;
|
|
6199
|
-
logger$
|
|
6338
|
+
logger$15.warn({
|
|
6200
6339
|
storyKey,
|
|
6201
6340
|
details
|
|
6202
6341
|
}, "Code-review output failed schema validation");
|
|
@@ -6211,13 +6350,13 @@ async function runCodeReview(deps, params) {
|
|
|
6211
6350
|
};
|
|
6212
6351
|
}
|
|
6213
6352
|
const parsed = parseResult.data;
|
|
6214
|
-
if (parsed.agentVerdict !== parsed.verdict) logger$
|
|
6353
|
+
if (parsed.agentVerdict !== parsed.verdict) logger$15.info({
|
|
6215
6354
|
storyKey,
|
|
6216
6355
|
agentVerdict: parsed.agentVerdict,
|
|
6217
6356
|
pipelineVerdict: parsed.verdict,
|
|
6218
6357
|
issues: parsed.issues
|
|
6219
6358
|
}, "Pipeline overrode agent verdict based on issue severities");
|
|
6220
|
-
logger$
|
|
6359
|
+
logger$15.info({
|
|
6221
6360
|
storyKey,
|
|
6222
6361
|
verdict: parsed.verdict,
|
|
6223
6362
|
issues: parsed.issues
|
|
@@ -6242,14 +6381,14 @@ function getArchConstraints$2(deps) {
|
|
|
6242
6381
|
if (constraints.length === 0) return "";
|
|
6243
6382
|
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
6244
6383
|
} catch (err) {
|
|
6245
|
-
logger$
|
|
6384
|
+
logger$15.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
6246
6385
|
return "";
|
|
6247
6386
|
}
|
|
6248
6387
|
}
|
|
6249
6388
|
|
|
6250
6389
|
//#endregion
|
|
6251
6390
|
//#region src/modules/compiled-workflows/test-plan.ts
|
|
6252
|
-
const logger$
|
|
6391
|
+
const logger$14 = createLogger("compiled-workflows:test-plan");
|
|
6253
6392
|
/** Default timeout for test-plan dispatches in milliseconds (5 min — lightweight call) */
|
|
6254
6393
|
const DEFAULT_TIMEOUT_MS = 3e5;
|
|
6255
6394
|
/**
|
|
@@ -6261,12 +6400,12 @@ const DEFAULT_TIMEOUT_MS = 3e5;
|
|
|
6261
6400
|
*/
|
|
6262
6401
|
async function runTestPlan(deps, params) {
|
|
6263
6402
|
const { storyKey, storyFilePath, pipelineRunId } = params;
|
|
6264
|
-
logger$
|
|
6403
|
+
logger$14.info({
|
|
6265
6404
|
storyKey,
|
|
6266
6405
|
storyFilePath
|
|
6267
6406
|
}, "Starting compiled test-plan workflow");
|
|
6268
6407
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-plan", deps.tokenCeilings);
|
|
6269
|
-
logger$
|
|
6408
|
+
logger$14.info({
|
|
6270
6409
|
workflow: "test-plan",
|
|
6271
6410
|
ceiling: TOKEN_CEILING,
|
|
6272
6411
|
source: tokenCeilingSource
|
|
@@ -6274,10 +6413,10 @@ async function runTestPlan(deps, params) {
|
|
|
6274
6413
|
let template;
|
|
6275
6414
|
try {
|
|
6276
6415
|
template = await deps.pack.getPrompt("test-plan");
|
|
6277
|
-
logger$
|
|
6416
|
+
logger$14.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
|
|
6278
6417
|
} catch (err) {
|
|
6279
6418
|
const error = err instanceof Error ? err.message : String(err);
|
|
6280
|
-
logger$
|
|
6419
|
+
logger$14.warn({
|
|
6281
6420
|
storyKey,
|
|
6282
6421
|
error
|
|
6283
6422
|
}, "Failed to retrieve test-plan prompt template");
|
|
@@ -6288,14 +6427,14 @@ async function runTestPlan(deps, params) {
|
|
|
6288
6427
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
6289
6428
|
} catch (err) {
|
|
6290
6429
|
if (err.code === "ENOENT") {
|
|
6291
|
-
logger$
|
|
6430
|
+
logger$14.warn({
|
|
6292
6431
|
storyKey,
|
|
6293
6432
|
storyFilePath
|
|
6294
6433
|
}, "Story file not found for test planning");
|
|
6295
6434
|
return makeTestPlanFailureResult("story_file_not_found");
|
|
6296
6435
|
}
|
|
6297
6436
|
const error = err instanceof Error ? err.message : String(err);
|
|
6298
|
-
logger$
|
|
6437
|
+
logger$14.warn({
|
|
6299
6438
|
storyKey,
|
|
6300
6439
|
storyFilePath,
|
|
6301
6440
|
error
|
|
@@ -6312,7 +6451,7 @@ async function runTestPlan(deps, params) {
|
|
|
6312
6451
|
content: archConstraintsContent,
|
|
6313
6452
|
priority: "optional"
|
|
6314
6453
|
}], TOKEN_CEILING);
|
|
6315
|
-
logger$
|
|
6454
|
+
logger$14.info({
|
|
6316
6455
|
storyKey,
|
|
6317
6456
|
tokenCount,
|
|
6318
6457
|
ceiling: TOKEN_CEILING,
|
|
@@ -6327,12 +6466,13 @@ async function runTestPlan(deps, params) {
|
|
|
6327
6466
|
timeout: DEFAULT_TIMEOUT_MS,
|
|
6328
6467
|
outputSchema: TestPlanResultSchema,
|
|
6329
6468
|
...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {},
|
|
6330
|
-
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
|
|
6469
|
+
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {},
|
|
6470
|
+
storyKey
|
|
6331
6471
|
});
|
|
6332
6472
|
dispatchResult = await handle.result;
|
|
6333
6473
|
} catch (err) {
|
|
6334
6474
|
const error = err instanceof Error ? err.message : String(err);
|
|
6335
|
-
logger$
|
|
6475
|
+
logger$14.warn({
|
|
6336
6476
|
storyKey,
|
|
6337
6477
|
error
|
|
6338
6478
|
}, "Test-plan dispatch threw an unexpected error");
|
|
@@ -6343,7 +6483,7 @@ async function runTestPlan(deps, params) {
|
|
|
6343
6483
|
output: dispatchResult.tokenEstimate.output
|
|
6344
6484
|
};
|
|
6345
6485
|
if (dispatchResult.status === "timeout") {
|
|
6346
|
-
logger$
|
|
6486
|
+
logger$14.warn({
|
|
6347
6487
|
storyKey,
|
|
6348
6488
|
durationMs: dispatchResult.durationMs
|
|
6349
6489
|
}, "Test-plan dispatch timed out");
|
|
@@ -6353,7 +6493,7 @@ async function runTestPlan(deps, params) {
|
|
|
6353
6493
|
};
|
|
6354
6494
|
}
|
|
6355
6495
|
if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
|
|
6356
|
-
logger$
|
|
6496
|
+
logger$14.warn({
|
|
6357
6497
|
storyKey,
|
|
6358
6498
|
exitCode: dispatchResult.exitCode,
|
|
6359
6499
|
status: dispatchResult.status
|
|
@@ -6365,7 +6505,7 @@ async function runTestPlan(deps, params) {
|
|
|
6365
6505
|
}
|
|
6366
6506
|
if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
|
|
6367
6507
|
const details = dispatchResult.parseError ?? "parsed result was null";
|
|
6368
|
-
logger$
|
|
6508
|
+
logger$14.warn({
|
|
6369
6509
|
storyKey,
|
|
6370
6510
|
parseError: details
|
|
6371
6511
|
}, "Test-plan YAML schema validation failed");
|
|
@@ -6388,19 +6528,19 @@ async function runTestPlan(deps, params) {
|
|
|
6388
6528
|
}),
|
|
6389
6529
|
rationale: `Test plan for ${storyKey}: ${parsed.test_files.length} test files, categories: ${parsed.test_categories.join(", ")}`
|
|
6390
6530
|
});
|
|
6391
|
-
logger$
|
|
6531
|
+
logger$14.info({
|
|
6392
6532
|
storyKey,
|
|
6393
6533
|
fileCount: parsed.test_files.length,
|
|
6394
6534
|
categories: parsed.test_categories
|
|
6395
6535
|
}, "Test plan stored in decision store");
|
|
6396
6536
|
} catch (err) {
|
|
6397
6537
|
const error = err instanceof Error ? err.message : String(err);
|
|
6398
|
-
logger$
|
|
6538
|
+
logger$14.warn({
|
|
6399
6539
|
storyKey,
|
|
6400
6540
|
error
|
|
6401
6541
|
}, "Failed to store test plan in decision store — proceeding anyway");
|
|
6402
6542
|
}
|
|
6403
|
-
logger$
|
|
6543
|
+
logger$14.info({
|
|
6404
6544
|
storyKey,
|
|
6405
6545
|
result: parsed.result
|
|
6406
6546
|
}, "Test-plan workflow completed");
|
|
@@ -6440,14 +6580,14 @@ function getArchConstraints$1(deps) {
|
|
|
6440
6580
|
if (constraints.length === 0) return "";
|
|
6441
6581
|
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
6442
6582
|
} catch (err) {
|
|
6443
|
-
logger$
|
|
6583
|
+
logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints for test-plan — proceeding without them");
|
|
6444
6584
|
return "";
|
|
6445
6585
|
}
|
|
6446
6586
|
}
|
|
6447
6587
|
|
|
6448
6588
|
//#endregion
|
|
6449
6589
|
//#region src/modules/compiled-workflows/test-expansion.ts
|
|
6450
|
-
const logger$
|
|
6590
|
+
const logger$13 = createLogger("compiled-workflows:test-expansion");
|
|
6451
6591
|
function defaultFallbackResult(error, tokenUsage) {
|
|
6452
6592
|
return {
|
|
6453
6593
|
expansion_priority: "low",
|
|
@@ -6477,14 +6617,14 @@ function defaultFallbackResult(error, tokenUsage) {
|
|
|
6477
6617
|
async function runTestExpansion(deps, params) {
|
|
6478
6618
|
const { storyKey, storyFilePath, pipelineRunId, filesModified, workingDirectory } = params;
|
|
6479
6619
|
const cwd = workingDirectory ?? process.cwd();
|
|
6480
|
-
logger$
|
|
6620
|
+
logger$13.debug({
|
|
6481
6621
|
storyKey,
|
|
6482
6622
|
storyFilePath,
|
|
6483
6623
|
cwd,
|
|
6484
6624
|
pipelineRunId
|
|
6485
6625
|
}, "Starting test-expansion workflow");
|
|
6486
6626
|
const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-expansion", deps.tokenCeilings);
|
|
6487
|
-
logger$
|
|
6627
|
+
logger$13.info({
|
|
6488
6628
|
workflow: "test-expansion",
|
|
6489
6629
|
ceiling: TOKEN_CEILING,
|
|
6490
6630
|
source: tokenCeilingSource
|
|
@@ -6494,7 +6634,7 @@ async function runTestExpansion(deps, params) {
|
|
|
6494
6634
|
template = await deps.pack.getPrompt("test-expansion");
|
|
6495
6635
|
} catch (err) {
|
|
6496
6636
|
const error = err instanceof Error ? err.message : String(err);
|
|
6497
|
-
logger$
|
|
6637
|
+
logger$13.warn({ error }, "Failed to retrieve test-expansion prompt template");
|
|
6498
6638
|
return defaultFallbackResult(`Failed to retrieve prompt template: ${error}`, {
|
|
6499
6639
|
input: 0,
|
|
6500
6640
|
output: 0
|
|
@@ -6505,7 +6645,7 @@ async function runTestExpansion(deps, params) {
|
|
|
6505
6645
|
storyContent = await readFile$1(storyFilePath, "utf-8");
|
|
6506
6646
|
} catch (err) {
|
|
6507
6647
|
const error = err instanceof Error ? err.message : String(err);
|
|
6508
|
-
logger$
|
|
6648
|
+
logger$13.warn({
|
|
6509
6649
|
storyFilePath,
|
|
6510
6650
|
error
|
|
6511
6651
|
}, "Failed to read story file");
|
|
@@ -6525,12 +6665,12 @@ async function runTestExpansion(deps, params) {
|
|
|
6525
6665
|
const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
|
|
6526
6666
|
if (scopedTotal <= TOKEN_CEILING) {
|
|
6527
6667
|
gitDiffContent = scopedDiff;
|
|
6528
|
-
logger$
|
|
6668
|
+
logger$13.debug({
|
|
6529
6669
|
fileCount: filesModified.length,
|
|
6530
6670
|
tokenCount: scopedTotal
|
|
6531
6671
|
}, "Using scoped file diff");
|
|
6532
6672
|
} else {
|
|
6533
|
-
logger$
|
|
6673
|
+
logger$13.warn({
|
|
6534
6674
|
estimatedTotal: scopedTotal,
|
|
6535
6675
|
ceiling: TOKEN_CEILING,
|
|
6536
6676
|
fileCount: filesModified.length
|
|
@@ -6538,7 +6678,7 @@ async function runTestExpansion(deps, params) {
|
|
|
6538
6678
|
gitDiffContent = await getGitDiffStatSummary(cwd);
|
|
6539
6679
|
}
|
|
6540
6680
|
} catch (err) {
|
|
6541
|
-
logger$
|
|
6681
|
+
logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
|
|
6542
6682
|
}
|
|
6543
6683
|
const sections = [
|
|
6544
6684
|
{
|
|
@@ -6558,11 +6698,11 @@ async function runTestExpansion(deps, params) {
|
|
|
6558
6698
|
}
|
|
6559
6699
|
];
|
|
6560
6700
|
const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
|
|
6561
|
-
if (assembleResult.truncated) logger$
|
|
6701
|
+
if (assembleResult.truncated) logger$13.warn({
|
|
6562
6702
|
storyKey,
|
|
6563
6703
|
tokenCount: assembleResult.tokenCount
|
|
6564
6704
|
}, "Test-expansion prompt truncated to fit token ceiling");
|
|
6565
|
-
logger$
|
|
6705
|
+
logger$13.debug({
|
|
6566
6706
|
storyKey,
|
|
6567
6707
|
tokenCount: assembleResult.tokenCount,
|
|
6568
6708
|
truncated: assembleResult.truncated
|
|
@@ -6574,14 +6714,15 @@ async function runTestExpansion(deps, params) {
|
|
|
6574
6714
|
taskType: "test-expansion",
|
|
6575
6715
|
outputSchema: TestExpansionResultSchema,
|
|
6576
6716
|
workingDirectory: deps.projectRoot,
|
|
6577
|
-
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
|
|
6717
|
+
...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {},
|
|
6718
|
+
storyKey
|
|
6578
6719
|
});
|
|
6579
6720
|
let dispatchResult;
|
|
6580
6721
|
try {
|
|
6581
6722
|
dispatchResult = await handle.result;
|
|
6582
6723
|
} catch (err) {
|
|
6583
6724
|
const error = err instanceof Error ? err.message : String(err);
|
|
6584
|
-
logger$
|
|
6725
|
+
logger$13.warn({
|
|
6585
6726
|
storyKey,
|
|
6586
6727
|
error
|
|
6587
6728
|
}, "Test-expansion dispatch threw unexpected error");
|
|
@@ -6596,19 +6737,19 @@ async function runTestExpansion(deps, params) {
|
|
|
6596
6737
|
};
|
|
6597
6738
|
if (dispatchResult.status === "failed") {
|
|
6598
6739
|
const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""}`.trim();
|
|
6599
|
-
logger$
|
|
6740
|
+
logger$13.warn({
|
|
6600
6741
|
storyKey,
|
|
6601
6742
|
exitCode: dispatchResult.exitCode
|
|
6602
6743
|
}, "Test-expansion dispatch failed");
|
|
6603
6744
|
return defaultFallbackResult(errorMsg, tokenUsage);
|
|
6604
6745
|
}
|
|
6605
6746
|
if (dispatchResult.status === "timeout") {
|
|
6606
|
-
logger$
|
|
6747
|
+
logger$13.warn({ storyKey }, "Test-expansion dispatch timed out");
|
|
6607
6748
|
return defaultFallbackResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage);
|
|
6608
6749
|
}
|
|
6609
6750
|
if (dispatchResult.parsed === null) {
|
|
6610
6751
|
const details = dispatchResult.parseError ?? "No YAML block found in output";
|
|
6611
|
-
logger$
|
|
6752
|
+
logger$13.warn({
|
|
6612
6753
|
storyKey,
|
|
6613
6754
|
details
|
|
6614
6755
|
}, "Test-expansion output has no parseable YAML");
|
|
@@ -6617,14 +6758,14 @@ async function runTestExpansion(deps, params) {
|
|
|
6617
6758
|
const parseResult = TestExpansionResultSchema.safeParse(dispatchResult.parsed);
|
|
6618
6759
|
if (!parseResult.success) {
|
|
6619
6760
|
const details = parseResult.error.message;
|
|
6620
|
-
logger$
|
|
6761
|
+
logger$13.warn({
|
|
6621
6762
|
storyKey,
|
|
6622
6763
|
details
|
|
6623
6764
|
}, "Test-expansion output failed schema validation");
|
|
6624
6765
|
return defaultFallbackResult(`schema_validation_failed: ${details}`, tokenUsage);
|
|
6625
6766
|
}
|
|
6626
6767
|
const parsed = parseResult.data;
|
|
6627
|
-
logger$
|
|
6768
|
+
logger$13.info({
|
|
6628
6769
|
storyKey,
|
|
6629
6770
|
expansion_priority: parsed.expansion_priority,
|
|
6630
6771
|
coverage_gaps: parsed.coverage_gaps.length,
|
|
@@ -6649,7 +6790,7 @@ function getArchConstraints(deps) {
|
|
|
6649
6790
|
if (constraints.length === 0) return "";
|
|
6650
6791
|
return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
|
|
6651
6792
|
} catch (err) {
|
|
6652
|
-
logger$
|
|
6793
|
+
logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
|
|
6653
6794
|
return "";
|
|
6654
6795
|
}
|
|
6655
6796
|
}
|
|
@@ -7937,7 +8078,7 @@ function createDoltClient(options) {
|
|
|
7937
8078
|
|
|
7938
8079
|
//#endregion
|
|
7939
8080
|
//#region src/modules/state/index.ts
|
|
7940
|
-
const logger$
|
|
8081
|
+
const logger$12 = createLogger("state:factory");
|
|
7941
8082
|
/**
|
|
7942
8083
|
* Synchronously check whether Dolt is available and a Dolt repo exists at the
|
|
7943
8084
|
* canonical state path under `basePath`.
|
|
@@ -7984,14 +8125,14 @@ function createStateStore(config = {}) {
|
|
|
7984
8125
|
const repoPath = config.basePath ?? process.cwd();
|
|
7985
8126
|
const detection = detectDoltAvailableSync(repoPath);
|
|
7986
8127
|
if (detection.available) {
|
|
7987
|
-
logger$
|
|
8128
|
+
logger$12.debug(`Dolt detected, using DoltStateStore (state path: ${join$1(repoPath, ".substrate", "state")})`);
|
|
7988
8129
|
const client = new DoltClient({ repoPath });
|
|
7989
8130
|
return new DoltStateStore({
|
|
7990
8131
|
repoPath,
|
|
7991
8132
|
client
|
|
7992
8133
|
});
|
|
7993
8134
|
} else {
|
|
7994
|
-
logger$
|
|
8135
|
+
logger$12.debug(`Dolt not found, using FileStateStore (reason: ${detection.reason})`);
|
|
7995
8136
|
return new FileStateStore({ basePath: config.basePath });
|
|
7996
8137
|
}
|
|
7997
8138
|
}
|
|
@@ -8000,7 +8141,7 @@ function createStateStore(config = {}) {
|
|
|
8000
8141
|
|
|
8001
8142
|
//#endregion
|
|
8002
8143
|
//#region src/cli/commands/health.ts
|
|
8003
|
-
const logger$
|
|
8144
|
+
const logger$11 = createLogger("health-cmd");
|
|
8004
8145
|
/** Default stall threshold in seconds — also used by supervisor default */
|
|
8005
8146
|
const DEFAULT_STALL_THRESHOLD_SECONDS = 600;
|
|
8006
8147
|
/**
|
|
@@ -8255,6 +8396,7 @@ async function getAutoHealthData(options) {
|
|
|
8255
8396
|
else if (processInfo.orchestrator_pid !== null && processInfo.child_pids.length > 0 && stalenessSeconds > DEFAULT_STALL_THRESHOLD_SECONDS) verdict = "HEALTHY";
|
|
8256
8397
|
else if (stalenessSeconds > DEFAULT_STALL_THRESHOLD_SECONDS) verdict = "STALLED";
|
|
8257
8398
|
else if (processInfo.orchestrator_pid !== null && processInfo.child_pids.length === 0 && active > 0) verdict = "STALLED";
|
|
8399
|
+
else if (processInfo.orchestrator_pid === null && active > 0) verdict = "STALLED";
|
|
8258
8400
|
else verdict = "HEALTHY";
|
|
8259
8401
|
else if (run.status === "completed" || run.status === "failed" || run.status === "stopped") verdict = "NO_PIPELINE_RUNNING";
|
|
8260
8402
|
const healthOutput = {
|
|
@@ -8321,7 +8463,7 @@ async function runHealthAction(options) {
|
|
|
8321
8463
|
const msg = err instanceof Error ? err.message : String(err);
|
|
8322
8464
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
|
|
8323
8465
|
else process.stderr.write(`Error: ${msg}\n`);
|
|
8324
|
-
logger$
|
|
8466
|
+
logger$11.error({ err }, "health action failed");
|
|
8325
8467
|
return 1;
|
|
8326
8468
|
}
|
|
8327
8469
|
}
|
|
@@ -8368,7 +8510,7 @@ function registerHealthCommand(program, _version = "0.0.0", projectRoot = proces
|
|
|
8368
8510
|
|
|
8369
8511
|
//#endregion
|
|
8370
8512
|
//#region src/modules/implementation-orchestrator/seed-methodology-context.ts
|
|
8371
|
-
const logger$
|
|
8513
|
+
const logger$10 = createLogger("implementation-orchestrator:seed");
|
|
8372
8514
|
/** Max chars for the architecture summary seeded into decisions */
|
|
8373
8515
|
const MAX_ARCH_CHARS = 6e3;
|
|
8374
8516
|
/** Max chars per epic shard (fallback when per-story extraction returns null) */
|
|
@@ -8402,12 +8544,12 @@ function seedMethodologyContext(db, projectRoot) {
|
|
|
8402
8544
|
const testCount = seedTestPatterns(db, projectRoot);
|
|
8403
8545
|
if (testCount === -1) result.skippedCategories.push("test-patterns");
|
|
8404
8546
|
else result.decisionsCreated += testCount;
|
|
8405
|
-
logger$
|
|
8547
|
+
logger$10.info({
|
|
8406
8548
|
decisionsCreated: result.decisionsCreated,
|
|
8407
8549
|
skippedCategories: result.skippedCategories
|
|
8408
8550
|
}, "Methodology context seeding complete");
|
|
8409
8551
|
} catch (err) {
|
|
8410
|
-
logger$
|
|
8552
|
+
logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
|
|
8411
8553
|
}
|
|
8412
8554
|
return result;
|
|
8413
8555
|
}
|
|
@@ -8451,7 +8593,7 @@ function seedArchitecture(db, projectRoot) {
|
|
|
8451
8593
|
});
|
|
8452
8594
|
count = 1;
|
|
8453
8595
|
}
|
|
8454
|
-
logger$
|
|
8596
|
+
logger$10.debug({ count }, "Seeded architecture decisions");
|
|
8455
8597
|
return count;
|
|
8456
8598
|
}
|
|
8457
8599
|
/**
|
|
@@ -8475,11 +8617,11 @@ function seedEpicShards(db, projectRoot) {
|
|
|
8475
8617
|
const storedHashDecision = implementationDecisions.find((d) => d.category === "epic-shard-hash" && d.key === "epics-file");
|
|
8476
8618
|
const storedHash = storedHashDecision?.value;
|
|
8477
8619
|
if (storedHash === currentHash) {
|
|
8478
|
-
logger$
|
|
8620
|
+
logger$10.debug({ hash: currentHash }, "Epic shards up-to-date (hash unchanged) — skipping re-seed");
|
|
8479
8621
|
return -1;
|
|
8480
8622
|
}
|
|
8481
8623
|
if (implementationDecisions.some((d) => d.category === "epic-shard")) {
|
|
8482
|
-
logger$
|
|
8624
|
+
logger$10.debug({
|
|
8483
8625
|
storedHash,
|
|
8484
8626
|
currentHash
|
|
8485
8627
|
}, "Epics file changed — deleting stale epic-shard decisions");
|
|
@@ -8507,7 +8649,7 @@ function seedEpicShards(db, projectRoot) {
|
|
|
8507
8649
|
value: currentHash,
|
|
8508
8650
|
rationale: "SHA-256 hash of epics file content for change detection"
|
|
8509
8651
|
});
|
|
8510
|
-
logger$
|
|
8652
|
+
logger$10.debug({
|
|
8511
8653
|
count,
|
|
8512
8654
|
hash: currentHash
|
|
8513
8655
|
}, "Seeded epic shard decisions");
|
|
@@ -8531,7 +8673,7 @@ function seedTestPatterns(db, projectRoot) {
|
|
|
8531
8673
|
value: patterns.slice(0, MAX_TEST_PATTERNS_CHARS),
|
|
8532
8674
|
rationale: "Detected from project configuration at orchestrator startup"
|
|
8533
8675
|
});
|
|
8534
|
-
logger$
|
|
8676
|
+
logger$10.debug("Seeded test patterns decision");
|
|
8535
8677
|
return 1;
|
|
8536
8678
|
}
|
|
8537
8679
|
/**
|
|
@@ -8704,7 +8846,7 @@ function findArtifact(projectRoot, candidates) {
|
|
|
8704
8846
|
|
|
8705
8847
|
//#endregion
|
|
8706
8848
|
//#region src/modules/agent-dispatch/interface-change-detector.ts
|
|
8707
|
-
const logger$
|
|
8849
|
+
const logger$9 = createLogger("interface-change-detector");
|
|
8708
8850
|
/**
|
|
8709
8851
|
* Extract exported interface and type names from TypeScript source content.
|
|
8710
8852
|
*
|
|
@@ -8751,7 +8893,7 @@ function detectInterfaceChanges(options) {
|
|
|
8751
8893
|
for (const name of names) allNames.add(name);
|
|
8752
8894
|
sourceDirs.push(dirname$1(relPath));
|
|
8753
8895
|
} catch {
|
|
8754
|
-
logger$
|
|
8896
|
+
logger$9.debug({
|
|
8755
8897
|
absPath,
|
|
8756
8898
|
storyKey
|
|
8757
8899
|
}, "Could not read modified file for interface extraction");
|
|
@@ -8792,7 +8934,7 @@ function detectInterfaceChanges(options) {
|
|
|
8792
8934
|
potentiallyAffectedTests: Array.from(affectedTests)
|
|
8793
8935
|
};
|
|
8794
8936
|
} catch (err) {
|
|
8795
|
-
logger$
|
|
8937
|
+
logger$9.warn({
|
|
8796
8938
|
err,
|
|
8797
8939
|
storyKey: options.storyKey
|
|
8798
8940
|
}, "Interface change detection failed — skipping");
|
|
@@ -9083,7 +9225,7 @@ const RecommendationSchema = z.object({
|
|
|
9083
9225
|
|
|
9084
9226
|
//#endregion
|
|
9085
9227
|
//#region src/modules/telemetry/persistence.ts
|
|
9086
|
-
const logger$
|
|
9228
|
+
const logger$8 = createLogger("telemetry:persistence");
|
|
9087
9229
|
/**
|
|
9088
9230
|
* Concrete SQLite-backed telemetry persistence.
|
|
9089
9231
|
*
|
|
@@ -9317,7 +9459,7 @@ var TelemetryPersistence = class {
|
|
|
9317
9459
|
for (const turn of rows) this._insertTurnAnalysis.run(storyKey, turn.spanId, turn.turnNumber, turn.name, turn.timestamp, turn.source, turn.model ?? null, turn.inputTokens, turn.outputTokens, turn.cacheReadTokens, turn.freshTokens, turn.cacheHitRate, turn.costUsd, turn.durationMs, turn.contextSize, turn.contextDelta, turn.toolName ?? null, turn.isContextSpike ? 1 : 0, JSON.stringify(turn.childSpans));
|
|
9318
9460
|
});
|
|
9319
9461
|
insertAll(turns);
|
|
9320
|
-
logger$
|
|
9462
|
+
logger$8.debug({
|
|
9321
9463
|
storyKey,
|
|
9322
9464
|
count: turns.length
|
|
9323
9465
|
}, "Stored turn analysis");
|
|
@@ -9351,7 +9493,7 @@ var TelemetryPersistence = class {
|
|
|
9351
9493
|
}
|
|
9352
9494
|
async storeEfficiencyScore(score) {
|
|
9353
9495
|
this._insertEfficiencyScore.run(score.storyKey, score.timestamp, score.compositeScore, score.cacheHitSubScore, score.ioRatioSubScore, score.contextManagementSubScore, score.avgCacheHitRate, score.avgIoRatio, score.contextSpikeCount, score.totalTurns, JSON.stringify(score.perModelBreakdown), JSON.stringify(score.perSourceBreakdown));
|
|
9354
|
-
logger$
|
|
9496
|
+
logger$8.debug({
|
|
9355
9497
|
storyKey: score.storyKey,
|
|
9356
9498
|
compositeScore: score.compositeScore
|
|
9357
9499
|
}, "Stored efficiency score");
|
|
@@ -9410,7 +9552,7 @@ var TelemetryPersistence = class {
|
|
|
9410
9552
|
for (const rec of rows) this._insertRecommendation.run(rec.id, rec.storyKey, rec.sprintId ?? null, rec.ruleId, rec.severity, rec.title, rec.description, rec.potentialSavingsTokens ?? null, rec.potentialSavingsUsd ?? null, rec.actionTarget ?? null, rec.generatedAt);
|
|
9411
9553
|
});
|
|
9412
9554
|
insertAll(recs);
|
|
9413
|
-
logger$
|
|
9555
|
+
logger$8.debug({
|
|
9414
9556
|
storyKey,
|
|
9415
9557
|
count: recs.length
|
|
9416
9558
|
}, "Saved recommendations");
|
|
@@ -9474,7 +9616,7 @@ var TelemetryPersistence = class {
|
|
|
9474
9616
|
for (const stat$2 of rows) this._insertCategoryStats.run(storyKey, stat$2.category, stat$2.totalTokens, stat$2.percentage, stat$2.eventCount, stat$2.avgTokensPerEvent, stat$2.trend);
|
|
9475
9617
|
});
|
|
9476
9618
|
insertAll(stats);
|
|
9477
|
-
logger$
|
|
9619
|
+
logger$8.debug({
|
|
9478
9620
|
storyKey,
|
|
9479
9621
|
count: stats.length
|
|
9480
9622
|
}, "Stored category stats");
|
|
@@ -9510,7 +9652,7 @@ var TelemetryPersistence = class {
|
|
|
9510
9652
|
for (const consumer of rows) this._insertConsumerStats.run(storyKey, consumer.consumerKey, consumer.category, consumer.totalTokens, consumer.percentage, consumer.eventCount, JSON.stringify(consumer.topInvocations));
|
|
9511
9653
|
});
|
|
9512
9654
|
insertAll(consumers);
|
|
9513
|
-
logger$
|
|
9655
|
+
logger$8.debug({
|
|
9514
9656
|
storyKey,
|
|
9515
9657
|
count: consumers.length
|
|
9516
9658
|
}, "Stored consumer stats");
|
|
@@ -9566,9 +9708,120 @@ var AppError = class extends Error {
|
|
|
9566
9708
|
}
|
|
9567
9709
|
};
|
|
9568
9710
|
|
|
9711
|
+
//#endregion
|
|
9712
|
+
//#region src/modules/telemetry/batch-buffer.ts
|
|
9713
|
+
var BatchBuffer = class extends EventEmitter {
|
|
9714
|
+
_items = [];
|
|
9715
|
+
_timer = null;
|
|
9716
|
+
_batchSize;
|
|
9717
|
+
_flushIntervalMs;
|
|
9718
|
+
constructor(options = {}) {
|
|
9719
|
+
super();
|
|
9720
|
+
this._batchSize = options.batchSize ?? 100;
|
|
9721
|
+
this._flushIntervalMs = options.flushIntervalMs ?? 5e3;
|
|
9722
|
+
}
|
|
9723
|
+
/**
|
|
9724
|
+
* Add an item to the buffer.
|
|
9725
|
+
* Triggers a flush immediately when the buffer reaches `batchSize`.
|
|
9726
|
+
*/
|
|
9727
|
+
push(item) {
|
|
9728
|
+
this._items.push(item);
|
|
9729
|
+
if (this._items.length >= this._batchSize) this._flush();
|
|
9730
|
+
}
|
|
9731
|
+
/**
|
|
9732
|
+
* Start the interval timer that flushes items on a schedule.
|
|
9733
|
+
* Safe to call multiple times — subsequent calls are ignored.
|
|
9734
|
+
*/
|
|
9735
|
+
start() {
|
|
9736
|
+
if (this._timer !== null) return;
|
|
9737
|
+
this._timer = setInterval(() => this._flush(), this._flushIntervalMs);
|
|
9738
|
+
if (typeof this._timer.unref === "function") this._timer.unref();
|
|
9739
|
+
}
|
|
9740
|
+
/**
|
|
9741
|
+
* Stop the interval timer and flush any remaining items.
|
|
9742
|
+
* Safe to call multiple times — subsequent calls are ignored.
|
|
9743
|
+
*/
|
|
9744
|
+
stop() {
|
|
9745
|
+
if (this._timer !== null) {
|
|
9746
|
+
clearInterval(this._timer);
|
|
9747
|
+
this._timer = null;
|
|
9748
|
+
}
|
|
9749
|
+
this._flush();
|
|
9750
|
+
}
|
|
9751
|
+
_flush() {
|
|
9752
|
+
if (this._items.length === 0) return;
|
|
9753
|
+
const items = this._items.splice(0);
|
|
9754
|
+
this.emit("flush", items);
|
|
9755
|
+
}
|
|
9756
|
+
};
|
|
9757
|
+
|
|
9758
|
+
//#endregion
|
|
9759
|
+
//#region src/modules/telemetry/source-detector.ts
|
|
9760
|
+
const SOURCE_DETECTION_TABLE = [
|
|
9761
|
+
{
|
|
9762
|
+
pattern: /claude[\s-]?code/i,
|
|
9763
|
+
source: "claude-code"
|
|
9764
|
+
},
|
|
9765
|
+
{
|
|
9766
|
+
pattern: /claude/i,
|
|
9767
|
+
source: "claude-code"
|
|
9768
|
+
},
|
|
9769
|
+
{
|
|
9770
|
+
pattern: /codex/i,
|
|
9771
|
+
source: "codex"
|
|
9772
|
+
},
|
|
9773
|
+
{
|
|
9774
|
+
pattern: /openai/i,
|
|
9775
|
+
source: "codex"
|
|
9776
|
+
},
|
|
9777
|
+
{
|
|
9778
|
+
pattern: /ollama|llama|local/i,
|
|
9779
|
+
source: "local-llm"
|
|
9780
|
+
}
|
|
9781
|
+
];
|
|
9782
|
+
/**
|
|
9783
|
+
* Extract string values for service.name and telemetry.sdk.name from
|
|
9784
|
+
* raw OTLP resource attributes, supporting both resourceSpans and resourceLogs.
|
|
9785
|
+
*/
|
|
9786
|
+
function extractAttributes(body) {
|
|
9787
|
+
if (!body || typeof body !== "object") return [];
|
|
9788
|
+
const values = [];
|
|
9789
|
+
const keysOfInterest = ["service.name", "telemetry.sdk.name"];
|
|
9790
|
+
const extractFromResources = (resources) => {
|
|
9791
|
+
if (!Array.isArray(resources)) return;
|
|
9792
|
+
for (const entry of resources) {
|
|
9793
|
+
if (!entry?.resource?.attributes) continue;
|
|
9794
|
+
for (const attr of entry.resource.attributes) {
|
|
9795
|
+
if (!attr?.key || !keysOfInterest.includes(attr.key)) continue;
|
|
9796
|
+
const v = attr.value;
|
|
9797
|
+
if (!v) continue;
|
|
9798
|
+
const str = v.stringValue ?? (v.intValue !== void 0 ? String(v.intValue) : void 0) ?? (v.doubleValue !== void 0 ? String(v.doubleValue) : void 0);
|
|
9799
|
+
if (str !== void 0) values.push(str);
|
|
9800
|
+
}
|
|
9801
|
+
}
|
|
9802
|
+
};
|
|
9803
|
+
const payload = body;
|
|
9804
|
+
extractFromResources(payload.resourceSpans);
|
|
9805
|
+
extractFromResources(payload.resourceLogs);
|
|
9806
|
+
return values;
|
|
9807
|
+
}
|
|
9808
|
+
/**
|
|
9809
|
+
* Detect the OTLP source from a raw payload.
|
|
9810
|
+
*
|
|
9811
|
+
* Inspects `service.name` and `telemetry.sdk.name` from resource attributes
|
|
9812
|
+
* in both `resourceSpans` and `resourceLogs` envelope formats.
|
|
9813
|
+
*
|
|
9814
|
+
* Returns 'unknown' when no match is found or input is malformed.
|
|
9815
|
+
*/
|
|
9816
|
+
function detectSource(body) {
|
|
9817
|
+
const values = extractAttributes(body);
|
|
9818
|
+
for (const value of values) for (const { pattern, source } of SOURCE_DETECTION_TABLE) if (pattern.test(value)) return source;
|
|
9819
|
+
return "unknown";
|
|
9820
|
+
}
|
|
9821
|
+
|
|
9569
9822
|
//#endregion
|
|
9570
9823
|
//#region src/modules/telemetry/ingestion-server.ts
|
|
9571
|
-
const logger$
|
|
9824
|
+
const logger$7 = createLogger("telemetry:ingestion-server");
|
|
9572
9825
|
/**
|
|
9573
9826
|
* Error thrown by IngestionServer for server lifecycle violations.
|
|
9574
9827
|
* Extends AppError to align with the project-standard error-handling pattern
|
|
@@ -9588,8 +9841,41 @@ var TelemetryError = class extends AppError {
|
|
|
9588
9841
|
var IngestionServer = class {
|
|
9589
9842
|
_server = null;
|
|
9590
9843
|
_port;
|
|
9844
|
+
_batchSize;
|
|
9845
|
+
_flushIntervalMs;
|
|
9846
|
+
_buffer;
|
|
9847
|
+
_pendingBatches = new Set();
|
|
9591
9848
|
constructor(options = {}) {
|
|
9592
9849
|
this._port = options.port ?? 4318;
|
|
9850
|
+
this._batchSize = options.batchSize ?? 100;
|
|
9851
|
+
this._flushIntervalMs = options.flushIntervalMs ?? 5e3;
|
|
9852
|
+
if (options.pipeline !== void 0) this._initPipeline(options.pipeline);
|
|
9853
|
+
}
|
|
9854
|
+
/**
|
|
9855
|
+
* Wire a TelemetryPipeline before the server is started.
|
|
9856
|
+
* Must be called before start() — has no effect if called after start().
|
|
9857
|
+
*/
|
|
9858
|
+
setPipeline(pipeline) {
|
|
9859
|
+
if (this._server !== null) {
|
|
9860
|
+
logger$7.warn("IngestionServer.setPipeline() called after start() — ignoring");
|
|
9861
|
+
return;
|
|
9862
|
+
}
|
|
9863
|
+
this._initPipeline(pipeline);
|
|
9864
|
+
}
|
|
9865
|
+
_initPipeline(pipeline) {
|
|
9866
|
+
this._buffer = new BatchBuffer({
|
|
9867
|
+
batchSize: this._batchSize,
|
|
9868
|
+
flushIntervalMs: this._flushIntervalMs
|
|
9869
|
+
});
|
|
9870
|
+
this._buffer.on("flush", (items) => {
|
|
9871
|
+
const pending = pipeline.processBatch(items).catch((err) => {
|
|
9872
|
+
logger$7.warn({ err }, "TelemetryPipeline.processBatch failed (batch flush)");
|
|
9873
|
+
});
|
|
9874
|
+
this._pendingBatches.add(pending);
|
|
9875
|
+
pending.then(() => {
|
|
9876
|
+
this._pendingBatches.delete(pending);
|
|
9877
|
+
});
|
|
9878
|
+
});
|
|
9593
9879
|
}
|
|
9594
9880
|
/**
|
|
9595
9881
|
* Start the HTTP ingestion server.
|
|
@@ -9597,36 +9883,40 @@ var IngestionServer = class {
|
|
|
9597
9883
|
*/
|
|
9598
9884
|
async start() {
|
|
9599
9885
|
if (this._server !== null) {
|
|
9600
|
-
logger$
|
|
9886
|
+
logger$7.warn("IngestionServer.start() called while already started — ignoring");
|
|
9601
9887
|
return;
|
|
9602
9888
|
}
|
|
9603
9889
|
return new Promise((resolve$2, reject) => {
|
|
9604
9890
|
const server = createServer(this._handleRequest.bind(this));
|
|
9605
9891
|
server.on("error", (err) => {
|
|
9606
|
-
logger$
|
|
9892
|
+
logger$7.error({ err }, "IngestionServer failed to start");
|
|
9607
9893
|
reject(err);
|
|
9608
9894
|
});
|
|
9609
9895
|
server.listen(this._port, "127.0.0.1", () => {
|
|
9610
9896
|
this._server = server;
|
|
9611
9897
|
const addr = server.address();
|
|
9612
|
-
logger$
|
|
9898
|
+
logger$7.info({ port: addr.port }, "IngestionServer listening");
|
|
9899
|
+
this._buffer?.start();
|
|
9613
9900
|
resolve$2();
|
|
9614
9901
|
});
|
|
9615
9902
|
});
|
|
9616
9903
|
}
|
|
9617
9904
|
/**
|
|
9618
9905
|
* Stop the HTTP ingestion server.
|
|
9906
|
+
* Drains the batch buffer before closing the HTTP server.
|
|
9619
9907
|
* Resolves when the server has closed all connections.
|
|
9620
9908
|
*/
|
|
9621
9909
|
async stop() {
|
|
9622
9910
|
const server = this._server;
|
|
9623
9911
|
if (server === null) return;
|
|
9624
9912
|
this._server = null;
|
|
9913
|
+
this._buffer?.stop();
|
|
9914
|
+
if (this._pendingBatches.size > 0) await Promise.all([...this._pendingBatches]);
|
|
9625
9915
|
return new Promise((resolve$2, reject) => {
|
|
9626
9916
|
server.close((err) => {
|
|
9627
9917
|
if (err !== void 0 && err !== null) reject(err);
|
|
9628
9918
|
else {
|
|
9629
|
-
logger$
|
|
9919
|
+
logger$7.info("IngestionServer stopped");
|
|
9630
9920
|
resolve$2();
|
|
9631
9921
|
}
|
|
9632
9922
|
});
|
|
@@ -9649,22 +9939,42 @@ var IngestionServer = class {
|
|
|
9649
9939
|
OTEL_EXPORTER_OTLP_ENDPOINT: endpoint
|
|
9650
9940
|
};
|
|
9651
9941
|
}
|
|
9652
|
-
_handleRequest(
|
|
9942
|
+
_handleRequest(req, res) {
|
|
9943
|
+
if (req.url === "/health" && req.method === "GET") {
|
|
9944
|
+
res.writeHead(200, { "Content-Type": "application/json" });
|
|
9945
|
+
res.end(JSON.stringify({ status: "ok" }));
|
|
9946
|
+
return;
|
|
9947
|
+
}
|
|
9653
9948
|
const chunks = [];
|
|
9654
|
-
|
|
9949
|
+
req.on("data", (chunk) => {
|
|
9655
9950
|
chunks.push(chunk);
|
|
9656
9951
|
});
|
|
9657
|
-
|
|
9658
|
-
const
|
|
9659
|
-
logger$
|
|
9660
|
-
url:
|
|
9661
|
-
bodyLength:
|
|
9952
|
+
req.on("end", () => {
|
|
9953
|
+
const bodyStr = Buffer.concat(chunks).toString("utf-8");
|
|
9954
|
+
logger$7.trace({
|
|
9955
|
+
url: req.url,
|
|
9956
|
+
bodyLength: bodyStr.length
|
|
9662
9957
|
}, "OTLP payload received");
|
|
9958
|
+
if (this._buffer !== void 0) try {
|
|
9959
|
+
const body = JSON.parse(bodyStr);
|
|
9960
|
+
const source = detectSource(body);
|
|
9961
|
+
const payload = {
|
|
9962
|
+
body,
|
|
9963
|
+
source,
|
|
9964
|
+
receivedAt: Date.now()
|
|
9965
|
+
};
|
|
9966
|
+
this._buffer.push(payload);
|
|
9967
|
+
} catch (err) {
|
|
9968
|
+
logger$7.warn({
|
|
9969
|
+
err,
|
|
9970
|
+
url: req.url
|
|
9971
|
+
}, "Failed to parse OTLP payload JSON — discarding");
|
|
9972
|
+
}
|
|
9663
9973
|
res.writeHead(200, { "Content-Type": "application/json" });
|
|
9664
9974
|
res.end("{}");
|
|
9665
9975
|
});
|
|
9666
|
-
|
|
9667
|
-
logger$
|
|
9976
|
+
req.on("error", (err) => {
|
|
9977
|
+
logger$7.warn({ err }, "Error reading OTLP request body");
|
|
9668
9978
|
res.writeHead(400);
|
|
9669
9979
|
res.end("Bad Request");
|
|
9670
9980
|
});
|
|
@@ -9675,8 +9985,8 @@ var IngestionServer = class {
|
|
|
9675
9985
|
//#region src/modules/telemetry/efficiency-scorer.ts
|
|
9676
9986
|
var EfficiencyScorer = class {
|
|
9677
9987
|
_logger;
|
|
9678
|
-
constructor(logger$
|
|
9679
|
-
this._logger = logger$
|
|
9988
|
+
constructor(logger$27) {
|
|
9989
|
+
this._logger = logger$27;
|
|
9680
9990
|
}
|
|
9681
9991
|
/**
|
|
9682
9992
|
* Compute an efficiency score for a story given its turn analyses.
|
|
@@ -9911,8 +10221,8 @@ const ALL_CATEGORIES = [
|
|
|
9911
10221
|
];
|
|
9912
10222
|
var Categorizer = class {
|
|
9913
10223
|
_logger;
|
|
9914
|
-
constructor(logger$
|
|
9915
|
-
this._logger = logger$
|
|
10224
|
+
constructor(logger$27) {
|
|
10225
|
+
this._logger = logger$27;
|
|
9916
10226
|
}
|
|
9917
10227
|
/**
|
|
9918
10228
|
* Classify an operation into a SemanticCategory using three-tier logic.
|
|
@@ -10038,9 +10348,9 @@ function extractToolNameFromSpan(span) {
|
|
|
10038
10348
|
var ConsumerAnalyzer = class {
|
|
10039
10349
|
_categorizer;
|
|
10040
10350
|
_logger;
|
|
10041
|
-
constructor(categorizer, logger$
|
|
10351
|
+
constructor(categorizer, logger$27) {
|
|
10042
10352
|
this._categorizer = categorizer;
|
|
10043
|
-
this._logger = logger$
|
|
10353
|
+
this._logger = logger$27;
|
|
10044
10354
|
}
|
|
10045
10355
|
/**
|
|
10046
10356
|
* Group spans by consumer key, rank by totalTokens descending, and return
|
|
@@ -10114,8 +10424,1076 @@ var ConsumerAnalyzer = class {
|
|
|
10114
10424
|
};
|
|
10115
10425
|
|
|
10116
10426
|
//#endregion
|
|
10117
|
-
//#region src/modules/
|
|
10118
|
-
|
|
10427
|
+
//#region src/modules/telemetry/recommender.ts
|
|
10428
|
+
var Recommender = class {
|
|
10429
|
+
_logger;
|
|
10430
|
+
constructor(logger$27) {
|
|
10431
|
+
this._logger = logger$27;
|
|
10432
|
+
}
|
|
10433
|
+
/**
|
|
10434
|
+
* Run all 8 rules against the given context and return sorted recommendations.
|
|
10435
|
+
* Output is sorted: critical → warning → info, then by potentialSavingsTokens descending.
|
|
10436
|
+
* No Date.now() or Math.random() is called — generatedAt comes from context.
|
|
10437
|
+
*/
|
|
10438
|
+
analyze(context) {
|
|
10439
|
+
const allRecs = [
|
|
10440
|
+
...this._runBiggestConsumers(context),
|
|
10441
|
+
...this._runExpensiveBash(context),
|
|
10442
|
+
...this._runLargeFileReads(context),
|
|
10443
|
+
...this._runRepeatedToolCalls(context),
|
|
10444
|
+
...this._runContextGrowthSpikes(context),
|
|
10445
|
+
...this._runGrowingCategories(context),
|
|
10446
|
+
...this._runCacheEfficiency(context),
|
|
10447
|
+
...this._runModelComparison(context)
|
|
10448
|
+
];
|
|
10449
|
+
const severityOrder = {
|
|
10450
|
+
critical: 0,
|
|
10451
|
+
warning: 1,
|
|
10452
|
+
info: 2
|
|
10453
|
+
};
|
|
10454
|
+
return allRecs.sort((a, b) => {
|
|
10455
|
+
const sA = severityOrder[a.severity];
|
|
10456
|
+
const sB = severityOrder[b.severity];
|
|
10457
|
+
if (sA !== sB) return sA - sB;
|
|
10458
|
+
const savA = a.potentialSavingsTokens ?? 0;
|
|
10459
|
+
const savB = b.potentialSavingsTokens ?? 0;
|
|
10460
|
+
return savB - savA;
|
|
10461
|
+
});
|
|
10462
|
+
}
|
|
10463
|
+
/**
|
|
10464
|
+
* Generate a 16-char hex ID from the sha256 of `ruleId:storyKey:actionTarget:index`.
|
|
10465
|
+
*/
|
|
10466
|
+
_makeId(ruleId, storyKey, actionTarget, index) {
|
|
10467
|
+
return createHash("sha256").update(`${ruleId}:${storyKey}:${actionTarget}:${index}`).digest("hex").slice(0, 16);
|
|
10468
|
+
}
|
|
10469
|
+
/**
|
|
10470
|
+
* Map a token percentage to a severity level.
|
|
10471
|
+
* >25% → critical, >10% → warning, ≤10% → info.
|
|
10472
|
+
*/
|
|
10473
|
+
_assignSeverity(tokenPercent) {
|
|
10474
|
+
if (tokenPercent > 25) return "critical";
|
|
10475
|
+
if (tokenPercent > 10) return "warning";
|
|
10476
|
+
return "info";
|
|
10477
|
+
}
|
|
10478
|
+
/**
|
|
10479
|
+
* Compute total tokens across all spans. Guards against empty arrays.
|
|
10480
|
+
*/
|
|
10481
|
+
_totalSpanTokens(spans) {
|
|
10482
|
+
return spans.reduce((sum, s) => sum + s.inputTokens + s.outputTokens, 0);
|
|
10483
|
+
}
|
|
10484
|
+
/**
|
|
10485
|
+
* Identify top 3 token consumers (by inputTokens + outputTokens) where pct >5%.
|
|
10486
|
+
* Severity based on the consumer's percentage of total tokens.
|
|
10487
|
+
*/
|
|
10488
|
+
_runBiggestConsumers(ctx) {
|
|
10489
|
+
const { consumers, storyKey, sprintId, generatedAt } = ctx;
|
|
10490
|
+
if (consumers.length === 0) return [];
|
|
10491
|
+
const grandTotal = consumers.reduce((sum, c) => sum + c.totalTokens, 0);
|
|
10492
|
+
if (grandTotal === 0) return [];
|
|
10493
|
+
const sorted = [...consumers].sort((a, b) => b.totalTokens - a.totalTokens);
|
|
10494
|
+
const top3 = sorted.slice(0, 3).filter((c) => {
|
|
10495
|
+
return c.percentage > 5;
|
|
10496
|
+
});
|
|
10497
|
+
return top3.map((consumer, index) => {
|
|
10498
|
+
const pct = consumer.percentage;
|
|
10499
|
+
const severity = this._assignSeverity(pct);
|
|
10500
|
+
const actionTarget = consumer.consumerKey;
|
|
10501
|
+
const id = this._makeId("biggest_consumers", storyKey, actionTarget, index);
|
|
10502
|
+
return {
|
|
10503
|
+
id,
|
|
10504
|
+
storyKey,
|
|
10505
|
+
sprintId,
|
|
10506
|
+
ruleId: "biggest_consumers",
|
|
10507
|
+
severity,
|
|
10508
|
+
title: `High token consumer: ${consumer.consumerKey}`,
|
|
10509
|
+
description: `"${consumer.consumerKey}" consumed ${consumer.totalTokens.toLocaleString()} tokens (${pct.toFixed(1)}% of total). Consider reducing the frequency or size of this operation.`,
|
|
10510
|
+
potentialSavingsTokens: Math.round(consumer.totalTokens * .3),
|
|
10511
|
+
actionTarget,
|
|
10512
|
+
generatedAt
|
|
10513
|
+
};
|
|
10514
|
+
});
|
|
10515
|
+
}
|
|
10516
|
+
/**
|
|
10517
|
+
* Flag file-read spans with inputTokens > 3000.
|
|
10518
|
+
* Suggest using line ranges to reduce token count.
|
|
10519
|
+
*/
|
|
10520
|
+
_runLargeFileReads(ctx) {
|
|
10521
|
+
const { allSpans, storyKey, sprintId, generatedAt } = ctx;
|
|
10522
|
+
if (allSpans.length === 0) return [];
|
|
10523
|
+
const grandTotal = this._totalSpanTokens(allSpans);
|
|
10524
|
+
const largReads = allSpans.filter((s) => s.operationName === "file_read" && s.inputTokens > 3e3);
|
|
10525
|
+
return largReads.map((span, index) => {
|
|
10526
|
+
const pct = grandTotal > 0 ? (span.inputTokens + span.outputTokens) / grandTotal * 100 : 0;
|
|
10527
|
+
const severity = this._assignSeverity(pct);
|
|
10528
|
+
const actionTarget = span.attributes?.["file.path"] ?? span.name;
|
|
10529
|
+
const id = this._makeId("large_file_reads", storyKey, actionTarget, index);
|
|
10530
|
+
return {
|
|
10531
|
+
id,
|
|
10532
|
+
storyKey,
|
|
10533
|
+
sprintId,
|
|
10534
|
+
ruleId: "large_file_reads",
|
|
10535
|
+
severity,
|
|
10536
|
+
title: `Large file read: ${actionTarget}`,
|
|
10537
|
+
description: `File read of "${actionTarget}" consumed ${span.inputTokens.toLocaleString()} input tokens. Consider specifying line ranges (e.g., offset/limit) to reduce context size.`,
|
|
10538
|
+
potentialSavingsTokens: Math.round(span.inputTokens * .5),
|
|
10539
|
+
actionTarget,
|
|
10540
|
+
generatedAt
|
|
10541
|
+
};
|
|
10542
|
+
});
|
|
10543
|
+
}
|
|
10544
|
+
/**
|
|
10545
|
+
* Flag bash/execute_command spans with outputTokens > 3000.
|
|
10546
|
+
* Suggest filtering or truncating command output.
|
|
10547
|
+
*/
|
|
10548
|
+
_runExpensiveBash(ctx) {
|
|
10549
|
+
const { allSpans, storyKey, sprintId, generatedAt } = ctx;
|
|
10550
|
+
if (allSpans.length === 0) return [];
|
|
10551
|
+
const grandTotal = this._totalSpanTokens(allSpans);
|
|
10552
|
+
const expensiveBash = allSpans.filter((s) => (s.attributes?.["tool.name"] === "bash" || s.attributes?.["tool.name"] === "execute_command" || s.name === "bash" || s.name === "execute_command" || s.operationName !== void 0 && (s.operationName === "bash" || s.operationName === "execute_command")) && s.outputTokens > 3e3);
|
|
10553
|
+
return expensiveBash.map((span, index) => {
|
|
10554
|
+
const pct = grandTotal > 0 ? (span.inputTokens + span.outputTokens) / grandTotal * 100 : 0;
|
|
10555
|
+
const severity = this._assignSeverity(pct);
|
|
10556
|
+
const actionTarget = span.attributes?.["bash.command"] ?? span.name ?? "bash";
|
|
10557
|
+
const id = this._makeId("expensive_bash", storyKey, actionTarget, index);
|
|
10558
|
+
return {
|
|
10559
|
+
id,
|
|
10560
|
+
storyKey,
|
|
10561
|
+
sprintId,
|
|
10562
|
+
ruleId: "expensive_bash",
|
|
10563
|
+
severity,
|
|
10564
|
+
title: `Expensive bash output: ${actionTarget}`,
|
|
10565
|
+
description: `Bash command "${actionTarget}" produced ${span.outputTokens.toLocaleString()} output tokens. Consider filtering output (e.g., piping to head/grep) to reduce token consumption.`,
|
|
10566
|
+
potentialSavingsTokens: Math.round(span.outputTokens * .5),
|
|
10567
|
+
actionTarget,
|
|
10568
|
+
generatedAt
|
|
10569
|
+
};
|
|
10570
|
+
});
|
|
10571
|
+
}
|
|
10572
|
+
/**
|
|
10573
|
+
* Detect tool calls with the same actionTarget appearing more than once.
|
|
10574
|
+
* Suggests caching the result to avoid redundant token consumption.
|
|
10575
|
+
*/
|
|
10576
|
+
_runRepeatedToolCalls(ctx) {
|
|
10577
|
+
const { turns, storyKey, sprintId, generatedAt, allSpans } = ctx;
|
|
10578
|
+
const allChildSpans = [];
|
|
10579
|
+
for (const turn of turns) for (const child of turn.childSpans) allChildSpans.push({
|
|
10580
|
+
toolName: child.toolName,
|
|
10581
|
+
name: child.name
|
|
10582
|
+
});
|
|
10583
|
+
if (allChildSpans.length === 0 && allSpans.length > 0) for (const span of allSpans) allChildSpans.push({
|
|
10584
|
+
toolName: span.attributes?.["tool.name"],
|
|
10585
|
+
name: span.name,
|
|
10586
|
+
actionTarget: span.attributes?.["file.path"]
|
|
10587
|
+
});
|
|
10588
|
+
const groups = new Map();
|
|
10589
|
+
for (const span of allChildSpans) {
|
|
10590
|
+
const key = `${span.toolName ?? ""}:${span.actionTarget ?? span.name}`;
|
|
10591
|
+
groups.set(key, (groups.get(key) ?? 0) + 1);
|
|
10592
|
+
}
|
|
10593
|
+
const recommendations = [];
|
|
10594
|
+
let index = 0;
|
|
10595
|
+
for (const [key, count] of groups) if (count > 1) {
|
|
10596
|
+
const id = this._makeId("repeated_tool_calls", storyKey, key, index);
|
|
10597
|
+
recommendations.push({
|
|
10598
|
+
id,
|
|
10599
|
+
storyKey,
|
|
10600
|
+
sprintId,
|
|
10601
|
+
ruleId: "repeated_tool_calls",
|
|
10602
|
+
severity: "warning",
|
|
10603
|
+
title: `Repeated tool call: ${key}`,
|
|
10604
|
+
description: `"${key}" was invoked ${count} times. Consider caching the result after the first call to avoid redundant token consumption.`,
|
|
10605
|
+
actionTarget: key,
|
|
10606
|
+
generatedAt
|
|
10607
|
+
});
|
|
10608
|
+
index++;
|
|
10609
|
+
}
|
|
10610
|
+
return recommendations;
|
|
10611
|
+
}
|
|
10612
|
+
/**
|
|
10613
|
+
* Flag turns where isContextSpike is true.
|
|
10614
|
+
* Severity is always at least 'warning'.
|
|
10615
|
+
*/
|
|
10616
|
+
_runContextGrowthSpikes(ctx) {
|
|
10617
|
+
const { turns, storyKey, sprintId, generatedAt, allSpans } = ctx;
|
|
10618
|
+
if (turns.length === 0) return [];
|
|
10619
|
+
const grandTotal = this._totalSpanTokens(allSpans);
|
|
10620
|
+
const spiketurns = turns.filter((t) => t.isContextSpike);
|
|
10621
|
+
return spiketurns.map((turn, index) => {
|
|
10622
|
+
const pct = grandTotal > 0 ? (turn.inputTokens + turn.outputTokens) / grandTotal * 100 : 0;
|
|
10623
|
+
const baseSeverity = this._assignSeverity(pct);
|
|
10624
|
+
const severity = baseSeverity === "info" ? "warning" : baseSeverity;
|
|
10625
|
+
const topContributors = [...turn.childSpans].sort((a, b) => b.inputTokens + b.outputTokens - (a.inputTokens + a.outputTokens)).slice(0, 3).map((c) => c.name);
|
|
10626
|
+
const actionTarget = `turn:${turn.turnNumber}`;
|
|
10627
|
+
const id = this._makeId("context_growth_spike", storyKey, actionTarget, index);
|
|
10628
|
+
return {
|
|
10629
|
+
id,
|
|
10630
|
+
storyKey,
|
|
10631
|
+
sprintId,
|
|
10632
|
+
ruleId: "context_growth_spike",
|
|
10633
|
+
severity,
|
|
10634
|
+
title: `Context spike at turn ${turn.turnNumber}`,
|
|
10635
|
+
description: `Turn ${turn.turnNumber} had a context spike with ${turn.inputTokens.toLocaleString()} input tokens. Top contributors: ${topContributors.length > 0 ? topContributors.join(", ") : "none identified"}. Consider compressing or evicting context before this turn.`,
|
|
10636
|
+
potentialSavingsTokens: Math.round(turn.contextDelta * .3),
|
|
10637
|
+
actionTarget,
|
|
10638
|
+
generatedAt
|
|
10639
|
+
};
|
|
10640
|
+
});
|
|
10641
|
+
}
|
|
10642
|
+
/**
|
|
10643
|
+
* Flag semantic categories with trend === 'growing'.
|
|
10644
|
+
* Severity is 'info' by default; 'warning' if percentage > 25%.
|
|
10645
|
+
*/
|
|
10646
|
+
_runGrowingCategories(ctx) {
|
|
10647
|
+
const { categories, storyKey, sprintId, generatedAt } = ctx;
|
|
10648
|
+
if (categories.length === 0) return [];
|
|
10649
|
+
const growing = categories.filter((c) => c.trend === "growing");
|
|
10650
|
+
return growing.map((cat, index) => {
|
|
10651
|
+
const severity = cat.percentage > 25 ? "warning" : "info";
|
|
10652
|
+
const actionTarget = cat.category;
|
|
10653
|
+
const id = this._makeId("growing_categories", storyKey, actionTarget, index);
|
|
10654
|
+
return {
|
|
10655
|
+
id,
|
|
10656
|
+
storyKey,
|
|
10657
|
+
sprintId,
|
|
10658
|
+
ruleId: "growing_categories",
|
|
10659
|
+
severity,
|
|
10660
|
+
title: `Growing category: ${cat.category}`,
|
|
10661
|
+
description: `The "${cat.category}" category is growing across turns, currently at ${cat.percentage.toFixed(1)}% of total tokens (${cat.totalTokens.toLocaleString()} tokens). This trend suggests increasing context pressure from this source.`,
|
|
10662
|
+
potentialSavingsTokens: Math.round(cat.totalTokens * .2),
|
|
10663
|
+
actionTarget,
|
|
10664
|
+
generatedAt
|
|
10665
|
+
};
|
|
10666
|
+
});
|
|
10667
|
+
}
|
|
10668
|
+
/**
|
|
10669
|
+
* If cache hit rate < 30%, flag the worst-performing operations and compute
|
|
10670
|
+
* potential savings as totalCacheMissTokens * 0.5.
|
|
10671
|
+
*/
|
|
10672
|
+
_runCacheEfficiency(ctx) {
|
|
10673
|
+
const { efficiencyScore, allSpans, storyKey, sprintId, generatedAt } = ctx;
|
|
10674
|
+
const cacheHitRate = isNaN(efficiencyScore.avgCacheHitRate) ? 0 : efficiencyScore.avgCacheHitRate;
|
|
10675
|
+
if (cacheHitRate >= .3) return [];
|
|
10676
|
+
if (allSpans.length === 0) return [];
|
|
10677
|
+
const totalCacheMissTokens = allSpans.reduce((sum, s) => {
|
|
10678
|
+
const missTokens = s.inputTokens - s.cacheReadTokens;
|
|
10679
|
+
return sum + Math.max(0, missTokens);
|
|
10680
|
+
}, 0);
|
|
10681
|
+
if (totalCacheMissTokens === 0) return [];
|
|
10682
|
+
const potentialSavingsTokens = Math.round(totalCacheMissTokens * .5);
|
|
10683
|
+
const spansWithRate = allSpans.filter((s) => s.inputTokens > 0).map((s) => ({
|
|
10684
|
+
span: s,
|
|
10685
|
+
hitRate: s.cacheReadTokens / s.inputTokens
|
|
10686
|
+
})).sort((a, b) => a.hitRate - b.hitRate).slice(0, 3);
|
|
10687
|
+
const worstOps = spansWithRate.map((e) => e.span.name).join(", ");
|
|
10688
|
+
const actionTarget = worstOps || "unknown";
|
|
10689
|
+
const id = this._makeId("cache_efficiency", storyKey, actionTarget, 0);
|
|
10690
|
+
this._logger.debug({
|
|
10691
|
+
storyKey,
|
|
10692
|
+
cacheHitRate,
|
|
10693
|
+
potentialSavingsTokens
|
|
10694
|
+
}, "cache_efficiency recommendation generated");
|
|
10695
|
+
return [{
|
|
10696
|
+
id,
|
|
10697
|
+
storyKey,
|
|
10698
|
+
sprintId,
|
|
10699
|
+
ruleId: "cache_efficiency",
|
|
10700
|
+
severity: "warning",
|
|
10701
|
+
title: "Low cache hit rate",
|
|
10702
|
+
description: `Overall cache hit rate is ${(cacheHitRate * 100).toFixed(1)}% (below 30% threshold). Worst performing operations: ${worstOps || "none identified"}. Potential savings if hit rate reached 50%: ${potentialSavingsTokens.toLocaleString()} tokens.`,
|
|
10703
|
+
potentialSavingsTokens,
|
|
10704
|
+
actionTarget,
|
|
10705
|
+
generatedAt
|
|
10706
|
+
}];
|
|
10707
|
+
}
|
|
10708
|
+
/**
|
|
10709
|
+
* If more than one model is present, flag the underperforming model.
|
|
10710
|
+
* Severity is 'info' by default; 'warning' if cache efficiency gap > 20pp.
|
|
10711
|
+
*/
|
|
10712
|
+
_runModelComparison(ctx) {
|
|
10713
|
+
const { efficiencyScore, storyKey, sprintId, generatedAt } = ctx;
|
|
10714
|
+
const models = efficiencyScore.perModelBreakdown;
|
|
10715
|
+
if (models.length <= 1) return [];
|
|
10716
|
+
const sorted = [...models].sort((a, b) => b.cacheHitRate - a.cacheHitRate);
|
|
10717
|
+
const best = sorted[0];
|
|
10718
|
+
const worst = sorted[sorted.length - 1];
|
|
10719
|
+
if (best.model === worst.model) return [];
|
|
10720
|
+
const gapPP = (best.cacheHitRate - worst.cacheHitRate) * 100;
|
|
10721
|
+
const severity = gapPP > 20 ? "warning" : "info";
|
|
10722
|
+
const actionTarget = worst.model;
|
|
10723
|
+
const id = this._makeId("per_model_comparison", storyKey, actionTarget, 0);
|
|
10724
|
+
return [{
|
|
10725
|
+
id,
|
|
10726
|
+
storyKey,
|
|
10727
|
+
sprintId,
|
|
10728
|
+
ruleId: "per_model_comparison",
|
|
10729
|
+
severity,
|
|
10730
|
+
title: `Underperforming model: ${worst.model}`,
|
|
10731
|
+
description: `Model "${worst.model}" has a cache hit rate of ${(worst.cacheHitRate * 100).toFixed(1)}% vs. "${best.model}" at ${(best.cacheHitRate * 100).toFixed(1)}% (gap: ${gapPP.toFixed(1)} percentage points). Consider routing tasks to the higher-performing model.`,
|
|
10732
|
+
actionTarget,
|
|
10733
|
+
generatedAt
|
|
10734
|
+
}];
|
|
10735
|
+
}
|
|
10736
|
+
_isToolNameMatch(span) {
|
|
10737
|
+
const toolName = span.attributes?.["tool.name"];
|
|
10738
|
+
return toolName === "bash" || toolName === "execute_command" || span.name === "bash" || span.name === "execute_command" || span.operationName === "bash" || span.operationName === "execute_command";
|
|
10739
|
+
}
|
|
10740
|
+
};
|
|
10741
|
+
|
|
10742
|
+
//#endregion
|
|
10743
|
+
//#region src/modules/telemetry/turn-analyzer.ts
|
|
10744
|
+
var TurnAnalyzer = class {
	_logger;
	constructor(logger) {
		this._logger = logger;
	}
	/**
	 * Analyze a list of NormalizedSpan records and produce TurnAnalysis[].
	 *
	 * Returns an empty array immediately when spans is empty.
	 *
	 * @param spans - All spans for a story (root and child spans mixed)
	 */
	analyze(spans) {
		if (spans.length === 0) return [];
		const knownIds = new Set(spans.map((s) => s.spanId));
		// A root span either has no parent, or its parent is outside this batch.
		const roots = spans
			.filter((s) => !s.parentSpanId || !knownIds.has(s.parentSpanId))
			.sort((a, b) => a.startTime - b.startTime);
		// Index child spans under their (present) parent.
		const childrenOf = new Map();
		for (const span of spans) {
			if (!span.parentSpanId || !knownIds.has(span.parentSpanId)) continue;
			const bucket = childrenOf.get(span.parentSpanId);
			if (bucket) bucket.push(span);
			else childrenOf.set(span.parentSpanId, [span]);
		}
		// Walk roots in time order, accumulating a running context size.
		let contextSoFar = 0;
		const turns = roots.map((span, idx) => {
			const before = contextSoFar;
			contextSoFar += span.inputTokens;
			const childSummaries = (childrenOf.get(span.spanId) ?? []).map((child) => ({
				spanId: child.spanId,
				name: child.name,
				toolName: child.attributes?.["tool.name"],
				inputTokens: child.inputTokens,
				outputTokens: child.outputTokens,
				durationMs: child.durationMs
			}));
			return {
				spanId: span.spanId,
				turnNumber: idx + 1,
				name: span.name,
				timestamp: span.startTime,
				source: span.source,
				model: span.model,
				inputTokens: span.inputTokens,
				outputTokens: span.outputTokens,
				cacheReadTokens: span.cacheReadTokens,
				freshTokens: span.inputTokens - span.cacheReadTokens,
				cacheHitRate: span.inputTokens > 0 ? span.cacheReadTokens / span.inputTokens : 0,
				costUsd: span.costUsd,
				durationMs: span.durationMs,
				contextSize: contextSoFar,
				contextDelta: contextSoFar - before,
				toolName: span.attributes?.["tool.name"],
				isContextSpike: false,
				childSpans: childSummaries
			};
		});
		// A turn is a spike when its input tokens exceed twice the mean.
		const avg = turns.reduce((sum, t) => sum + t.inputTokens, 0) / turns.length;
		for (const turn of turns) {
			turn.isContextSpike = avg > 0 && turn.inputTokens > 2 * avg;
		}
		this._logger.debug({
			turnCount: turns.length,
			avg
		}, "TurnAnalyzer.analyze complete");
		return turns;
	}
};
|
|
10811
|
+
|
|
10812
|
+
//#endregion
|
|
10813
|
+
//#region src/modules/telemetry/cost-table.ts
|
|
10814
|
+
/**
 * Per-million-token pricing for known LLM models.
 * All prices are in USD.
 *
 * NOTE: key order matters — resolveModel$1 iterates these keys in insertion
 * order when doing fuzzy substring matching, so do not reorder casually.
 * Values are point-in-time list prices; verify against current provider
 * pricing pages before relying on them for billing.
 */
const COST_TABLE = {
	// Anthropic Claude family
	"claude-3-opus-20240229": {
		inputPerMToken: 15,
		outputPerMToken: 75,
		cacheReadPerMToken: 1.5,
		cacheCreationPerMToken: 18.75
	},
	"claude-3-5-sonnet-20241022": {
		inputPerMToken: 3,
		outputPerMToken: 15,
		cacheReadPerMToken: .3,
		cacheCreationPerMToken: 3.75
	},
	"claude-3-5-haiku-20241022": {
		inputPerMToken: .8,
		outputPerMToken: 4,
		cacheReadPerMToken: .08,
		cacheCreationPerMToken: 1
	},
	"claude-3-haiku-20240307": {
		inputPerMToken: .25,
		outputPerMToken: 1.25,
		cacheReadPerMToken: .03,
		cacheCreationPerMToken: .3
	},
	"claude-3-sonnet-20240229": {
		inputPerMToken: 3,
		outputPerMToken: 15,
		cacheReadPerMToken: .3,
		cacheCreationPerMToken: 3.75
	},
	// OpenAI GPT family
	"gpt-4": {
		inputPerMToken: 30,
		outputPerMToken: 60,
		cacheReadPerMToken: 3,
		cacheCreationPerMToken: 30
	},
	"gpt-4-turbo": {
		inputPerMToken: 10,
		outputPerMToken: 30,
		cacheReadPerMToken: 1,
		cacheCreationPerMToken: 10
	},
	"gpt-3.5-turbo": {
		inputPerMToken: .5,
		outputPerMToken: 1.5,
		cacheReadPerMToken: .05,
		cacheCreationPerMToken: .5
	}
};
|
|
10868
|
+
/**
 * Resolve a model string to a key in COST_TABLE.
 * Returns the matched key, or undefined if not found.
 *
 * Performs exact match first, then case-insensitive exact match, then
 * case-insensitive substring match. Among substring matches the LONGEST
 * key wins, so e.g. "gpt-4-turbo-2024" resolves to "gpt-4-turbo" rather
 * than the shorter (and wrongly priced) "gpt-4", which the previous
 * first-match behavior returned because "gpt-4" precedes "gpt-4-turbo"
 * in COST_TABLE's key order.
 */
function resolveModel$1(model) {
	if (model in COST_TABLE) return model;
	const lower = model.toLowerCase();
	for (const key of Object.keys(COST_TABLE)) if (key.toLowerCase() === lower) return key;
	let bestKey;
	for (const key of Object.keys(COST_TABLE)) {
		const keyLower = key.toLowerCase();
		if (lower.includes(keyLower) || keyLower.includes(lower)) {
			// Prefer the most specific (longest) matching key.
			if (bestKey === void 0 || key.length > bestKey.length) bestKey = key;
		}
	}
	return bestKey;
}
|
|
10881
|
+
/**
 * Estimate the cost in USD for a set of token counts and a model identifier.
 *
 * - Uses `cacheReadPerMToken` from the table directly (already discounted).
 * - Returns 0 for unknown models without throwing.
 *
 * @param model - Model identifier string (exact or fuzzy match against COST_TABLE)
 * @param tokens - Token counts object
 * @returns Estimated cost in USD
 */
function estimateCost(model, tokens) {
	const resolvedKey = resolveModel$1(model);
	if (resolvedKey === void 0) return 0;
	const pricing = COST_TABLE[resolvedKey];
	// Rates are quoted per million tokens.
	const costOf = (count, ratePerM) => count / 1e6 * ratePerM;
	return costOf(tokens.input, pricing.inputPerMToken)
		+ costOf(tokens.output, pricing.outputPerMToken)
		+ costOf(tokens.cacheRead, pricing.cacheReadPerMToken)
		+ costOf(tokens.cacheCreation, pricing.cacheCreationPerMToken);
}
|
|
10902
|
+
|
|
10903
|
+
//#endregion
|
|
10904
|
+
//#region src/modules/telemetry/timestamp-normalizer.ts
|
|
10905
|
+
/**
|
|
10906
|
+
* Timestamp normalization for OTLP telemetry payloads.
|
|
10907
|
+
*
|
|
10908
|
+
* OTLP payloads use nanosecond integers for timestamps (e.g. `startTimeUnixNano`).
|
|
10909
|
+
* Claude Code and other providers may emit timestamps in various formats.
|
|
10910
|
+
*
|
|
10911
|
+
* `normalizeTimestamp()` accepts any unknown value and returns a Unix millisecond
|
|
10912
|
+
* number. Null/undefined/unparseable inputs fall back to `Date.now()`.
|
|
10913
|
+
*
|
|
10914
|
+
* Detection order (after ISO string check):
|
|
10915
|
+
* 1. Nanoseconds (>= 1e18)
|
|
10916
|
+
* 2. Microseconds (>= 1e15)
|
|
10917
|
+
* 3. Milliseconds (>= 1e12)
|
|
10918
|
+
* 4. Seconds (< 1e12)
|
|
10919
|
+
*/
|
|
10920
|
+
/**
 * Normalize any timestamp value to Unix milliseconds.
 *
 * Handles:
 * - ISO 8601 strings (e.g. "2024-03-08T12:00:00Z")
 * - Nanosecond integers or numeric strings (>= 1e18)
 * - Microsecond integers or numeric strings (>= 1e15)
 * - Millisecond integers or numeric strings (>= 1e12)
 * - Second integers or numeric strings (< 1e12)
 * - BigInt string values from OTLP `startTimeUnixNano` (e.g. "1709900000000000000")
 * - null / undefined / unparseable → falls back to Date.now()
 *
 * @param value - Raw timestamp value of unknown type
 * @returns Unix millisecond timestamp
 */
function normalizeTimestamp(value) {
	// == null matches both null and undefined.
	if (value == null) return Date.now();
	if (typeof value === "string") {
		// ISO-looking strings go through Date.parse first.
		if (isIsoDateString(value)) {
			const parsedMs = Date.parse(value);
			if (!isNaN(parsedMs)) return parsedMs;
		}
		// Pure digit strings may exceed Number precision; parse via BigInt.
		const digits = value.trim();
		if (/^\d+$/.test(digits)) {
			try {
				return bigIntToMillis(BigInt(digits));
			} catch {
				// Unparseable digits — fall through to the Date.now() fallback.
			}
		}
		return Date.now();
	}
	if (typeof value === "bigint") return bigIntToMillis(value);
	if (typeof value === "number") {
		return isFinite(value) ? numericToMillis(value) : Date.now();
	}
	return Date.now();
}
|
10956
|
+
/**
|
|
10957
|
+
* Returns true if the string looks like an ISO 8601 date string
|
|
10958
|
+
* (contains letters or dashes/colons in date-like positions).
|
|
10959
|
+
*/
|
|
10960
|
+
function isIsoDateString(value) {
|
|
10961
|
+
return /^\d{4}-\d{2}-\d{2}/.test(value) || /Z$/.test(value) || /[+-]\d{2}:\d{2}$/.test(value) || value.includes("T");
|
|
10962
|
+
}
|
|
10963
|
+
/**
|
|
10964
|
+
* Convert a BigInt nanosecond/microsecond/millisecond/second value to milliseconds.
|
|
10965
|
+
*/
|
|
10966
|
+
function bigIntToMillis(value) {
|
|
10967
|
+
const NS_THRESHOLD = BigInt("1000000000000000000");
|
|
10968
|
+
const US_THRESHOLD = BigInt("1000000000000000");
|
|
10969
|
+
const MS_THRESHOLD = BigInt("1000000000000");
|
|
10970
|
+
if (value >= NS_THRESHOLD) return Number(value / BigInt(1e6));
|
|
10971
|
+
else if (value >= US_THRESHOLD) return Number(value / BigInt(1e3));
|
|
10972
|
+
else if (value >= MS_THRESHOLD) return Number(value);
|
|
10973
|
+
else return Number(value) * 1e3;
|
|
10974
|
+
}
|
|
10975
|
+
/**
|
|
10976
|
+
* Convert a numeric value to milliseconds based on magnitude.
|
|
10977
|
+
*/
|
|
10978
|
+
function numericToMillis(value) {
|
|
10979
|
+
if (value >= 1e18) return Math.floor(value / 1e6);
|
|
10980
|
+
else if (value >= 1e15) return Math.floor(value / 1e3);
|
|
10981
|
+
else if (value >= 1e12) return Math.floor(value);
|
|
10982
|
+
else return Math.floor(value * 1e3);
|
|
10983
|
+
}
|
|
10984
|
+
|
|
10985
|
+
//#endregion
|
|
10986
|
+
//#region src/modules/telemetry/token-extractor.ts
|
|
10987
|
+
/**
 * Patterns for matching attribute keys to token fields.
 * Each pattern is checked case-insensitively via substring match.
 *
 * IMPORTANT: more-specific patterns (cacheRead, cacheCreation) MUST come first
 * so that keys like `cache_read_input_tokens` match `cache_read` before `input_token`.
 * Both extractTokensFromAttributes and searchObjectForTokens iterate these
 * entries in insertion order and stop at the first field that matches a key.
 */
const TOKEN_PATTERNS = {
	// Cached-prompt reads, e.g. Anthropic `cache_read_input_tokens`.
	cacheRead: ["cache_read"],
	// Cache-population writes, e.g. `cache_creation_input_tokens`.
	cacheCreation: ["cache_creation", "cache_write"],
	// Fresh prompt tokens (Anthropic "input", OpenAI "prompt").
	input: ["input_token", "prompt_token"],
	// Generated tokens (Anthropic "output", OpenAI "completion").
	output: ["output_token", "completion_token"]
};
|
|
11000
|
+
/**
 * Extract token counts from an OTLP attributes array.
 *
 * Matches attribute keys case-insensitively against known patterns.
 * The first matching value for each field wins.
 *
 * @param attributes - Array of OTLP attribute entries
 * @returns Partial token counts (only fields found in attributes)
 */
function extractTokensFromAttributes(attributes) {
	if (!Array.isArray(attributes) || attributes.length === 0) return {};
	const found = {};
	for (const attr of attributes) {
		if (!attr?.key || !attr?.value) continue;
		const num = resolveAttrValue(attr.value);
		if (num === void 0) continue;
		const keyLower = attr.key.toLowerCase();
		// An attribute key feeds at most one field; earlier (more specific)
		// patterns win, and a field already populated is never overwritten.
		fieldLoop: for (const [field, patterns] of Object.entries(TOKEN_PATTERNS)) {
			if (found[field] !== void 0) continue;
			for (const pattern of patterns) {
				if (keyLower.includes(pattern)) {
					found[field] = num;
					break fieldLoop;
				}
			}
		}
	}
	return found;
}
|
|
11030
|
+
/**
 * Extract token counts from a JSON body string via recursive search.
 *
 * Parses the body as JSON and recursively walks the object tree up to
 * depth 4, looking for keys matching token patterns.
 *
 * @param body - Raw body string (may be JSON)
 * @returns Partial token counts found in body
 */
function extractTokensFromBody(body) {
	if (!body || typeof body !== "string") return {};
	let payload;
	try {
		payload = JSON.parse(body);
	} catch {
		// Non-JSON bodies contribute nothing.
		return {};
	}
	return searchObjectForTokens(payload, 0);
}
|
|
11049
|
+
/**
 * Merge attribute-derived and body-derived token counts.
 *
 * Attributes take priority over body for each field.
 * Missing fields default to 0.
 *
 * @param fromAttributes - Token counts from attributes (higher priority)
 * @param fromBody - Token counts from body JSON (lower priority)
 * @returns Complete TokenCounts with all fields
 */
function mergeTokenCounts(fromAttributes, fromBody) {
	const pick = (field) => fromAttributes[field] ?? fromBody[field] ?? 0;
	return {
		input: pick("input"),
		output: pick("output"),
		cacheRead: pick("cacheRead"),
		cacheCreation: pick("cacheCreation")
	};
}
|
|
11067
|
+
/**
 * Resolve an OTLP attribute value to a number.
 * OTLP integer values arrive as strings (e.g. `"intValue": "2048"`).
 */
function resolveAttrValue(value) {
	// intValue and doubleValue are authoritative when present: a present but
	// unparseable value yields undefined without falling through.
	if (value.intValue !== void 0) {
		const parsed = Number(value.intValue);
		return isFinite(parsed) ? parsed : void 0;
	}
	if (value.doubleValue !== void 0) {
		const parsed = Number(value.doubleValue);
		return isFinite(parsed) ? parsed : void 0;
	}
	// stringValue is only used when it parses cleanly as a finite number.
	if (value.stringValue !== void 0) {
		const parsed = Number(value.stringValue);
		if (!isNaN(parsed) && isFinite(parsed)) return parsed;
	}
	return void 0;
}
|
|
11086
|
+
/**
 * Recursively search an object for token count fields up to maxDepth.
 *
 * Walks arrays and nested objects to depth 4 (inclusive of the root at
 * depth 0). For each object key, at most one token field can match (the
 * first pattern hit in TOKEN_PATTERNS order), and a field already found
 * is never overwritten — earlier/shallower matches win via mergePartialInto.
 *
 * @param obj - Parsed JSON value of unknown shape
 * @param depth - Current recursion depth (callers pass 0)
 * @returns Partial token counts found in the subtree
 */
function searchObjectForTokens(obj, depth) {
	// Depth cap and non-object leaves terminate the recursion.
	if (depth >= 4 || obj === null || typeof obj !== "object") return {};
	const result = {};
	if (Array.isArray(obj)) {
		// Arrays contribute via their elements; earlier elements win.
		for (const item of obj) {
			const found = searchObjectForTokens(item, depth + 1);
			mergePartialInto(result, found);
		}
		return result;
	}
	const record = obj;
	for (const [key, val] of Object.entries(record)) {
		const keyLower = key.toLowerCase();
		// keyMatched ensures one key populates at most one field.
		let keyMatched = false;
		for (const [field, patterns] of Object.entries(TOKEN_PATTERNS)) {
			if (keyMatched) break;
			if (result[field] !== void 0) continue;
			for (const pattern of patterns) if (keyLower.includes(pattern)) {
				// Accept numbers and numeric strings; anything else is ignored
				// (but still consumes the key via keyMatched).
				const num = typeof val === "number" ? val : typeof val === "string" ? Number(val) : NaN;
				if (!isNaN(num) && isFinite(num)) result[field] = num;
				keyMatched = true;
				break;
			}
		}
		// Recurse into nested objects/arrays regardless of a key match.
		if (val !== null && typeof val === "object") {
			const nested = searchObjectForTokens(val, depth + 1);
			mergePartialInto(result, nested);
		}
	}
	return result;
}
|
|
11120
|
+
/**
 * Merge source into target, only filling missing fields.
 */
function mergePartialInto(target, source) {
	for (const field of ["input", "output", "cacheRead", "cacheCreation"]) {
		if (target[field] === void 0 && source[field] !== void 0) {
			target[field] = source[field];
		}
	}
}
|
|
11129
|
+
|
|
11130
|
+
//#endregion
|
|
11131
|
+
//#region src/modules/telemetry/normalizer.ts
|
|
11132
|
+
/**
 * Extract a string value from an OTLP attribute array by key.
 * Falls back from stringValue to stringified intValue, then doubleValue.
 */
function getAttrString(attrs, key) {
	if (!Array.isArray(attrs)) return void 0;
	const entry = attrs.find((a) => a?.key === key);
	if (!entry?.value) return void 0;
	const { stringValue, intValue, doubleValue } = entry.value;
	// != null mirrors the original ?? chaining (skips both null and undefined).
	if (stringValue != null) return stringValue;
	if (intValue !== void 0) return String(intValue);
	if (doubleValue !== void 0) return String(doubleValue);
	return void 0;
}
|
|
11141
|
+
/**
 * Determine source from resource attributes service.name.
 */
function resolveSource(resourceAttrs) {
	const serviceName = getAttrString(resourceAttrs, "service.name");
	if (!serviceName) return "unknown";
	const lower = serviceName.toLowerCase();
	// Ordered needle → canonical-source mapping; first hit wins.
	const mappings = [
		[["claude"], "claude-code"],
		[["codex", "openai"], "codex"],
		[["local"], "local-llm"]
	];
	for (const [needles, label] of mappings) {
		if (needles.some((needle) => lower.includes(needle))) return label;
	}
	// Unknown services pass through verbatim.
	return serviceName;
}
|
|
11153
|
+
/**
 * Resolve the model name from an OTLP attribute list, trying known attribute
 * keys in priority order (OTel gen_ai conventions first, then vendor-specific
 * keys, then a bare "model" fallback).
 *
 * @param {Array|undefined} attrs - OTLP attribute list to search.
 * @returns {string|undefined} First non-empty model string found, else undefined.
 */
function resolveModel(attrs) {
	const modelAttributeKeys = [
		"gen_ai.request.model",
		"gen_ai.response.model",
		"llm.request.model",
		"anthropic.model",
		"openai.model",
		"model"
	];
	// First truthy (non-empty) value wins; empty strings are skipped.
	const hit = modelAttributeKeys
		.map((key) => getAttrString(attrs, key))
		.find((value) => Boolean(value));
	return hit;
}
|
|
11171
|
+
/**
 * Resolve the provider for a span.
 *
 * Prefers the explicit `gen_ai.system` attribute; otherwise infers the
 * provider from the already-resolved telemetry source.
 *
 * @param {Array|undefined} attrs - OTLP span attribute list.
 * @param {string} source - Source id produced by resolveSource().
 * @returns {string|undefined}
 */
function resolveProvider(attrs, source) {
	const explicit = getAttrString(attrs, "gen_ai.system");
	if (explicit) return explicit;
	switch (source) {
		case "claude-code":
			return "anthropic";
		case "codex":
			return "openai";
		default:
			return undefined;
	}
}
|
|
11181
|
+
/**
 * Extract the body string from an OTLP log-record body field.
 *
 * Accepts either a plain string or an AnyValue-style object carrying a
 * non-empty `stringValue`. Any other shape (including falsy bodies and empty
 * strings) yields undefined.
 *
 * @param {*} body - Raw OTLP log record body.
 * @returns {string|undefined}
 */
function extractBodyString(body) {
	if (!body) return undefined;
	if (typeof body === "string") return body;
	const nested = typeof body === "object" ? body.stringValue : undefined;
	// Falsy stringValue (missing or empty) is treated as absent.
	return nested || undefined;
}
|
|
11190
|
+
/**
 * Generate a unique log record ID.
 *
 * Combines the current epoch milliseconds with a module-level monotonic
 * counter so ids stay unique even when several are minted within the same
 * millisecond.
 *
 * @returns {string} Id of the form `log-<epochMs>-<counter>`.
 */
let _logIdCounter = 0;
function generateLogId() {
	_logIdCounter += 1;
	return ["log", Date.now(), _logIdCounter].join("-");
}
|
|
11197
|
+
/**
* Transforms raw OTLP payloads into normalized telemetry models.
*
* Inject an `ILogger` (pino-compatible) for structured logging.
* All public methods return empty arrays on any error — never throw.
*/
var TelemetryNormalizer = class {
	// Injected pino-compatible logger; used for warn-level skip/error reporting.
	_logger;
	constructor(logger$27) {
		this._logger = logger$27;
	}
	/**
	* Normalize a raw OTLP trace payload into an array of `NormalizedSpan`.
	*
	* @param raw - Raw OTLP trace payload (resourceSpans structure)
	* @returns Array of normalized spans; empty on error or empty input
	*/
	normalizeSpan(raw) {
		try {
			return this._normalizeSpanInternal(raw);
		} catch (err) {
			// Never-throw contract: log and return empty on any unexpected failure.
			this._logger.warn({ err }, "TelemetryNormalizer.normalizeSpan: unexpected error");
			return [];
		}
	}
	// Walks the OTLP resourceSpans → scopeSpans → spans hierarchy, normalizing
	// each span. A span that fails to normalize is logged and skipped so one
	// bad span cannot poison the rest of the batch.
	_normalizeSpanInternal(raw) {
		if (!raw || typeof raw !== "object") return [];
		const payload = raw;
		if (!Array.isArray(payload.resourceSpans)) return [];
		const results = [];
		for (const resourceSpan of payload.resourceSpans) {
			if (!resourceSpan) continue;
			// Resource-level attributes apply to every span under this resource;
			// source (e.g. "claude-code") is derived once from service.name.
			const resourceAttrs = resourceSpan.resource?.attributes;
			const source = resolveSource(resourceAttrs);
			if (!Array.isArray(resourceSpan.scopeSpans)) continue;
			for (const scopeSpan of resourceSpan.scopeSpans) {
				if (!Array.isArray(scopeSpan?.spans)) continue;
				for (const span of scopeSpan.spans) {
					if (!span) continue;
					try {
						const normalized = this._normalizeOneSpan(span, resourceAttrs, source);
						results.push(normalized);
					} catch (err) {
						this._logger.warn({
							err,
							spanId: span.spanId
						}, "Failed to normalize span — skipping");
					}
				}
			}
		}
		return results;
	}
	// Converts a single OTLP span into the NormalizedSpan shape. Span-level
	// attributes take precedence; resource attributes are the fallback for
	// model and story key.
	_normalizeOneSpan(span, resourceAttrs, source) {
		const spanId = span.spanId ?? "";
		const traceId = span.traceId ?? "";
		const name = span.name ?? "";
		const model = resolveModel(span.attributes) ?? resolveModel(resourceAttrs);
		const provider = resolveProvider(span.attributes, source);
		const operationName = getAttrString(span.attributes, "gen_ai.operation.name") ?? name;
		// normalizeTimestamp presumably converts OTLP unix-nano timestamps to
		// epoch milliseconds (durationMs is computed by subtraction) — defined
		// elsewhere in this bundle; confirm its unit there.
		const startTime = normalizeTimestamp(span.startTimeUnixNano);
		const endTime = span.endTimeUnixNano ? normalizeTimestamp(span.endTimeUnixNano) : void 0;
		// Spans with no end time get duration 0 rather than a negative/NaN value.
		const durationMs = endTime !== void 0 ? endTime - startTime : 0;
		// Token counts are merged from two places: explicit attributes, and a
		// serialized response body (attribute values win only where present).
		const fromAttrs = extractTokensFromAttributes(span.attributes);
		const bodyStr = getAttrString(span.attributes, "llm.response.body") ?? getAttrString(span.attributes, "gen_ai.response.body");
		const fromBody = extractTokensFromBody(bodyStr);
		const tokens = mergeTokenCounts(fromAttrs, fromBody);
		const storyKey = getAttrString(span.attributes, "substrate.story_key") ?? getAttrString(resourceAttrs, "substrate.story_key");
		// Cost can only be estimated when a model is known; otherwise 0.
		const costUsd = model ? estimateCost(model, tokens) : 0;
		// Flatten the OTLP attribute list into a plain record, taking the first
		// defined representation of each value (string, int, double, bool).
		const attributesRecord = {};
		if (Array.isArray(span.attributes)) {
			for (const attr of span.attributes) if (attr?.key) attributesRecord[attr.key] = attr.value?.stringValue ?? attr.value?.intValue ?? attr.value?.doubleValue ?? attr.value?.boolValue;
		}
		return {
			spanId,
			traceId,
			parentSpanId: span.parentSpanId,
			name,
			source,
			model,
			provider,
			operationName,
			storyKey,
			inputTokens: tokens.input,
			outputTokens: tokens.output,
			cacheReadTokens: tokens.cacheRead,
			cacheCreationTokens: tokens.cacheCreation,
			costUsd,
			durationMs,
			startTime,
			endTime,
			attributes: attributesRecord,
			events: span.events
		};
	}
	/**
	* Normalize a raw OTLP log payload into an array of `NormalizedLog`.
	*
	* @param raw - Raw OTLP log payload (resourceLogs structure)
	* @returns Array of normalized logs; empty on error or empty input
	*/
	normalizeLog(raw) {
		try {
			return this._normalizeLogInternal(raw);
		} catch (err) {
			// Never-throw contract: log and return empty on any unexpected failure.
			this._logger.warn({ err }, "TelemetryNormalizer.normalizeLog: unexpected error");
			return [];
		}
	}
	// Walks the OTLP resourceLogs → scopeLogs → logRecords hierarchy; records
	// that fail to normalize are logged and skipped individually.
	_normalizeLogInternal(raw) {
		if (!raw || typeof raw !== "object") return [];
		const payload = raw;
		if (!Array.isArray(payload.resourceLogs)) return [];
		const results = [];
		for (const resourceLog of payload.resourceLogs) {
			if (!resourceLog) continue;
			const resourceAttrs = resourceLog.resource?.attributes;
			if (!Array.isArray(resourceLog.scopeLogs)) continue;
			for (const scopeLog of resourceLog.scopeLogs) {
				if (!Array.isArray(scopeLog?.logRecords)) continue;
				for (const record of scopeLog.logRecords) {
					if (!record) continue;
					try {
						const normalized = this._normalizeOneLog(record, resourceAttrs);
						results.push(normalized);
					} catch (err) {
						this._logger.warn({ err }, "Failed to normalize log record — skipping");
					}
				}
			}
		}
		return results;
	}
	// Converts a single OTLP log record into the NormalizedLog shape. Several
	// attribute-key spellings are tried for event name, session id, and tool
	// name, since emitters differ in which convention they follow.
	_normalizeOneLog(record, resourceAttrs) {
		// Records without an id get a synthesized one (time + counter).
		const logId = record.logRecordId ?? generateLogId();
		const timestamp = normalizeTimestamp(record.timeUnixNano);
		const bodyStr = extractBodyString(record.body);
		const fromAttrs = extractTokensFromAttributes(record.attributes);
		const fromBody = extractTokensFromBody(bodyStr);
		const tokens = mergeTokenCounts(fromAttrs, fromBody);
		const eventName = getAttrString(record.attributes, "event.name") ?? getAttrString(record.attributes, "gen_ai.event.name") ?? getAttrString(record.attributes, "event_name");
		const sessionId = getAttrString(record.attributes, "session.id") ?? getAttrString(record.attributes, "gen_ai.session.id") ?? getAttrString(resourceAttrs, "session.id");
		const toolName = getAttrString(record.attributes, "tool.name") ?? getAttrString(record.attributes, "gen_ai.tool.name") ?? getAttrString(record.attributes, "tool_name");
		const model = resolveModel(record.attributes) ?? resolveModel(resourceAttrs);
		const storyKey = getAttrString(record.attributes, "substrate.story_key") ?? getAttrString(resourceAttrs, "substrate.story_key");
		const costUsd = model ? estimateCost(model, tokens) : 0;
		// NOTE(review): unlike _normalizeOneSpan, this result omits
		// cacheCreationTokens even though `tokens` carries cacheCreation —
		// presumably intentional per the NormalizedLog type; verify.
		return {
			logId,
			traceId: record.traceId,
			spanId: record.spanId,
			timestamp,
			severity: record.severityText,
			body: bodyStr,
			eventName,
			sessionId,
			toolName,
			inputTokens: tokens.input,
			outputTokens: tokens.output,
			cacheReadTokens: tokens.cacheRead,
			costUsd,
			model,
			storyKey
		};
	}
};
|
|
11362
|
+
|
|
11363
|
+
//#endregion
|
|
11364
|
+
//#region src/modules/telemetry/telemetry-pipeline.ts
const logger$6 = createLogger("telemetry:pipeline");
/**
* Wires together the full OTLP analysis and persistence pipeline.
*
* Usage:
* const pipeline = new TelemetryPipeline(deps)
* await pipeline.processBatch(items)
*/
var TelemetryPipeline = class {
	// Injected collaborators — each is an interface implemented elsewhere in
	// the project: the OTLP normalizer, the per-story analyzers, the
	// recommendation engine, and the persistence layer.
	_normalizer;
	_turnAnalyzer;
	_categorizer;
	_consumerAnalyzer;
	_efficiencyScorer;
	_recommender;
	_persistence;
	constructor(deps) {
		this._normalizer = deps.normalizer;
		this._turnAnalyzer = deps.turnAnalyzer;
		this._categorizer = deps.categorizer;
		this._consumerAnalyzer = deps.consumerAnalyzer;
		this._efficiencyScorer = deps.efficiencyScorer;
		this._recommender = deps.recommender;
		this._persistence = deps.persistence;
	}
	/**
	* Process a batch of raw OTLP payloads through the full analysis pipeline.
	*
	* Each payload is normalized independently. Spans are then grouped by storyKey
	* for per-story analysis. Items that fail normalization are skipped with a warning.
	*/
	async processBatch(items) {
		if (items.length === 0) return;
		logger$6.debug({ count: items.length }, "TelemetryPipeline.processBatch start");
		const allSpans = [];
		const allLogs = [];
		// Every payload is run through BOTH normalizers — span and log — since
		// a raw OTLP body may carry either structure; each attempt is isolated
		// so one bad payload cannot abort the batch.
		for (const item of items) {
			try {
				const spans = this._normalizer.normalizeSpan(item.body);
				allSpans.push(...spans);
			} catch (err) {
				logger$6.warn({ err }, "TelemetryPipeline: normalizeSpan failed — skipping payload");
			}
			try {
				const logs = this._normalizer.normalizeLog(item.body);
				allLogs.push(...logs);
			} catch (err) {
				logger$6.warn({ err }, "TelemetryPipeline: normalizeLog failed — skipping payload");
			}
		}
		logger$6.debug({
			spans: allSpans.length,
			logs: allLogs.length
		}, "TelemetryPipeline: normalized batch");
		// Analysis below is span-driven; normalized logs are collected but not
		// analyzed here, so a log-only batch short-circuits.
		if (allSpans.length === 0) {
			logger$6.debug("TelemetryPipeline: no spans normalized from batch");
			return;
		}
		// Group spans by story key; spans without a key fall under a sentinel
		// bucket that is skipped (no story context to analyze against).
		const spansByStory = new Map();
		const unknownStoryKey = "__unknown__";
		for (const span of allSpans) {
			const key = span.storyKey ?? unknownStoryKey;
			const existing = spansByStory.get(key);
			if (existing !== void 0) existing.push(span);
			else spansByStory.set(key, [span]);
		}
		// Stories are processed sequentially (await inside the loop); a failure
		// in one story is logged and does not stop the remaining stories.
		for (const [storyKey, spans] of spansByStory) {
			if (storyKey === unknownStoryKey) {
				logger$6.debug({ spanCount: spans.length }, "TelemetryPipeline: spans without storyKey — skipping analysis");
				continue;
			}
			try {
				await this._processStory(storyKey, spans);
			} catch (err) {
				logger$6.warn({
					err,
					storyKey
				}, "TelemetryPipeline: story processing failed — skipping");
			}
		}
		logger$6.debug({ storyCount: spansByStory.size }, "TelemetryPipeline.processBatch complete");
	}
	// Runs the full analysis chain for one story's spans and persists every
	// result best-effort: each persistence call has its own .catch, so one
	// store failure never blocks the others or rejects the Promise.all.
	async _processStory(storyKey, spans) {
		const turns = this._turnAnalyzer.analyze(spans);
		const categories = this._categorizer.computeCategoryStats(spans, turns);
		const consumers = this._consumerAnalyzer.analyze(spans);
		const efficiencyScore = this._efficiencyScorer.score(storyKey, turns);
		const generatedAt = new Date().toISOString();
		// Aggregated analysis context handed to the recommender.
		const context = {
			storyKey,
			generatedAt,
			turns,
			categories,
			consumers,
			efficiencyScore,
			allSpans: spans
		};
		const recommendations = this._recommender.analyze(context);
		// Persist all results in parallel. Empty collections are skipped
		// (Promise.resolve placeholder); the efficiency score is always stored.
		await Promise.all([
			turns.length > 0 ? this._persistence.storeTurnAnalysis(storyKey, turns).catch((err) => logger$6.warn({
				err,
				storyKey
			}, "Failed to store turn analysis")) : Promise.resolve(),
			categories.length > 0 ? this._persistence.storeCategoryStats(storyKey, categories).catch((err) => logger$6.warn({
				err,
				storyKey
			}, "Failed to store category stats")) : Promise.resolve(),
			consumers.length > 0 ? this._persistence.storeConsumerStats(storyKey, consumers).catch((err) => logger$6.warn({
				err,
				storyKey
			}, "Failed to store consumer stats")) : Promise.resolve(),
			this._persistence.storeEfficiencyScore(efficiencyScore).catch((err) => logger$6.warn({
				err,
				storyKey
			}, "Failed to store efficiency score")),
			recommendations.length > 0 ? this._persistence.saveRecommendations(storyKey, recommendations).catch((err) => logger$6.warn({
				err,
				storyKey
			}, "Failed to save recommendations")) : Promise.resolve()
		]);
		logger$6.info({
			storyKey,
			turns: turns.length,
			compositeScore: efficiencyScore.compositeScore,
			recommendations: recommendations.length
		}, "TelemetryPipeline: story analysis complete");
	}
};
|
|
11493
|
+
|
|
11494
|
+
//#endregion
|
|
11495
|
+
//#region src/modules/implementation-orchestrator/orchestrator-impl.ts
|
|
11496
|
+
function createPauseGate() {
|
|
10119
11497
|
let resolve$2;
|
|
10120
11498
|
const promise = new Promise((res) => {
|
|
10121
11499
|
resolve$2 = res;
|
|
@@ -10155,7 +11533,7 @@ function buildTargetedFilesContent(issueList) {
|
|
|
10155
11533
|
*/
|
|
10156
11534
|
function createImplementationOrchestrator(deps) {
|
|
10157
11535
|
const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot, tokenCeilings, stateStore, telemetryPersistence, ingestionServer } = deps;
|
|
10158
|
-
const logger$
|
|
11536
|
+
const logger$27 = createLogger("implementation-orchestrator");
|
|
10159
11537
|
let _state = "IDLE";
|
|
10160
11538
|
let _startedAt;
|
|
10161
11539
|
let _completedAt;
|
|
@@ -10201,7 +11579,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10201
11579
|
const nowMs = Date.now();
|
|
10202
11580
|
for (const [phase, startMs] of starts) {
|
|
10203
11581
|
const endMs = ends?.get(phase);
|
|
10204
|
-
if (endMs === void 0) logger$
|
|
11582
|
+
if (endMs === void 0) logger$27.warn({
|
|
10205
11583
|
storyKey,
|
|
10206
11584
|
phase
|
|
10207
11585
|
}, "Phase has no end time — story may have errored mid-phase. Duration capped to now() and may be inflated.");
|
|
@@ -10248,7 +11626,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10248
11626
|
recordedAt: completedAt,
|
|
10249
11627
|
timestamp: completedAt
|
|
10250
11628
|
}).catch((storeErr) => {
|
|
10251
|
-
logger$
|
|
11629
|
+
logger$27.warn({
|
|
10252
11630
|
err: storeErr,
|
|
10253
11631
|
storyKey
|
|
10254
11632
|
}, "Failed to record metric to StateStore (best-effort)");
|
|
@@ -10270,7 +11648,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10270
11648
|
rationale: `Story ${storyKey} completed with result=${result} in ${wallClockSeconds}s. Tokens: ${tokenAgg.input}+${tokenAgg.output}. Review cycles: ${reviewCycles}.`
|
|
10271
11649
|
});
|
|
10272
11650
|
} catch (decisionErr) {
|
|
10273
|
-
logger$
|
|
11651
|
+
logger$27.warn({
|
|
10274
11652
|
err: decisionErr,
|
|
10275
11653
|
storyKey
|
|
10276
11654
|
}, "Failed to write story-metrics decision (best-effort)");
|
|
@@ -10298,13 +11676,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
10298
11676
|
dispatches: _storyDispatches.get(storyKey) ?? 0
|
|
10299
11677
|
});
|
|
10300
11678
|
} catch (emitErr) {
|
|
10301
|
-
logger$
|
|
11679
|
+
logger$27.warn({
|
|
10302
11680
|
err: emitErr,
|
|
10303
11681
|
storyKey
|
|
10304
11682
|
}, "Failed to emit story:metrics event (best-effort)");
|
|
10305
11683
|
}
|
|
10306
11684
|
} catch (err) {
|
|
10307
|
-
logger$
|
|
11685
|
+
logger$27.warn({
|
|
10308
11686
|
err,
|
|
10309
11687
|
storyKey
|
|
10310
11688
|
}, "Failed to write story metrics (best-effort)");
|
|
@@ -10333,7 +11711,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10333
11711
|
rationale: `Story ${storyKey} ${outcome} after ${reviewCycles} review cycle(s).`
|
|
10334
11712
|
});
|
|
10335
11713
|
} catch (err) {
|
|
10336
|
-
logger$
|
|
11714
|
+
logger$27.warn({
|
|
10337
11715
|
err,
|
|
10338
11716
|
storyKey
|
|
10339
11717
|
}, "Failed to write story-outcome decision (best-effort)");
|
|
@@ -10359,7 +11737,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10359
11737
|
rationale: `Escalation diagnosis for ${payload.storyKey}: ${diagnosis.recommendedAction} — ${diagnosis.rationale}`
|
|
10360
11738
|
});
|
|
10361
11739
|
} catch (err) {
|
|
10362
|
-
logger$
|
|
11740
|
+
logger$27.warn({
|
|
10363
11741
|
err,
|
|
10364
11742
|
storyKey: payload.storyKey
|
|
10365
11743
|
}, "Failed to persist escalation diagnosis (best-effort)");
|
|
@@ -10408,7 +11786,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10408
11786
|
const existing = _stories.get(storyKey);
|
|
10409
11787
|
if (existing !== void 0) {
|
|
10410
11788
|
Object.assign(existing, updates);
|
|
10411
|
-
persistStoryState(storyKey, existing).catch((err) => logger$
|
|
11789
|
+
persistStoryState(storyKey, existing).catch((err) => logger$27.warn({
|
|
10412
11790
|
err,
|
|
10413
11791
|
storyKey
|
|
10414
11792
|
}, "StateStore write failed after updateStory"));
|
|
@@ -10417,12 +11795,12 @@ function createImplementationOrchestrator(deps) {
|
|
|
10417
11795
|
storyKey,
|
|
10418
11796
|
conflict: err
|
|
10419
11797
|
});
|
|
10420
|
-
else logger$
|
|
11798
|
+
else logger$27.warn({
|
|
10421
11799
|
err,
|
|
10422
11800
|
storyKey
|
|
10423
11801
|
}, "mergeStory failed");
|
|
10424
11802
|
});
|
|
10425
|
-
else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$
|
|
11803
|
+
else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$27.warn({
|
|
10426
11804
|
err,
|
|
10427
11805
|
storyKey
|
|
10428
11806
|
}, "rollbackStory failed — branch may persist"));
|
|
@@ -10449,7 +11827,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10449
11827
|
};
|
|
10450
11828
|
await stateStore.setStoryState(storyKey, record);
|
|
10451
11829
|
} catch (err) {
|
|
10452
|
-
logger$
|
|
11830
|
+
logger$27.warn({
|
|
10453
11831
|
err,
|
|
10454
11832
|
storyKey
|
|
10455
11833
|
}, "StateStore.setStoryState failed (best-effort)");
|
|
@@ -10465,7 +11843,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10465
11843
|
token_usage_json: serialized
|
|
10466
11844
|
});
|
|
10467
11845
|
} catch (err) {
|
|
10468
|
-
logger$
|
|
11846
|
+
logger$27.warn({ err }, "Failed to persist orchestrator state");
|
|
10469
11847
|
}
|
|
10470
11848
|
}
|
|
10471
11849
|
function recordProgress() {
|
|
@@ -10512,7 +11890,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10512
11890
|
}
|
|
10513
11891
|
if (childActive) {
|
|
10514
11892
|
_lastProgressTs = Date.now();
|
|
10515
|
-
logger$
|
|
11893
|
+
logger$27.debug({
|
|
10516
11894
|
storyKey: key,
|
|
10517
11895
|
phase: s.phase,
|
|
10518
11896
|
childPids
|
|
@@ -10521,7 +11899,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10521
11899
|
}
|
|
10522
11900
|
_stalledStories.add(key);
|
|
10523
11901
|
_storiesWithStall.add(key);
|
|
10524
|
-
logger$
|
|
11902
|
+
logger$27.warn({
|
|
10525
11903
|
storyKey: key,
|
|
10526
11904
|
phase: s.phase,
|
|
10527
11905
|
elapsedMs: elapsed,
|
|
@@ -10566,7 +11944,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10566
11944
|
for (let attempt = 0; attempt < MEMORY_PRESSURE_BACKOFF_MS.length; attempt++) {
|
|
10567
11945
|
const memState = dispatcher.getMemoryState();
|
|
10568
11946
|
if (!memState.isPressured) return true;
|
|
10569
|
-
logger$
|
|
11947
|
+
logger$27.warn({
|
|
10570
11948
|
storyKey,
|
|
10571
11949
|
freeMB: memState.freeMB,
|
|
10572
11950
|
thresholdMB: memState.thresholdMB,
|
|
@@ -10586,11 +11964,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
10586
11964
|
* exhausted retries the story is ESCALATED.
|
|
10587
11965
|
*/
|
|
10588
11966
|
async function processStory(storyKey) {
|
|
10589
|
-
logger$
|
|
11967
|
+
logger$27.info({ storyKey }, "Processing story");
|
|
10590
11968
|
{
|
|
10591
11969
|
const memoryOk = await checkMemoryPressure(storyKey);
|
|
10592
11970
|
if (!memoryOk) {
|
|
10593
|
-
logger$
|
|
11971
|
+
logger$27.warn({ storyKey }, "Memory pressure exhausted — escalating story without dispatch");
|
|
10594
11972
|
const memPressureState = {
|
|
10595
11973
|
phase: "ESCALATED",
|
|
10596
11974
|
reviewCycles: 0,
|
|
@@ -10599,7 +11977,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10599
11977
|
completedAt: new Date().toISOString()
|
|
10600
11978
|
};
|
|
10601
11979
|
_stories.set(storyKey, memPressureState);
|
|
10602
|
-
persistStoryState(storyKey, memPressureState).catch((err) => logger$
|
|
11980
|
+
persistStoryState(storyKey, memPressureState).catch((err) => logger$27.warn({
|
|
10603
11981
|
err,
|
|
10604
11982
|
storyKey
|
|
10605
11983
|
}, "StateStore write failed after memory-pressure escalation"));
|
|
@@ -10616,7 +11994,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10616
11994
|
}
|
|
10617
11995
|
await waitIfPaused();
|
|
10618
11996
|
if (_state !== "RUNNING") return;
|
|
10619
|
-
stateStore?.branchForStory(storyKey).catch((err) => logger$
|
|
11997
|
+
stateStore?.branchForStory(storyKey).catch((err) => logger$27.warn({
|
|
10620
11998
|
err,
|
|
10621
11999
|
storyKey
|
|
10622
12000
|
}, "branchForStory failed — continuing without branch isolation"));
|
|
@@ -10633,14 +12011,14 @@ function createImplementationOrchestrator(deps) {
|
|
|
10633
12011
|
if (match) {
|
|
10634
12012
|
const candidatePath = join$1(artifactsDir, match);
|
|
10635
12013
|
const validation = await isValidStoryFile(candidatePath);
|
|
10636
|
-
if (!validation.valid) logger$
|
|
12014
|
+
if (!validation.valid) logger$27.warn({
|
|
10637
12015
|
storyKey,
|
|
10638
12016
|
storyFilePath: candidatePath,
|
|
10639
12017
|
reason: validation.reason
|
|
10640
12018
|
}, `Existing story file for ${storyKey} is invalid (${validation.reason}) — re-creating`);
|
|
10641
12019
|
else {
|
|
10642
12020
|
storyFilePath = candidatePath;
|
|
10643
|
-
logger$
|
|
12021
|
+
logger$27.info({
|
|
10644
12022
|
storyKey,
|
|
10645
12023
|
storyFilePath
|
|
10646
12024
|
}, "Found existing story file — skipping create-story");
|
|
@@ -10689,7 +12067,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10689
12067
|
metadata: JSON.stringify({ storyKey })
|
|
10690
12068
|
});
|
|
10691
12069
|
} catch (tokenErr) {
|
|
10692
|
-
logger$
|
|
12070
|
+
logger$27.warn({
|
|
10693
12071
|
storyKey,
|
|
10694
12072
|
err: tokenErr
|
|
10695
12073
|
}, "Failed to record create-story token usage");
|
|
@@ -10773,14 +12151,14 @@ function createImplementationOrchestrator(deps) {
|
|
|
10773
12151
|
...contract.transport !== void 0 ? { transport: contract.transport } : {}
|
|
10774
12152
|
})
|
|
10775
12153
|
});
|
|
10776
|
-
logger$
|
|
12154
|
+
logger$27.info({
|
|
10777
12155
|
storyKey,
|
|
10778
12156
|
contractCount: contracts.length,
|
|
10779
12157
|
contracts
|
|
10780
12158
|
}, "Stored interface contract declarations");
|
|
10781
12159
|
}
|
|
10782
12160
|
} catch (err) {
|
|
10783
|
-
logger$
|
|
12161
|
+
logger$27.warn({
|
|
10784
12162
|
storyKey,
|
|
10785
12163
|
error: err instanceof Error ? err.message : String(err)
|
|
10786
12164
|
}, "Failed to parse interface contracts — continuing without contract declarations");
|
|
@@ -10808,10 +12186,10 @@ function createImplementationOrchestrator(deps) {
|
|
|
10808
12186
|
});
|
|
10809
12187
|
testPlanPhaseResult = testPlanResult.result;
|
|
10810
12188
|
testPlanTokenUsage = testPlanResult.tokenUsage;
|
|
10811
|
-
if (testPlanResult.result === "success") logger$
|
|
10812
|
-
else logger$
|
|
12189
|
+
if (testPlanResult.result === "success") logger$27.info({ storyKey }, "Test plan generated successfully");
|
|
12190
|
+
else logger$27.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
|
|
10813
12191
|
} catch (err) {
|
|
10814
|
-
logger$
|
|
12192
|
+
logger$27.warn({
|
|
10815
12193
|
storyKey,
|
|
10816
12194
|
err
|
|
10817
12195
|
}, "Test planning failed — proceeding to dev-story without test plan");
|
|
@@ -10827,7 +12205,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10827
12205
|
metadata: JSON.stringify({ storyKey })
|
|
10828
12206
|
});
|
|
10829
12207
|
} catch (tokenErr) {
|
|
10830
|
-
logger$
|
|
12208
|
+
logger$27.warn({
|
|
10831
12209
|
storyKey,
|
|
10832
12210
|
err: tokenErr
|
|
10833
12211
|
}, "Failed to record test-plan token usage");
|
|
@@ -10850,7 +12228,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10850
12228
|
try {
|
|
10851
12229
|
storyContentForAnalysis = await readFile$1(storyFilePath ?? "", "utf-8");
|
|
10852
12230
|
} catch (err) {
|
|
10853
|
-
logger$
|
|
12231
|
+
logger$27.error({
|
|
10854
12232
|
storyKey,
|
|
10855
12233
|
storyFilePath,
|
|
10856
12234
|
error: err instanceof Error ? err.message : String(err)
|
|
@@ -10858,7 +12236,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10858
12236
|
}
|
|
10859
12237
|
const analysis = analyzeStoryComplexity(storyContentForAnalysis);
|
|
10860
12238
|
const batches = planTaskBatches(analysis);
|
|
10861
|
-
logger$
|
|
12239
|
+
logger$27.info({
|
|
10862
12240
|
storyKey,
|
|
10863
12241
|
estimatedScope: analysis.estimatedScope,
|
|
10864
12242
|
batchCount: batches.length,
|
|
@@ -10876,7 +12254,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10876
12254
|
if (_state !== "RUNNING") break;
|
|
10877
12255
|
const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
|
|
10878
12256
|
const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
|
|
10879
|
-
logger$
|
|
12257
|
+
logger$27.info({
|
|
10880
12258
|
storyKey,
|
|
10881
12259
|
batchIndex: batch.batchIndex,
|
|
10882
12260
|
taskCount: batch.taskIds.length
|
|
@@ -10902,7 +12280,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10902
12280
|
});
|
|
10903
12281
|
} catch (batchErr) {
|
|
10904
12282
|
const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
|
|
10905
|
-
logger$
|
|
12283
|
+
logger$27.warn({
|
|
10906
12284
|
storyKey,
|
|
10907
12285
|
batchIndex: batch.batchIndex,
|
|
10908
12286
|
error: errMsg
|
|
@@ -10922,7 +12300,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10922
12300
|
filesModified: batchFilesModified,
|
|
10923
12301
|
result: batchResult.result === "success" ? "success" : "failed"
|
|
10924
12302
|
};
|
|
10925
|
-
logger$
|
|
12303
|
+
logger$27.info(batchMetrics, "Batch dev-story metrics");
|
|
10926
12304
|
for (const f of batchFilesModified) allFilesModified.add(f);
|
|
10927
12305
|
if (batchFilesModified.length > 0) batchFileGroups.push({
|
|
10928
12306
|
batchIndex: batch.batchIndex,
|
|
@@ -10944,13 +12322,13 @@ function createImplementationOrchestrator(deps) {
|
|
|
10944
12322
|
})
|
|
10945
12323
|
});
|
|
10946
12324
|
} catch (tokenErr) {
|
|
10947
|
-
logger$
|
|
12325
|
+
logger$27.warn({
|
|
10948
12326
|
storyKey,
|
|
10949
12327
|
batchIndex: batch.batchIndex,
|
|
10950
12328
|
err: tokenErr
|
|
10951
12329
|
}, "Failed to record batch token usage");
|
|
10952
12330
|
}
|
|
10953
|
-
if (batchResult.result === "failed") logger$
|
|
12331
|
+
if (batchResult.result === "failed") logger$27.warn({
|
|
10954
12332
|
storyKey,
|
|
10955
12333
|
batchIndex: batch.batchIndex,
|
|
10956
12334
|
error: batchResult.error
|
|
@@ -10990,7 +12368,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
10990
12368
|
metadata: JSON.stringify({ storyKey })
|
|
10991
12369
|
});
|
|
10992
12370
|
} catch (tokenErr) {
|
|
10993
|
-
logger$
|
|
12371
|
+
logger$27.warn({
|
|
10994
12372
|
storyKey,
|
|
10995
12373
|
err: tokenErr
|
|
10996
12374
|
}, "Failed to record dev-story token usage");
|
|
@@ -11002,7 +12380,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11002
12380
|
});
|
|
11003
12381
|
persistState();
|
|
11004
12382
|
if (devResult.result === "success") devStoryWasSuccess = true;
|
|
11005
|
-
else logger$
|
|
12383
|
+
else logger$27.warn({
|
|
11006
12384
|
storyKey,
|
|
11007
12385
|
error: devResult.error,
|
|
11008
12386
|
filesModified: devFilesModified.length
|
|
@@ -11030,7 +12408,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11030
12408
|
if (devStoryWasSuccess) {
|
|
11031
12409
|
gitDiffFiles = checkGitDiffFiles(projectRoot ?? process.cwd());
|
|
11032
12410
|
if (gitDiffFiles.length === 0) {
|
|
11033
|
-
logger$
|
|
12411
|
+
logger$27.warn({ storyKey }, "Zero-diff detected after COMPLETE dev-story — no file changes in git working tree");
|
|
11034
12412
|
eventBus.emit("orchestrator:zero-diff-escalation", {
|
|
11035
12413
|
storyKey,
|
|
11036
12414
|
reason: "zero-diff-on-complete"
|
|
@@ -11061,7 +12439,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11061
12439
|
});
|
|
11062
12440
|
if (buildVerifyResult.status === "passed") {
|
|
11063
12441
|
eventBus.emit("story:build-verification-passed", { storyKey });
|
|
11064
|
-
logger$
|
|
12442
|
+
logger$27.info({ storyKey }, "Build verification passed");
|
|
11065
12443
|
} else if (buildVerifyResult.status === "failed" || buildVerifyResult.status === "timeout") {
|
|
11066
12444
|
const truncatedOutput = (buildVerifyResult.output ?? "").slice(0, 2e3);
|
|
11067
12445
|
const reason = buildVerifyResult.reason ?? "build-verification-failed";
|
|
@@ -11070,7 +12448,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11070
12448
|
exitCode: buildVerifyResult.exitCode ?? 1,
|
|
11071
12449
|
output: truncatedOutput
|
|
11072
12450
|
});
|
|
11073
|
-
logger$
|
|
12451
|
+
logger$27.warn({
|
|
11074
12452
|
storyKey,
|
|
11075
12453
|
reason,
|
|
11076
12454
|
exitCode: buildVerifyResult.exitCode
|
|
@@ -11100,7 +12478,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11100
12478
|
storyKey
|
|
11101
12479
|
});
|
|
11102
12480
|
if (icResult.potentiallyAffectedTests.length > 0) {
|
|
11103
|
-
logger$
|
|
12481
|
+
logger$27.warn({
|
|
11104
12482
|
storyKey,
|
|
11105
12483
|
modifiedInterfaces: icResult.modifiedInterfaces,
|
|
11106
12484
|
potentiallyAffectedTests: icResult.potentiallyAffectedTests
|
|
@@ -11146,7 +12524,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11146
12524
|
"NEEDS_MAJOR_REWORK": 2
|
|
11147
12525
|
};
|
|
11148
12526
|
for (const group of batchFileGroups) {
|
|
11149
|
-
logger$
|
|
12527
|
+
logger$27.info({
|
|
11150
12528
|
storyKey,
|
|
11151
12529
|
batchIndex: group.batchIndex,
|
|
11152
12530
|
fileCount: group.files.length
|
|
@@ -11185,7 +12563,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11185
12563
|
rawOutput: lastRawOutput,
|
|
11186
12564
|
tokenUsage: aggregateTokens
|
|
11187
12565
|
};
|
|
11188
|
-
logger$
|
|
12566
|
+
logger$27.info({
|
|
11189
12567
|
storyKey,
|
|
11190
12568
|
batchCount: batchFileGroups.length,
|
|
11191
12569
|
verdict: worstVerdict,
|
|
@@ -11223,7 +12601,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11223
12601
|
})
|
|
11224
12602
|
});
|
|
11225
12603
|
} catch (tokenErr) {
|
|
11226
|
-
logger$
|
|
12604
|
+
logger$27.warn({
|
|
11227
12605
|
storyKey,
|
|
11228
12606
|
err: tokenErr
|
|
11229
12607
|
}, "Failed to record code-review token usage");
|
|
@@ -11231,7 +12609,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11231
12609
|
const isPhantomReview = reviewResult.dispatchFailed === true || reviewResult.verdict !== "SHIP_IT" && reviewResult.verdict !== "LGTM_WITH_NOTES" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
|
|
11232
12610
|
if (isPhantomReview && !timeoutRetried) {
|
|
11233
12611
|
timeoutRetried = true;
|
|
11234
|
-
logger$
|
|
12612
|
+
logger$27.warn({
|
|
11235
12613
|
storyKey,
|
|
11236
12614
|
reviewCycles,
|
|
11237
12615
|
error: reviewResult.error
|
|
@@ -11241,7 +12619,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11241
12619
|
verdict = reviewResult.verdict;
|
|
11242
12620
|
issueList = reviewResult.issue_list ?? [];
|
|
11243
12621
|
if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
|
|
11244
|
-
logger$
|
|
12622
|
+
logger$27.info({
|
|
11245
12623
|
storyKey,
|
|
11246
12624
|
originalVerdict: verdict,
|
|
11247
12625
|
issuesBefore: previousIssueList.length,
|
|
@@ -11277,7 +12655,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11277
12655
|
if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
|
|
11278
12656
|
parts.push(`${fileCount} files`);
|
|
11279
12657
|
parts.push(`${totalTokensK} tokens`);
|
|
11280
|
-
logger$
|
|
12658
|
+
logger$27.info({
|
|
11281
12659
|
storyKey,
|
|
11282
12660
|
verdict,
|
|
11283
12661
|
agentVerdict: reviewResult.agentVerdict
|
|
@@ -11326,9 +12704,9 @@ function createImplementationOrchestrator(deps) {
|
|
|
11326
12704
|
}),
|
|
11327
12705
|
rationale: `Advisory notes from LGTM_WITH_NOTES review of ${storyKey}`
|
|
11328
12706
|
});
|
|
11329
|
-
logger$
|
|
12707
|
+
logger$27.info({ storyKey }, "Advisory notes persisted to decision store");
|
|
11330
12708
|
} catch (advisoryErr) {
|
|
11331
|
-
logger$
|
|
12709
|
+
logger$27.warn({
|
|
11332
12710
|
storyKey,
|
|
11333
12711
|
error: advisoryErr instanceof Error ? advisoryErr.message : String(advisoryErr)
|
|
11334
12712
|
}, "Failed to persist advisory notes (best-effort)");
|
|
@@ -11336,17 +12714,17 @@ function createImplementationOrchestrator(deps) {
|
|
|
11336
12714
|
if (telemetryPersistence !== void 0) try {
|
|
11337
12715
|
const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
|
|
11338
12716
|
if (turns.length > 0) {
|
|
11339
|
-
const scorer = new EfficiencyScorer(logger$
|
|
12717
|
+
const scorer = new EfficiencyScorer(logger$27);
|
|
11340
12718
|
const effScore = scorer.score(storyKey, turns);
|
|
11341
12719
|
await telemetryPersistence.storeEfficiencyScore(effScore);
|
|
11342
|
-
logger$
|
|
12720
|
+
logger$27.info({
|
|
11343
12721
|
storyKey,
|
|
11344
12722
|
compositeScore: effScore.compositeScore,
|
|
11345
12723
|
modelCount: effScore.perModelBreakdown.length
|
|
11346
12724
|
}, "Efficiency score computed and persisted");
|
|
11347
|
-
} else logger$
|
|
12725
|
+
} else logger$27.debug({ storyKey }, "No turn analysis data available — skipping efficiency scoring");
|
|
11348
12726
|
} catch (effErr) {
|
|
11349
|
-
logger$
|
|
12727
|
+
logger$27.warn({
|
|
11350
12728
|
storyKey,
|
|
11351
12729
|
error: effErr instanceof Error ? effErr.message : String(effErr)
|
|
11352
12730
|
}, "Efficiency scoring failed — story verdict unchanged");
|
|
@@ -11354,10 +12732,10 @@ function createImplementationOrchestrator(deps) {
|
|
|
11354
12732
|
if (telemetryPersistence !== void 0) try {
|
|
11355
12733
|
const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
|
|
11356
12734
|
const spans = [];
|
|
11357
|
-
if (spans.length === 0) logger$
|
|
12735
|
+
if (spans.length === 0) logger$27.debug({ storyKey }, "No spans for telemetry categorization — skipping");
|
|
11358
12736
|
else {
|
|
11359
|
-
const categorizer = new Categorizer(logger$
|
|
11360
|
-
const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$
|
|
12737
|
+
const categorizer = new Categorizer(logger$27);
|
|
12738
|
+
const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$27);
|
|
11361
12739
|
const categoryStats = categorizer.computeCategoryStats(spans, turns);
|
|
11362
12740
|
const consumerStats = consumerAnalyzer.analyze(spans);
|
|
11363
12741
|
await telemetryPersistence.storeCategoryStats(storyKey, categoryStats);
|
|
@@ -11365,7 +12743,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11365
12743
|
const growingCount = categoryStats.filter((c) => c.trend === "growing").length;
|
|
11366
12744
|
const topCategory = categoryStats[0]?.category ?? "none";
|
|
11367
12745
|
const topConsumer = consumerStats[0]?.consumerKey ?? "none";
|
|
11368
|
-
logger$
|
|
12746
|
+
logger$27.info({
|
|
11369
12747
|
storyKey,
|
|
11370
12748
|
topCategory,
|
|
11371
12749
|
topConsumer,
|
|
@@ -11373,7 +12751,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11373
12751
|
}, "Semantic categorization and consumer analysis complete");
|
|
11374
12752
|
}
|
|
11375
12753
|
} catch (catErr) {
|
|
11376
|
-
logger$
|
|
12754
|
+
logger$27.warn({
|
|
11377
12755
|
storyKey,
|
|
11378
12756
|
error: catErr instanceof Error ? catErr.message : String(catErr)
|
|
11379
12757
|
}, "Semantic categorization failed — story verdict unchanged");
|
|
@@ -11394,7 +12772,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11394
12772
|
filesModified: devFilesModified,
|
|
11395
12773
|
workingDirectory: projectRoot
|
|
11396
12774
|
});
|
|
11397
|
-
logger$
|
|
12775
|
+
logger$27.debug({
|
|
11398
12776
|
storyKey,
|
|
11399
12777
|
expansion_priority: expansionResult.expansion_priority,
|
|
11400
12778
|
coverage_gaps: expansionResult.coverage_gaps.length
|
|
@@ -11407,7 +12785,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11407
12785
|
value: JSON.stringify(expansionResult)
|
|
11408
12786
|
});
|
|
11409
12787
|
} catch (expansionErr) {
|
|
11410
|
-
logger$
|
|
12788
|
+
logger$27.warn({
|
|
11411
12789
|
storyKey,
|
|
11412
12790
|
error: expansionErr instanceof Error ? expansionErr.message : String(expansionErr)
|
|
11413
12791
|
}, "Test expansion failed — story verdict unchanged");
|
|
@@ -11434,7 +12812,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11434
12812
|
persistState();
|
|
11435
12813
|
return;
|
|
11436
12814
|
}
|
|
11437
|
-
logger$
|
|
12815
|
+
logger$27.info({
|
|
11438
12816
|
storyKey,
|
|
11439
12817
|
reviewCycles: finalReviewCycles,
|
|
11440
12818
|
issueCount: issueList.length
|
|
@@ -11494,7 +12872,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11494
12872
|
fixPrompt = assembled.prompt;
|
|
11495
12873
|
} catch {
|
|
11496
12874
|
fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
|
|
11497
|
-
logger$
|
|
12875
|
+
logger$27.warn({ storyKey }, "Failed to assemble auto-approve fix prompt, using fallback");
|
|
11498
12876
|
}
|
|
11499
12877
|
const handle = dispatcher.dispatch({
|
|
11500
12878
|
prompt: fixPrompt,
|
|
@@ -11502,7 +12880,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
11502
12880
|
taskType: "minor-fixes",
|
|
11503
12881
|
workingDirectory: projectRoot,
|
|
11504
12882
|
...autoApproveMaxTurns !== void 0 ? { maxTurns: autoApproveMaxTurns } : {},
|
|
11505
|
-
..._otlpEndpoint !== void 0 ? { otlpEndpoint: _otlpEndpoint } : {}
|
|
12883
|
+
..._otlpEndpoint !== void 0 ? { otlpEndpoint: _otlpEndpoint } : {},
|
|
12884
|
+
storyKey
|
|
11506
12885
|
});
|
|
11507
12886
|
const fixResult = await handle.result;
|
|
11508
12887
|
eventBus.emit("orchestrator:story-phase-complete", {
|
|
@@ -11513,9 +12892,9 @@ function createImplementationOrchestrator(deps) {
|
|
|
11513
12892
|
output: fixResult.tokenEstimate.output
|
|
11514
12893
|
} : void 0 }
|
|
11515
12894
|
});
|
|
11516
|
-
if (fixResult.status === "timeout") logger$
|
|
12895
|
+
if (fixResult.status === "timeout") logger$27.warn({ storyKey }, "Auto-approve fix timed out — approving anyway (issues were minor)");
|
|
11517
12896
|
} catch (err) {
|
|
11518
|
-
logger$
|
|
12897
|
+
logger$27.warn({
|
|
11519
12898
|
storyKey,
|
|
11520
12899
|
err
|
|
11521
12900
|
}, "Auto-approve fix dispatch failed — approving anyway (issues were minor)");
|
|
@@ -11632,7 +13011,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11632
13011
|
fixPrompt = assembled.prompt;
|
|
11633
13012
|
} catch {
|
|
11634
13013
|
fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
|
|
11635
|
-
logger$
|
|
13014
|
+
logger$27.warn({
|
|
11636
13015
|
storyKey,
|
|
11637
13016
|
taskType
|
|
11638
13017
|
}, "Failed to assemble fix prompt, using fallback");
|
|
@@ -11666,7 +13045,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11666
13045
|
} : void 0 }
|
|
11667
13046
|
});
|
|
11668
13047
|
if (fixResult.status === "timeout") {
|
|
11669
|
-
logger$
|
|
13048
|
+
logger$27.warn({
|
|
11670
13049
|
storyKey,
|
|
11671
13050
|
taskType
|
|
11672
13051
|
}, "Fix dispatch timed out — escalating story");
|
|
@@ -11688,7 +13067,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11688
13067
|
}
|
|
11689
13068
|
if (fixResult.status === "failed") {
|
|
11690
13069
|
if (isMajorRework) {
|
|
11691
|
-
logger$
|
|
13070
|
+
logger$27.warn({
|
|
11692
13071
|
storyKey,
|
|
11693
13072
|
exitCode: fixResult.exitCode
|
|
11694
13073
|
}, "Major rework dispatch failed — escalating story");
|
|
@@ -11708,14 +13087,14 @@ function createImplementationOrchestrator(deps) {
|
|
|
11708
13087
|
persistState();
|
|
11709
13088
|
return;
|
|
11710
13089
|
}
|
|
11711
|
-
logger$
|
|
13090
|
+
logger$27.warn({
|
|
11712
13091
|
storyKey,
|
|
11713
13092
|
taskType,
|
|
11714
13093
|
exitCode: fixResult.exitCode
|
|
11715
13094
|
}, "Fix dispatch failed");
|
|
11716
13095
|
}
|
|
11717
13096
|
} catch (err) {
|
|
11718
|
-
logger$
|
|
13097
|
+
logger$27.warn({
|
|
11719
13098
|
storyKey,
|
|
11720
13099
|
taskType,
|
|
11721
13100
|
err
|
|
@@ -11778,11 +13157,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
11778
13157
|
}
|
|
11779
13158
|
async function run(storyKeys) {
|
|
11780
13159
|
if (_state === "RUNNING" || _state === "PAUSED") {
|
|
11781
|
-
logger$
|
|
13160
|
+
logger$27.warn({ state: _state }, "run() called while orchestrator is already running or paused — ignoring");
|
|
11782
13161
|
return getStatus();
|
|
11783
13162
|
}
|
|
11784
13163
|
if (_state === "COMPLETE") {
|
|
11785
|
-
logger$
|
|
13164
|
+
logger$27.warn({ state: _state }, "run() called on a COMPLETE orchestrator — ignoring");
|
|
11786
13165
|
return getStatus();
|
|
11787
13166
|
}
|
|
11788
13167
|
_state = "RUNNING";
|
|
@@ -11806,7 +13185,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11806
13185
|
const seedStart = Date.now();
|
|
11807
13186
|
const seedResult = seedMethodologyContext(db, projectRoot);
|
|
11808
13187
|
_startupTimings.seedMethodologyMs = Date.now() - seedStart;
|
|
11809
|
-
if (seedResult.decisionsCreated > 0) logger$
|
|
13188
|
+
if (seedResult.decisionsCreated > 0) logger$27.info({
|
|
11810
13189
|
decisionsCreated: seedResult.decisionsCreated,
|
|
11811
13190
|
skippedCategories: seedResult.skippedCategories,
|
|
11812
13191
|
durationMs: _startupTimings.seedMethodologyMs
|
|
@@ -11819,7 +13198,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11819
13198
|
_startupTimings.stateStoreInitMs = Date.now() - stateStoreInitStart;
|
|
11820
13199
|
for (const key of storyKeys) {
|
|
11821
13200
|
const pendingState = _stories.get(key);
|
|
11822
|
-
if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$
|
|
13201
|
+
if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$27.warn({
|
|
11823
13202
|
err,
|
|
11824
13203
|
storyKey: key
|
|
11825
13204
|
}, "StateStore write failed during PENDING init"));
|
|
@@ -11830,14 +13209,30 @@ function createImplementationOrchestrator(deps) {
|
|
|
11830
13209
|
_startupTimings.queryStoriesMs = Date.now() - queryStoriesStart;
|
|
11831
13210
|
for (const record of existingRecords) _stateStoreCache.set(record.storyKey, record);
|
|
11832
13211
|
} catch (err) {
|
|
11833
|
-
logger$
|
|
13212
|
+
logger$27.warn({ err }, "StateStore.queryStories() failed during init — status merge will be empty (best-effort)");
|
|
11834
13213
|
}
|
|
11835
13214
|
}
|
|
11836
13215
|
if (ingestionServer !== void 0) {
|
|
11837
|
-
|
|
13216
|
+
if (telemetryPersistence !== void 0) try {
|
|
13217
|
+
const pipelineLogger = logger$27;
|
|
13218
|
+
const telemetryPipeline = new TelemetryPipeline({
|
|
13219
|
+
normalizer: new TelemetryNormalizer(pipelineLogger),
|
|
13220
|
+
turnAnalyzer: new TurnAnalyzer(pipelineLogger),
|
|
13221
|
+
categorizer: new Categorizer(pipelineLogger),
|
|
13222
|
+
consumerAnalyzer: new ConsumerAnalyzer(new Categorizer(pipelineLogger), pipelineLogger),
|
|
13223
|
+
efficiencyScorer: new EfficiencyScorer(pipelineLogger),
|
|
13224
|
+
recommender: new Recommender(pipelineLogger),
|
|
13225
|
+
persistence: telemetryPersistence
|
|
13226
|
+
});
|
|
13227
|
+
ingestionServer.setPipeline(telemetryPipeline);
|
|
13228
|
+
logger$27.info("TelemetryPipeline wired to IngestionServer");
|
|
13229
|
+
} catch (pipelineErr) {
|
|
13230
|
+
logger$27.warn({ err: pipelineErr }, "Failed to create TelemetryPipeline — continuing without analysis pipeline");
|
|
13231
|
+
}
|
|
13232
|
+
await ingestionServer.start().catch((err) => logger$27.warn({ err }, "IngestionServer.start() failed — continuing without telemetry (best-effort)"));
|
|
11838
13233
|
try {
|
|
11839
13234
|
_otlpEndpoint = ingestionServer.getOtlpEnvVars().OTEL_EXPORTER_OTLP_ENDPOINT;
|
|
11840
|
-
logger$
|
|
13235
|
+
logger$27.info({ otlpEndpoint: _otlpEndpoint }, "OTLP telemetry ingestion active");
|
|
11841
13236
|
} catch {}
|
|
11842
13237
|
}
|
|
11843
13238
|
let contractDeclarations = [];
|
|
@@ -11877,11 +13272,11 @@ function createImplementationOrchestrator(deps) {
|
|
|
11877
13272
|
const conflictDetectStart = Date.now();
|
|
11878
13273
|
const { batches, edges: contractEdges } = detectConflictGroupsWithContracts(storyKeys, { moduleMap: pack.manifest.conflictGroups }, contractDeclarations);
|
|
11879
13274
|
_startupTimings.conflictDetectMs = Date.now() - conflictDetectStart;
|
|
11880
|
-
if (contractEdges.length > 0) logger$
|
|
13275
|
+
if (contractEdges.length > 0) logger$27.info({
|
|
11881
13276
|
contractEdges,
|
|
11882
13277
|
edgeCount: contractEdges.length
|
|
11883
13278
|
}, "Contract dependency edges detected — applying contract-aware dispatch ordering");
|
|
11884
|
-
logger$
|
|
13279
|
+
logger$27.info({
|
|
11885
13280
|
storyCount: storyKeys.length,
|
|
11886
13281
|
groupCount: batches.reduce((sum, b) => sum + b.length, 0),
|
|
11887
13282
|
batchCount: batches.length,
|
|
@@ -11903,7 +13298,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
11903
13298
|
exitCode,
|
|
11904
13299
|
output: truncatedOutput
|
|
11905
13300
|
});
|
|
11906
|
-
logger$
|
|
13301
|
+
logger$27.error({
|
|
11907
13302
|
exitCode,
|
|
11908
13303
|
reason: preFlightResult.reason
|
|
11909
13304
|
}, "Pre-flight build check failed — aborting pipeline before any story dispatch");
|
|
@@ -11912,9 +13307,9 @@ function createImplementationOrchestrator(deps) {
|
|
|
11912
13307
|
persistState();
|
|
11913
13308
|
return getStatus();
|
|
11914
13309
|
}
|
|
11915
|
-
if (preFlightResult.status !== "skipped") logger$
|
|
13310
|
+
if (preFlightResult.status !== "skipped") logger$27.info("Pre-flight build check passed");
|
|
11916
13311
|
}
|
|
11917
|
-
logger$
|
|
13312
|
+
logger$27.info(_startupTimings, "Orchestrator startup timings (ms)");
|
|
11918
13313
|
try {
|
|
11919
13314
|
for (const batchGroups of batches) await runWithConcurrency(batchGroups, config.maxConcurrency);
|
|
11920
13315
|
} catch (err) {
|
|
@@ -11922,14 +13317,22 @@ function createImplementationOrchestrator(deps) {
|
|
|
11922
13317
|
_state = "FAILED";
|
|
11923
13318
|
_completedAt = new Date().toISOString();
|
|
11924
13319
|
persistState();
|
|
11925
|
-
logger$
|
|
13320
|
+
logger$27.error({ err }, "Orchestrator failed with unhandled error");
|
|
11926
13321
|
return getStatus();
|
|
11927
13322
|
}
|
|
11928
13323
|
stopHeartbeat();
|
|
11929
13324
|
_state = "COMPLETE";
|
|
11930
13325
|
_completedAt = new Date().toISOString();
|
|
11931
13326
|
if (projectRoot !== void 0 && contractDeclarations.length > 0) try {
|
|
11932
|
-
const
|
|
13327
|
+
const totalDeclarations = contractDeclarations.length;
|
|
13328
|
+
const currentSprintDeclarations = contractDeclarations.filter((d) => storyKeys.includes(d.storyKey));
|
|
13329
|
+
const stalePruned = totalDeclarations - currentSprintDeclarations.length;
|
|
13330
|
+
if (stalePruned > 0) logger$27.info({
|
|
13331
|
+
stalePruned,
|
|
13332
|
+
remaining: currentSprintDeclarations.length
|
|
13333
|
+
}, "Pruned stale contract declarations from previous epics");
|
|
13334
|
+
let mismatches = [];
|
|
13335
|
+
if (currentSprintDeclarations.length > 0) mismatches = verifyContracts(currentSprintDeclarations, projectRoot);
|
|
11933
13336
|
if (mismatches.length > 0) {
|
|
11934
13337
|
_contractMismatches = mismatches;
|
|
11935
13338
|
for (const mismatch of mismatches) eventBus.emit("pipeline:contract-mismatch", {
|
|
@@ -11938,16 +13341,22 @@ function createImplementationOrchestrator(deps) {
|
|
|
11938
13341
|
contractName: mismatch.contractName,
|
|
11939
13342
|
mismatchDescription: mismatch.mismatchDescription
|
|
11940
13343
|
});
|
|
11941
|
-
logger$
|
|
13344
|
+
logger$27.warn({
|
|
11942
13345
|
mismatchCount: mismatches.length,
|
|
11943
13346
|
mismatches
|
|
11944
13347
|
}, "Post-sprint contract verification found mismatches — manual review required");
|
|
11945
|
-
} else logger$
|
|
13348
|
+
} else if (currentSprintDeclarations.length > 0) logger$27.info("Post-sprint contract verification passed — all declared contracts satisfied");
|
|
13349
|
+
eventBus.emit("pipeline:contract-verification-summary", {
|
|
13350
|
+
verified: currentSprintDeclarations.length,
|
|
13351
|
+
stalePruned,
|
|
13352
|
+
mismatches: mismatches.length,
|
|
13353
|
+
verdict: mismatches.length === 0 ? "pass" : "fail"
|
|
13354
|
+
});
|
|
11946
13355
|
if (stateStore !== void 0) try {
|
|
11947
|
-
const
|
|
13356
|
+
const currentSprintContracts = (await stateStore.queryContracts()).filter((cr) => storyKeys.includes(cr.storyKey));
|
|
11948
13357
|
const verifiedAt = new Date().toISOString();
|
|
11949
13358
|
const contractsByStory = new Map();
|
|
11950
|
-
for (const cr of
|
|
13359
|
+
for (const cr of currentSprintContracts) {
|
|
11951
13360
|
const existing = contractsByStory.get(cr.storyKey) ?? [];
|
|
11952
13361
|
existing.push(cr);
|
|
11953
13362
|
contractsByStory.set(cr.storyKey, existing);
|
|
@@ -11971,12 +13380,12 @@ function createImplementationOrchestrator(deps) {
|
|
|
11971
13380
|
});
|
|
11972
13381
|
await stateStore.setContractVerification(sk, records);
|
|
11973
13382
|
}
|
|
11974
|
-
logger$
|
|
13383
|
+
logger$27.info({ storyCount: contractsByStory.size }, "Contract verification results persisted to StateStore");
|
|
11975
13384
|
} catch (persistErr) {
|
|
11976
|
-
logger$
|
|
13385
|
+
logger$27.warn({ err: persistErr }, "Failed to persist contract verification results to StateStore");
|
|
11977
13386
|
}
|
|
11978
13387
|
} catch (err) {
|
|
11979
|
-
logger$
|
|
13388
|
+
logger$27.error({ err }, "Post-sprint contract verification threw an error — skipping");
|
|
11980
13389
|
}
|
|
11981
13390
|
let completed = 0;
|
|
11982
13391
|
let escalated = 0;
|
|
@@ -11993,8 +13402,8 @@ function createImplementationOrchestrator(deps) {
|
|
|
11993
13402
|
persistState();
|
|
11994
13403
|
return getStatus();
|
|
11995
13404
|
} finally {
|
|
11996
|
-
if (stateStore !== void 0) await stateStore.close().catch((err) => logger$
|
|
11997
|
-
if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$
|
|
13405
|
+
if (stateStore !== void 0) await stateStore.close().catch((err) => logger$27.warn({ err }, "StateStore.close() failed (best-effort)"));
|
|
13406
|
+
if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$27.warn({ err }, "IngestionServer.stop() failed (best-effort)"));
|
|
11998
13407
|
}
|
|
11999
13408
|
}
|
|
12000
13409
|
function pause() {
|
|
@@ -12003,7 +13412,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12003
13412
|
_pauseGate = createPauseGate();
|
|
12004
13413
|
_state = "PAUSED";
|
|
12005
13414
|
eventBus.emit("orchestrator:paused", {});
|
|
12006
|
-
logger$
|
|
13415
|
+
logger$27.info("Orchestrator paused");
|
|
12007
13416
|
}
|
|
12008
13417
|
function resume() {
|
|
12009
13418
|
if (_state !== "PAUSED") return;
|
|
@@ -12014,7 +13423,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
12014
13423
|
}
|
|
12015
13424
|
_state = "RUNNING";
|
|
12016
13425
|
eventBus.emit("orchestrator:resumed", {});
|
|
12017
|
-
logger$
|
|
13426
|
+
logger$27.info("Orchestrator resumed");
|
|
12018
13427
|
}
|
|
12019
13428
|
return {
|
|
12020
13429
|
run,
|
|
@@ -16359,7 +17768,11 @@ async function runRunAction(options) {
|
|
|
16359
17768
|
...skipResearchFlag === true ? { skipResearch: true } : {},
|
|
16360
17769
|
...skipPreflight === true ? { skipPreflight: true } : {},
|
|
16361
17770
|
...epicNumber !== void 0 ? { epic: epicNumber } : {},
|
|
16362
|
-
...injectedRegistry !== void 0 ? { registry: injectedRegistry } : {}
|
|
17771
|
+
...injectedRegistry !== void 0 ? { registry: injectedRegistry } : {},
|
|
17772
|
+
...telemetryEnabled ? {
|
|
17773
|
+
telemetryEnabled: true,
|
|
17774
|
+
telemetryPort
|
|
17775
|
+
} : {}
|
|
16363
17776
|
});
|
|
16364
17777
|
let storyKeys = [...parsedStoryKeys];
|
|
16365
17778
|
if (!existsSync(dbDir)) mkdirSync(dbDir, { recursive: true });
|
|
@@ -16760,8 +18173,19 @@ async function runRunAction(options) {
|
|
|
16760
18173
|
mismatchDescription: payload.mismatchDescription
|
|
16761
18174
|
});
|
|
16762
18175
|
});
|
|
18176
|
+
eventBus.on("pipeline:contract-verification-summary", (payload) => {
|
|
18177
|
+
ndjsonEmitter.emit({
|
|
18178
|
+
type: "pipeline:contract-verification-summary",
|
|
18179
|
+
ts: new Date().toISOString(),
|
|
18180
|
+
verified: payload.verified,
|
|
18181
|
+
stalePruned: payload.stalePruned,
|
|
18182
|
+
mismatches: payload.mismatches,
|
|
18183
|
+
verdict: payload.verdict
|
|
18184
|
+
});
|
|
18185
|
+
});
|
|
16763
18186
|
}
|
|
16764
18187
|
const ingestionServer = telemetryEnabled ? new IngestionServer({ port: telemetryPort }) : void 0;
|
|
18188
|
+
const telemetryPersistence = telemetryEnabled ? new TelemetryPersistence(db) : void 0;
|
|
16765
18189
|
const orchestrator = createImplementationOrchestrator({
|
|
16766
18190
|
db,
|
|
16767
18191
|
pack,
|
|
@@ -16777,7 +18201,8 @@ async function runRunAction(options) {
|
|
|
16777
18201
|
},
|
|
16778
18202
|
projectRoot,
|
|
16779
18203
|
tokenCeilings,
|
|
16780
|
-
...ingestionServer !== void 0 ? { ingestionServer } : {}
|
|
18204
|
+
...ingestionServer !== void 0 ? { ingestionServer } : {},
|
|
18205
|
+
...telemetryPersistence !== void 0 ? { telemetryPersistence } : {}
|
|
16781
18206
|
});
|
|
16782
18207
|
if (outputFormat === "human" && progressRenderer === void 0 && ndjsonEmitter === void 0) {
|
|
16783
18208
|
process.stdout.write(`Starting pipeline: ${storyKeys.length} story/stories, concurrency=${concurrency}\n`);
|
|
@@ -16873,7 +18298,7 @@ async function runRunAction(options) {
|
|
|
16873
18298
|
}
|
|
16874
18299
|
}
|
|
16875
18300
|
async function runFullPipeline(options) {
|
|
16876
|
-
const { packName, packPath, dbDir, dbPath, startPhase, stopAfter, concept, concurrency, outputFormat, projectRoot, events: eventsFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, skipPreflight, registry: injectedRegistry, tokenCeilings, stories: explicitStories } = options;
|
|
18301
|
+
const { packName, packPath, dbDir, dbPath, startPhase, stopAfter, concept, concurrency, outputFormat, projectRoot, events: eventsFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, skipPreflight, registry: injectedRegistry, tokenCeilings, stories: explicitStories, telemetryEnabled: fullTelemetryEnabled, telemetryPort: fullTelemetryPort } = options;
|
|
16877
18302
|
if (!existsSync(dbDir)) mkdirSync(dbDir, { recursive: true });
|
|
16878
18303
|
const dbWrapper = new DatabaseWrapper(dbPath);
|
|
16879
18304
|
try {
|
|
@@ -17069,6 +18494,8 @@ async function runFullPipeline(options) {
|
|
|
17069
18494
|
process.stdout.write(` Tokens: ${result.tokenUsage.input.toLocaleString()} input / ${result.tokenUsage.output.toLocaleString()} output\n`);
|
|
17070
18495
|
}
|
|
17071
18496
|
} else if (currentPhase === "implementation") {
|
|
18497
|
+
const fpIngestionServer = fullTelemetryEnabled ? new IngestionServer({ port: fullTelemetryPort ?? 4318 }) : void 0;
|
|
18498
|
+
const fpTelemetryPersistence = fullTelemetryEnabled ? new TelemetryPersistence(db) : void 0;
|
|
17072
18499
|
const orchestrator = createImplementationOrchestrator({
|
|
17073
18500
|
db,
|
|
17074
18501
|
pack,
|
|
@@ -17082,7 +18509,9 @@ async function runFullPipeline(options) {
|
|
|
17082
18509
|
skipPreflight: skipPreflight === true
|
|
17083
18510
|
},
|
|
17084
18511
|
projectRoot,
|
|
17085
|
-
tokenCeilings
|
|
18512
|
+
tokenCeilings,
|
|
18513
|
+
...fpIngestionServer !== void 0 ? { ingestionServer: fpIngestionServer } : {},
|
|
18514
|
+
...fpTelemetryPersistence !== void 0 ? { telemetryPersistence: fpTelemetryPersistence } : {}
|
|
17086
18515
|
});
|
|
17087
18516
|
eventBus.on("orchestrator:story-phase-complete", (payload) => {
|
|
17088
18517
|
try {
|
|
@@ -17221,4 +18650,4 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
|
|
|
17221
18650
|
|
|
17222
18651
|
//#endregion
|
|
17223
18652
|
export { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, DoltNotInstalled, FileStateStore, SUBSTRATE_OWNED_SETTINGS_KEYS, TelemetryPersistence, VALID_PHASES, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDispatcher, createDoltClient, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initializeDolt, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runMigrations, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
|
|
17224
|
-
//# sourceMappingURL=run-
|
|
18653
|
+
//# sourceMappingURL=run-DzzmgEOd.js.map
|