substrate-ai 0.6.8 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/index.js +33 -16
- package/dist/{config-migrator-DtZW1maj.js → config-migrator-CtGelIsG.js} +1 -1
- package/dist/{decisions-BxYj_a1X.js → decisions-BDLp3tJB.js} +1 -1
- package/dist/{decisions-C6MF2Cax.js → decisions-DhAA2HG2.js} +1 -1
- package/dist/{experimenter-CjfzjmwY.js → experimenter-D_N_7ZF3.js} +4 -4
- package/dist/{git-utils-C-fdrHF_.js → git-utils-DxPx6erV.js} +1 -1
- package/dist/health-4fyhDU6T.js +5 -0
- package/dist/health-Dnx-FGva.js +3247 -0
- package/dist/{helpers-BihqWgVe.js → helpers-CpMs8VZX.js} +1 -1
- package/dist/index.js +1 -1
- package/dist/{operational-BRpT8MYF.js → operational-BdcdmDqS.js} +1 -1
- package/dist/{routing-CobBiKeV.js → routing-BVrxrM6v.js} +1 -1
- package/dist/{routing-CpsRPjLE.js → routing-CD8bIci_.js} +1 -1
- package/dist/run-BdqqWU9p.js +10 -0
- package/dist/{run-tB6yG6o8.js → run-CfF0-tVP.js} +521 -3424
- package/dist/schema.sql +2 -0
- package/dist/{upgrade-C8_VcI8B.js → upgrade-B1S61VXJ.js} +2 -2
- package/dist/{upgrade-njy4XENS.js → upgrade-BK0HrKA6.js} +3 -3
- package/dist/{version-manager-impl-DTlmGvHb.js → version-manager-impl-BIxOe7gZ.js} +2 -2
- package/dist/version-manager-impl-RrWs-CI6.js +4 -0
- package/package.json +1 -1
- package/packs/bmad/prompts/build-fix.md +54 -0
- package/packs/bmad/prompts/dev-story.md +2 -0
- package/dist/run-ME16n4N_.js +0 -9
- package/dist/version-manager-impl-QwroczYS.js +0 -4
|
@@ -0,0 +1,3247 @@
|
|
|
1
|
+
import { createLogger } from "./logger-D2fS2ccL.js";
|
|
2
|
+
import { getLatestRun, getPipelineRunById } from "./decisions-DhAA2HG2.js";
|
|
3
|
+
import { createRequire } from "module";
|
|
4
|
+
import { dirname, join } from "path";
|
|
5
|
+
import { existsSync } from "fs";
|
|
6
|
+
import { createRequire as createRequire$1 } from "node:module";
|
|
7
|
+
import { execFile, spawn, spawnSync } from "node:child_process";
|
|
8
|
+
import { dirname as dirname$1, join as join$1, resolve as resolve$1 } from "node:path";
|
|
9
|
+
import { existsSync as existsSync$1, readFileSync as readFileSync$1 } from "node:fs";
|
|
10
|
+
import { access, mkdir, readFile, writeFile } from "node:fs/promises";
|
|
11
|
+
import { fileURLToPath } from "node:url";
|
|
12
|
+
|
|
13
|
+
//#region rolldown:runtime
// Bundler-generated CommonJS <-> ESM interop runtime (emitted by rolldown).
// This is build output; do not hand-edit.
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Wrap a CommonJS module body for lazy, run-once evaluation: `cb` maps a
// single key to the module factory, and `mod` caches the populated
// `{ exports }` object after the first invocation.
var __commonJS = (cb, mod) => function() {
	return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
};
// Copy own properties of `from` onto `to` as live getters, skipping the
// `except` key and any key already present on `to`; each getter preserves
// the source descriptor's enumerability.
var __copyProps = (to, from, except, desc) => {
	if (from && typeof from === "object" || typeof from === "function") for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
		key = keys[i];
		if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, {
			get: ((k) => from[k]).bind(null, key),
			enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
		});
	}
	return to;
};
// Present a CommonJS export as an ESM-shaped namespace object: copies all
// properties across via __copyProps and, when the source is not already an
// ES module, exposes the whole module object as the `default` export.
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", {
	value: mod,
	enumerable: true
}) : target, mod));
// `require` bound to this bundle's own URL, for loading CJS dependencies
// from ESM output.
var __require = /* @__PURE__ */ createRequire(import.meta.url);

//#endregion
|
|
40
|
+
//#region src/utils/git-root.ts
/**
 * Resolve the main git repository root, even from a linked worktree.
 *
 * `git rev-parse --git-common-dir` prints `.git` (relative) from the main
 * worktree and the absolute path to the shared `.git` directory from a
 * linked worktree. In both cases, `dirname()` of the path resolved against
 * `cwd` yields the repository root.
 *
 * Falls back to `cwd` if git is unavailable or `cwd` is not inside a repo.
 */
async function resolveMainRepoRoot(cwd = process.cwd()) {
	return new Promise((settle) => {
		const chunks = [];
		const child = spawn("git", ["rev-parse", "--git-common-dir"], {
			cwd,
			stdio: ["ignore", "pipe", "pipe"]
		});
		child.stdout?.on("data", (chunk) => {
			chunks.push(chunk.toString("utf-8"));
		});
		// Spawn failure (e.g. git not installed): degrade to cwd.
		child.on("error", () => settle(cwd));
		child.on("close", (code) => {
			if (code !== 0) return settle(cwd);
			const commonDir = chunks.join("").trim();
			if (!commonDir) return settle(cwd);
			settle(dirname$1(resolve$1(cwd, commonDir)));
		});
	});
}

//#endregion
|
|
84
|
+
//#region src/persistence/dolt-adapter.ts
/**
 * DatabaseAdapter implementation that delegates every operation to a
 * DoltClient instance supplied by the caller.
 */
var DoltDatabaseAdapter = class {
	_client;
	/**
	 * Wrap an existing DoltClient. The caller is responsible for creating
	 * the client with the right `repoPath`; the client connects lazily on
	 * first query, so no connection is opened here.
	 */
	constructor(client) {
		this._client = client;
	}
	/**
	 * Run a SQL query and return every result row as a typed object.
	 * Works in both mysql2 pool mode and CLI fallback mode of DoltClient.
	 */
	async query(sql, params) {
		return this._client.query(sql, params);
	}
	/**
	 * Run a DDL/DML statement for its side effect; any rows are dropped.
	 */
	async exec(sql) {
		await this._client.query(sql, void 0);
	}
	/**
	 * Run `fn` inside an explicit SQL transaction: BEGIN first, COMMIT on
	 * success, ROLLBACK (then rethrow) on any failure — including a failed
	 * COMMIT itself. Supported in pool mode natively and in CLI mode via
	 * Dolt's multi-statement sessions.
	 */
	async transaction(fn) {
		await this._client.query("BEGIN", void 0);
		try {
			const outcome = await fn(this);
			await this._client.query("COMMIT", void 0);
			return outcome;
		} catch (err) {
			await this._client.query("ROLLBACK", void 0);
			throw err;
		}
	}
	/**
	 * Shut down the underlying DoltClient connection pool.
	 */
	async close() {
		await this._client.close();
	}
	/**
	 * Read story keys from the `ready_stories` SQL view: stories whose
	 * status is `planned`/`ready` with all hard dependencies `complete`.
	 *
	 * Any SQL error (view missing, empty stories table, ...) yields `[]`
	 * so the caller can fall back to the legacy discovery chain.
	 */
	async queryReadyStories() {
		try {
			const result = await this._client.query("SELECT `key` FROM ready_stories ORDER BY `key` ASC", void 0);
			return result.map((row) => row.key);
		} catch {
			return [];
		}
	}
};

//#endregion
|
|
158
|
+
//#region src/persistence/memory-adapter.ts
/**
 * Dependency-free DatabaseAdapter backed by plain JS Maps.
 *
 * Interprets a small regex-parsed subset of SQL (CREATE/DROP TABLE, INSERT,
 * SELECT with simple WHERE clauses and aggregates, UPDATE, DELETE) — enough
 * for tests and for running without a Dolt installation. It is not a real
 * SQL engine: unsupported statements silently return `[]`.
 */
var InMemoryDatabaseAdapter = class {
	// Table name -> array of plain row objects.
	_tables = new Map();
	/** Execute a statement and return its result rows ([] for non-SELECTs). */
	async query(sql, params) {
		const rows = this._execute(sql.trim(), params);
		return rows;
	}
	/** Execute a statement for its side effect, discarding result rows. */
	async exec(sql) {
		this._execute(sql.trim(), void 0);
	}
	/**
	 * Run `fn` with snapshot/rollback semantics: a per-row shallow copy of
	 * every table is taken up front and restored wholesale if `fn` throws.
	 */
	async transaction(fn) {
		const snapshot = new Map();
		for (const [name, rows] of this._tables) snapshot.set(name, rows.map((r) => ({ ...r })));
		try {
			const result = await fn(this);
			return result;
		} catch (err) {
			this._tables = snapshot;
			throw err;
		}
	}
	/** Discard all in-memory state. */
	async close() {
		this._tables.clear();
	}
	/**
	 * Work graph not supported in InMemoryDatabaseAdapter.
	 * Returns `[]` to signal the caller to use the legacy discovery path.
	 */
	async queryReadyStories() {
		return [];
	}
	/** Dispatch a resolved statement to the matching mini-interpreter. */
	_execute(sql, params) {
		const resolved = this._substituteParams(sql, params);
		const upper = resolved.trimStart().toUpperCase();
		if (/^CREATE\s+TABLE/i.test(upper)) return this._createTable(resolved);
		if (/^DROP\s+TABLE/i.test(upper)) return this._dropTable(resolved);
		if (/^CREATE\s+(?:OR\s+REPLACE\s+)?VIEW/i.test(upper)) return [];
		if (/^INSERT\s+(?:IGNORE\s+)?INTO/i.test(upper)) return this._insert(resolved, /^INSERT\s+IGNORE\s+INTO/i.test(upper));
		if (/^SELECT/i.test(upper)) return this._select(resolved);
		if (/^UPDATE/i.test(upper)) return this._update(resolved);
		if (/^DELETE\s+FROM/i.test(upper)) return this._delete(resolved);
		return [];
	}
	/**
	 * Replace each `?` placeholder with an escaped literal value.
	 *
	 * FIX: placeholders are located with a quote-aware character scan rather
	 * than a blanket `/\?/g` regex replace. The regex version also consumed
	 * `?` characters appearing INSIDE single-quoted SQL string literals
	 * (e.g. `... VALUES ('why?', ?)`), corrupting the literal and shifting
	 * every subsequent bound parameter by one position.
	 */
	_substituteParams(sql, params) {
		if (!params || params.length === 0) return sql;
		let idx = 0;
		let inStr = false;
		let out = "";
		for (const ch of sql) {
			if (ch === "'") {
				// An escaped quote ('') toggles twice and remains in-string.
				inStr = !inStr;
				out += ch;
			} else if (ch === "?" && !inStr) out += this._encodeParam(params[idx++]);
			else out += ch;
		}
		return out;
	}
	/** Encode one bound parameter as a SQL literal (NULL, number, 0/1 bool, quoted string). */
	_encodeParam(val) {
		if (val === null || val === void 0) return "NULL";
		if (typeof val === "number") return String(val);
		if (typeof val === "boolean") return val ? "1" : "0";
		return `'${String(val).replace(/'/g, "''")}'`;
	}
	/** CREATE TABLE [IF NOT EXISTS] name — registers an empty table; column defs are ignored. */
	_createTable(sql) {
		const m = /CREATE\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?(\w+)/i.exec(sql);
		if (m) {
			const name = m[1];
			if (!this._tables.has(name)) this._tables.set(name, []);
		}
		return [];
	}
	/** DROP TABLE [IF EXISTS] name. */
	_dropTable(sql) {
		const m = /DROP\s+TABLE\s+(?:IF\s+EXISTS\s+)?(\w+)/i.exec(sql);
		if (m) this._tables.delete(m[1]);
		return [];
	}
	/**
	 * Single-row INSERT with an explicit column list.
	 * NOTE(review): `_ignoreConflicts` is currently unused — with no primary
	 * key metadata available, INSERT IGNORE behaves like a plain INSERT and
	 * duplicates are not suppressed. Confirm callers don't rely on dedupe.
	 */
	_insert(sql, _ignoreConflicts = false) {
		const m = /INSERT\s+(?:IGNORE\s+)?INTO\s+(\w+)\s*\(([^)]+)\)\s*VALUES\s*\((.+)\)\s*$/is.exec(sql);
		if (!m) return [];
		const tableName = m[1];
		const cols = m[2].split(",").map((c) => c.trim());
		const valStr = m[3];
		const vals = this._parseValueList(valStr);
		const row = {};
		for (let i = 0; i < cols.length; i++) row[cols[i]] = vals[i] ?? null;
		if (!this._tables.has(tableName)) this._tables.set(tableName, []);
		this._tables.get(tableName).push(row);
		return [];
	}
	/**
	 * SELECT over a single table: `*`, simple column/alias projections, or
	 * aggregate expressions. ORDER BY / LIMIT clauses are stripped (ignored).
	 * Table-less SELECTs evaluate their expressions as literals.
	 */
	_select(sql) {
		if (!/FROM/i.test(sql)) {
			const m$1 = /SELECT\s+(.+)$/is.exec(sql);
			if (!m$1) return [];
			return [this._evalSelectExprs(m$1[1].trim())];
		}
		const stripped = sql.replace(/\s+ORDER\s+BY\s+.+?(?=\s+LIMIT\s|\s*$)/is, "").replace(/\s+LIMIT\s+\d+\s*$/is, "");
		const m = /SELECT\s+(.+?)\s+FROM\s+(\w+)(?:\s+WHERE\s+(.+))?$/is.exec(stripped);
		if (!m) return [];
		const colsStr = m[1].trim();
		const tableName = m[2];
		const whereStr = m[3];
		const table = this._tables.get(tableName) ?? [];
		let rows = table.map((r) => ({ ...r }));
		if (whereStr) rows = rows.filter((row) => this._matchWhere(whereStr.trim(), row));
		if (colsStr === "*") return rows;
		if (/\b(?:SUM|COALESCE|COUNT|AVG|MIN|MAX)\s*\(/i.test(colsStr)) return [this._evalAggregate(colsStr, rows)];
		return rows.map((row) => this._projectCols(colsStr, row));
	}
	/** UPDATE table SET a = v[, ...] [WHERE ...] — mutates matching rows in place. */
	_update(sql) {
		const m = /UPDATE\s+(\w+)\s+SET\s+(.+?)(?:\s+WHERE\s+(.+))?$/is.exec(sql);
		if (!m) return [];
		const tableName = m[1];
		const setStr = m[2];
		const whereStr = m[3];
		const table = this._tables.get(tableName);
		if (!table) return [];
		const assignments = this._parseAssignments(setStr);
		for (const row of table) if (!whereStr || this._matchWhere(whereStr.trim(), row)) for (const [col, val] of assignments) row[col] = val;
		return [];
	}
	/** DELETE FROM table [WHERE ...]; without WHERE, truncates the table. */
	_delete(sql) {
		const m = /DELETE\s+FROM\s+(\w+)(?:\s+WHERE\s+(.+))?$/is.exec(sql);
		if (!m) return [];
		const tableName = m[1];
		const whereStr = m[2];
		const table = this._tables.get(tableName);
		if (!table) return [];
		if (!whereStr) {
			this._tables.set(tableName, []);
			return [];
		}
		const kept = table.filter((row) => !this._matchWhere(whereStr.trim(), row));
		this._tables.set(tableName, kept);
		return [];
	}
	/**
	 * Evaluate a simple WHERE clause against a row.
	 * Supports AND-joined conditions of the forms: `col = 'str'`,
	 * `col = number`, `col IS [NOT] NULL`, `col LIKE 'pattern'`.
	 * NOTE(review): a condition matching none of these forms is silently
	 * treated as satisfied, so unsupported operators widen the match.
	 */
	_matchWhere(whereClause, row) {
		const conditions = whereClause.split(/\s+AND\s+/i);
		for (const condition of conditions) {
			const trimmed = condition.trim();
			const strM = /^(\w+)\s*=\s*'(.*)'$/is.exec(trimmed);
			if (strM) {
				const colVal = String(row[strM[1]] ?? "");
				const literal = strM[2].replace(/''/g, "'");
				if (colVal !== literal) return false;
				continue;
			}
			const numM = /^(\w+)\s*=\s*(-?\d+(?:\.\d+)?)$/.exec(trimmed);
			if (numM) {
				if (Number(row[numM[1]]) !== parseFloat(numM[2])) return false;
				continue;
			}
			const nullM = /^(\w+)\s+IS\s+NULL$/i.exec(trimmed);
			if (nullM) {
				if (row[nullM[1]] !== null && row[nullM[1]] !== void 0) return false;
				continue;
			}
			const notNullM = /^(\w+)\s+IS\s+NOT\s+NULL$/i.exec(trimmed);
			if (notNullM) {
				if (row[notNullM[1]] === null || row[notNullM[1]] === void 0) return false;
				continue;
			}
			const likeM = /^(\w+)\s+LIKE\s+'(.*)'$/is.exec(trimmed);
			if (likeM) {
				const colVal = row[likeM[1]];
				if (colVal === null || colVal === void 0) return false;
				const pattern = likeM[2].replace(/''/g, "'");
				// Escape regex metacharacters, then translate SQL wildcards.
				const escaped = pattern.replace(/[.*+?^${}()|[\]\\]/g, (ch) => ch === "%" || ch === "_" ? ch : "\\" + ch);
				const regex = new RegExp("^" + escaped.replace(/%/g, ".*").replace(/_/g, ".") + "$", "s");
				if (!regex.test(String(colVal))) return false;
				continue;
			}
		}
		return true;
	}
	/** Project the requested columns (with optional `AS alias`) out of one row. */
	_projectCols(colsStr, row) {
		const result = {};
		const cols = this._splitTopLevelCommas(colsStr);
		for (const col of cols) {
			const aliasM = /^(.+?)\s+AS\s+(\w+)$/i.exec(col);
			if (aliasM) result[aliasM[2]] = this._evalExprAgainstRow(aliasM[1].trim(), row);
			else result[col] = row[col];
		}
		return result;
	}
	/** Evaluate a table-less SELECT's expression list into a single row. */
	_evalSelectExprs(exprs) {
		const result = {};
		const parts = this._splitTopLevelCommas(exprs);
		for (const part of parts) {
			const aliasM = /^(.+?)\s+AS\s+(\w+)$/i.exec(part);
			if (aliasM) result[aliasM[2]] = this._evalLiteral(aliasM[1].trim());
			else result[part] = this._evalLiteral(part);
		}
		return result;
	}
	/** Parse a SQL literal: NULL, quoted string, int, float; otherwise the raw text. */
	_evalLiteral(expr) {
		const trimmed = expr.trim();
		if (trimmed.toUpperCase() === "NULL") return null;
		if (/^'.*'$/.test(trimmed)) return trimmed.slice(1, -1).replace(/''/g, "'");
		if (/^-?\d+$/.test(trimmed)) return parseInt(trimmed, 10);
		if (/^-?\d+\.\d+$/.test(trimmed)) return parseFloat(trimmed);
		return trimmed;
	}
	/** Evaluate an expression per-row: literals first, then bare column references. */
	_evalExprAgainstRow(expr, row) {
		const literal = this._evalLiteral(expr);
		if (typeof literal !== "string") return literal;
		if (/^\w+$/.test(expr) && expr in row) return row[expr];
		return literal;
	}
	/**
	 * Split a string by commas that are NOT inside parentheses or quotes.
	 * E.g. "COALESCE(SUM(x), 0) as a, y" → ["COALESCE(SUM(x), 0) as a", "y"]
	 */
	_splitTopLevelCommas(str) {
		const parts = [];
		let current = "";
		let depth = 0;
		let inStr = false;
		for (let i = 0; i < str.length; i++) {
			const ch = str[i];
			if (ch === "'" && !inStr) {
				inStr = true;
				current += ch;
			} else if (ch === "'" && inStr) if (str[i + 1] === "'") {
				current += "''";
				i++;
			} else {
				inStr = false;
				current += ch;
			}
			else if (!inStr && ch === "(") {
				depth++;
				current += ch;
			} else if (!inStr && ch === ")") {
				depth--;
				current += ch;
			} else if (!inStr && ch === "," && depth === 0) {
				parts.push(current.trim());
				current = "";
			} else current += ch;
		}
		if (current.trim() !== "") parts.push(current.trim());
		return parts;
	}
	/**
	 * Evaluate aggregate SELECT expressions (SUM, COALESCE, COUNT) across
	 * a set of filtered rows, returning a single result row.
	 */
	_evalAggregate(colsStr, rows) {
		const result = {};
		const cols = this._splitTopLevelCommas(colsStr);
		for (const col of cols) {
			const aliasM = /^(.+?)\s+AS\s+(\w+)$/i.exec(col);
			const expr = aliasM ? aliasM[1].trim() : col.trim();
			const alias = aliasM ? aliasM[2] : col.trim();
			result[alias] = this._evalAggregateExpr(expr, rows);
		}
		return result;
	}
	/**
	 * Evaluate a single aggregate expression against a set of rows.
	 * Supports: SUM(col), COALESCE(expr, default), COUNT(*), COUNT(col).
	 * Anything else is treated as a literal.
	 */
	_evalAggregateExpr(expr, rows) {
		const trimmed = expr.trim();
		const coalesceM = /^COALESCE\((.+)\)$/i.exec(trimmed);
		if (coalesceM) {
			const args = this._splitTopLevelCommas(coalesceM[1]);
			for (const arg of args) {
				const val = this._evalAggregateExpr(arg.trim(), rows);
				if (val !== null && val !== void 0) return val;
			}
			return null;
		}
		const sumM = /^SUM\((\w+)\)$/i.exec(trimmed);
		if (sumM) {
			const col = sumM[1];
			// SQL semantics: SUM over zero rows is NULL, not 0.
			if (rows.length === 0) return null;
			let total = 0;
			for (const row of rows) total += Number(row[col] ?? 0);
			return total;
		}
		if (/^COUNT\(\*\)$/i.test(trimmed)) return rows.length;
		const countM = /^COUNT\((\w+)\)$/i.exec(trimmed);
		if (countM) {
			const col = countM[1];
			return rows.filter((r) => r[col] !== null && r[col] !== void 0).length;
		}
		return this._evalLiteral(trimmed);
	}
	/**
	 * Parse a comma-separated list of SQL literal values with a quote-aware
	 * scan (commas inside single-quoted strings are preserved).
	 */
	_parseValueList(valStr) {
		const tokens = [];
		let current = "";
		let inStr = false;
		for (let i = 0; i < valStr.length; i++) {
			const ch = valStr[i];
			if (ch === "'" && !inStr) {
				inStr = true;
				current += ch;
			} else if (ch === "'" && inStr) if (valStr[i + 1] === "'") {
				current += "''";
				i++;
			} else {
				inStr = false;
				current += ch;
			}
			else if (ch === "," && !inStr) {
				tokens.push(current.trim());
				current = "";
			} else current += ch;
		}
		if (current.trim() !== "") tokens.push(current.trim());
		return tokens.map((t) => this._evalLiteral(t));
	}
	/**
	 * Parse `col1 = val1, col2 = val2` assignments into [col, val] pairs,
	 * splitting on commas outside of quoted strings.
	 */
	_parseAssignments(setStr) {
		const assignments = [];
		const parts = [];
		let current = "";
		let inStr = false;
		for (let i = 0; i < setStr.length; i++) {
			const ch = setStr[i];
			if (ch === "'" && !inStr) {
				inStr = true;
				current += ch;
			} else if (ch === "'" && inStr) if (setStr[i + 1] === "'") {
				current += "''";
				i++;
			} else {
				inStr = false;
				current += ch;
			}
			else if (ch === "," && !inStr) {
				parts.push(current.trim());
				current = "";
			} else current += ch;
		}
		if (current.trim() !== "") parts.push(current.trim());
		for (const part of parts) {
			const eqIdx = part.indexOf("=");
			if (eqIdx === -1) continue;
			const col = part.slice(0, eqIdx).trim();
			const valStr = part.slice(eqIdx + 1).trim();
			assignments.push([col, this._evalLiteral(valStr)]);
		}
		return assignments;
	}
};

//#endregion
|
|
512
|
+
//#region src/modules/state/errors.ts
/**
 * Typed error classes for the Dolt state store.
 */
var StateStoreError = class extends Error {
	code;
	/**
	 * @param code - Stable, machine-readable error code.
	 * @param message - Human-readable description.
	 */
	constructor(code, message) {
		super(message);
		this.name = "StateStoreError";
		this.code = code;
	}
};
/** Raised when a SQL statement fails against the Dolt backend. */
var DoltQueryError = class extends StateStoreError {
	sql;
	detail;
	constructor(sql, detail) {
		super("DOLT_QUERY_ERROR", `Dolt query failed: ${detail}`);
		this.name = "DoltQueryError";
		this.sql = sql;
		this.detail = detail;
	}
};
/** Raised when a Dolt merge leaves conflicting rows behind. */
var DoltMergeConflictError = class extends StateStoreError {
	table;
	conflictingKeys;
	rowKey;
	ourValue;
	theirValue;
	constructor(table, conflictingKeys, options) {
		super("DOLT_MERGE_CONFLICT", `Merge conflict in table '${table}' on keys: ${conflictingKeys.join(", ")}`);
		this.name = "DoltMergeConflictError";
		this.table = table;
		this.conflictingKeys = conflictingKeys;
		// Optional per-row conflict detail; fields stay undefined when absent.
		const { rowKey, ourValue, theirValue } = options ?? {};
		this.rowKey = rowKey;
		this.ourValue = ourValue;
		this.theirValue = theirValue;
	}
};
/** Alias for DoltMergeConflictError — used by orchestrator branch lifecycle. */
const DoltMergeConflict = DoltMergeConflictError;

//#endregion
|
|
556
|
+
//#region src/modules/state/dolt-client.ts
/**
 * Promise-wrapper around execFile that always resolves to { stdout, stderr }.
 * An explicit wrapper (instead of util.promisify) sidesteps the
 * util.promisify.custom symbol complexity when mocking in tests.
 */
function runExecFile(cmd, args, opts) {
	return new Promise((resolve$2, reject) => {
		execFile(cmd, args, opts, (err, stdout, stderr) => {
			if (err) {
				reject(err);
				return;
			}
			resolve$2({ stdout, stderr });
		});
	});
}
|
573
|
+
const log$1 = createLogger("modules:state:dolt");
/**
 * Client for a local Dolt repository.
 *
 * On connect() it prefers a mysql2 connection pool over the Dolt
 * sql-server unix socket when the socket file exists; otherwise it falls
 * back to shelling out to the `dolt` CLI. All CLI invocations are
 * serialized through a promise-chain mutex because concurrent dolt
 * processes contend on the noms manifest lock.
 */
var DoltClient = class {
	// Filesystem path of the Dolt repository (cwd for all CLI calls).
	repoPath;
	// Unix socket of a running dolt sql-server; defaults to <repoPath>/.dolt/dolt.sock.
	socketPath;
	// mysql2 pool when socket mode is active; null in CLI mode.
	_pool = null;
	// True once connect() has decided to use the CLI fallback.
	_useCliMode = false;
	// Lazily set by connect(); query() connects on first use.
	_connected = false;
	/** Promise-chain mutex that serializes all CLI operations to prevent concurrent noms manifest access */
	_cliMutex = Promise.resolve();
	constructor(options) {
		this.repoPath = options.repoPath;
		this.socketPath = options.socketPath ?? `${options.repoPath}/.dolt/dolt.sock`;
	}
	/**
	 * Choose a transport: pool mode if the sql-server socket is accessible,
	 * CLI mode otherwise. Never throws — any failure selects CLI mode.
	 */
	async connect() {
		try {
			await access(this.socketPath);
			// mysql2 is imported dynamically so CLI-only installs don't need it.
			const mysql = await import("mysql2/promise");
			this._pool = mysql.createPool({
				socketPath: this.socketPath,
				user: "root",
				database: "doltdb",
				waitForConnections: true,
				connectionLimit: 5
			});
			this._useCliMode = false;
			log$1.debug("Connected via unix socket: %s", this.socketPath);
		} catch {
			this._useCliMode = true;
			log$1.debug("Unix socket not available, using CLI fallback for %s", this.repoPath);
		}
		this._connected = true;
	}
	/**
	 * Run a SQL statement, optionally on a named Dolt branch, returning
	 * result rows. Connects lazily on first use, then routes to the pool
	 * or CLI implementation.
	 */
	async query(sql, params, branch) {
		if (!this._connected) await this.connect();
		if (this._useCliMode) return this._queryCli(sql, params, branch);
		return this._queryPool(sql, params, branch);
	}
	/**
	 * Pool-mode query. For a non-main branch, a dedicated connection is
	 * checked out so the `USE` statement scopes the branch to that
	 * connection only; it is always released afterwards. Failures are
	 * wrapped in DoltQueryError.
	 */
	async _queryPool(sql, params, branch) {
		try {
			if (branch !== void 0 && branch !== "main") {
				const conn = await this._pool.getConnection();
				try {
					// Dolt exposes branches as `<db>/<branch>` databases.
					await conn.execute(`USE \`substrate/${branch}\``);
					const [rows$1] = await conn.execute(sql, params);
					return rows$1;
				} finally {
					conn.release();
				}
			}
			const [rows] = await this._pool.execute(sql, params);
			return rows;
		} catch (err) {
			const detail = err instanceof Error ? err.message : String(err);
			throw new DoltQueryError(sql, detail);
		}
	}
	/**
	 * Acquire an exclusive CLI lock. Dolt CLI takes an exclusive lock on the noms
	 * manifest, so concurrent `dolt sql -q` / `dolt <subcommand>` processes
	 * produce "cannot update manifest: database is read only" errors.
	 * Serialize all CLI operations through a single promise chain.
	 */
	_withCliLock(fn) {
		const prev = this._cliMutex;
		let release;
		// The next caller chains behind this promise; release() is called in
		// finally, so the chain advances whether fn resolves or rejects.
		this._cliMutex = new Promise((resolve$2) => {
			release = resolve$2;
		});
		return prev.then(fn).finally(() => release());
	}
	/**
	 * CLI-mode query: inline parameters into the SQL text, optionally
	 * prefix a DOLT_CHECKOUT for branch queries, run `dolt sql -q` with
	 * JSON output, and parse the last stdout line as the row payload.
	 * NOTE(review): the `/\?/g` substitution also matches `?` inside
	 * quoted SQL literals — confirm callers never pass such SQL here.
	 */
	async _queryCli(sql, params, branch) {
		let resolvedSql = sql;
		if (params && params.length > 0) {
			let i = 0;
			resolvedSql = sql.replace(/\?/g, () => {
				const val = params[i++];
				if (val === null || val === void 0) return "NULL";
				if (typeof val === "number") return String(val);
				return `'${String(val).replace(/'/g, "''")}'`;
			});
		}
		const finalSql = resolvedSql;
		return this._withCliLock(async () => {
			try {
				const branchPrefix = branch ? `CALL DOLT_CHECKOUT('${branch.replace(/'/g, "''")}'); ` : "";
				const args = [
					"sql",
					"-q",
					branchPrefix + finalSql,
					"--result-format",
					"json"
				];
				const { stdout } = await runExecFile("dolt", args, { cwd: this.repoPath });
				// Only the last line is the JSON result; earlier lines may be
				// output from the DOLT_CHECKOUT call.
				const lines = (stdout || "").trim().split("\n").filter(Boolean);
				const lastLine = lines.length > 0 ? lines[lines.length - 1] : "{\"rows\":[]}";
				const parsed = JSON.parse(lastLine);
				return parsed.rows ?? [];
			} catch (err) {
				const detail = err instanceof Error ? err.message : String(err);
				throw new DoltQueryError(finalSql, detail);
			}
		});
	}
	/**
	 * Execute a raw Dolt CLI command (e.g. `dolt diff main...story/26-1 --stat`)
	 * and return the stdout as a string.
	 *
	 * This is distinct from `query()` which runs SQL. Use `exec()` for Dolt
	 * sub-commands like `diff`, `log`, `branch`, etc.
	 */
	async exec(command) {
		// Whitespace-split; a leading "dolt" token is stripped for convenience.
		const parts = command.trim().split(/\s+/);
		const cmdArgs = parts[0] === "dolt" ? parts.slice(1) : parts;
		return this.execArgs(cmdArgs);
	}
	/**
	 * Execute a Dolt CLI command with pre-split arguments.
	 *
	 * Use this instead of `exec()` when arguments contain spaces (e.g. commit
	 * messages) to avoid whitespace-splitting issues.
	 */
	async execArgs(args) {
		return this._withCliLock(async () => {
			try {
				const { stdout } = await runExecFile("dolt", args, { cwd: this.repoPath });
				return stdout;
			} catch (err) {
				const detail = err instanceof Error ? err.message : String(err);
				throw new DoltQueryError(args.join(" "), detail);
			}
		});
	}
	/** Drain the pool (if any) and reset to disconnected state. */
	async close() {
		if (this._pool) {
			await this._pool.end();
			this._pool = null;
		}
		this._connected = false;
	}
};
/** Factory for DoltClient; see the class docs for connection behavior. */
function createDoltClient(options) {
	return new DoltClient(options);
}

//#endregion
|
|
718
|
+
//#region src/persistence/adapter.ts
|
|
719
|
+
const logger$2 = createLogger("persistence:adapter");
|
|
720
|
+
/** Type guard: check if a DatabaseAdapter also implements SyncAdapter. */
function isSyncAdapter(adapter) {
	// Presence of a callable querySync marks the synchronous interface.
	const candidate = adapter.querySync;
	return typeof candidate === "function";
}
|
|
724
|
+
/**
 * Synchronously check whether Dolt is installed on PATH and a Dolt repo
 * exists at the canonical state path under `basePath`.
 */
function isDoltAvailable(basePath) {
	// Probe the binary first; a spawn error or nonzero exit means no Dolt.
	const probe = spawnSync("dolt", ["version"], { stdio: "ignore" });
	if (probe.error != null) return false;
	if (probe.status !== 0) return false;
	// Then require the canonical repo layout under the base path.
	return existsSync$1(join$1(basePath, ".substrate", "state", ".dolt"));
}
|
|
734
|
+
/**
|
|
735
|
+
* Create a `DatabaseAdapter` for the specified (or auto-detected) backend.
|
|
736
|
+
*
|
|
737
|
+
* @param config - Optional configuration. Defaults to `{ backend: 'auto' }`.
|
|
738
|
+
* @returns A `DatabaseAdapter` instance ready for use.
|
|
739
|
+
*/
|
|
740
|
+
/**
 * Create a `DatabaseAdapter` for the specified (or auto-detected) backend.
 *
 * "dolt" and "memory" are honored explicitly; anything else (including the
 * default "auto") probes for a usable Dolt installation and falls back to
 * the in-memory adapter.
 *
 * @param config - Optional configuration. Defaults to `{ backend: 'auto' }`.
 * @returns A `DatabaseAdapter` instance ready for use.
 */
function createDatabaseAdapter(config = { backend: "auto" }) {
  const backend = config.backend ?? "auto";
  const basePath = config.basePath ?? process.cwd();
  const doltRepoPath = join$1(basePath, ".substrate", "state");
  switch (backend) {
    case "dolt":
      logger$2.debug("Using DoltDatabaseAdapter (explicit config)");
      return new DoltDatabaseAdapter(new DoltClient({ repoPath: doltRepoPath }));
    case "memory":
      logger$2.debug("Using InMemoryDatabaseAdapter (explicit config)");
      return new InMemoryDatabaseAdapter();
    default:
      if (isDoltAvailable(basePath)) {
        logger$2.debug("Dolt detected, using DoltDatabaseAdapter");
        return new DoltDatabaseAdapter(new DoltClient({ repoPath: doltRepoPath }));
      }
      logger$2.debug("Dolt not available, using InMemoryDatabaseAdapter");
      return new InMemoryDatabaseAdapter();
  }
}
|
|
761
|
+
|
|
762
|
+
//#endregion
|
|
763
|
+
//#region src/persistence/schema.ts
|
|
764
|
+
/**
|
|
765
|
+
* Initialize all persistence tables on the given adapter.
|
|
766
|
+
* Idempotent — safe to call multiple times.
|
|
767
|
+
*/
|
|
768
|
+
async function initSchema(adapter) {
// ---- core orchestration: sessions, tasks, dependencies ----
await adapter.exec(`
CREATE TABLE IF NOT EXISTS sessions (
id VARCHAR(255) PRIMARY KEY,
name TEXT,
graph_file TEXT NOT NULL,
status VARCHAR(32) NOT NULL DEFAULT 'active',
budget_usd DOUBLE,
total_cost_usd DOUBLE NOT NULL DEFAULT 0.0,
planning_cost_usd DOUBLE NOT NULL DEFAULT 0.0,
config_snapshot TEXT,
base_branch TEXT NOT NULL DEFAULT 'main',
plan_source TEXT,
planning_agent TEXT,
planning_costs_count_against_budget INTEGER NOT NULL DEFAULT 0,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec(`
CREATE TABLE IF NOT EXISTS tasks (
id VARCHAR(255) PRIMARY KEY,
session_id VARCHAR(255) NOT NULL,
name TEXT NOT NULL,
description TEXT,
prompt TEXT NOT NULL,
status VARCHAR(32) NOT NULL DEFAULT 'pending',
agent VARCHAR(128),
model TEXT,
billing_mode VARCHAR(32),
worktree_path TEXT,
worktree_branch TEXT,
worktree_cleaned_at TEXT,
worker_id TEXT,
budget_usd DOUBLE,
cost_usd DOUBLE NOT NULL DEFAULT 0.0,
input_tokens INTEGER NOT NULL DEFAULT 0,
output_tokens INTEGER NOT NULL DEFAULT 0,
result TEXT,
error TEXT,
exit_code INTEGER,
retry_count INTEGER NOT NULL DEFAULT 0,
max_retries INTEGER NOT NULL DEFAULT 2,
timeout_ms INTEGER,
task_type TEXT,
metadata TEXT,
merge_status TEXT,
merged_files TEXT,
conflict_files TEXT,
budget_exceeded INTEGER NOT NULL DEFAULT 0,
started_at TEXT,
completed_at TEXT,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_tasks_session ON tasks(session_id)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks(status)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_tasks_agent ON tasks(agent)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_tasks_session_status ON tasks(session_id, status)");
// task_dependencies: DAG edges; CHECK forbids a self-dependency.
await adapter.exec(`
CREATE TABLE IF NOT EXISTS task_dependencies (
task_id VARCHAR(255) NOT NULL,
depends_on VARCHAR(255) NOT NULL,
PRIMARY KEY (task_id, depends_on),
CHECK (task_id != depends_on)
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_deps_depends_on ON task_dependencies(depends_on)");
// ---- audit / accounting: execution_log, cost_entries, session_signals ----
await adapter.exec(`
CREATE TABLE IF NOT EXISTS execution_log (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
session_id VARCHAR(255) NOT NULL,
task_id VARCHAR(255),
event VARCHAR(128) NOT NULL,
old_status VARCHAR(32),
new_status VARCHAR(32),
agent VARCHAR(128),
cost_usd DOUBLE,
data TEXT,
timestamp DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_log_session ON execution_log(session_id)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_log_task ON execution_log(task_id)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_log_event ON execution_log(event)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_log_timestamp ON execution_log(timestamp)");
await adapter.exec(`
CREATE TABLE IF NOT EXISTS cost_entries (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
session_id VARCHAR(255) NOT NULL,
task_id VARCHAR(255),
agent VARCHAR(128) NOT NULL,
billing_mode VARCHAR(32) NOT NULL,
category VARCHAR(64) NOT NULL DEFAULT 'execution',
provider VARCHAR(64) NOT NULL DEFAULT 'unknown',
input_tokens INTEGER NOT NULL DEFAULT 0,
output_tokens INTEGER NOT NULL DEFAULT 0,
estimated_cost DOUBLE NOT NULL DEFAULT 0.0,
actual_cost DOUBLE,
savings_usd DOUBLE NOT NULL DEFAULT 0.0,
model TEXT,
timestamp DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_cost_session ON cost_entries(session_id)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_cost_task ON cost_entries(task_id)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_cost_category ON cost_entries(category)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_cost_entries_session_task ON cost_entries(session_id, task_id)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_cost_entries_provider ON cost_entries(provider)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_cost_session_agent ON cost_entries(session_id, agent)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_cost_agent ON cost_entries(agent)");
// session_signals: `signal` is a reserved-ish identifier, hence the backtick quoting.
await adapter.exec(`
CREATE TABLE IF NOT EXISTS session_signals (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
session_id VARCHAR(255) NOT NULL,
\`signal\` VARCHAR(16) NOT NULL CHECK(\`signal\` IN ('pause', 'resume', 'cancel')),
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
processed_at TEXT
)
`);
// ---- planning: plans and their versioned task graphs ----
await adapter.exec(`
CREATE TABLE IF NOT EXISTS plans (
id VARCHAR(255) PRIMARY KEY,
description TEXT NOT NULL,
task_count INTEGER NOT NULL DEFAULT 0,
estimated_cost_usd DOUBLE NOT NULL DEFAULT 0.0,
planning_agent VARCHAR(128) NOT NULL,
plan_yaml TEXT NOT NULL,
status VARCHAR(32) NOT NULL DEFAULT 'draft',
current_version INTEGER NOT NULL DEFAULT 1,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_plans_status ON plans(status)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_plans_created ON plans(created_at)");
await adapter.exec(`
CREATE TABLE IF NOT EXISTS plan_versions (
plan_id VARCHAR(255) NOT NULL,
version INTEGER NOT NULL,
task_graph_yaml TEXT NOT NULL,
feedback_used TEXT,
planning_cost_usd DOUBLE NOT NULL DEFAULT 0.0,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (plan_id, version)
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_plan_versions_plan_id ON plan_versions(plan_id)");
// ---- pipeline state: runs, decisions, requirements, constraints, artifacts ----
await adapter.exec(`
CREATE TABLE IF NOT EXISTS pipeline_runs (
id VARCHAR(255) PRIMARY KEY,
methodology VARCHAR(128) NOT NULL,
current_phase VARCHAR(64),
status VARCHAR(32) NOT NULL DEFAULT 'running'
CHECK(status IN ('running','paused','completed','failed','stopped')),
config_json TEXT,
token_usage_json TEXT,
parent_run_id VARCHAR(255),
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_pipeline_runs_status ON pipeline_runs(status)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_pipeline_runs_parent_run_id ON pipeline_runs(parent_run_id)");
// decisions: `key` is backtick-quoted (reserved word in MySQL-compatible SQL).
await adapter.exec(`
CREATE TABLE IF NOT EXISTS decisions (
id VARCHAR(255) PRIMARY KEY,
pipeline_run_id VARCHAR(255),
phase VARCHAR(64) NOT NULL,
category VARCHAR(64) NOT NULL,
\`key\` VARCHAR(255) NOT NULL,
value TEXT NOT NULL,
rationale TEXT,
superseded_by VARCHAR(255),
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_decisions_phase ON decisions(phase)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_decisions_key ON decisions(phase, `key`)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_decisions_superseded_by ON decisions(superseded_by)");
await adapter.exec(`
CREATE TABLE IF NOT EXISTS requirements (
id VARCHAR(255) PRIMARY KEY,
pipeline_run_id VARCHAR(255),
source VARCHAR(128) NOT NULL,
type VARCHAR(32) NOT NULL CHECK(type IN ('functional','non_functional','constraint')),
description TEXT NOT NULL,
priority VARCHAR(16) NOT NULL CHECK(priority IN ('must','should','could','wont')),
status VARCHAR(32) NOT NULL DEFAULT 'active',
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_requirements_type ON requirements(type)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_requirements_status ON requirements(status)");
await adapter.exec(`
CREATE TABLE IF NOT EXISTS constraints (
id VARCHAR(255) PRIMARY KEY,
pipeline_run_id VARCHAR(255),
category VARCHAR(64) NOT NULL,
description TEXT NOT NULL,
source VARCHAR(128) NOT NULL,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec(`
CREATE TABLE IF NOT EXISTS artifacts (
id VARCHAR(255) PRIMARY KEY,
pipeline_run_id VARCHAR(255),
phase VARCHAR(64) NOT NULL,
type VARCHAR(128) NOT NULL,
path TEXT NOT NULL,
content_hash TEXT,
summary TEXT,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_artifacts_phase ON artifacts(phase)");
// ---- metrics: token usage, run/story/task metrics, aggregates ----
await adapter.exec(`
CREATE TABLE IF NOT EXISTS token_usage (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
pipeline_run_id VARCHAR(255),
phase VARCHAR(64) NOT NULL,
agent VARCHAR(128) NOT NULL,
input_tokens INTEGER NOT NULL DEFAULT 0,
output_tokens INTEGER NOT NULL DEFAULT 0,
cost_usd DOUBLE NOT NULL DEFAULT 0.0,
metadata TEXT,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_token_usage_run ON token_usage(pipeline_run_id)");
await adapter.exec(`
CREATE TABLE IF NOT EXISTS run_metrics (
run_id VARCHAR(255) PRIMARY KEY,
methodology VARCHAR(128) NOT NULL,
status VARCHAR(32) NOT NULL DEFAULT 'running',
started_at TEXT NOT NULL,
completed_at TEXT,
wall_clock_seconds DOUBLE DEFAULT 0,
total_input_tokens INTEGER DEFAULT 0,
total_output_tokens INTEGER DEFAULT 0,
total_cost_usd DOUBLE DEFAULT 0,
stories_attempted INTEGER DEFAULT 0,
stories_succeeded INTEGER DEFAULT 0,
stories_failed INTEGER DEFAULT 0,
stories_escalated INTEGER DEFAULT 0,
total_review_cycles INTEGER DEFAULT 0,
total_dispatches INTEGER DEFAULT 0,
concurrency_setting INTEGER DEFAULT 1,
max_concurrent_actual INTEGER DEFAULT 1,
restarts INTEGER DEFAULT 0,
is_baseline INTEGER DEFAULT 0,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
await adapter.exec(`
CREATE TABLE IF NOT EXISTS story_metrics (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
run_id VARCHAR(255) NOT NULL,
story_key VARCHAR(255) NOT NULL,
result VARCHAR(32) NOT NULL DEFAULT 'pending',
phase_durations_json TEXT,
started_at TEXT,
completed_at TEXT,
wall_clock_seconds DOUBLE DEFAULT 0,
input_tokens INTEGER DEFAULT 0,
output_tokens INTEGER DEFAULT 0,
cost_usd DOUBLE DEFAULT 0,
review_cycles INTEGER DEFAULT 0,
dispatches INTEGER DEFAULT 0,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
UNIQUE(run_id, story_key)
)
`);
await adapter.exec(`
CREATE TABLE IF NOT EXISTS task_metrics (
task_id VARCHAR(255) NOT NULL,
agent VARCHAR(128) NOT NULL,
task_type VARCHAR(128) NOT NULL,
outcome VARCHAR(16) NOT NULL CHECK(outcome IN ('success', 'failure')),
failure_reason TEXT,
input_tokens INTEGER NOT NULL DEFAULT 0,
output_tokens INTEGER NOT NULL DEFAULT 0,
duration_ms INTEGER NOT NULL DEFAULT 0,
cost DOUBLE NOT NULL DEFAULT 0.0,
estimated_cost DOUBLE NOT NULL DEFAULT 0.0,
billing_mode VARCHAR(32) NOT NULL DEFAULT 'api',
retries INTEGER NOT NULL DEFAULT 0,
recorded_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (task_id, recorded_at)
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_tm_agent ON task_metrics(agent)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_tm_task_type ON task_metrics(task_type)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_tm_recorded_at ON task_metrics(recorded_at)");
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_tm_agent_type ON task_metrics(agent, task_type)");
await adapter.exec(`
CREATE TABLE IF NOT EXISTS performance_aggregates (
agent VARCHAR(255) NOT NULL,
task_type VARCHAR(255) NOT NULL,
total_tasks INTEGER NOT NULL DEFAULT 0,
successful_tasks INTEGER NOT NULL DEFAULT 0,
failed_tasks INTEGER NOT NULL DEFAULT 0,
total_input_tokens INTEGER NOT NULL DEFAULT 0,
total_output_tokens INTEGER NOT NULL DEFAULT 0,
total_duration_ms INTEGER NOT NULL DEFAULT 0,
total_cost DOUBLE NOT NULL DEFAULT 0.0,
total_retries INTEGER NOT NULL DEFAULT 0,
last_updated DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (agent, task_type)
)
`);
await adapter.exec(`
CREATE TABLE IF NOT EXISTS routing_recommendations (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
task_type VARCHAR(128) NOT NULL,
current_agent VARCHAR(128) NOT NULL,
recommended_agent VARCHAR(128) NOT NULL,
reason TEXT,
confidence DOUBLE NOT NULL DEFAULT 0.0,
supporting_data TEXT,
generated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
expires_at TEXT
)
`);
// ---- analysis tables; ALTER loops below backfill columns added after v1
// (errors are swallowed because the column usually already exists) ----
await adapter.exec(`
CREATE TABLE IF NOT EXISTS turn_analysis (
story_key VARCHAR(64) NOT NULL,
span_id VARCHAR(128) NOT NULL,
turn_number INTEGER NOT NULL,
name VARCHAR(255) NOT NULL DEFAULT '',
timestamp BIGINT NOT NULL DEFAULT 0,
source VARCHAR(32) NOT NULL DEFAULT '',
model VARCHAR(64),
input_tokens INTEGER NOT NULL DEFAULT 0,
output_tokens INTEGER NOT NULL DEFAULT 0,
cache_read_tokens INTEGER NOT NULL DEFAULT 0,
fresh_tokens INTEGER NOT NULL DEFAULT 0,
cache_hit_rate DOUBLE NOT NULL DEFAULT 0,
cost_usd DOUBLE NOT NULL DEFAULT 0,
duration_ms INTEGER NOT NULL DEFAULT 0,
context_size INTEGER NOT NULL DEFAULT 0,
context_delta INTEGER NOT NULL DEFAULT 0,
tool_name VARCHAR(128),
is_context_spike BOOLEAN NOT NULL DEFAULT 0,
child_spans_json TEXT NOT NULL DEFAULT '[]',
task_type VARCHAR(64),
phase VARCHAR(64),
dispatch_id VARCHAR(64),
PRIMARY KEY (story_key, span_id)
)
`);
await adapter.exec("CREATE INDEX IF NOT EXISTS idx_turn_analysis_story ON turn_analysis (story_key, turn_number)");
for (const col of [
"task_type",
"phase",
"dispatch_id"
]) try {
await adapter.exec(`ALTER TABLE turn_analysis ADD COLUMN ${col} VARCHAR(64)`);
} catch {}
await adapter.exec(`
CREATE TABLE IF NOT EXISTS efficiency_scores (
story_key VARCHAR(64) NOT NULL,
timestamp BIGINT NOT NULL,
composite_score INTEGER NOT NULL DEFAULT 0,
cache_hit_sub_score DOUBLE NOT NULL DEFAULT 0,
io_ratio_sub_score DOUBLE NOT NULL DEFAULT 0,
context_management_sub_score DOUBLE NOT NULL DEFAULT 0,
avg_cache_hit_rate DOUBLE NOT NULL DEFAULT 0,
avg_io_ratio DOUBLE NOT NULL DEFAULT 0,
context_spike_count INTEGER NOT NULL DEFAULT 0,
total_turns INTEGER NOT NULL DEFAULT 0,
per_model_json TEXT NOT NULL DEFAULT '[]',
per_source_json TEXT NOT NULL DEFAULT '[]',
dispatch_id TEXT,
task_type TEXT,
phase TEXT,
PRIMARY KEY (story_key, timestamp)
)
`);
for (const col of [
"dispatch_id",
"task_type",
"phase"
]) try {
await adapter.exec(`ALTER TABLE efficiency_scores ADD COLUMN ${col} TEXT`);
} catch {}
await adapter.exec(`
CREATE TABLE IF NOT EXISTS recommendations (
id VARCHAR(16) NOT NULL,
story_key VARCHAR(64) NOT NULL,
sprint_id VARCHAR(64),
rule_id VARCHAR(64) NOT NULL,
severity VARCHAR(16) NOT NULL,
title TEXT NOT NULL,
description TEXT NOT NULL,
potential_savings_tokens INTEGER,
potential_savings_usd DOUBLE,
action_target TEXT,
generated_at VARCHAR(32) NOT NULL,
PRIMARY KEY (id)
)
`);
await adapter.exec(`
CREATE TABLE IF NOT EXISTS category_stats (
story_key VARCHAR(100) NOT NULL,
category VARCHAR(30) NOT NULL,
total_tokens BIGINT NOT NULL DEFAULT 0,
percentage DECIMAL(6,3) NOT NULL DEFAULT 0,
event_count INTEGER NOT NULL DEFAULT 0,
avg_tokens_per_event DECIMAL(12,2) NOT NULL DEFAULT 0,
trend VARCHAR(10) NOT NULL DEFAULT 'stable',
PRIMARY KEY (story_key, category)
)
`);
await adapter.exec(`
CREATE TABLE IF NOT EXISTS consumer_stats (
story_key VARCHAR(100) NOT NULL,
consumer_key VARCHAR(300) NOT NULL,
category VARCHAR(30) NOT NULL,
total_tokens BIGINT NOT NULL DEFAULT 0,
percentage DECIMAL(6,3) NOT NULL DEFAULT 0,
event_count INTEGER NOT NULL DEFAULT 0,
top_invocations_json TEXT,
PRIMARY KEY (story_key, consumer_key)
)
`);
// ---- convenience views ----
// ready_tasks: pending tasks whose every dependency is completed/cancelled.
await adapter.exec(`
CREATE VIEW IF NOT EXISTS ready_tasks AS
SELECT t.* FROM tasks t
WHERE t.status = 'pending'
AND NOT EXISTS (
SELECT 1 FROM task_dependencies td
JOIN tasks dep ON dep.id = td.depends_on
WHERE td.task_id = t.id
AND dep.status NOT IN ('completed', 'cancelled')
)
`);
await adapter.exec(`
CREATE VIEW IF NOT EXISTS session_cost_summary AS
SELECT
s.id AS session_id,
s.name AS session_name,
COUNT(DISTINCT t.id) AS total_tasks,
SUM(CASE WHEN t.status = 'completed' THEN 1 ELSE 0 END) AS completed_tasks,
SUM(CASE WHEN t.status = 'failed' THEN 1 ELSE 0 END) AS failed_tasks,
SUM(CASE WHEN t.status = 'running' THEN 1 ELSE 0 END) AS running_tasks,
COALESCE(SUM(t.cost_usd), 0) AS total_cost_usd,
SUM(CASE WHEN t.billing_mode = 'subscription' THEN t.cost_usd ELSE 0 END) AS subscription_cost_usd,
SUM(CASE WHEN t.billing_mode = 'api' THEN t.cost_usd ELSE 0 END) AS api_cost_usd,
s.planning_cost_usd
FROM sessions s
LEFT JOIN tasks t ON t.session_id = s.id
GROUP BY s.id
`);
// ---- migration bookkeeping ----
await adapter.exec(`
CREATE TABLE IF NOT EXISTS schema_migrations (
version INTEGER PRIMARY KEY,
name TEXT NOT NULL,
applied_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
)
`);
}
|
|
1233
|
+
|
|
1234
|
+
//#endregion
|
|
1235
|
+
//#region src/modules/state/dolt-init.ts
|
|
1236
|
+
/**
|
|
1237
|
+
* Thrown when the `dolt` binary cannot be found in PATH.
|
|
1238
|
+
*/
|
|
1239
|
+
/**
 * Thrown when the `dolt` binary cannot be found in PATH.
 */
var DoltNotInstalled = class extends Error {
  constructor() {
    const installHint = "Dolt CLI not found in PATH. Install Dolt from https://docs.dolthub.com/introduction/installation";
    super(installHint);
    this.name = "DoltNotInstalled";
  }
};
|
|
1245
|
+
/**
|
|
1246
|
+
* Thrown when a Dolt CLI command exits with a non-zero status code.
|
|
1247
|
+
*/
|
|
1248
|
+
/**
 * Thrown when a Dolt CLI command exits with a non-zero status code.
 * The message embeds the full command line, the exit code, and (when
 * present) the captured stderr text.
 */
var DoltInitError = class extends Error {
  constructor(args, exitCode, stderr) {
    const command = `dolt ${args.join(" ")}`;
    const suffix = stderr ? `: ${stderr}` : "";
    super(`Dolt command "${command}" failed with exit code ${exitCode}${suffix}`);
    this.name = "DoltInitError";
  }
};
|
|
1254
|
+
/**
|
|
1255
|
+
* Verify that the `dolt` binary is installed and accessible.
|
|
1256
|
+
*
|
|
1257
|
+
* @throws {DoltNotInstalled} If the binary is not found in PATH.
|
|
1258
|
+
*/
|
|
1259
|
+
/**
 * Verify that the `dolt` binary is installed and accessible.
 *
 * Only a spawn failure with code ENOENT (binary not on PATH) is treated as
 * "not installed"; any other spawn error is propagated unchanged.
 *
 * Fix: the previous `close` handler branched on the exit code but both
 * branches called resolve(), i.e. the exit code was already ignored. The
 * dead branch is collapsed into a single resolve so the actual behavior
 * (any clean process exit counts as "installed") is now explicit.
 *
 * @throws {DoltNotInstalled} If the binary is not found in PATH.
 */
async function checkDoltInstalled() {
  return new Promise((resolve$2, reject) => {
    let child;
    try {
      child = spawn("dolt", ["version"], { stdio: "ignore" });
    } catch (err) {
      const nodeErr = err;
      if (nodeErr.code === "ENOENT") reject(new DoltNotInstalled());
      else reject(err);
      return;
    }
    child.on("error", (err) => {
      if (err.code === "ENOENT") reject(new DoltNotInstalled());
      else reject(err);
    });
    // Exit code intentionally ignored: the process spawning at all proves
    // the binary exists, which is all this check asserts.
    child.on("close", () => resolve$2());
  });
}
|
|
1280
|
+
/**
|
|
1281
|
+
* Run a Dolt CLI command in the given working directory.
|
|
1282
|
+
*
|
|
1283
|
+
* @param args - Arguments to pass to `dolt` (e.g. `['init']`).
|
|
1284
|
+
* @param cwd - Working directory for the command.
|
|
1285
|
+
* @throws {DoltInitError} If the command exits with a non-zero code.
|
|
1286
|
+
*/
|
|
1287
|
+
/**
 * Run a Dolt CLI command in the given working directory.
 *
 * Stdout is discarded; stderr is buffered so that a failure message can be
 * attached to the thrown error.
 *
 * @param args - Arguments to pass to `dolt` (e.g. `['init']`).
 * @param cwd - Working directory for the command.
 * @throws {DoltInitError} If the command exits with a non-zero code.
 */
async function runDoltCommand(args, cwd) {
  return new Promise((resolve$2, reject) => {
    const errOutput = [];
    const child = spawn("dolt", args, {
      cwd,
      stdio: ["ignore", "ignore", "pipe"]
    });
    child.stderr?.on("data", (chunk) => errOutput.push(chunk));
    child.on("error", reject);
    child.on("close", (code) => {
      if (code !== 0) {
        const stderr = Buffer.concat(errOutput).toString("utf8").trim();
        reject(new DoltInitError(args, code ?? -1, stderr));
        return;
      }
      resolve$2();
    });
  });
}
|
|
1313
|
+
/**
|
|
1314
|
+
* Ensure that Dolt has a global user identity configured.
|
|
1315
|
+
* `dolt init` and `dolt commit` fail with "empty ident name not allowed"
|
|
1316
|
+
* when no identity exists. This function checks for an existing identity
|
|
1317
|
+
* and configures a default one if absent.
|
|
1318
|
+
*/
|
|
1319
|
+
/**
 * Ensure that Dolt has a global user identity configured.
 *
 * `dolt init` and `dolt commit` fail with "empty ident name not allowed"
 * when no identity exists, so a default identity is written if absent.
 */
async function ensureDoltIdentity() {
  // Nothing to do when a global user.name is already configured.
  if (await doltConfigGet("user.name")) return;
  await runDoltConfigSet("user.name", "substrate");
  await runDoltConfigSet("user.email", "substrate@localhost");
}
|
|
1325
|
+
/**
|
|
1326
|
+
* Check if a Dolt global config key has a value set.
|
|
1327
|
+
*/
|
|
1328
|
+
/**
 * Check if a Dolt global config key has a value set.
 *
 * Never rejects: spawn failures (e.g. dolt missing entirely) are reported
 * as `false`, the same as an unset key.
 */
async function doltConfigGet(key) {
  return new Promise((resolve$2) => {
    const child = spawn(
      "dolt",
      ["config", "--global", "--get", key],
      { stdio: ["ignore", "ignore", "ignore"] }
    );
    child.on("error", () => resolve$2(false));
    child.on("close", (code) => resolve$2(code === 0));
  });
}
|
|
1344
|
+
/**
|
|
1345
|
+
* Set a Dolt global config value.
|
|
1346
|
+
*/
|
|
1347
|
+
/**
 * Set a Dolt global config value via `dolt config --global --add`.
 *
 * @throws {DoltInitError} If the config command exits non-zero; stderr is
 *   captured and included in the error message.
 */
async function runDoltConfigSet(key, value) {
  const cliArgs = ["config", "--global", "--add", key, value];
  return new Promise((resolve$2, reject) => {
    const child = spawn("dolt", cliArgs, { stdio: ["ignore", "ignore", "pipe"] });
    const errOutput = [];
    child.stderr?.on("data", (chunk) => errOutput.push(chunk));
    child.on("error", reject);
    child.on("close", (code) => {
      if (code === 0) {
        resolve$2();
        return;
      }
      const stderr = Buffer.concat(errOutput).toString("utf8").trim();
      reject(new DoltInitError(cliArgs, code ?? -1, stderr));
    });
  });
}
|
|
1378
|
+
/**
|
|
1379
|
+
* Initialize a Dolt repository for Substrate state storage.
|
|
1380
|
+
*
|
|
1381
|
+
* This function is idempotent: running it a second time on an already-
|
|
1382
|
+
* initialized repository is safe — `dolt init` is skipped, existing tables
|
|
1383
|
+
* are not re-created (IF NOT EXISTS guards), and the schema version row is
|
|
1384
|
+
* not duplicated (INSERT IGNORE).
|
|
1385
|
+
*
|
|
1386
|
+
* @param config - Initialization configuration.
|
|
1387
|
+
* @throws {DoltNotInstalled} If the `dolt` binary is not in PATH.
|
|
1388
|
+
* @throws {DoltInitError} If any Dolt CLI command fails.
|
|
1389
|
+
*/
|
|
1390
|
+
/**
 * Initialize a Dolt repository for Substrate state storage.
 *
 * This function is idempotent: running it a second time on an already-
 * initialized repository is safe — `dolt init` is skipped, existing tables
 * are not re-created (IF NOT EXISTS guards), and the schema version row is
 * not duplicated (INSERT IGNORE).
 *
 * Fix: the commit-existence probe previously spawned `dolt log --oneline`
 * twice — once via runDoltCommand (result discarded) and again inside
 * doltLogHasCommits. Since doltLogHasCommits never rejects and reports
 * `false` on any failure, a single call is sufficient and the surrounding
 * try/catch was dead; the redundant spawn is removed.
 *
 * @param config - Initialization configuration.
 * @throws {DoltNotInstalled} If the `dolt` binary is not in PATH.
 * @throws {DoltInitError} If any Dolt CLI command fails.
 */
async function initializeDolt(config) {
  const statePath = config.statePath ?? join$1(config.projectRoot, ".substrate", "state");
  const schemaPath = config.schemaPath ?? fileURLToPath(new URL("./schema.sql", import.meta.url));
  await checkDoltInstalled();
  await mkdir(statePath, { recursive: true });
  await ensureDoltIdentity();
  // `dolt init` fails on an already-initialized repo, so guard on .dolt/.
  const doltDir = join$1(statePath, ".dolt");
  let doltDirExists = false;
  try {
    await access(doltDir);
    doltDirExists = true;
  } catch {
    doltDirExists = false;
  }
  if (!doltDirExists) await runDoltCommand(["init"], statePath);
  // Apply the DDL; all statements are IF NOT EXISTS / INSERT IGNORE guarded.
  await runDoltCommand(["sql", "-f", schemaPath], statePath);
  // Single probe: doltLogHasCommits resolves false on any error.
  const hasCommits = await doltLogHasCommits(statePath);
  if (!hasCommits) {
    await runDoltCommand(["add", "-A"], statePath);
    await runDoltCommand(["commit", "-m", "Initialize substrate state schema v1"], statePath);
  }
}
|
|
1426
|
+
/**
|
|
1427
|
+
* Returns `true` if there is at least one commit in the Dolt repo.
|
|
1428
|
+
*/
|
|
1429
|
+
/**
 * Returns `true` if there is at least one commit in the Dolt repo.
 *
 * Never rejects: spawn errors and non-zero exits both resolve to `false`.
 */
async function doltLogHasCommits(cwd) {
  return new Promise((resolve$2) => {
    const outChunks = [];
    const child = spawn("dolt", ["log", "--oneline"], {
      cwd,
      stdio: ["ignore", "pipe", "ignore"]
    });
    child.stdout?.on("data", (chunk) => outChunks.push(chunk));
    child.on("error", () => resolve$2(false));
    child.on("close", (code) => {
      if (code !== 0) {
        resolve$2(false);
        return;
      }
      const text = Buffer.concat(outChunks).toString("utf8").trim();
      resolve$2(text.length > 0);
    });
  });
}
|
|
1454
|
+
|
|
1455
|
+
//#endregion
|
|
1456
|
+
//#region src/modules/stop-after/types.ts
/**
 * Stop-After Gate Module — Types
 *
 * Canonical pipeline phase names, in execution order. This array is the
 * single source of truth for all phase lists; auto.ts imports from here.
 */
const VALID_PHASES = ["research", "analysis", "planning", "solutioning", "implementation"];
/**
 * Backward-compatible alias for VALID_PHASES (same array object, not a copy),
 * retained so existing imports keep working.
 * @deprecated Use VALID_PHASES directly.
 */
const STOP_AFTER_VALID_PHASES = VALID_PHASES;
|
|
1476
|
+
|
|
1477
|
+
//#endregion
|
|
1478
|
+
//#region src/cli/commands/pipeline-shared.ts
|
|
1479
|
+
/**
 * Parse a DB timestamp string to a Date, always interpreting it as UTC.
 *
 * SQLite stores timestamps as "YYYY-MM-DD HH:MM:SS" with no timezone suffix,
 * and JavaScript's Date constructor treats suffix-less strings as *local*
 * time — which skews staleness/duration math on non-UTC machines. Strings
 * that already carry a zone ("Z" or "+HH:MM"/"-HH:MM") are parsed verbatim;
 * anything else is normalized to ISO form and pinned to UTC with "Z".
 *
 * @param {string} ts - Timestamp string from the database.
 * @returns {Date} Date parsed in UTC.
 */
function parseDbTimestampAsUtc(ts) {
	const hasExplicitZone = ts.endsWith("Z") || /[+-]\d{2}:\d{2}$/.test(ts);
	if (hasExplicitZone) return new Date(ts);
	const isoUtc = ts.replace(" ", "T") + "Z";
	return new Date(isoUtc);
}
|
|
1494
|
+
// ESM has no __filename/__dirname globals; reconstruct them from
// import.meta.url so the path resolution below can use plain directory
// strings (PACKAGE_ROOT, bmad-method lookups).
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
|
|
1496
|
+
/**
 * Find the package root by walking up from `startDir` until a directory
 * containing package.json is found.
 *
 * Works regardless of build output structure (tsdown bundles into
 * dist/cli/index.js, not dist/cli/commands/auto.js). The walk stops at the
 * filesystem root (where dirname(dir) === dir); if no package.json was seen
 * on the way up, the original `startDir` is returned unchanged.
 *
 * @param {string} startDir - Directory to start the upward search from.
 * @returns {string} Directory containing package.json, or `startDir`.
 */
function findPackageRoot(startDir) {
	for (let current = startDir; current !== dirname(current); current = dirname(current)) {
		if (existsSync(join(current, "package.json"))) return current;
	}
	return startDir;
}
|
|
1509
|
+
// Package root assuming the fixed dist/cli/index.js bundle layout (three
// levels up from this file's directory). NOTE(review): findPackageRoot() is
// defined above but not used here — presumably the layout is guaranteed by
// the build; confirm if the bundle structure ever changes.
const PACKAGE_ROOT = join(__dirname, "..", "..", "..");
|
|
1510
|
+
/**
 * Resolve the absolute path to the bmad-method package's src/ directory.
 *
 * Builds a CommonJS require anchored at `fromDir` (via a synthetic module
 * path) so resolution works in ESM without import.meta.resolve polyfills.
 *
 * @param {string} [fromDir] - Directory to anchor module resolution at.
 * @returns {string|null} Absolute path to bmad-method/src, or null when
 * bmad-method is not resolvable from `fromDir`.
 */
function resolveBmadMethodSrcPath(fromDir = __dirname) {
	try {
		const localRequire = createRequire$1(join(fromDir, "synthetic.js"));
		const manifestPath = localRequire.resolve("bmad-method/package.json");
		return join(dirname(manifestPath), "src");
	} catch {
		return null;
	}
}
|
|
1524
|
+
/**
 * Read the version field from bmad-method's package.json.
 *
 * Resolution is anchored at `fromDir` through a synthetic CommonJS require,
 * mirroring resolveBmadMethodSrcPath.
 *
 * @param {string} [fromDir] - Directory to anchor module resolution at.
 * @returns {string} The package version, or 'unknown' when bmad-method is
 * not resolvable or has no version field.
 */
function resolveBmadMethodVersion(fromDir = __dirname) {
	try {
		const localRequire = createRequire$1(join(fromDir, "synthetic.js"));
		const manifestPath = localRequire.resolve("bmad-method/package.json");
		const manifest = localRequire(manifestPath);
		return manifest.version ?? "unknown";
	} catch {
		return "unknown";
	}
}
|
|
1538
|
+
/** BMAD baseline token total for full pipeline comparison (analysis+planning+solutioning+implementation). */
const BMAD_BASELINE_TOKENS_FULL = 56800;
/** BMAD baseline token total for the create+dev+review comparison. */
const BMAD_BASELINE_TOKENS = 23800;
/**
 * Accepted story key shape — one alphanumeric segment, optionally followed by
 * a dash and a second alphanumeric segment: e.g. "10-1", "1-1a", "NEW-26", "E6".
 */
const STORY_KEY_PATTERN$1 = /^[A-Za-z0-9]+(-[A-Za-z0-9]+)?$/;
/**
 * Top-level keys in .claude/settings.json that substrate owns. These are
 * set/updated unconditionally on init; user-defined keys outside this set
 * are never touched.
 */
const SUBSTRATE_OWNED_SETTINGS_KEYS = ["statusLine"];
|
|
1550
|
+
/**
 * Build the substrate-owned default settings object for .claude/settings.json.
 *
 * Currently covers only the `statusLine` key (see
 * SUBSTRATE_OWNED_SETTINGS_KEYS): a command-type status line that shells out
 * to the project-local statusline.sh script.
 *
 * @returns {{statusLine: {type: string, command: string, padding: number}}}
 * Fresh settings object (safe for callers to mutate).
 */
function getSubstrateDefaultSettings() {
	const statusLine = {
		type: "command",
		command: "bash \"$CLAUDE_PROJECT_DIR\"/.claude/statusline.sh",
		padding: 0
	};
	return { statusLine };
}
|
|
1557
|
+
/**
 * Format output according to the requested format.
 *
 * - "json": returns `{"success":true,"data":…}` on success, or
 *   `{"success":false,"error":…}` (defaulting to "Unknown error") on failure.
 * - anything else: strings pass through untouched; other values are
 *   pretty-printed JSON (2-space indent).
 *
 * @param {*} data - Payload to render.
 * @param {string} format - Output format selector ("json" or human).
 * @param {boolean} [success=true] - Whether the operation succeeded.
 * @param {string} [errorMessage] - Error text used when success is false.
 * @returns {string} Rendered output.
 */
function formatOutput(data, format, success = true, errorMessage) {
	if (format !== "json") {
		return typeof data === "string" ? data : JSON.stringify(data, null, 2);
	}
	const payload = success ? {
		success: true,
		data
	} : {
		success: false,
		error: errorMessage ?? "Unknown error"
	};
	return JSON.stringify(payload);
}
|
|
1574
|
+
/**
 * Build a human-readable token telemetry display from summary rows.
 *
 * Emits one line per (phase, agent) row, a total line, and a comparison
 * against the BMAD baseline: a non-negative delta renders as "Savings: N%",
 * a negative one as "Overhead: +N%".
 *
 * @param {Array<{phase:string, agent:string, total_input_tokens:number, total_output_tokens:number, total_cost_usd:number}>} summary
 * @param {number} [baselineTokens=BMAD_BASELINE_TOKENS] - Baseline for the savings comparison.
 * @returns {string} Multi-line report, or "No token usage recorded." when empty.
 */
function formatTokenTelemetry(summary, baselineTokens = BMAD_BASELINE_TOKENS) {
	if (summary.length === 0) return "No token usage recorded.";
	let inputSum = 0;
	let outputSum = 0;
	let costSum = 0;
	const report = ["Pipeline Token Usage:"];
	for (const row of summary) {
		inputSum += row.total_input_tokens;
		outputSum += row.total_output_tokens;
		costSum += row.total_cost_usd;
		const rowCost = `$${row.total_cost_usd.toFixed(4)}`;
		report.push(`  ${row.phase} (${row.agent}): ${row.total_input_tokens.toLocaleString()} input / ${row.total_output_tokens.toLocaleString()} output (${rowCost})`);
	}
	report.push("  " + "─".repeat(55));
	report.push(`  Total: ${inputSum.toLocaleString()} input / ${outputSum.toLocaleString()} output ($${costSum.toFixed(4)})`);
	const combined = inputSum + outputSum;
	const pct = baselineTokens > 0 ? Math.round((baselineTokens - combined) / baselineTokens * 100) : 0;
	const verdict = pct >= 0 ? `Savings: ${pct}%` : `Overhead: +${Math.abs(pct)}%`;
	report.push(`  BMAD Baseline: ${baselineTokens.toLocaleString()} tokens → ${verdict}`);
	return report.join("\n");
}
|
|
1599
|
+
/**
 * Validate that a story key matches the accepted <epic>-<story> shape
 * (e.g. "10-1", "1-1a", "NEW-26") or a bare alphanumeric key (e.g. "E6").
 *
 * @param {string} key - Candidate story key.
 * @returns {boolean} true when the key matches STORY_KEY_PATTERN$1.
 */
function validateStoryKey(key) {
	return STORY_KEY_PATTERN$1.test(key);
}
|
|
1605
|
+
/**
 * Build the AC5 JSON status schema for a pipeline run.
 *
 * @param run - Pipeline run row; reads id, current_phase, config_json
 *   (for phaseHistory), token_usage_json (for per-story sprint state),
 *   and updated_at.
 * @param tokenSummary - Per-(phase, agent) token rows with
 *   total_input_tokens, total_output_tokens, total_cost_usd.
 * @param decisionsCount - Number of decisions recorded for the run.
 * @param storiesCount - Fallback story count used only when no story state
 *   is present in token_usage_json.
 * @returns Plain object with per-phase status, token totals, and (when
 *   available) a per-story sprint summary under `stories`.
 */
function buildPipelineStatusOutput(run, tokenSummary, decisionsCount, storiesCount) {
	const phases = {};
	// Aggregate token usage per phase; a phase may have rows for several agents.
	const phaseTokenMap = {};
	for (const row of tokenSummary) {
		if (!phaseTokenMap[row.phase]) phaseTokenMap[row.phase] = {
			input: 0,
			output: 0
		};
		phaseTokenMap[row.phase].input += row.total_input_tokens;
		phaseTokenMap[row.phase].output += row.total_output_tokens;
	}
	// phaseHistory comes from config_json; malformed JSON is treated as empty.
	let phaseHistory = [];
	try {
		if (run.config_json) {
			const config = JSON.parse(run.config_json);
			phaseHistory = config.phaseHistory ?? [];
		}
	} catch {}
	const currentPhase = run.current_phase ?? null;
	// Classify every canonical phase as complete / running / pending.
	for (const phaseName of VALID_PHASES) {
		const historyEntry = phaseHistory.find((h) => h.phase === phaseName);
		const tokenUsage = phaseTokenMap[phaseName] ?? {
			input: 0,
			output: 0
		};
		if (historyEntry?.completedAt) {
			// Completed phase; started_at is only attached when recorded.
			phases[phaseName] = {
				status: "complete",
				completed_at: historyEntry.completedAt,
				token_usage: tokenUsage
			};
			if (historyEntry.startedAt) phases[phaseName].started_at = historyEntry.startedAt;
		} else if (phaseName === currentPhase || historyEntry?.startedAt) phases[phaseName] = {
			// Running: either the run's current phase, or started but not completed.
			status: "running",
			started_at: historyEntry?.startedAt,
			token_usage: tokenUsage
		};
		else phases[phaseName] = { status: "pending" };
	}
	// Run-wide token totals across all summary rows.
	let totalInput = 0;
	let totalOutput = 0;
	let totalCost = 0;
	for (const row of tokenSummary) {
		totalInput += row.total_input_tokens;
		totalOutput += row.total_output_tokens;
		totalCost += row.total_cost_usd;
	}
	let activeDispatches = 0;
	let storiesSummary;
	// Per-story sprint state lives (despite the column name) in
	// token_usage_json; malformed JSON leaves storiesSummary undefined.
	try {
		if (run.token_usage_json) {
			const state = JSON.parse(run.token_usage_json);
			if (state.stories && Object.keys(state.stories).length > 0) {
				const now = Date.now();
				let completed = 0;
				let inProgress = 0;
				let escalated = 0;
				let pending = 0;
				const details = {};
				for (const [key, s] of Object.entries(state.stories)) {
					const phase = s.phase ?? "PENDING";
					// Any non-terminal, non-pending phase counts as an active dispatch.
					if (phase !== "PENDING" && phase !== "COMPLETE" && phase !== "ESCALATED") activeDispatches++;
					if (phase === "COMPLETE") completed++;
					else if (phase === "ESCALATED") escalated++;
					else if (phase === "PENDING") pending++;
					else inProgress++;
					// Elapsed seconds since dispatch, clamped to 0 for clock skew.
					const elapsed = s.startedAt != null ? Math.max(0, Math.round((now - new Date(s.startedAt).getTime()) / 1e3)) : 0;
					details[key] = {
						phase,
						review_cycles: s.reviewCycles ?? 0,
						elapsed_seconds: elapsed
					};
				}
				storiesSummary = {
					completed,
					in_progress: inProgress,
					escalated,
					pending,
					details
				};
			}
		}
	} catch {}
	// Prefer counts derived from sprint state over the caller-provided fallback.
	const derivedStoriesCount = storiesSummary !== void 0 ? storiesSummary.completed + storiesSummary.in_progress + storiesSummary.escalated + storiesSummary.pending : storiesCount;
	const derivedStoriesCompleted = storiesSummary !== void 0 ? storiesSummary.completed : 0;
	return {
		run_id: run.id,
		current_phase: currentPhase,
		phases,
		total_tokens: {
			input: totalInput,
			output: totalOutput,
			cost_usd: totalCost
		},
		decisions_count: decisionsCount,
		stories_count: derivedStoriesCount,
		stories_completed: derivedStoriesCompleted,
		// NOTE(review): when run.updated_at is missing, parseDbTimestampAsUtc("")
		// yields an Invalid Date and staleness_seconds becomes NaN (null after
		// JSON serialization) — confirm consumers tolerate this.
		last_activity: run.updated_at ?? "",
		staleness_seconds: Math.round((Date.now() - parseDbTimestampAsUtc(run.updated_at ?? "").getTime()) / 1e3),
		last_event_ts: run.updated_at ?? "",
		active_dispatches: activeDispatches,
		...storiesSummary !== void 0 ? { stories: storiesSummary } : {}
	};
}
|
|
1712
|
+
/**
 * Format a pipeline status summary in human-readable form.
 *
 * Renders the run header, a per-phase status list with [DONE]/[RUN]/[    ]
 * icons, run-wide token/cost/decision totals, and — when per-story sprint
 * details exist — a fixed-width sprint progress table.
 *
 * @param status - Object produced by buildPipelineStatusOutput.
 * @returns {string} Multi-line report.
 */
function formatPipelineStatusHuman(status) {
	const icons = {
		complete: "[DONE]",
		running: "[RUN] ",
		pending: "[    ]"
	};
	const out = [];
	out.push(`Pipeline Run: ${status.run_id}`);
	out.push(`  Current Phase: ${status.current_phase ?? "N/A"}`);
	out.push("", "  Phase Status:");
	for (const [name, info] of Object.entries(status.phases)) {
		let row = `    ${icons[info.status] ?? "[?]"} ${name}`;
		if (info.status === "complete" && info.completed_at) row += ` (completed: ${info.completed_at})`;
		const usage = info.token_usage;
		if (usage && (usage.input > 0 || usage.output > 0)) row += ` — tokens: ${usage.input.toLocaleString()} in / ${usage.output.toLocaleString()} out`;
		out.push(row);
	}
	const totals = status.total_tokens;
	out.push("");
	out.push(`  Total Tokens: ${(totals.input + totals.output).toLocaleString()} (in: ${totals.input.toLocaleString()}, out: ${totals.output.toLocaleString()})`);
	out.push(`  Total Cost: $${totals.cost_usd.toFixed(4)}`);
	out.push(`  Decisions: ${status.decisions_count}`);
	out.push(`  Stories: ${status.stories_count}`);
	const sprint = status.stories;
	if (sprint !== void 0 && Object.keys(sprint.details).length > 0) {
		const rule = "  " + "─".repeat(68);
		out.push("", "  Sprint Progress:", rule);
		out.push(`  ${"STORY".padEnd(10)} ${"PHASE".padEnd(24)} ${"CYCLES".padEnd(8)} ELAPSED`);
		out.push(rule);
		for (const [key, d] of Object.entries(sprint.details)) {
			const took = d.elapsed_seconds > 0 ? `${d.elapsed_seconds}s` : "-";
			out.push(`  ${key.padEnd(10)} ${d.phase.padEnd(24)} ${String(d.review_cycles).padEnd(8)} ${took}`);
		}
		out.push(rule);
		out.push(`  Completed: ${sprint.completed}  In Progress: ${sprint.in_progress}  Escalated: ${sprint.escalated}  Pending: ${sprint.pending}`);
	}
	return out.join("\n");
}
|
|
1753
|
+
/**
 * Format a complete pipeline run summary.
 *
 * "json" format returns a compact JSON string with run metadata, phase count,
 * and token usage including the BMAD baseline comparison; any other format
 * returns a boxed, human-readable multi-line report.
 *
 * @param run - Pipeline run row (reads id and status).
 * @param tokenSummary - Per-phase/agent token rows.
 * @param decisionsCount - Number of decisions recorded.
 * @param storiesCount - Number of stories in the run.
 * @param durationMs - Total run duration in milliseconds.
 * @param format - "json" or human-readable.
 * @returns {string} Rendered summary.
 */
function formatPipelineSummary(run, tokenSummary, decisionsCount, storiesCount, durationMs, format) {
	let inputSum = 0;
	let outputSum = 0;
	let costSum = 0;
	for (const row of tokenSummary) {
		inputSum += row.total_input_tokens;
		outputSum += row.total_output_tokens;
		costSum += row.total_cost_usd;
	}
	const combined = inputSum + outputSum;
	// Percentage saved against the full-pipeline BMAD baseline; negative means overhead.
	const savingsPct = BMAD_BASELINE_TOKENS_FULL > 0 ? Math.round((BMAD_BASELINE_TOKENS_FULL - combined) / BMAD_BASELINE_TOKENS_FULL * 100) : 0;
	if (format === "json") return JSON.stringify({
		run_id: run.id,
		status: run.status,
		duration_ms: durationMs,
		phases_completed: VALID_PHASES.length,
		decisions_count: decisionsCount,
		stories_count: storiesCount,
		token_usage: {
			input: inputSum,
			output: outputSum,
			total: combined,
			cost_usd: costSum,
			bmad_baseline: BMAD_BASELINE_TOKENS_FULL,
			savings_pct: savingsPct
		}
	});
	const durationSec = Math.round(durationMs / 1e3);
	return [
		"┌─────────────────────────────────────────────────────┐",
		"│                Pipeline Run Summary                 │",
		"└─────────────────────────────────────────────────────┘",
		`  Run ID: ${run.id}`,
		`  Status: ${run.status}`,
		`  Duration: ${durationSec}s`,
		`  Phases Complete: ${VALID_PHASES.length}`,
		`  Decisions: ${decisionsCount}`,
		`  Stories: ${storiesCount}`,
		"",
		`  Token Usage: ${combined.toLocaleString()} total`,
		`    Input: ${inputSum.toLocaleString()}`,
		`    Output: ${outputSum.toLocaleString()}`,
		`    Cost: $${costSum.toFixed(4)}`,
		"",
		`  BMAD Baseline: ${BMAD_BASELINE_TOKENS_FULL.toLocaleString()} tokens`,
		`  Token Savings: ${savingsPct >= 0 ? savingsPct + "%" : "N/A (overhead)"}`
	].join("\n");
}
|
|
1805
|
+
|
|
1806
|
+
//#endregion
|
|
1807
|
+
//#region src/modules/work-graph/cycle-detector.ts
|
|
1808
|
+
/**
 * detectCycles — DFS-based cycle detection for story dependency graphs.
 *
 * Story 31-7: Cycle Detection in Work Graph
 *
 * Pure function; no database or I/O dependencies.
 *
 * Each edge `{ story_key, depends_on }` means story_key depends on
 * depends_on, and is traversed as the directed edge story_key → depends_on.
 *
 * Implementation: recursive depth-first search with an "on the current
 * stack" set. When a node already on the stack is revisited, the slice of
 * the current path starting at that node's first occurrence is the cycle.
 * (NOTE: earlier docs claimed an iterative explicit-stack DFS; the actual
 * traversal is recursive.)
 *
 * @param edges - List of dependency edges to check.
 * @returns `null` if the graph is acyclic (safe to persist), or a `string[]`
 * cycle path whose first and last entries are the same story key
 * (e.g. `['A', 'B', 'A']`).
 */
function detectCycles(edges) {
	// Build adjacency lists keyed by story_key.
	const graph = new Map();
	for (const { story_key, depends_on } of edges) {
		const targets = graph.get(story_key);
		if (targets === void 0) graph.set(story_key, [depends_on]);
		else targets.push(depends_on);
	}
	const finished = new Set();
	const onStack = new Set();
	const trail = [];
	const walk = (node) => {
		// Back-edge: node is already on the active DFS stack → cycle found.
		if (onStack.has(node)) return [...trail.slice(trail.indexOf(node)), node];
		if (finished.has(node)) return null;
		onStack.add(node);
		trail.push(node);
		for (const next of graph.get(node) ?? []) {
			const found = walk(next);
			if (found !== null) return found;
		}
		trail.pop();
		onStack.delete(node);
		finished.add(node);
		return null;
	};
	// Seed the search from every node mentioned on either side of an edge.
	const everyNode = new Set([...edges.map((e) => e.story_key), ...edges.map((e) => e.depends_on)]);
	for (const node of everyNode) {
		if (finished.has(node)) continue;
		const found = walk(node);
		if (found !== null) return found;
	}
	return null;
}
|
|
1863
|
+
|
|
1864
|
+
//#endregion
|
|
1865
|
+
//#region src/modules/state/work-graph-repository.ts
|
|
1866
|
+
/**
 * Repository for the work-graph tables (`wg_stories`, `story_dependencies`).
 * All access goes through the injected adapter's async `query(sql, params)`.
 */
var WorkGraphRepository = class {
	/** @param db - Database adapter exposing async query(sql, params). */
	constructor(db) {
		this.db = db;
	}
	/**
	 * Insert or replace a work-graph story node.
	 * Uses DELETE + INSERT so it works on InMemoryDatabaseAdapter (which does
	 * not support ON DUPLICATE KEY UPDATE). Optional fields are stored as NULL.
	 */
	async upsertStory(story) {
		await this.db.query(`DELETE FROM wg_stories WHERE story_key = ?`, [story.story_key]);
		await this.db.query(`INSERT INTO wg_stories (story_key, epic, title, status, spec_path, created_at, updated_at, completed_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, [
			story.story_key,
			story.epic,
			story.title ?? null,
			story.status,
			story.spec_path ?? null,
			story.created_at ?? null,
			story.updated_at ?? null,
			story.completed_at ?? null
		]);
	}
	/**
	 * Insert a dependency edge. Idempotent — if a row with the same
	 * (story_key, depends_on) already exists it is silently skipped.
	 */
	async addDependency(dep) {
		const existing = await this.db.query(`SELECT story_key FROM story_dependencies WHERE story_key = ? AND depends_on = ?`, [dep.story_key, dep.depends_on]);
		if (existing.length > 0) return;
		await this.db.query(`INSERT INTO story_dependencies (story_key, depends_on, dependency_type, source, created_at) VALUES (?, ?, ?, ?, ?)`, [
			dep.story_key,
			dep.depends_on,
			dep.dependency_type,
			dep.source,
			dep.created_at ?? null
		]);
	}
	/**
	 * Persist contract-based dependency edges to `story_dependencies` as
	 * best-effort, idempotent writes.
	 *
	 * - edges where `reason` does NOT start with `'dual export:'` are persisted
	 *   as `dependency_type = 'blocks'` (hard prerequisites).
	 * - edges where `reason` starts with `'dual export:'` are persisted as
	 *   `dependency_type = 'informs'` (serialization hints, not hard gates).
	 *
	 * Note the direction mapping: edge.to becomes the dependent story_key and
	 * edge.from becomes depends_on. Idempotency is delegated to
	 * `addDependency()`, which skips the INSERT if a row with the same
	 * `(story_key, depends_on)` already exists.
	 *
	 * @param edges - Readonly list of contract dependency edges to persist.
	 */
	async addContractDependencies(edges) {
		if (edges.length === 0) return;
		for (const edge of edges) {
			const dependency_type = edge.reason?.startsWith("dual export:") ? "informs" : "blocks";
			await this.addDependency({
				story_key: edge.to,
				depends_on: edge.from,
				dependency_type,
				source: "contract",
				created_at: new Date().toISOString()
			});
		}
	}
	/**
	 * Return all work-graph stories, optionally filtered by epic and/or status.
	 * With no filter (or an empty one) every row is returned.
	 */
	async listStories(filter) {
		if (!filter || !filter.epic && !filter.status) return this.db.query(`SELECT * FROM wg_stories`);
		const conditions = [];
		const params = [];
		if (filter.epic) {
			conditions.push(`epic = ?`);
			params.push(filter.epic);
		}
		if (filter.status) {
			conditions.push(`status = ?`);
			params.push(filter.status);
		}
		const where = conditions.join(" AND ");
		return this.db.query(`SELECT * FROM wg_stories WHERE ${where}`, params);
	}
	/**
	 * Update the `status` (and optionally `completed_at`) of an existing
	 * work-graph story.
	 *
	 * This is a read-modify-write operation: SELECT existing row → build
	 * updated WgStory → upsertStory(). If no row exists for `storyKey` the
	 * call is a no-op (AC4). Terminal statuses ('complete'/'escalated') also
	 * stamp completed_at (from opts, defaulting to now).
	 *
	 * @param storyKey - Story identifier, e.g. "31-4"
	 * @param status - Target WgStoryStatus value
	 * @param opts - Optional `completedAt` ISO string for terminal phases
	 */
	async updateStoryStatus(storyKey, status, opts) {
		const rows = await this.db.query(`SELECT * FROM wg_stories WHERE story_key = ?`, [storyKey]);
		if (rows.length === 0) return;
		const existing = rows[0];
		const now = new Date().toISOString();
		const isTerminal = status === "complete" || status === "escalated";
		const updated = {
			...existing,
			status,
			updated_at: now,
			completed_at: isTerminal ? opts?.completedAt ?? now : existing.completed_at
		};
		await this.upsertStory(updated);
	}
	/**
	 * Return stories that are eligible for dispatch.
	 *
	 * A story is ready when:
	 * 1. Its status is 'planned' or 'ready', AND
	 * 2. It has no 'blocks' dependency whose blocking story is not 'complete'.
	 *
	 * Soft ('informs') dependencies never block dispatch.
	 *
	 * This is implemented programmatically rather than via the `ready_stories`
	 * VIEW so that the InMemoryDatabaseAdapter can handle it without VIEW support.
	 */
	async getReadyStories() {
		const allStories = await this.db.query(`SELECT * FROM wg_stories`);
		const candidates = allStories.filter((s) => s.status === "planned" || s.status === "ready");
		if (candidates.length === 0) return [];
		const deps = await this.db.query(`SELECT story_key, depends_on FROM story_dependencies WHERE dependency_type = 'blocks'`);
		if (deps.length === 0) return candidates;
		const blockerStatus = new Map(allStories.map((s) => [s.story_key, s.status]));
		const depsMap = new Map();
		for (const d of deps) {
			if (!depsMap.has(d.story_key)) depsMap.set(d.story_key, []);
			depsMap.get(d.story_key).push(d.depends_on);
		}
		return candidates.filter((s) => {
			const blocking = depsMap.get(s.story_key) ?? [];
			return blocking.every((dep) => blockerStatus.get(dep) === "complete");
		});
	}
	/**
	 * Query the database for all 'blocks' dependency rows and run DFS cycle
	 * detection over them (delegates to the module-level detectCycles
	 * function — the method name does not shadow it inside the body).
	 *
	 * Returns an empty array if no cycle is found (consistent with other
	 * repository methods that return empty arrays rather than null).
	 *
	 * Only 'blocks' deps are checked — soft 'informs' deps cannot cause
	 * dispatch deadlocks (AC5).
	 */
	async detectCycles() {
		const rows = await this.db.query(`SELECT story_key, depends_on FROM story_dependencies WHERE dependency_type = 'blocks'`);
		const cycle = detectCycles(rows);
		return cycle ?? [];
	}
	/**
	 * Return stories that are planned/ready but cannot be dispatched because
	 * at least one hard-blocking ('blocks') dependency is not yet complete.
	 *
	 * For each blocked story, the returned object includes the full WgStory
	 * record plus the list of unsatisfied blockers (key, title, status).
	 * A blocker key with no matching wg_stories row is reported with its key
	 * as title and status 'unknown'.
	 *
	 * Soft ('informs') dependencies are ignored here, matching getReadyStories().
	 */
	async getBlockedStories() {
		const allStories = await this.db.query(`SELECT * FROM wg_stories`);
		const candidates = allStories.filter((s) => s.status === "planned" || s.status === "ready");
		if (candidates.length === 0) return [];
		const deps = await this.db.query(`SELECT story_key, depends_on FROM story_dependencies WHERE dependency_type = 'blocks'`);
		if (deps.length === 0) return [];
		const statusMap = new Map(allStories.map((s) => [s.story_key, s]));
		const depsMap = new Map();
		for (const d of deps) {
			if (!depsMap.has(d.story_key)) depsMap.set(d.story_key, []);
			depsMap.get(d.story_key).push(d.depends_on);
		}
		const result = [];
		for (const story of candidates) {
			const blockerKeys = depsMap.get(story.story_key) ?? [];
			const unsatisfied = blockerKeys.filter((key) => statusMap.get(key)?.status !== "complete").map((key) => {
				const s = statusMap.get(key);
				return {
					key,
					title: s?.title ?? key,
					status: s?.status ?? "unknown"
				};
			});
			if (unsatisfied.length > 0) result.push({
				story,
				blockers: unsatisfied
			});
		}
		return result;
	}
};
|
|
2058
|
+
|
|
2059
|
+
//#endregion
|
|
2060
|
+
//#region src/modules/state/file-store.ts
|
|
2061
|
+
/**
|
|
2062
|
+
* In-memory / file-backed StateStore implementation.
|
|
2063
|
+
*
|
|
2064
|
+
* Suitable for CI environments and testing where orchestrator state is
|
|
2065
|
+
* ephemeral. Use DoltStateStore for branch-per-story isolation and versioned
|
|
2066
|
+
* history in production.
|
|
2067
|
+
*/
|
|
2068
|
+
var FileStateStore = class {
|
|
2069
|
+
_basePath;
|
|
2070
|
+
_stories = new Map();
|
|
2071
|
+
_metrics = [];
|
|
2072
|
+
_contracts = new Map();
|
|
2073
|
+
_contractVerifications = new Map();
|
|
2074
|
+
/** Key-value metrics store: outer key = runId, inner key = metric key */
|
|
2075
|
+
_kvMetrics = new Map();
|
|
2076
|
+
constructor(options = {}) {
|
|
2077
|
+
this._basePath = options.basePath;
|
|
2078
|
+
}
|
|
2079
|
+
async initialize() {}
|
|
2080
|
+
async close() {}
|
|
2081
|
+
async getStoryState(storyKey) {
|
|
2082
|
+
return this._stories.get(storyKey);
|
|
2083
|
+
}
|
|
2084
|
+
async setStoryState(storyKey, state) {
|
|
2085
|
+
this._stories.set(storyKey, {
|
|
2086
|
+
...state,
|
|
2087
|
+
storyKey
|
|
2088
|
+
});
|
|
2089
|
+
}
|
|
2090
|
+
async queryStories(filter) {
|
|
2091
|
+
const all = Array.from(this._stories.values());
|
|
2092
|
+
return all.filter((record) => {
|
|
2093
|
+
if (filter.phase !== void 0) {
|
|
2094
|
+
const phases = Array.isArray(filter.phase) ? filter.phase : [filter.phase];
|
|
2095
|
+
if (!phases.includes(record.phase)) return false;
|
|
2096
|
+
}
|
|
2097
|
+
if (filter.sprint !== void 0 && record.sprint !== filter.sprint) return false;
|
|
2098
|
+
if (filter.storyKey !== void 0 && record.storyKey !== filter.storyKey) return false;
|
|
2099
|
+
return true;
|
|
2100
|
+
});
|
|
2101
|
+
}
|
|
2102
|
+
async recordMetric(metric) {
|
|
2103
|
+
const record = {
|
|
2104
|
+
...metric,
|
|
2105
|
+
recordedAt: metric.recordedAt ?? new Date().toISOString()
|
|
2106
|
+
};
|
|
2107
|
+
this._metrics.push(record);
|
|
2108
|
+
}
|
|
2109
|
+
async queryMetrics(filter) {
|
|
2110
|
+
const storyKey = filter.storyKey ?? filter.story_key;
|
|
2111
|
+
const taskType = filter.taskType ?? filter.task_type;
|
|
2112
|
+
return this._metrics.filter((m) => {
|
|
2113
|
+
if (storyKey !== void 0 && m.storyKey !== storyKey) return false;
|
|
2114
|
+
if (taskType !== void 0 && m.taskType !== taskType) return false;
|
|
2115
|
+
if (filter.sprint !== void 0 && m.sprint !== filter.sprint) return false;
|
|
2116
|
+
if (filter.dateFrom !== void 0 && m.recordedAt !== void 0 && m.recordedAt < filter.dateFrom) return false;
|
|
2117
|
+
if (filter.dateTo !== void 0 && m.recordedAt !== void 0 && m.recordedAt > filter.dateTo) return false;
|
|
2118
|
+
if (filter.since !== void 0 && m.recordedAt !== void 0 && m.recordedAt < filter.since) return false;
|
|
2119
|
+
return true;
|
|
2120
|
+
});
|
|
2121
|
+
}
|
|
2122
|
+
/**
|
|
2123
|
+
* Persist an arbitrary key-value metric for a run.
|
|
2124
|
+
* Stored in memory AND written to `{basePath}/kv-metrics.json` when basePath is set.
|
|
2125
|
+
*/
|
|
2126
|
+
async setMetric(runId, key, value) {
|
|
2127
|
+
let runMap = this._kvMetrics.get(runId);
|
|
2128
|
+
if (runMap === void 0) {
|
|
2129
|
+
runMap = new Map();
|
|
2130
|
+
this._kvMetrics.set(runId, runMap);
|
|
2131
|
+
}
|
|
2132
|
+
runMap.set(key, value);
|
|
2133
|
+
if (this._basePath !== void 0) await this._flushKvMetrics();
|
|
2134
|
+
}
|
|
2135
|
+
/**
|
|
2136
|
+
* Retrieve a previously stored key-value metric for a run.
|
|
2137
|
+
* Reads from in-memory cache, falling back to the JSON file when basePath is set.
|
|
2138
|
+
*/
|
|
2139
|
+
async getMetric(runId, key) {
|
|
2140
|
+
const inMemory = this._kvMetrics.get(runId)?.get(key);
|
|
2141
|
+
if (inMemory !== void 0) return inMemory;
|
|
2142
|
+
if (this._basePath !== void 0) try {
|
|
2143
|
+
const filePath = join$1(this._basePath, "kv-metrics.json");
|
|
2144
|
+
const content = await readFile(filePath, "utf-8");
|
|
2145
|
+
const parsed = JSON.parse(content);
|
|
2146
|
+
return parsed[runId]?.[key] ?? void 0;
|
|
2147
|
+
} catch {}
|
|
2148
|
+
return void 0;
|
|
2149
|
+
}
|
|
2150
|
+
/** Serialize the in-memory kv metrics map to JSON on disk. */
|
|
2151
|
+
async _flushKvMetrics() {
|
|
2152
|
+
if (this._basePath === void 0) return;
|
|
2153
|
+
const serialized = {};
|
|
2154
|
+
for (const [runId, runMap] of this._kvMetrics) {
|
|
2155
|
+
serialized[runId] = {};
|
|
2156
|
+
for (const [key, value] of runMap) serialized[runId][key] = value;
|
|
2157
|
+
}
|
|
2158
|
+
const filePath = join$1(this._basePath, "kv-metrics.json");
|
|
2159
|
+
await writeFile(filePath, JSON.stringify(serialized, null, 2), "utf-8");
|
|
2160
|
+
}
|
|
2161
|
+
async getContracts(storyKey) {
|
|
2162
|
+
return this._contracts.get(storyKey) ?? [];
|
|
2163
|
+
}
|
|
2164
|
+
async setContracts(storyKey, contracts) {
|
|
2165
|
+
this._contracts.set(storyKey, contracts.map((c) => ({ ...c })));
|
|
2166
|
+
}
|
|
2167
|
+
async queryContracts(filter) {
|
|
2168
|
+
const all = [];
|
|
2169
|
+
for (const records of this._contracts.values()) for (const r of records) all.push(r);
|
|
2170
|
+
return all.filter((r) => {
|
|
2171
|
+
if (filter?.storyKey !== void 0 && r.storyKey !== filter.storyKey) return false;
|
|
2172
|
+
if (filter?.direction !== void 0 && r.direction !== filter.direction) return false;
|
|
2173
|
+
return true;
|
|
2174
|
+
});
|
|
2175
|
+
}
|
|
2176
|
+
async setContractVerification(storyKey, results) {
|
|
2177
|
+
this._contractVerifications.set(storyKey, results.map((r) => ({ ...r })));
|
|
2178
|
+
if (this._basePath !== void 0) {
|
|
2179
|
+
const serialized = {};
|
|
2180
|
+
for (const [key, records] of this._contractVerifications) serialized[key] = records;
|
|
2181
|
+
const filePath = join$1(this._basePath, "contract-verifications.json");
|
|
2182
|
+
await writeFile(filePath, JSON.stringify(serialized, null, 2), "utf-8");
|
|
2183
|
+
}
|
|
2184
|
+
}
|
|
2185
|
+
async getContractVerification(storyKey) {
|
|
2186
|
+
return this._contractVerifications.get(storyKey) ?? [];
|
|
2187
|
+
}
|
|
2188
|
+
// Branching/merging is a Dolt-only concept; the file-backed store treats these as no-ops.
async branchForStory(_storyKey) {}
async mergeStory(_storyKey) {}
async rollbackStory(_storyKey) {}
// No versioned storage to diff against: always reports an empty table diff.
async diffStory(storyKey) {
	return {
		storyKey,
		tables: []
	};
}
// No commit log exists for the file backend, so history is always empty.
async getHistory(_limit) {
	return [];
}
|
|
2200
|
+
};
|
|
2201
|
+
|
|
2202
|
+
//#endregion
|
|
2203
|
+
//#region src/modules/state/dolt-store.ts
|
|
2204
|
+
// Logger for the Dolt-backed state store ("modules:state:dolt" namespace).
const log = createLogger("modules:state:dolt");
|
|
2205
|
+
/**
 * Validate that a story key matches the expected pattern (e.g. "26-7", "1-1a", "NEW-26", "E6").
 * Prevents SQL injection via string-interpolated identifiers.
 */
const STORY_KEY_PATTERN = /^[A-Za-z0-9]+(-[A-Za-z0-9]+)?$/;
/**
 * Throw a DoltQueryError unless `storyKey` is a safe identifier
 * (one or two alphanumeric segments joined by a single hyphen).
 */
function assertValidStoryKey(storyKey) {
	const valid = STORY_KEY_PATTERN.test(storyKey);
	if (valid) return;
	throw new DoltQueryError("assertValidStoryKey", `Invalid story key: '${storyKey}'. Must match pattern <key> or <epic>-<story> (e.g. "E6", "10-1", "1-1a", "NEW-26").`);
}
|
|
2213
|
+
/**
|
|
2214
|
+
* Dolt-backed implementation of the StateStore interface.
|
|
2215
|
+
*
|
|
2216
|
+
* Constructor accepts a deps object for DI: `{ repoPath, client }`.
|
|
2217
|
+
* Call `initialize()` before any CRUD operations.
|
|
2218
|
+
*/
|
|
2219
|
+
var DoltStateStore = class DoltStateStore {
|
|
2220
|
+
// Filesystem path of the Dolt repository this store operates on.
_repoPath;
// Injected Dolt client used for all SQL queries and CLI invocations.
_client;
// storyKey -> Dolt branch name; populated by branchForStory(), cleared on merge/rollback.
_storyBranches = new Map();
/**
 * @param options - `{ repoPath, client }` dependency-injection bag.
 *   No I/O happens here; call `initialize()` before any CRUD operation.
 */
constructor(options) {
	this._repoPath = options.repoPath;
	this._client = options.client;
}
|
|
2227
|
+
/**
 * Return the branch name for a story if one has been created via branchForStory(),
 * or undefined to use the default (main) branch.
 * Write paths pass this value through as the branch argument to `_client.query`.
 */
_branchFor(storyKey) {
	return this._storyBranches.get(storyKey);
}
|
|
2234
|
+
/**
 * Connect the Dolt client, apply schema migrations, and commit them.
 * Must complete before any CRUD method is used.
 */
async initialize() {
	await this._client.connect();
	await this._runMigrations();
	await this.flush("substrate: schema migrations");
	log.debug("DoltStateStore initialized at %s", this._repoPath);
}
/** Release the underlying Dolt client connection. */
async close() {
	await this._client.close();
}
|
|
2243
|
+
/**
 * Idempotently create the base schema (stories, metrics, contracts,
 * review_verdicts) and apply incremental migrations. Safe to run on every
 * startup: the DDL uses IF NOT EXISTS and the column migration probes first.
 */
async _runMigrations() {
	const ddl = [
		`CREATE TABLE IF NOT EXISTS stories (
		story_key VARCHAR(100) NOT NULL,
		phase VARCHAR(30) NOT NULL DEFAULT 'PENDING',
		review_cycles INT NOT NULL DEFAULT 0,
		last_verdict VARCHAR(64) NULL,
		error TEXT NULL,
		started_at VARCHAR(64) NULL,
		completed_at VARCHAR(64) NULL,
		sprint VARCHAR(50) NULL,
		PRIMARY KEY (story_key)
	)`,
		`CREATE TABLE IF NOT EXISTS metrics (
		id BIGINT NOT NULL AUTO_INCREMENT,
		story_key VARCHAR(100) NOT NULL,
		task_type VARCHAR(100) NOT NULL,
		model VARCHAR(100) NULL,
		tokens_in BIGINT NULL,
		tokens_out BIGINT NULL,
		cache_read_tokens BIGINT NULL,
		cost_usd DOUBLE NULL,
		wall_clock_ms BIGINT NULL,
		review_cycles INT NULL,
		stall_count INT NULL,
		result VARCHAR(30) NULL,
		recorded_at VARCHAR(64) NULL,
		sprint VARCHAR(50) NULL,
		PRIMARY KEY (id)
	)`,
		`CREATE TABLE IF NOT EXISTS contracts (
		story_key VARCHAR(100) NOT NULL,
		contract_name VARCHAR(200) NOT NULL,
		direction VARCHAR(20) NOT NULL,
		schema_path VARCHAR(500) NULL,
		transport VARCHAR(200) NULL,
		PRIMARY KEY (story_key, contract_name, direction)
	)`,
		`CREATE TABLE IF NOT EXISTS review_verdicts (
		id BIGINT NOT NULL AUTO_INCREMENT,
		story_key VARCHAR(100) NOT NULL,
		task_type VARCHAR(100) NOT NULL,
		verdict VARCHAR(64) NOT NULL,
		issues_count INT NULL,
		notes TEXT NULL,
		timestamp VARCHAR(64) NULL,
		PRIMARY KEY (id)
	)`
	];
	for (const sql of ddl) await this._client.query(sql);
	// Migration v5 -> v6: add a `dependencies` JSON column to repo_map_symbols.
	// Best-effort: silently skipped when that table does not exist yet.
	try {
		const colRows = await this._client.query(`SHOW COLUMNS FROM repo_map_symbols LIKE 'dependencies'`);
		if (colRows.length === 0) {
			await this._client.query(`ALTER TABLE repo_map_symbols ADD COLUMN dependencies JSON`);
			await this._client.query(`INSERT IGNORE INTO _schema_version (version, description) VALUES (6, 'Add dependencies JSON column to repo_map_symbols (Epic 28-3)')`);
			log.info({
				component: "dolt-state",
				migration: "v5-to-v6",
				column: "dependencies",
				table: "repo_map_symbols"
			}, "Applied migration v5-to-v6: added dependencies column to repo_map_symbols");
		}
	} catch {
		log.debug("Skipping repo_map_symbols migration: table not yet created");
	}
	log.debug("Schema migrations applied");
}
|
|
2310
|
+
/**
|
|
2311
|
+
* Commit pending Dolt changes on the current branch.
|
|
2312
|
+
* Callers can invoke this after a batch of writes for explicit durability.
|
|
2313
|
+
*/
|
|
2314
|
+
async flush(message = "substrate: auto-commit") {
|
|
2315
|
+
try {
|
|
2316
|
+
await this._client.execArgs(["add", "."]);
|
|
2317
|
+
await this._client.execArgs([
|
|
2318
|
+
"commit",
|
|
2319
|
+
"--allow-empty",
|
|
2320
|
+
"-m",
|
|
2321
|
+
message
|
|
2322
|
+
]);
|
|
2323
|
+
log.debug("Dolt flush committed: %s", message);
|
|
2324
|
+
} catch (err) {
|
|
2325
|
+
const detail = err instanceof Error ? err.message : String(err);
|
|
2326
|
+
log.warn({ detail }, "Dolt flush failed (non-fatal)");
|
|
2327
|
+
}
|
|
2328
|
+
}
|
|
2329
|
+
async getStoryState(storyKey) {
|
|
2330
|
+
const rows = await this._client.query("SELECT * FROM stories WHERE story_key = ?", [storyKey]);
|
|
2331
|
+
if (rows.length === 0) return void 0;
|
|
2332
|
+
return this._rowToStory(rows[0]);
|
|
2333
|
+
}
|
|
2334
|
+
/**
 * Upsert the full state row for a story (REPLACE INTO overwrites any existing row).
 * Writes go to the story's Dolt branch when one is registered, otherwise main.
 * Optional fields are coalesced to SQL NULL.
 */
async setStoryState(storyKey, state) {
	const branch = this._branchFor(storyKey);
	const sql = `REPLACE INTO stories
	(story_key, phase, review_cycles, last_verdict, error, started_at, completed_at, sprint)
	VALUES (?, ?, ?, ?, ?, ?, ?, ?)`;
	await this._client.query(sql, [
		storyKey,
		state.phase,
		state.reviewCycles,
		state.lastVerdict ?? null,
		state.error ?? null,
		state.startedAt ?? null,
		state.completedAt ?? null,
		state.sprint ?? null
	], branch);
}
|
|
2350
|
+
async queryStories(filter) {
|
|
2351
|
+
const conditions = [];
|
|
2352
|
+
const params = [];
|
|
2353
|
+
if (filter.phase !== void 0) {
|
|
2354
|
+
const phases = Array.isArray(filter.phase) ? filter.phase : [filter.phase];
|
|
2355
|
+
const placeholders = phases.map(() => "?").join(", ");
|
|
2356
|
+
conditions.push(`phase IN (${placeholders})`);
|
|
2357
|
+
params.push(...phases);
|
|
2358
|
+
}
|
|
2359
|
+
if (filter.sprint !== void 0) {
|
|
2360
|
+
conditions.push("sprint = ?");
|
|
2361
|
+
params.push(filter.sprint);
|
|
2362
|
+
}
|
|
2363
|
+
if (filter.storyKey !== void 0) {
|
|
2364
|
+
conditions.push("story_key = ?");
|
|
2365
|
+
params.push(filter.storyKey);
|
|
2366
|
+
}
|
|
2367
|
+
const where = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
|
|
2368
|
+
const sql = `SELECT * FROM stories ${where} ORDER BY story_key`;
|
|
2369
|
+
const rows = await this._client.query(sql, params);
|
|
2370
|
+
return rows.map((r) => this._rowToStory(r));
|
|
2371
|
+
}
|
|
2372
|
+
_rowToStory(row) {
|
|
2373
|
+
return {
|
|
2374
|
+
storyKey: row.story_key,
|
|
2375
|
+
phase: row.phase,
|
|
2376
|
+
reviewCycles: Number(row.review_cycles),
|
|
2377
|
+
lastVerdict: row.last_verdict ?? void 0,
|
|
2378
|
+
error: row.error ?? void 0,
|
|
2379
|
+
startedAt: row.started_at ?? void 0,
|
|
2380
|
+
completedAt: row.completed_at ?? void 0,
|
|
2381
|
+
sprint: row.sprint ?? void 0
|
|
2382
|
+
};
|
|
2383
|
+
}
|
|
2384
|
+
/**
 * Insert one metrics row for a story. Timestamp precedence: recordedAt, then
 * legacy `timestamp`, then now. Nullable fields are coalesced to SQL NULL.
 * Writes to the story's registered branch when one exists.
 */
async recordMetric(metric) {
	const branch = this._branchFor(metric.storyKey);
	const recordedAt = metric.recordedAt ?? metric.timestamp ?? new Date().toISOString();
	const sql = `INSERT INTO metrics
	(story_key, task_type, model, tokens_in, tokens_out, cache_read_tokens,
	cost_usd, wall_clock_ms, review_cycles, stall_count, result, recorded_at, sprint)
	VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`;
	await this._client.query(sql, [
		metric.storyKey,
		metric.taskType,
		metric.model ?? null,
		metric.tokensIn ?? null,
		metric.tokensOut ?? null,
		metric.cacheReadTokens ?? null,
		metric.costUsd ?? null,
		metric.wallClockMs ?? null,
		metric.reviewCycles ?? null,
		metric.stallCount ?? null,
		metric.result ?? null,
		recordedAt,
		metric.sprint ?? null
	], branch);
}
/**
 * Query metrics with optional filters (storyKey/story_key, taskType/task_type,
 * sprint, dateFrom/dateTo/since on recorded_at). When `filter.aggregate` is
 * set, returns per-task_type aggregates (avg cost, summed tokens, row count)
 * instead of raw rows. NOTE: SQL comparisons exclude rows with NULL recorded_at.
 */
async queryMetrics(filter) {
	const conditions = [];
	const params = [];
	// Accept both camelCase and snake_case filter field names.
	const storyKey = filter.storyKey ?? filter.story_key;
	const taskType = filter.taskType ?? filter.task_type;
	if (storyKey !== void 0) {
		conditions.push("story_key = ?");
		params.push(storyKey);
	}
	if (taskType !== void 0) {
		conditions.push("task_type = ?");
		params.push(taskType);
	}
	if (filter.sprint !== void 0) {
		conditions.push("sprint = ?");
		params.push(filter.sprint);
	}
	if (filter.dateFrom !== void 0) {
		conditions.push("recorded_at >= ?");
		params.push(filter.dateFrom);
	}
	if (filter.dateTo !== void 0) {
		conditions.push("recorded_at <= ?");
		params.push(filter.dateTo);
	}
	// `since` behaves as a second lower bound; it ANDs with dateFrom when both are given.
	if (filter.since !== void 0) {
		conditions.push("recorded_at >= ?");
		params.push(filter.since);
	}
	const where = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
	if (filter.aggregate) {
		const sql$1 = `SELECT task_type,
		AVG(cost_usd) AS avg_cost_usd,
		SUM(tokens_in) AS sum_tokens_in,
		SUM(tokens_out) AS sum_tokens_out,
		COUNT(*) AS count
		FROM metrics ${where} GROUP BY task_type ORDER BY task_type`;
		const aggRows = await this._client.query(sql$1, params);
		return aggRows.map((r) => this._aggregateRowToMetric(r));
	}
	const sql = `SELECT * FROM metrics ${where} ORDER BY id`;
	const rows = await this._client.query(sql, params);
	return rows.map((r) => this._rowToMetric(r));
}
|
|
2451
|
+
_aggregateRowToMetric(row) {
|
|
2452
|
+
return {
|
|
2453
|
+
storyKey: "",
|
|
2454
|
+
taskType: row.task_type,
|
|
2455
|
+
costUsd: row.avg_cost_usd ?? void 0,
|
|
2456
|
+
tokensIn: row.sum_tokens_in ?? void 0,
|
|
2457
|
+
tokensOut: row.sum_tokens_out ?? void 0,
|
|
2458
|
+
count: row.count,
|
|
2459
|
+
result: "aggregate"
|
|
2460
|
+
};
|
|
2461
|
+
}
|
|
2462
|
+
_rowToMetric(row) {
|
|
2463
|
+
return {
|
|
2464
|
+
storyKey: row.story_key,
|
|
2465
|
+
taskType: row.task_type,
|
|
2466
|
+
model: row.model ?? void 0,
|
|
2467
|
+
tokensIn: row.tokens_in ?? void 0,
|
|
2468
|
+
tokensOut: row.tokens_out ?? void 0,
|
|
2469
|
+
cacheReadTokens: row.cache_read_tokens ?? void 0,
|
|
2470
|
+
costUsd: row.cost_usd ?? void 0,
|
|
2471
|
+
wallClockMs: row.wall_clock_ms ?? void 0,
|
|
2472
|
+
reviewCycles: row.review_cycles ?? void 0,
|
|
2473
|
+
stallCount: row.stall_count ?? void 0,
|
|
2474
|
+
result: row.result ?? void 0,
|
|
2475
|
+
recordedAt: row.recorded_at ?? void 0,
|
|
2476
|
+
sprint: row.sprint ?? void 0,
|
|
2477
|
+
timestamp: row.timestamp ?? row.recorded_at ?? void 0
|
|
2478
|
+
};
|
|
2479
|
+
}
|
|
2480
|
+
/** Return contract rows for a story, ordered by contract name. */
async getContracts(storyKey) {
	const rows = await this._client.query("SELECT * FROM contracts WHERE story_key = ? ORDER BY contract_name", [storyKey]);
	return rows.map((r) => this._rowToContract(r));
}
/**
 * Replace all contracts for a story: delete-then-insert on the story's branch.
 * NOTE(review): the delete and inserts are separate statements, not one
 * transaction — a failure mid-way can leave a partial contract set. Confirm acceptable.
 */
async setContracts(storyKey, contracts) {
	const branch = this._branchFor(storyKey);
	await this._client.query("DELETE FROM contracts WHERE story_key = ?", [storyKey], branch);
	for (const c of contracts) await this._client.query(`INSERT INTO contracts (story_key, contract_name, direction, schema_path, transport)
	VALUES (?, ?, ?, ?, ?)`, [
		c.storyKey,
		c.contractName,
		c.direction,
		c.schemaPath,
		c.transport ?? null
	], branch);
}
|
|
2496
|
+
_rowToContract(row) {
|
|
2497
|
+
return {
|
|
2498
|
+
storyKey: row.story_key,
|
|
2499
|
+
contractName: row.contract_name,
|
|
2500
|
+
direction: row.direction,
|
|
2501
|
+
schemaPath: row.schema_path,
|
|
2502
|
+
transport: row.transport ?? void 0
|
|
2503
|
+
};
|
|
2504
|
+
}
|
|
2505
|
+
async queryContracts(filter) {
|
|
2506
|
+
const conditions = [];
|
|
2507
|
+
const params = [];
|
|
2508
|
+
if (filter?.storyKey !== void 0) {
|
|
2509
|
+
conditions.push("story_key = ?");
|
|
2510
|
+
params.push(filter.storyKey);
|
|
2511
|
+
}
|
|
2512
|
+
if (filter?.direction !== void 0) {
|
|
2513
|
+
conditions.push("direction = ?");
|
|
2514
|
+
params.push(filter.direction);
|
|
2515
|
+
}
|
|
2516
|
+
const where = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
|
|
2517
|
+
const sql = `SELECT * FROM contracts ${where} ORDER BY story_key, contract_name`;
|
|
2518
|
+
const rows = await this._client.query(sql, params);
|
|
2519
|
+
return rows.map((r) => this._rowToContract(r));
|
|
2520
|
+
}
|
|
2521
|
+
/**
 * Replace contract-verification results for a story. Each result becomes a
 * review_verdicts row with task_type 'contract-verification'; contractName and
 * mismatchDescription are packed into the JSON `notes` column, and issues_count
 * carries the total number of failing results in this batch (same value on every row).
 */
async setContractVerification(storyKey, results) {
	const branch = this._branchFor(storyKey);
	await this._client.query(`DELETE FROM review_verdicts WHERE story_key = ? AND task_type = 'contract-verification'`, [storyKey], branch);
	const failCount = results.filter((r) => r.verdict === "fail").length;
	for (const r of results) await this._client.query(`INSERT INTO review_verdicts (story_key, task_type, verdict, issues_count, notes, timestamp)
	VALUES (?, 'contract-verification', ?, ?, ?, ?)`, [
		storyKey,
		r.verdict,
		failCount,
		JSON.stringify({
			contractName: r.contractName,
			mismatchDescription: r.mismatchDescription
		}),
		r.verifiedAt
	], branch);
}
/**
 * Load contract-verification results for a story (newest first), unpacking the
 * JSON `notes` column back into contractName/mismatchDescription. Malformed
 * notes are tolerated: contractName falls back to "".
 */
async getContractVerification(storyKey) {
	const rows = await this._client.query(`SELECT * FROM review_verdicts WHERE story_key = ? AND task_type = 'contract-verification' ORDER BY timestamp DESC`, [storyKey]);
	return rows.map((row) => {
		let contractName = "";
		let mismatchDescription;
		if (row.notes !== null) try {
			const parsed = JSON.parse(row.notes);
			if (typeof parsed.contractName === "string") contractName = parsed.contractName;
			if (typeof parsed.mismatchDescription === "string") mismatchDescription = parsed.mismatchDescription;
		} catch {}
		return {
			storyKey: row.story_key,
			contractName,
			verdict: row.verdict,
			// Only include mismatchDescription when it was present in the notes.
			...mismatchDescription !== void 0 ? { mismatchDescription } : {},
			verifiedAt: row.timestamp ?? new Date().toISOString()
		};
	});
}
|
|
2556
|
+
/**
 * Create Dolt branch `story/<key>` off main and register it so subsequent
 * writes for this story land on that branch. The key is validated before
 * interpolation into SQL (injection guard).
 * @throws DoltQueryError when branch creation fails (e.g. branch already exists).
 */
async branchForStory(storyKey) {
	assertValidStoryKey(storyKey);
	const branchName = `story/${storyKey}`;
	try {
		await this._client.query(`CALL DOLT_BRANCH('${branchName}')`, [], "main");
		this._storyBranches.set(storyKey, branchName);
		log.debug("Created Dolt branch %s for story %s", branchName, storyKey);
	} catch (err) {
		const detail = err instanceof Error ? err.message : String(err);
		throw new DoltQueryError(`CALL DOLT_BRANCH('${branchName}')`, detail);
	}
}
/**
 * Merge the story's branch back into main. Steps: best-effort pre-merge commits
 * on both branches (so the merge sees clean working sets), then DOLT_MERGE on
 * main. On conflicts, samples one row from dolt_conflicts_stories for
 * diagnostics, deregisters the branch, and throws DoltMergeConflictError.
 * A "nothing to commit" post-merge commit (fast-forward) is tolerated.
 * No-op when no branch is registered for the story.
 * @throws DoltMergeConflictError on merge conflicts; DoltQueryError otherwise.
 */
async mergeStory(storyKey) {
	assertValidStoryKey(storyKey);
	const branchName = this._storyBranches.get(storyKey);
	if (branchName === void 0) {
		log.warn({ storyKey }, "mergeStory called but no branch registered — no-op");
		return;
	}
	try {
		// Best-effort: commit pending work on the story branch.
		try {
			await this._client.query(`CALL DOLT_ADD('-A')`, [], branchName);
			await this._client.query(`CALL DOLT_COMMIT('-m', 'Story ${storyKey}: pre-merge commit', '--allow-empty')`, [], branchName);
		} catch {}
		// Best-effort: commit pending work on main so the merge starts clean.
		try {
			await this._client.query(`CALL DOLT_ADD('-A')`, [], "main");
			await this._client.query(`CALL DOLT_COMMIT('-m', 'substrate: pre-merge auto-commit', '--allow-empty')`, [], "main");
		} catch {}
		const mergeRows = await this._client.query(`CALL DOLT_MERGE('${branchName}')`, [], "main");
		const mergeResult = mergeRows[0];
		if (mergeResult && (mergeResult.conflicts ?? 0) > 0) {
			// Sample a single conflicting row (stories table only) for the error payload.
			let table = "stories";
			let rowKey = "unknown";
			let ourValue;
			let theirValue;
			try {
				const conflictRows = await this._client.query(`SELECT * FROM dolt_conflicts_stories LIMIT 1`, [], "main");
				if (conflictRows.length > 0) {
					const row = conflictRows[0];
					rowKey = String(row["base_story_key"] ?? row["our_story_key"] ?? "unknown");
					ourValue = JSON.stringify(row["our_status"] ?? row);
					theirValue = JSON.stringify(row["their_status"] ?? row);
				}
			} catch {}
			this._storyBranches.delete(storyKey);
			throw new DoltMergeConflictError(table, [rowKey], {
				rowKey,
				ourValue,
				theirValue
			});
		}
		try {
			await this._client.query(`CALL DOLT_COMMIT('-m', 'Merge story ${storyKey}: COMPLETE')`, [], "main");
		} catch (commitErr) {
			const msg = commitErr instanceof Error ? commitErr.message : String(commitErr);
			// Fast-forward merges can leave nothing to commit — that is fine.
			if (!msg.includes("nothing to commit")) throw commitErr;
		}
		this._storyBranches.delete(storyKey);
		log.debug("Merged branch %s into main for story %s", branchName, storyKey);
	} catch (err) {
		if (err instanceof DoltMergeConflictError) throw err;
		const detail = err instanceof Error ? err.message : String(err);
		throw new DoltQueryError(`CALL DOLT_MERGE('${branchName}')`, detail);
	}
}
/**
 * Discard a story's branch (force-delete) without merging. Non-fatal on
 * failure: logs a warning, and the branch is deregistered either way.
 */
async rollbackStory(storyKey) {
	assertValidStoryKey(storyKey);
	const branchName = this._storyBranches.get(storyKey);
	if (branchName === void 0) {
		log.warn({ storyKey }, "rollbackStory called but no branch registered — no-op");
		return;
	}
	try {
		await this._client.query(`CALL DOLT_BRANCH('-D', '${branchName}')`, [], "main");
		this._storyBranches.delete(storyKey);
		log.debug("Rolled back (deleted) branch %s for story %s", branchName, storyKey);
	} catch (err) {
		const detail = err instanceof Error ? err.message : String(err);
		log.warn({
			detail,
			storyKey,
			branchName
		}, "rollbackStory failed (non-fatal)");
		this._storyBranches.delete(storyKey);
	}
}
|
|
2642
|
+
/**
 * Tables queried by diffStory(). Each table is checked for row-level changes
 * via SELECT * FROM DOLT_DIFF('main', branchName, tableName).
 */
static DIFF_TABLES = [
	"stories",
	"contracts",
	"metrics",
	"dispatch_log",
	"build_results",
	"review_verdicts"
];
/**
 * Report row-level changes introduced by a story. For an active branch,
 * snapshots pending work first (best-effort add/commit) then diffs main vs
 * the branch; for an already-merged story, falls back to locating its merge
 * commit in the log.
 */
async diffStory(storyKey) {
	assertValidStoryKey(storyKey);
	const branchName = this._storyBranches.get(storyKey);
	if (branchName === void 0) return this._diffMergedStory(storyKey);
	try {
		await this._client.query(`CALL DOLT_ADD('-A')`, [], branchName);
		await this._client.query(`CALL DOLT_COMMIT('-m', 'Story ${storyKey}: pre-diff snapshot', '--allow-empty')`, [], branchName);
	} catch {}
	return this._diffRange("main", branchName, storyKey);
}
/**
 * Diff a merged story by finding its merge commit in the Dolt log.
 * Queries the `dolt_log` system table for commits referencing the story,
 * then diffs `<hash>~1` vs `<hash>` for row-level changes.
 * Returns an empty diff when no matching commit exists or the lookup fails.
 */
async _diffMergedStory(storyKey) {
	try {
		const rows = await this._client.query(`SELECT commit_hash FROM dolt_log WHERE message LIKE ? LIMIT 1`, [`%${storyKey}%`]);
		if (rows.length === 0) return {
			storyKey,
			tables: []
		};
		const hash = String(rows[0].commit_hash);
		if (!hash) return {
			storyKey,
			tables: []
		};
		return this._diffRange(`${hash}~1`, hash, storyKey);
	} catch {
		return {
			storyKey,
			tables: []
		};
	}
}
/**
 * Compute row-level diffs between two Dolt revisions (branches or commit
 * hashes) across all tracked tables. Tables that fail to diff (e.g. not yet
 * created) are skipped silently.
 */
async _diffRange(fromRef, toRef, storyKey) {
	const tableDiffs = [];
	for (const table of DoltStateStore.DIFF_TABLES) try {
		const rows = await this._client.query(`SELECT * FROM DOLT_DIFF('${fromRef}', '${toRef}', '${table}')`, [], "main");
		if (rows.length === 0) continue;
		const added = [];
		const modified = [];
		const deleted = [];
		for (const row of rows) {
			const diffType = row["diff_type"];
			const rowKey = this._extractRowKey(row);
			const before = this._extractPrefixedFields(row, "before_");
			const after = this._extractPrefixedFields(row, "after_");
			const diffRow = {
				rowKey,
				// Only attach before/after when DOLT_DIFF produced matching columns.
				...before !== void 0 && { before },
				...after !== void 0 && { after }
			};
			if (diffType === "added") added.push(diffRow);
			else if (diffType === "modified") modified.push(diffRow);
			else if (diffType === "removed") deleted.push(diffRow);
		}
		if (added.length > 0 || modified.length > 0 || deleted.length > 0) tableDiffs.push({
			table,
			added,
			modified,
			deleted
		});
	} catch {}
	return {
		storyKey,
		tables: tableDiffs
	};
}
/**
 * Extract a human-readable row key from a DOLT_DIFF result row.
 * Tries after_ fields first (for added/modified rows), then before_ fields
 * (for removed rows). Skips commit_hash pseudo-columns.
 * Note: returns the first non-null prefixed value found, which relies on the
 * key column appearing first in the row's property order.
 */
_extractRowKey(row) {
	for (const prefix of ["after_", "before_"]) for (const [key, val] of Object.entries(row)) if (key.startsWith(prefix) && !key.endsWith("_commit_hash") && val !== null && val !== void 0) return String(val);
	return "unknown";
}
|
|
2736
|
+
/**
|
|
2737
|
+
* Extract all fields with a given prefix from a DOLT_DIFF result row,
|
|
2738
|
+
* stripping the prefix from the key names. Returns undefined if no matching
|
|
2739
|
+
* fields are found.
|
|
2740
|
+
*/
|
|
2741
|
+
_extractPrefixedFields(row, prefix) {
|
|
2742
|
+
const result = {};
|
|
2743
|
+
for (const [key, val] of Object.entries(row)) if (key.startsWith(prefix)) result[key.slice(prefix.length)] = val;
|
|
2744
|
+
return Object.keys(result).length > 0 ? result : void 0;
|
|
2745
|
+
}
|
|
2746
|
+
/** In-memory KV store for per-run arbitrary metrics. Not persisted to Dolt. */
|
|
2747
|
+
_kvMetrics = new Map();
|
|
2748
|
+
async setMetric(runId, key, value) {
|
|
2749
|
+
let runMap = this._kvMetrics.get(runId);
|
|
2750
|
+
if (runMap === void 0) {
|
|
2751
|
+
runMap = new Map();
|
|
2752
|
+
this._kvMetrics.set(runId, runMap);
|
|
2753
|
+
}
|
|
2754
|
+
runMap.set(key, value);
|
|
2755
|
+
}
|
|
2756
|
+
async getMetric(runId, key) {
|
|
2757
|
+
return this._kvMetrics.get(runId)?.get(key);
|
|
2758
|
+
}
|
|
2759
|
+
async getHistory(limit) {
|
|
2760
|
+
const effectiveLimit = limit ?? 20;
|
|
2761
|
+
try {
|
|
2762
|
+
const rows = await this._client.query(`SELECT commit_hash, date, message, committer FROM dolt_log LIMIT ?`, [effectiveLimit]);
|
|
2763
|
+
const entries = [];
|
|
2764
|
+
for (const row of rows) {
|
|
2765
|
+
const hash = String(row.commit_hash ?? "");
|
|
2766
|
+
const dateVal = row.date;
|
|
2767
|
+
const timestamp = dateVal instanceof Date ? dateVal.toISOString() : String(dateVal ?? "");
|
|
2768
|
+
const message = String(row.message ?? "");
|
|
2769
|
+
const author = row.committer ? String(row.committer) : void 0;
|
|
2770
|
+
const storyKeyMatch = /story\/([0-9]+-[0-9]+)/i.exec(message);
|
|
2771
|
+
entries.push({
|
|
2772
|
+
hash,
|
|
2773
|
+
timestamp,
|
|
2774
|
+
storyKey: storyKeyMatch ? storyKeyMatch[1] : null,
|
|
2775
|
+
message,
|
|
2776
|
+
author
|
|
2777
|
+
});
|
|
2778
|
+
}
|
|
2779
|
+
return entries;
|
|
2780
|
+
} catch (err) {
|
|
2781
|
+
const detail = err instanceof Error ? err.message : String(err);
|
|
2782
|
+
throw new DoltQueryError("getHistory", detail);
|
|
2783
|
+
}
|
|
2784
|
+
}
|
|
2785
|
+
};
|
|
2786
|
+
|
|
2787
|
+
//#endregion
|
|
2788
|
+
//#region src/modules/state/index.ts
|
|
2789
|
+
// Logger for the state-store factory ("state:factory" namespace).
const logger$1 = createLogger("state:factory");
|
|
2790
|
+
/**
 * Synchronously check whether Dolt is available and a Dolt repo exists at the
 * canonical state path under `basePath`.
 *
 * @param basePath - Project root to check (e.g. `process.cwd()`).
 * @returns `{ available: true, reason: '...' }` when both probes pass,
 * `{ available: false, reason: '...' }` otherwise.
 */
function detectDoltAvailableSync(basePath) {
	// Probe 1: is the dolt binary runnable?
	const probe = spawnSync("dolt", ["version"], { stdio: "ignore" });
	if (probe.error != null || probe.status !== 0) {
		return {
			available: false,
			reason: "dolt binary not found on PATH"
		};
	}
	// Probe 2: has a repo been initialised at the canonical state path?
	const stateDoltDir = join$1(basePath, ".substrate", "state", ".dolt");
	if (!existsSync$1(stateDoltDir)) {
		return {
			available: false,
			reason: `Dolt repo not initialised at ${stateDoltDir}`
		};
	}
	return {
		available: true,
		reason: "dolt binary found and repo initialised"
	};
}
|
|
2816
|
+
/**
 * Create a StateStore backed by the specified backend.
 *
 * @param config - Optional configuration. Defaults to `{ backend: 'auto' }`.
 * @returns A StateStore instance. Call `initialize()` before use.
 */
function createStateStore(config = {}) {
	const backend = config.backend ?? "auto";
	const makeDoltStore = (repoPath) => {
		const client = new DoltClient({ repoPath });
		return new DoltStateStore({ repoPath, client });
	};
	if (backend === "dolt") return makeDoltStore(config.basePath ?? process.cwd());
	if (backend === "auto") {
		const repoPath = config.basePath ?? process.cwd();
		const detection = detectDoltAvailableSync(repoPath);
		if (detection.available) {
			logger$1.debug(`Dolt detected, using DoltStateStore (state path: ${join$1(repoPath, ".substrate", "state")})`);
			return makeDoltStore(repoPath);
		}
		logger$1.debug(`Dolt not found, using FileStateStore (reason: ${detection.reason})`);
		return new FileStateStore({ basePath: config.basePath });
	}
	return new FileStateStore({ basePath: config.basePath });
}
|
|

//#endregion
//#region src/cli/commands/health.ts
// Module-scoped logger for the `substrate health` CLI command.
const logger = createLogger("health-cmd");
/** Default stall threshold in seconds (10 minutes) — also used by supervisor default */
const DEFAULT_STALL_THRESHOLD_SECONDS = 600;
/**
 * Determine whether a single `ps` output line represents the substrate
 * pipeline orchestrator process. Recognised invocations:
 * - `substrate run` (globally installed)
 * - `substrate-ai run`
 * - `node dist/cli/index.js run` (npm run substrate:dev)
 * - `npx substrate run`
 * - any node process whose command contains ` run` plus `substrate`,
 *   `--events`, or `--stories`
 *
 * Lines containing `grep` are always rejected (self-matching grep artefacts).
 *
 * @param line - One line of `ps` output (pid/ppid/stat/command).
 * @param projectRoot - When provided, the line must additionally contain this
 *   path so multi-project environments match the correct orchestrator.
 * @returns true when the line looks like the orchestrator (and, if given,
 *   mentions `projectRoot`).
 */
function isOrchestratorProcessLine(line, projectRoot) {
  if (line.includes("grep")) return false;
  const nodeRunWithPipelineArgs =
    line.includes("node") &&
    /\srun(\s|$)/.test(line) &&
    (line.includes("substrate") || line.includes("--events") || line.includes("--stories"));
  const knownInvocation =
    line.includes("substrate run") ||
    line.includes("substrate-ai run") ||
    line.includes("index.js run") ||
    nodeRunWithPipelineArgs;
  if (!knownInvocation) return false;
  return projectRoot === void 0 || line.includes(projectRoot);
}
/**
 * Inspect the live process table for the orchestrator and its direct children.
 *
 * Discovery order:
 * 1. If `substrateDirPath` is given, read `<substrateDirPath>/orchestrator.pid`
 *    and accept that PID only when `ps` shows it alive and not a zombie.
 * 2. Otherwise (or if the pid file is missing/stale), scan `ps` output with
 *    isOrchestratorProcessLine, optionally filtered by `projectRoot`.
 *
 * Once an orchestrator PID is found, its direct children (ppid match) are
 * collected, and children in zombie state (`Z` in stat) are listed separately.
 *
 * All failures (ps not available, unreadable pid file, parse errors) are
 * swallowed deliberately — this is a best-effort probe that degrades to
 * "nothing found" rather than crashing the health command.
 *
 * @param opts - Optional bag: `projectRoot`, `substrateDirPath`, plus
 *   `execFileSync`/`readFileSync` overrides for dependency injection in tests.
 * @returns `{ orchestrator_pid, child_pids, zombies }` — pid is null when no
 *   orchestrator was found; the arrays are empty in that case.
 */
function inspectProcessTree(opts) {
  const { projectRoot, substrateDirPath, execFileSync: execFileSyncOverride, readFileSync: readFileSyncOverride } = opts ?? {};
  const result = {
    orchestrator_pid: null,
    child_pids: [],
    zombies: []
  };
  try {
    let psOutput;
    // Use the injected exec when provided (tests); otherwise shell out to ps.
    if (execFileSyncOverride !== void 0) psOutput = execFileSyncOverride("ps", ["-eo", "pid,ppid,stat,command"], {
      encoding: "utf-8",
      timeout: 5e3
    });
    else {
      const { execFileSync } = __require("node:child_process");
      psOutput = execFileSync("ps", ["-eo", "pid,ppid,stat,command"], {
        encoding: "utf-8",
        timeout: 5e3
      });
    }
    const lines = psOutput.split("\n");
    // Preferred source: the orchestrator.pid file, validated against ps so a
    // stale file left by a crashed run is ignored.
    if (substrateDirPath !== void 0) try {
      const readFileSyncFn = readFileSyncOverride ?? ((path$1, encoding) => readFileSync$1(path$1, encoding));
      const pidContent = readFileSyncFn(join(substrateDirPath, "orchestrator.pid"), "utf-8");
      const pid = parseInt(pidContent.trim(), 10);
      if (!isNaN(pid) && pid > 0) {
        // Alive means: pid appears in ps and its stat column has no 'Z'.
        const isAlive = lines.some((line) => {
          const parts = line.trim().split(/\s+/);
          if (parts.length < 3) return false;
          return parseInt(parts[0], 10) === pid && !parts[2].includes("Z");
        });
        if (isAlive) result.orchestrator_pid = pid;
      }
    } catch {}
    // Fallback: heuristic command-line scan for an orchestrator invocation.
    if (result.orchestrator_pid === null) {
      for (const line of lines) if (isOrchestratorProcessLine(line, projectRoot)) {
        const match = line.trim().match(/^(\d+)/);
        if (match) {
          result.orchestrator_pid = parseInt(match[1], 10);
          break;
        }
      }
    }
    // Collect direct children of the orchestrator and flag zombies among them.
    if (result.orchestrator_pid !== null) for (const line of lines) {
      const parts = line.trim().split(/\s+/);
      if (parts.length >= 3) {
        const pid = parseInt(parts[0], 10);
        const ppid = parseInt(parts[1], 10);
        const stat$1 = parts[2];
        if (ppid === result.orchestrator_pid && pid !== result.orchestrator_pid) {
          result.child_pids.push(pid);
          if (stat$1.includes("Z")) result.zombies.push(pid);
        }
      }
    }
  } catch {}
  return result;
}
|
2937
|
+
/**
|
|
2938
|
+
* Collect all descendant PIDs of the given root PIDs by walking the process
|
|
2939
|
+
* tree recursively. This ensures that grandchildren of the orchestrator
|
|
2940
|
+
* (e.g. node subprocesses spawned by `claude -p`) are also killed during
|
|
2941
|
+
* stall recovery, leaving no orphan processes.
|
|
2942
|
+
*
|
|
2943
|
+
* Returns only the descendants — the root PIDs themselves are NOT included.
|
|
2944
|
+
*/
|
|
2945
|
+
function getAllDescendantPids(rootPids, execFileSyncOverride) {
|
|
2946
|
+
if (rootPids.length === 0) return [];
|
|
2947
|
+
try {
|
|
2948
|
+
let psOutput;
|
|
2949
|
+
if (execFileSyncOverride !== void 0) psOutput = execFileSyncOverride("ps", ["-eo", "pid,ppid"], {
|
|
2950
|
+
encoding: "utf-8",
|
|
2951
|
+
timeout: 5e3
|
|
2952
|
+
});
|
|
2953
|
+
else {
|
|
2954
|
+
const { execFileSync } = __require("node:child_process");
|
|
2955
|
+
psOutput = execFileSync("ps", ["-eo", "pid,ppid"], {
|
|
2956
|
+
encoding: "utf-8",
|
|
2957
|
+
timeout: 5e3
|
|
2958
|
+
});
|
|
2959
|
+
}
|
|
2960
|
+
const childrenOf = new Map();
|
|
2961
|
+
for (const line of psOutput.split("\n")) {
|
|
2962
|
+
const parts = line.trim().split(/\s+/);
|
|
2963
|
+
if (parts.length >= 2) {
|
|
2964
|
+
const pid = parseInt(parts[0], 10);
|
|
2965
|
+
const ppid = parseInt(parts[1], 10);
|
|
2966
|
+
if (!isNaN(pid) && !isNaN(ppid) && pid > 0) {
|
|
2967
|
+
if (!childrenOf.has(ppid)) childrenOf.set(ppid, []);
|
|
2968
|
+
childrenOf.get(ppid).push(pid);
|
|
2969
|
+
}
|
|
2970
|
+
}
|
|
2971
|
+
}
|
|
2972
|
+
const descendants = [];
|
|
2973
|
+
const seen = new Set(rootPids);
|
|
2974
|
+
const queue = [...rootPids];
|
|
2975
|
+
while (queue.length > 0) {
|
|
2976
|
+
const current = queue.shift();
|
|
2977
|
+
const children = childrenOf.get(current) ?? [];
|
|
2978
|
+
for (const child of children) if (!seen.has(child)) {
|
|
2979
|
+
seen.add(child);
|
|
2980
|
+
descendants.push(child);
|
|
2981
|
+
queue.push(child);
|
|
2982
|
+
}
|
|
2983
|
+
}
|
|
2984
|
+
return descendants;
|
|
2985
|
+
} catch {
|
|
2986
|
+
return [];
|
|
2987
|
+
}
|
|
2988
|
+
}
|
|
2989
|
+
/**
|
|
2990
|
+
* Fetch pipeline health data as a structured object without any stdout side-effects.
|
|
2991
|
+
* Used by runSupervisorAction to poll health without formatting overhead.
|
|
2992
|
+
*
|
|
2993
|
+
* Returns a NO_PIPELINE_RUNNING health object for all graceful "no data" cases
|
|
2994
|
+
* (missing DB, missing run, terminal run status). Throws only on unexpected errors.
|
|
2995
|
+
*/
|
|
2996
|
+
async function getAutoHealthData(options) {
|
|
2997
|
+
const { runId, projectRoot, stateStore, stateStoreConfig } = options;
|
|
2998
|
+
const dbRoot = await resolveMainRepoRoot(projectRoot);
|
|
2999
|
+
const dbPath = join(dbRoot, ".substrate", "substrate.db");
|
|
3000
|
+
let doltStateInfo;
|
|
3001
|
+
if (stateStoreConfig?.backend === "dolt" && stateStore) {
|
|
3002
|
+
const repoPath = stateStoreConfig.basePath ?? projectRoot;
|
|
3003
|
+
const doltDirPath = join(repoPath, ".dolt");
|
|
3004
|
+
const initialized = existsSync$1(doltDirPath);
|
|
3005
|
+
let responsive = false;
|
|
3006
|
+
let version;
|
|
3007
|
+
let branches;
|
|
3008
|
+
let currentBranch;
|
|
3009
|
+
try {
|
|
3010
|
+
await stateStore.getHistory(1);
|
|
3011
|
+
responsive = true;
|
|
3012
|
+
try {
|
|
3013
|
+
const { execFile: ef } = await import("node:child_process");
|
|
3014
|
+
const { promisify: p } = await import("node:util");
|
|
3015
|
+
const execFileAsync = p(ef);
|
|
3016
|
+
const { stdout } = await execFileAsync("dolt", ["version"]);
|
|
3017
|
+
const match = stdout.match(/dolt version (\S+)/);
|
|
3018
|
+
if (match) version = match[1];
|
|
3019
|
+
} catch {}
|
|
3020
|
+
try {
|
|
3021
|
+
const { execFile: ef } = await import("node:child_process");
|
|
3022
|
+
const { promisify: p } = await import("node:util");
|
|
3023
|
+
const execFileAsync = p(ef);
|
|
3024
|
+
const { stdout } = await execFileAsync("dolt", ["branch", "--list"], { cwd: repoPath });
|
|
3025
|
+
const lines = stdout.split("\n").filter((l) => l.trim().length > 0);
|
|
3026
|
+
branches = lines.map((l) => {
|
|
3027
|
+
const trimmed = l.trim();
|
|
3028
|
+
if (trimmed.startsWith("* ")) {
|
|
3029
|
+
currentBranch = trimmed.slice(2).trim();
|
|
3030
|
+
return currentBranch;
|
|
3031
|
+
}
|
|
3032
|
+
return trimmed;
|
|
3033
|
+
});
|
|
3034
|
+
} catch {}
|
|
3035
|
+
} catch {
|
|
3036
|
+
responsive = false;
|
|
3037
|
+
}
|
|
3038
|
+
doltStateInfo = {
|
|
3039
|
+
initialized,
|
|
3040
|
+
responsive,
|
|
3041
|
+
...version !== void 0 ? { version } : {},
|
|
3042
|
+
...branches !== void 0 ? { branches } : {},
|
|
3043
|
+
...currentBranch !== void 0 ? { current_branch: currentBranch } : {}
|
|
3044
|
+
};
|
|
3045
|
+
}
|
|
3046
|
+
const NO_PIPELINE = {
|
|
3047
|
+
verdict: "NO_PIPELINE_RUNNING",
|
|
3048
|
+
run_id: null,
|
|
3049
|
+
status: null,
|
|
3050
|
+
current_phase: null,
|
|
3051
|
+
staleness_seconds: 0,
|
|
3052
|
+
last_activity: "",
|
|
3053
|
+
process: {
|
|
3054
|
+
orchestrator_pid: null,
|
|
3055
|
+
child_pids: [],
|
|
3056
|
+
zombies: []
|
|
3057
|
+
},
|
|
3058
|
+
stories: {
|
|
3059
|
+
active: 0,
|
|
3060
|
+
completed: 0,
|
|
3061
|
+
escalated: 0,
|
|
3062
|
+
details: {}
|
|
3063
|
+
},
|
|
3064
|
+
...doltStateInfo !== void 0 ? { dolt_state: doltStateInfo } : {}
|
|
3065
|
+
};
|
|
3066
|
+
const doltDir = join(dbRoot, ".substrate", "state", ".dolt");
|
|
3067
|
+
if (!existsSync$1(dbPath) && !existsSync$1(doltDir)) return NO_PIPELINE;
|
|
3068
|
+
const adapter = createDatabaseAdapter({
|
|
3069
|
+
backend: "auto",
|
|
3070
|
+
basePath: dbRoot
|
|
3071
|
+
});
|
|
3072
|
+
try {
|
|
3073
|
+
await initSchema(adapter);
|
|
3074
|
+
let run;
|
|
3075
|
+
if (runId !== void 0) run = await getPipelineRunById(adapter, runId);
|
|
3076
|
+
else run = await getLatestRun(adapter);
|
|
3077
|
+
if (run === void 0) {
|
|
3078
|
+
const substrateDirPath$1 = join(dbRoot, ".substrate");
|
|
3079
|
+
const fallbackProcessInfo = inspectProcessTree({
|
|
3080
|
+
projectRoot: dbRoot,
|
|
3081
|
+
substrateDirPath: substrateDirPath$1
|
|
3082
|
+
});
|
|
3083
|
+
if (fallbackProcessInfo.orchestrator_pid !== null) return {
|
|
3084
|
+
verdict: "HEALTHY",
|
|
3085
|
+
run_id: null,
|
|
3086
|
+
status: "running",
|
|
3087
|
+
current_phase: "implementation",
|
|
3088
|
+
staleness_seconds: 0,
|
|
3089
|
+
last_activity: new Date().toISOString(),
|
|
3090
|
+
process: fallbackProcessInfo,
|
|
3091
|
+
stories: {
|
|
3092
|
+
active: 0,
|
|
3093
|
+
completed: 0,
|
|
3094
|
+
escalated: 0,
|
|
3095
|
+
details: {}
|
|
3096
|
+
},
|
|
3097
|
+
...doltStateInfo !== void 0 ? { dolt_state: doltStateInfo } : {}
|
|
3098
|
+
};
|
|
3099
|
+
return NO_PIPELINE;
|
|
3100
|
+
}
|
|
3101
|
+
const updatedAt = parseDbTimestampAsUtc(run.updated_at ?? "");
|
|
3102
|
+
const stalenessSeconds = Math.round((Date.now() - updatedAt.getTime()) / 1e3);
|
|
3103
|
+
let storyDetails = {};
|
|
3104
|
+
let active = 0;
|
|
3105
|
+
let completed = 0;
|
|
3106
|
+
let escalated = 0;
|
|
3107
|
+
let pending = 0;
|
|
3108
|
+
try {
|
|
3109
|
+
if (run.token_usage_json) {
|
|
3110
|
+
const state = JSON.parse(run.token_usage_json);
|
|
3111
|
+
if (state.stories) for (const [key, s] of Object.entries(state.stories)) {
|
|
3112
|
+
storyDetails[key] = {
|
|
3113
|
+
phase: s.phase,
|
|
3114
|
+
review_cycles: s.reviewCycles
|
|
3115
|
+
};
|
|
3116
|
+
if (s.phase === "COMPLETE") completed++;
|
|
3117
|
+
else if (s.phase === "ESCALATED") escalated++;
|
|
3118
|
+
else if (s.phase === "PENDING") pending++;
|
|
3119
|
+
else active++;
|
|
3120
|
+
}
|
|
3121
|
+
}
|
|
3122
|
+
} catch {}
|
|
3123
|
+
const substrateDirPath = join(dbRoot, ".substrate");
|
|
3124
|
+
const processInfo = inspectProcessTree({
|
|
3125
|
+
projectRoot,
|
|
3126
|
+
substrateDirPath
|
|
3127
|
+
});
|
|
3128
|
+
let verdict = "NO_PIPELINE_RUNNING";
|
|
3129
|
+
if (run.status === "running") if (processInfo.zombies.length > 0) verdict = "STALLED";
|
|
3130
|
+
else if (processInfo.orchestrator_pid !== null && processInfo.child_pids.length > 0 && stalenessSeconds > DEFAULT_STALL_THRESHOLD_SECONDS) verdict = "HEALTHY";
|
|
3131
|
+
else if (stalenessSeconds > DEFAULT_STALL_THRESHOLD_SECONDS) verdict = "STALLED";
|
|
3132
|
+
else if (processInfo.orchestrator_pid !== null && processInfo.child_pids.length === 0 && active > 0) verdict = "STALLED";
|
|
3133
|
+
else if (processInfo.orchestrator_pid === null && active > 0) verdict = "STALLED";
|
|
3134
|
+
else verdict = "HEALTHY";
|
|
3135
|
+
else if (run.status === "completed" || run.status === "failed" || run.status === "stopped") verdict = "NO_PIPELINE_RUNNING";
|
|
3136
|
+
const healthOutput = {
|
|
3137
|
+
verdict,
|
|
3138
|
+
run_id: run.id,
|
|
3139
|
+
status: run.status,
|
|
3140
|
+
current_phase: run.current_phase ?? null,
|
|
3141
|
+
staleness_seconds: stalenessSeconds,
|
|
3142
|
+
last_activity: run.updated_at ?? "",
|
|
3143
|
+
process: processInfo,
|
|
3144
|
+
stories: {
|
|
3145
|
+
active,
|
|
3146
|
+
completed,
|
|
3147
|
+
escalated,
|
|
3148
|
+
pending,
|
|
3149
|
+
details: storyDetails
|
|
3150
|
+
},
|
|
3151
|
+
...doltStateInfo !== void 0 ? { dolt_state: doltStateInfo } : {}
|
|
3152
|
+
};
|
|
3153
|
+
return healthOutput;
|
|
3154
|
+
} finally {
|
|
3155
|
+
try {
|
|
3156
|
+
await adapter.close();
|
|
3157
|
+
} catch {}
|
|
3158
|
+
}
|
|
3159
|
+
}
|
|
3160
|
+
async function runHealthAction(options) {
|
|
3161
|
+
const { outputFormat } = options;
|
|
3162
|
+
try {
|
|
3163
|
+
const health = await getAutoHealthData(options);
|
|
3164
|
+
if (outputFormat === "json") process.stdout.write(formatOutput(health, "json", true) + "\n");
|
|
3165
|
+
else {
|
|
3166
|
+
const verdictLabel = health.verdict === "HEALTHY" ? "HEALTHY" : health.verdict === "STALLED" ? "STALLED" : "NO PIPELINE RUNNING";
|
|
3167
|
+
process.stdout.write(`\nPipeline Health: ${verdictLabel}\n`);
|
|
3168
|
+
if (health.run_id !== null) {
|
|
3169
|
+
process.stdout.write(` Run: ${health.run_id}\n`);
|
|
3170
|
+
process.stdout.write(` Status: ${health.status}\n`);
|
|
3171
|
+
process.stdout.write(` Phase: ${health.current_phase ?? "N/A"}\n`);
|
|
3172
|
+
process.stdout.write(` Last Active: ${health.last_activity} (${health.staleness_seconds}s ago)\n`);
|
|
3173
|
+
const processInfo = health.process;
|
|
3174
|
+
if (processInfo.orchestrator_pid !== null) {
|
|
3175
|
+
process.stdout.write(` Orchestrator: PID ${processInfo.orchestrator_pid}\n`);
|
|
3176
|
+
process.stdout.write(` Children: ${processInfo.child_pids.length} active`);
|
|
3177
|
+
if (processInfo.zombies.length > 0) process.stdout.write(` (${processInfo.zombies.length} ZOMBIE)`);
|
|
3178
|
+
process.stdout.write("\n");
|
|
3179
|
+
} else process.stdout.write(" Orchestrator: not running\n");
|
|
3180
|
+
const storyDetails = health.stories.details;
|
|
3181
|
+
if (Object.keys(storyDetails).length > 0) {
|
|
3182
|
+
process.stdout.write("\n Stories:\n");
|
|
3183
|
+
for (const [key, s] of Object.entries(storyDetails)) process.stdout.write(` ${key}: ${s.phase} (${s.review_cycles} review cycles)\n`);
|
|
3184
|
+
process.stdout.write(`\n Summary: ${health.stories.active} active, ${health.stories.completed} completed, ${health.stories.escalated} escalated\n`);
|
|
3185
|
+
}
|
|
3186
|
+
}
|
|
3187
|
+
if (health.dolt_state !== void 0) {
|
|
3188
|
+
const ds = health.dolt_state;
|
|
3189
|
+
const initStr = ds.initialized ? "yes" : "no";
|
|
3190
|
+
const respStr = ds.responsive ? "yes" : "no";
|
|
3191
|
+
const verStr = ds.version !== void 0 ? ` (v${ds.version})` : "";
|
|
3192
|
+
process.stdout.write(`\n Dolt State: initialized=${initStr} responsive=${respStr}${verStr}\n`);
|
|
3193
|
+
}
|
|
3194
|
+
}
|
|
3195
|
+
return 0;
|
|
3196
|
+
} catch (err) {
|
|
3197
|
+
const msg = err instanceof Error ? err.message : String(err);
|
|
3198
|
+
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
|
|
3199
|
+
else process.stderr.write(`Error: ${msg}\n`);
|
|
3200
|
+
logger.error({ err }, "health action failed");
|
|
3201
|
+
return 1;
|
|
3202
|
+
}
|
|
3203
|
+
}
|
|
3204
|
+
function registerHealthCommand(program, _version = "0.0.0", projectRoot = process.cwd()) {
|
|
3205
|
+
program.command("health").description("Check pipeline health: process status, stall detection, and verdict").option("--run-id <id>", "Pipeline run ID to query (defaults to latest)").option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").action(async (opts) => {
|
|
3206
|
+
const outputFormat = opts.outputFormat === "json" ? "json" : "human";
|
|
3207
|
+
const root = opts.projectRoot;
|
|
3208
|
+
let stateStore;
|
|
3209
|
+
let stateStoreConfig;
|
|
3210
|
+
const doltStatePath = join(root, ".substrate", "state", ".dolt");
|
|
3211
|
+
if (existsSync$1(doltStatePath)) {
|
|
3212
|
+
const basePath = join(root, ".substrate", "state");
|
|
3213
|
+
stateStoreConfig = {
|
|
3214
|
+
backend: "dolt",
|
|
3215
|
+
basePath
|
|
3216
|
+
};
|
|
3217
|
+
try {
|
|
3218
|
+
stateStore = createStateStore({
|
|
3219
|
+
backend: "dolt",
|
|
3220
|
+
basePath
|
|
3221
|
+
});
|
|
3222
|
+
await stateStore.initialize();
|
|
3223
|
+
} catch {
|
|
3224
|
+
stateStore = void 0;
|
|
3225
|
+
stateStoreConfig = void 0;
|
|
3226
|
+
}
|
|
3227
|
+
}
|
|
3228
|
+
try {
|
|
3229
|
+
const exitCode = await runHealthAction({
|
|
3230
|
+
outputFormat,
|
|
3231
|
+
runId: opts.runId,
|
|
3232
|
+
projectRoot: root,
|
|
3233
|
+
stateStore,
|
|
3234
|
+
stateStoreConfig
|
|
3235
|
+
});
|
|
3236
|
+
process.exitCode = exitCode;
|
|
3237
|
+
} finally {
|
|
3238
|
+
try {
|
|
3239
|
+
await stateStore?.close();
|
|
3240
|
+
} catch {}
|
|
3241
|
+
}
|
|
3242
|
+
});
|
|
3243
|
+
}
|
|
3244
|
+
|
|
3245
|
+
//#endregion
|
|
3246
|
+
export { BMAD_BASELINE_TOKENS_FULL, DEFAULT_STALL_THRESHOLD_SECONDS, DoltClient, DoltMergeConflict, DoltNotInstalled, FileStateStore, STOP_AFTER_VALID_PHASES, STORY_KEY_PATTERN$1 as STORY_KEY_PATTERN, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, WorkGraphRepository, __commonJS, __require, __toESM, buildPipelineStatusOutput, checkDoltInstalled, createDatabaseAdapter, createDoltClient, createStateStore, detectCycles, findPackageRoot, formatOutput, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initSchema, initializeDolt, inspectProcessTree, isOrchestratorProcessLine, isSyncAdapter, parseDbTimestampAsUtc, registerHealthCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, runHealthAction, validateStoryKey };
|
|
3247
|
+
//# sourceMappingURL=health-Dnx-FGva.js.map
|