substrate-ai 0.6.0 → 0.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/index.js +532 -16
- package/dist/cli/templates/__tests__/build-dev-notes.test.ts +376 -0
- package/dist/cli/templates/build-dev-notes.ts +256 -0
- package/dist/index.d.ts +25 -1
- package/dist/{run-IU38JGTV.js → run-CcUT8-DF.js} +1 -1
- package/dist/{run-B1WEe6SY.js → run-DCmne2q6.js} +1027 -147
- package/package.json +1 -1
|
@@ -325,7 +325,8 @@ var InMemoryDatabaseAdapter = class {
|
|
|
325
325
|
if (!m$1) return [];
|
|
326
326
|
return [this._evalSelectExprs(m$1[1].trim())];
|
|
327
327
|
}
|
|
328
|
-
const
|
|
328
|
+
const stripped = sql.replace(/\s+ORDER\s+BY\s+.+?(?=\s+LIMIT\s|\s*$)/is, "").replace(/\s+LIMIT\s+\d+\s*$/is, "");
|
|
329
|
+
const m = /SELECT\s+(.+?)\s+FROM\s+(\w+)(?:\s+WHERE\s+(.+))?$/is.exec(stripped);
|
|
329
330
|
if (!m) return [];
|
|
330
331
|
const colsStr = m[1].trim();
|
|
331
332
|
const tableName = m[2];
|
|
@@ -334,6 +335,7 @@ var InMemoryDatabaseAdapter = class {
|
|
|
334
335
|
let rows = table.map((r) => ({ ...r }));
|
|
335
336
|
if (whereStr) rows = rows.filter((row) => this._matchWhere(whereStr.trim(), row));
|
|
336
337
|
if (colsStr === "*") return rows;
|
|
338
|
+
if (/\b(?:SUM|COALESCE|COUNT|AVG|MIN|MAX)\s*\(/i.test(colsStr)) return [this._evalAggregate(colsStr, rows)];
|
|
337
339
|
return rows.map((row) => this._projectCols(colsStr, row));
|
|
338
340
|
}
|
|
339
341
|
_update(sql) {
|
|
@@ -393,12 +395,22 @@ var InMemoryDatabaseAdapter = class {
|
|
|
393
395
|
if (row[notNullM[1]] === null || row[notNullM[1]] === void 0) return false;
|
|
394
396
|
continue;
|
|
395
397
|
}
|
|
398
|
+
const likeM = /^(\w+)\s+LIKE\s+'(.*)'$/is.exec(trimmed);
|
|
399
|
+
if (likeM) {
|
|
400
|
+
const colVal = row[likeM[1]];
|
|
401
|
+
if (colVal === null || colVal === void 0) return false;
|
|
402
|
+
const pattern = likeM[2].replace(/''/g, "'");
|
|
403
|
+
const escaped = pattern.replace(/[.*+?^${}()|[\]\\]/g, (ch) => ch === "%" || ch === "_" ? ch : "\\" + ch);
|
|
404
|
+
const regex = new RegExp("^" + escaped.replace(/%/g, ".*").replace(/_/g, ".") + "$", "s");
|
|
405
|
+
if (!regex.test(String(colVal))) return false;
|
|
406
|
+
continue;
|
|
407
|
+
}
|
|
396
408
|
}
|
|
397
409
|
return true;
|
|
398
410
|
}
|
|
399
411
|
_projectCols(colsStr, row) {
|
|
400
412
|
const result = {};
|
|
401
|
-
const cols =
|
|
413
|
+
const cols = this._splitTopLevelCommas(colsStr);
|
|
402
414
|
for (const col of cols) {
|
|
403
415
|
const aliasM = /^(.+?)\s+AS\s+(\w+)$/i.exec(col);
|
|
404
416
|
if (aliasM) result[aliasM[2]] = this._evalExprAgainstRow(aliasM[1].trim(), row);
|
|
@@ -408,7 +420,7 @@ var InMemoryDatabaseAdapter = class {
|
|
|
408
420
|
}
|
|
409
421
|
_evalSelectExprs(exprs) {
|
|
410
422
|
const result = {};
|
|
411
|
-
const parts =
|
|
423
|
+
const parts = this._splitTopLevelCommas(exprs);
|
|
412
424
|
for (const part of parts) {
|
|
413
425
|
const aliasM = /^(.+?)\s+AS\s+(\w+)$/i.exec(part);
|
|
414
426
|
if (aliasM) result[aliasM[2]] = this._evalLiteral(aliasM[1].trim());
|
|
@@ -431,6 +443,87 @@ var InMemoryDatabaseAdapter = class {
|
|
|
431
443
|
return literal;
|
|
432
444
|
}
|
|
433
445
|
/**
|
|
446
|
+
* Split a string by commas that are NOT inside parentheses.
|
|
447
|
+
* E.g. "COALESCE(SUM(x), 0) as a, y" → ["COALESCE(SUM(x), 0) as a", "y"]
|
|
448
|
+
*/
|
|
449
|
+
_splitTopLevelCommas(str) {
|
|
450
|
+
const parts = [];
|
|
451
|
+
let current = "";
|
|
452
|
+
let depth = 0;
|
|
453
|
+
let inStr = false;
|
|
454
|
+
for (let i = 0; i < str.length; i++) {
|
|
455
|
+
const ch = str[i];
|
|
456
|
+
if (ch === "'" && !inStr) {
|
|
457
|
+
inStr = true;
|
|
458
|
+
current += ch;
|
|
459
|
+
} else if (ch === "'" && inStr) if (str[i + 1] === "'") {
|
|
460
|
+
current += "''";
|
|
461
|
+
i++;
|
|
462
|
+
} else {
|
|
463
|
+
inStr = false;
|
|
464
|
+
current += ch;
|
|
465
|
+
}
|
|
466
|
+
else if (!inStr && ch === "(") {
|
|
467
|
+
depth++;
|
|
468
|
+
current += ch;
|
|
469
|
+
} else if (!inStr && ch === ")") {
|
|
470
|
+
depth--;
|
|
471
|
+
current += ch;
|
|
472
|
+
} else if (!inStr && ch === "," && depth === 0) {
|
|
473
|
+
parts.push(current.trim());
|
|
474
|
+
current = "";
|
|
475
|
+
} else current += ch;
|
|
476
|
+
}
|
|
477
|
+
if (current.trim() !== "") parts.push(current.trim());
|
|
478
|
+
return parts;
|
|
479
|
+
}
|
|
480
|
+
/**
|
|
481
|
+
* Evaluate aggregate SELECT expressions (SUM, COALESCE, COUNT) across
|
|
482
|
+
* a set of filtered rows, returning a single result row.
|
|
483
|
+
*/
|
|
484
|
+
_evalAggregate(colsStr, rows) {
|
|
485
|
+
const result = {};
|
|
486
|
+
const cols = this._splitTopLevelCommas(colsStr);
|
|
487
|
+
for (const col of cols) {
|
|
488
|
+
const aliasM = /^(.+?)\s+AS\s+(\w+)$/i.exec(col);
|
|
489
|
+
const expr = aliasM ? aliasM[1].trim() : col.trim();
|
|
490
|
+
const alias = aliasM ? aliasM[2] : col.trim();
|
|
491
|
+
result[alias] = this._evalAggregateExpr(expr, rows);
|
|
492
|
+
}
|
|
493
|
+
return result;
|
|
494
|
+
}
|
|
495
|
+
/**
|
|
496
|
+
* Evaluate a single aggregate expression against a set of rows.
|
|
497
|
+
* Supports: SUM(col), COALESCE(expr, default), COUNT(*).
|
|
498
|
+
*/
|
|
499
|
+
_evalAggregateExpr(expr, rows) {
|
|
500
|
+
const trimmed = expr.trim();
|
|
501
|
+
const coalesceM = /^COALESCE\((.+)\)$/i.exec(trimmed);
|
|
502
|
+
if (coalesceM) {
|
|
503
|
+
const args = this._splitTopLevelCommas(coalesceM[1]);
|
|
504
|
+
for (const arg of args) {
|
|
505
|
+
const val = this._evalAggregateExpr(arg.trim(), rows);
|
|
506
|
+
if (val !== null && val !== void 0) return val;
|
|
507
|
+
}
|
|
508
|
+
return null;
|
|
509
|
+
}
|
|
510
|
+
const sumM = /^SUM\((\w+)\)$/i.exec(trimmed);
|
|
511
|
+
if (sumM) {
|
|
512
|
+
const col = sumM[1];
|
|
513
|
+
if (rows.length === 0) return null;
|
|
514
|
+
let total = 0;
|
|
515
|
+
for (const row of rows) total += Number(row[col] ?? 0);
|
|
516
|
+
return total;
|
|
517
|
+
}
|
|
518
|
+
if (/^COUNT\(\*\)$/i.test(trimmed)) return rows.length;
|
|
519
|
+
const countM = /^COUNT\((\w+)\)$/i.exec(trimmed);
|
|
520
|
+
if (countM) {
|
|
521
|
+
const col = countM[1];
|
|
522
|
+
return rows.filter((r) => r[col] !== null && r[col] !== void 0).length;
|
|
523
|
+
}
|
|
524
|
+
return this._evalLiteral(trimmed);
|
|
525
|
+
}
|
|
526
|
+
/**
|
|
434
527
|
* Parse a comma-separated list of SQL literal values.
|
|
435
528
|
* Handles: NULL, numbers, single-quoted strings.
|
|
436
529
|
* Simple split by comma (assumes no commas inside string values).
|
|
@@ -1829,8 +1922,8 @@ function resolveBmadMethodVersion(fromDir = __dirname) {
|
|
|
1829
1922
|
const BMAD_BASELINE_TOKENS_FULL = 56800;
|
|
1830
1923
|
/** BMAD baseline token total for create+dev+review comparison */
|
|
1831
1924
|
const BMAD_BASELINE_TOKENS = 23800;
|
|
1832
|
-
/** Story key pattern: <epic>-<story> e.g. "10-1" */
|
|
1833
|
-
const STORY_KEY_PATTERN$1 =
|
|
1925
|
+
/** Story key pattern: <epic>-<story> e.g. "10-1", "1-1a", "NEW-26" */
|
|
1926
|
+
const STORY_KEY_PATTERN$1 = /^[A-Za-z0-9]+-[A-Za-z0-9]+$/;
|
|
1834
1927
|
/**
|
|
1835
1928
|
* Top-level keys in .claude/settings.json that substrate owns.
|
|
1836
1929
|
* On init, these are set/updated unconditionally.
|
|
@@ -3441,6 +3534,28 @@ const PIPELINE_EVENT_METADATA = [
|
|
|
3441
3534
|
description: "Overall verification result."
|
|
3442
3535
|
}
|
|
3443
3536
|
]
|
|
3537
|
+
},
|
|
3538
|
+
{
|
|
3539
|
+
type: "pipeline:profile-stale",
|
|
3540
|
+
description: "Project profile may be outdated. Non-blocking warning — run `substrate init --force` to re-detect.",
|
|
3541
|
+
when: "After all stories complete, before pipeline:complete. Emitted when staleness indicators are found.",
|
|
3542
|
+
fields: [
|
|
3543
|
+
{
|
|
3544
|
+
name: "ts",
|
|
3545
|
+
type: "string",
|
|
3546
|
+
description: "Timestamp."
|
|
3547
|
+
},
|
|
3548
|
+
{
|
|
3549
|
+
name: "message",
|
|
3550
|
+
type: "string",
|
|
3551
|
+
description: "Human-readable staleness warning message."
|
|
3552
|
+
},
|
|
3553
|
+
{
|
|
3554
|
+
name: "indicators",
|
|
3555
|
+
type: "string[]",
|
|
3556
|
+
description: "List of staleness indicators (e.g., \"turbo.json exists but profile says type: single\")."
|
|
3557
|
+
}
|
|
3558
|
+
]
|
|
3444
3559
|
}
|
|
3445
3560
|
];
|
|
3446
3561
|
/**
|
|
@@ -6338,7 +6453,8 @@ var DispatcherShuttingDownError = class extends Error {
|
|
|
6338
6453
|
const YAML_ANCHOR_KEYS = [
|
|
6339
6454
|
"result:",
|
|
6340
6455
|
"verdict:",
|
|
6341
|
-
"story_file:"
|
|
6456
|
+
"story_file:",
|
|
6457
|
+
"expansion_priority:"
|
|
6342
6458
|
];
|
|
6343
6459
|
/**
|
|
6344
6460
|
* Extract the YAML result block from sub-agent output.
|
|
@@ -7059,17 +7175,35 @@ var DispatcherImpl = class {
|
|
|
7059
7175
|
/**
|
|
7060
7176
|
* Detect the package manager / build system used in a project.
|
|
7061
7177
|
*
|
|
7062
|
-
* Checks for
|
|
7063
|
-
*
|
|
7064
|
-
*
|
|
7065
|
-
*
|
|
7066
|
-
*
|
|
7067
|
-
*
|
|
7178
|
+
* Checks for build system markers in priority order:
|
|
7179
|
+
* 0. `.substrate/project-profile.yaml` → `project.buildCommand` field (most explicit, wins)
|
|
7180
|
+
* 1. `turbo.json` → `turbo build`
|
|
7181
|
+
* 2. Node.js lockfiles → corresponding `<pm> run build`
|
|
7182
|
+
* 3. Python markers (pyproject.toml, poetry.lock, setup.py) → skip (no universal build step)
|
|
7183
|
+
* 4. Rust (Cargo.toml) → skip
|
|
7184
|
+
* 5. Go (go.mod) → skip
|
|
7185
|
+
* 6. No markers found → skip (empty command)
|
|
7068
7186
|
*
|
|
7069
7187
|
* When a non-Node.js project is detected (or nothing is recognized), the
|
|
7070
7188
|
* returned command is '' which causes runBuildVerification() to skip.
|
|
7071
7189
|
*/
|
|
7072
7190
|
function detectPackageManager(projectRoot) {
|
|
7191
|
+
const profilePath = join$1(projectRoot, ".substrate", "project-profile.yaml");
|
|
7192
|
+
if (existsSync$1(profilePath)) try {
|
|
7193
|
+
const raw = readFileSync$1(profilePath, "utf-8");
|
|
7194
|
+
const parsed = yaml.load(raw);
|
|
7195
|
+
const buildCommand = parsed?.project?.buildCommand;
|
|
7196
|
+
if (typeof buildCommand === "string" && buildCommand.length > 0) return {
|
|
7197
|
+
packageManager: "none",
|
|
7198
|
+
lockfile: "project-profile.yaml",
|
|
7199
|
+
command: buildCommand
|
|
7200
|
+
};
|
|
7201
|
+
} catch {}
|
|
7202
|
+
if (existsSync$1(join$1(projectRoot, "turbo.json"))) return {
|
|
7203
|
+
packageManager: "none",
|
|
7204
|
+
lockfile: "turbo.json",
|
|
7205
|
+
command: "turbo build"
|
|
7206
|
+
};
|
|
7073
7207
|
const nodeCandidates = [
|
|
7074
7208
|
{
|
|
7075
7209
|
file: "pnpm-lock.yaml",
|
|
@@ -7170,6 +7304,16 @@ function runBuildVerification(options) {
|
|
|
7170
7304
|
output: combinedOutput,
|
|
7171
7305
|
reason: "build-verification-timeout"
|
|
7172
7306
|
};
|
|
7307
|
+
const missingScriptPattern = /Missing script[:\s]|No script found|Command "build" not found/i;
|
|
7308
|
+
if (missingScriptPattern.test(combinedOutput)) {
|
|
7309
|
+
logger$23.warn("Build script not found — skipping pre-flight (greenfield repo)");
|
|
7310
|
+
return {
|
|
7311
|
+
status: "skipped",
|
|
7312
|
+
exitCode,
|
|
7313
|
+
output: combinedOutput,
|
|
7314
|
+
reason: "build-script-not-found"
|
|
7315
|
+
};
|
|
7316
|
+
}
|
|
7173
7317
|
return {
|
|
7174
7318
|
status: "failed",
|
|
7175
7319
|
exitCode,
|
|
@@ -7647,12 +7791,12 @@ var FileStateStore = class {
|
|
|
7647
7791
|
//#region src/modules/state/dolt-store.ts
|
|
7648
7792
|
const log = createLogger("modules:state:dolt");
|
|
7649
7793
|
/**
|
|
7650
|
-
* Validate that a story key matches the expected pattern (e.g. "26-7").
|
|
7794
|
+
* Validate that a story key matches the expected pattern (e.g. "26-7", "1-1a", "NEW-26").
|
|
7651
7795
|
* Prevents SQL injection via string-interpolated identifiers.
|
|
7652
7796
|
*/
|
|
7653
|
-
const STORY_KEY_PATTERN = /^[
|
|
7797
|
+
const STORY_KEY_PATTERN = /^[A-Za-z0-9]+-[A-Za-z0-9]+$/;
|
|
7654
7798
|
function assertValidStoryKey(storyKey) {
|
|
7655
|
-
if (!STORY_KEY_PATTERN.test(storyKey)) throw new DoltQueryError("assertValidStoryKey", `Invalid story key: '${storyKey}'. Must match pattern <
|
|
7799
|
+
if (!STORY_KEY_PATTERN.test(storyKey)) throw new DoltQueryError("assertValidStoryKey", `Invalid story key: '${storyKey}'. Must match pattern <segment>-<segment> (e.g. "10-1", "1-1a", "NEW-26").`);
|
|
7656
7800
|
}
|
|
7657
7801
|
/**
|
|
7658
7802
|
* Dolt-backed implementation of the StateStore interface.
|
|
@@ -8938,6 +9082,11 @@ async function getImplementationDecisions(deps) {
|
|
|
8938
9082
|
*
|
|
8939
9083
|
* Returns the matched section content (from heading to next story heading or end),
|
|
8940
9084
|
* or null if no matching section is found (caller falls back to full shard).
|
|
9085
|
+
*
|
|
9086
|
+
* @deprecated Used only as a migration shim for pre-37-0 projects that have
|
|
9087
|
+
* per-epic (key=epicId) shards in the decision store. Post-37-0 shards are
|
|
9088
|
+
* keyed by storyKey directly and do not need extraction. Do not delete until
|
|
9089
|
+
* all per-epic shards have been superseded by per-story shards (AC6).
|
|
8941
9090
|
*/
|
|
8942
9091
|
function extractStorySection(shardContent, storyKey) {
|
|
8943
9092
|
if (!shardContent || !storyKey) return null;
|
|
@@ -8955,13 +9104,26 @@ function extractStorySection(shardContent, storyKey) {
|
|
|
8955
9104
|
}
|
|
8956
9105
|
/**
|
|
8957
9106
|
* Retrieve the epic shard from the pre-fetched implementation decisions.
|
|
8958
|
-
* Looks for decisions with category='epic-shard', key=epicId.
|
|
8959
|
-
* Falls back to reading _bmad-output/epics.md on disk if decisions are empty.
|
|
8960
9107
|
*
|
|
8961
|
-
*
|
|
9108
|
+
* Lookup order (post-37-0 schema):
|
|
9109
|
+
* 1. Direct per-story lookup: category='epic-shard', key=storyKey → AC4
|
|
9110
|
+
* If found, return content immediately — no extractStorySection() needed.
|
|
9111
|
+
* 2. Migration shim (pre-37-0 fallback): category='epic-shard', key=epicId
|
|
9112
|
+
* + extractStorySection() to narrow to the requested story. → AC6
|
|
9113
|
+
* 3. File-based fallback: read epics.md from disk + extractStorySection(). → AC6
|
|
8962
9114
|
*/
|
|
8963
9115
|
function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
8964
9116
|
try {
|
|
9117
|
+
if (storyKey) {
|
|
9118
|
+
const perStoryShard = decisions.find((d) => d.category === "epic-shard" && d.key === storyKey);
|
|
9119
|
+
if (perStoryShard?.value) {
|
|
9120
|
+
logger$20.debug({
|
|
9121
|
+
epicId,
|
|
9122
|
+
storyKey
|
|
9123
|
+
}, "Found per-story epic shard (direct lookup)");
|
|
9124
|
+
return perStoryShard.value;
|
|
9125
|
+
}
|
|
9126
|
+
}
|
|
8965
9127
|
const epicShard = decisions.find((d) => d.category === "epic-shard" && d.key === epicId);
|
|
8966
9128
|
const shardContent = epicShard?.value;
|
|
8967
9129
|
if (shardContent) {
|
|
@@ -8971,7 +9133,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
|
|
|
8971
9133
|
logger$20.debug({
|
|
8972
9134
|
epicId,
|
|
8973
9135
|
storyKey
|
|
8974
|
-
}, "Extracted per-story section from epic shard");
|
|
9136
|
+
}, "Extracted per-story section from epic shard (pre-37-0 fallback)");
|
|
8975
9137
|
return storySection;
|
|
8976
9138
|
}
|
|
8977
9139
|
logger$20.debug({
|
|
@@ -9059,9 +9221,17 @@ function readEpicShardFromFile(projectRoot, epicId) {
|
|
|
9059
9221
|
if (!epicsPath) return "";
|
|
9060
9222
|
const content = readFileSync$1(epicsPath, "utf-8");
|
|
9061
9223
|
const epicNum = epicId.replace(/^epic-/i, "");
|
|
9062
|
-
const
|
|
9063
|
-
const
|
|
9064
|
-
|
|
9224
|
+
const headingPattern = new RegExp(`^(#{2,4})\\s+(?:Epic\\s+)?${epicNum}[.:\\s]`, "m");
|
|
9225
|
+
const headingMatch = headingPattern.exec(content);
|
|
9226
|
+
if (!headingMatch) return "";
|
|
9227
|
+
const startIdx = headingMatch.index;
|
|
9228
|
+
const headingLevel = headingMatch[1].length;
|
|
9229
|
+
const hashes = "#".repeat(headingLevel);
|
|
9230
|
+
const endPattern = new RegExp(`\\n${hashes}\\s`, "g");
|
|
9231
|
+
endPattern.lastIndex = startIdx + headingMatch[0].length;
|
|
9232
|
+
const endMatch = endPattern.exec(content);
|
|
9233
|
+
const endIdx = endMatch ? endMatch.index : content.length;
|
|
9234
|
+
return content.slice(startIdx, endIdx).trim();
|
|
9065
9235
|
} catch (err) {
|
|
9066
9236
|
logger$20.warn({
|
|
9067
9237
|
epicId,
|
|
@@ -9320,9 +9490,9 @@ async function getProjectFindings(db) {
|
|
|
9320
9490
|
sections.push("**Prior escalations:**");
|
|
9321
9491
|
for (const d of diagnoses.slice(-3)) try {
|
|
9322
9492
|
const val = JSON.parse(d.value);
|
|
9323
|
-
sections.push(`- ${d.key.split(":")[0]}: ${val.recommendedAction} — ${val.rationale}`);
|
|
9493
|
+
sections.push(`- ${(d.key ?? "").split(":")[0]}: ${val.recommendedAction} — ${val.rationale}`);
|
|
9324
9494
|
} catch {
|
|
9325
|
-
sections.push(`- ${d.key}: escalated`);
|
|
9495
|
+
sections.push(`- ${d.key ?? "unknown"}: escalated`);
|
|
9326
9496
|
}
|
|
9327
9497
|
}
|
|
9328
9498
|
const highCycleStories = metrics.filter((m) => {
|
|
@@ -9337,16 +9507,16 @@ async function getProjectFindings(db) {
|
|
|
9337
9507
|
sections.push("**Stories with high review cycles:**");
|
|
9338
9508
|
for (const m of highCycleStories) try {
|
|
9339
9509
|
const val = JSON.parse(m.value);
|
|
9340
|
-
sections.push(`- ${m.key.split(":")[0]}: ${val.review_cycles} cycles`);
|
|
9510
|
+
sections.push(`- ${(m.key ?? "").split(":")[0]}: ${val.review_cycles} cycles`);
|
|
9341
9511
|
} catch {}
|
|
9342
9512
|
}
|
|
9343
|
-
const stalls = operational.filter((o) => o.key
|
|
9513
|
+
const stalls = operational.filter((o) => o.key?.startsWith("stall:"));
|
|
9344
9514
|
if (stalls.length > 0) sections.push(`**Prior stalls:** ${stalls.length} stall event(s) recorded`);
|
|
9345
9515
|
if (advisoryNotes.length > 0) {
|
|
9346
9516
|
sections.push("**Advisory notes from prior reviews (LGTM_WITH_NOTES):**");
|
|
9347
9517
|
for (const n of advisoryNotes.slice(-3)) try {
|
|
9348
9518
|
const val = JSON.parse(n.value);
|
|
9349
|
-
const storyId = n.key.split(":")[0];
|
|
9519
|
+
const storyId = (n.key ?? "").split(":")[0];
|
|
9350
9520
|
if (typeof val.notes === "string" && val.notes.length > 0) sections.push(`- ${storyId}: ${val.notes}`);
|
|
9351
9521
|
} catch {
|
|
9352
9522
|
sections.push(`- ${n.key}: advisory notes available`);
|
|
@@ -9525,12 +9695,9 @@ function detectDeprecatedStatusField(content) {
|
|
|
9525
9695
|
}
|
|
9526
9696
|
|
|
9527
9697
|
//#endregion
|
|
9528
|
-
//#region src/modules/compiled-workflows/
|
|
9529
|
-
|
|
9530
|
-
|
|
9531
|
-
const DEFAULT_TIMEOUT_MS$1 = 18e5;
|
|
9532
|
-
/** Default Vitest test patterns injected when no test-pattern decisions exist */
|
|
9533
|
-
const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
|
|
9698
|
+
//#region src/modules/compiled-workflows/default-test-patterns.ts
|
|
9699
|
+
/** Default test patterns for Vitest/Jest/Mocha (Node.js ecosystem) */
|
|
9700
|
+
const VITEST_DEFAULT_PATTERNS = `## Test Patterns (defaults)
|
|
9534
9701
|
- Framework: Vitest (NOT jest — --testPathPattern flag does not work, use -- "pattern")
|
|
9535
9702
|
- Mock approach: vi.mock() with hoisting for module-level mocks
|
|
9536
9703
|
- Assertion style: expect().toBe(), expect().toEqual(), expect().toThrow()
|
|
@@ -9540,6 +9707,105 @@ const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
|
|
|
9540
9707
|
npx vitest run --no-coverage -- "your-module-name"
|
|
9541
9708
|
- Final validation ONLY: npm test 2>&1 | grep -E "Test Files|Tests " | tail -3
|
|
9542
9709
|
- Do NOT run the full suite (npm test) repeatedly — it consumes excessive memory when multiple agents run in parallel`;
|
|
9710
|
+
/** Default test patterns for Go (stdlib testing) */
|
|
9711
|
+
const GO_DEFAULT_PATTERNS = `## Test Patterns (defaults)
|
|
9712
|
+
- Framework: Go test (stdlib)
|
|
9713
|
+
- Test file naming: <module>_test.go alongside source files
|
|
9714
|
+
- Test structure: table-driven tests using t.Run() subtests
|
|
9715
|
+
- Run all tests: go test ./...
|
|
9716
|
+
- Run specific test: go test ./... -v -run TestFunctionName
|
|
9717
|
+
- IMPORTANT: Run targeted tests during development: go test ./pkg/... -v -run TestSpecific
|
|
9718
|
+
- Assertion style: t.Errorf(), t.Fatalf(); use testify if already in go.mod (require.Equal, assert.NoError)`;
|
|
9719
|
+
/** Default test patterns for Gradle (JUnit 5) */
|
|
9720
|
+
const GRADLE_DEFAULT_PATTERNS = `## Test Patterns (defaults)
|
|
9721
|
+
- Framework: JUnit 5 (Gradle)
|
|
9722
|
+
- Test structure: @Test annotated methods in class under src/test/
|
|
9723
|
+
- Run all tests: ./gradlew test
|
|
9724
|
+
- Run specific test: ./gradlew test --tests "com.example.ClassName.methodName"
|
|
9725
|
+
- IMPORTANT: Run targeted tests during development: ./gradlew test --tests "ClassName"
|
|
9726
|
+
- Assertion style: assertThat(...).isEqualTo(...) (AssertJ) or assertEquals (JUnit)`;
|
|
9727
|
+
/** Default test patterns for Maven (JUnit 5) */
|
|
9728
|
+
const MAVEN_DEFAULT_PATTERNS = `## Test Patterns (defaults)
|
|
9729
|
+
- Framework: JUnit 5 (Maven)
|
|
9730
|
+
- Test structure: @Test annotated methods in class under src/test/
|
|
9731
|
+
- Run all tests: mvn test
|
|
9732
|
+
- Run specific test: mvn test -Dtest="ClassName#methodName"
|
|
9733
|
+
- IMPORTANT: Run targeted tests during development: mvn test -Dtest="ClassName"
|
|
9734
|
+
- Assertion style: assertThat(...).isEqualTo(...) (AssertJ) or assertEquals (JUnit)`;
|
|
9735
|
+
/** Default test patterns for Cargo (Rust) */
|
|
9736
|
+
const CARGO_DEFAULT_PATTERNS = `## Test Patterns (defaults)
|
|
9737
|
+
- Framework: Rust test (cargo)
|
|
9738
|
+
- Test file naming: #[cfg(test)] module in same file, or tests/ directory for integration tests
|
|
9739
|
+
- Test structure: #[test] annotated functions
|
|
9740
|
+
- Run all tests: cargo test
|
|
9741
|
+
- Run specific test: cargo test test_function_name
|
|
9742
|
+
- IMPORTANT: Run targeted tests during development: cargo test --lib test_module
|
|
9743
|
+
- Assertion style: assert_eq!, assert!, assert_ne! macros`;
|
|
9744
|
+
/** Default test patterns for pytest (Python) */
|
|
9745
|
+
const PYTEST_DEFAULT_PATTERNS = `## Test Patterns (defaults)
|
|
9746
|
+
- Framework: pytest
|
|
9747
|
+
- Test file naming: test_<module>.py or <module>_test.py
|
|
9748
|
+
- Test structure: test_* functions or Test* classes with test_* methods
|
|
9749
|
+
- Run all tests: pytest
|
|
9750
|
+
- Run specific test: pytest tests/test_foo.py::test_bar -v
|
|
9751
|
+
- IMPORTANT: Run targeted tests during development: pytest -k "test_name" -v
|
|
9752
|
+
- Assertion style: plain assert statements; use pytest.raises() for exceptions`;
|
|
9753
|
+
/**
|
|
9754
|
+
* Resolve the appropriate default test pattern block for the project.
|
|
9755
|
+
*
|
|
9756
|
+
* Algorithm:
|
|
9757
|
+
* 1. If projectRoot is undefined or empty → return VITEST_DEFAULT_PATTERNS
|
|
9758
|
+
* 2. Build profile path: join(projectRoot, '.substrate/project-profile.yaml')
|
|
9759
|
+
* 3. If file does not exist → return VITEST_DEFAULT_PATTERNS
|
|
9760
|
+
* 4. Parse YAML; on error → return VITEST_DEFAULT_PATTERNS
|
|
9761
|
+
* 5. Match project.testCommand (case-insensitive substring):
|
|
9762
|
+
* go test → GO, gradlew/gradle → GRADLE, mvn → MAVEN,
|
|
9763
|
+
* cargo test → CARGO, pytest → PYTEST, vitest/jest/mocha/npm → VITEST
|
|
9764
|
+
* 6. If testCommand unmatched, try project.language:
|
|
9765
|
+
* go → GO, kotlin/java → GRADLE, rust → CARGO, python → PYTEST,
|
|
9766
|
+
* typescript/javascript → VITEST
|
|
9767
|
+
* 7. Nothing matched → return VITEST_DEFAULT_PATTERNS
|
|
9768
|
+
*
|
|
9769
|
+
* @param projectRoot - Absolute path to the project root (or undefined)
|
|
9770
|
+
* @returns Stack-appropriate test pattern block string
|
|
9771
|
+
*/
|
|
9772
|
+
function resolveDefaultTestPatterns(projectRoot) {
|
|
9773
|
+
if (!projectRoot) return VITEST_DEFAULT_PATTERNS;
|
|
9774
|
+
const profilePath = join$1(projectRoot, ".substrate/project-profile.yaml");
|
|
9775
|
+
if (!existsSync$1(profilePath)) return VITEST_DEFAULT_PATTERNS;
|
|
9776
|
+
let profile = null;
|
|
9777
|
+
try {
|
|
9778
|
+
const content = readFileSync$1(profilePath, "utf-8");
|
|
9779
|
+
profile = yaml.load(content);
|
|
9780
|
+
} catch {
|
|
9781
|
+
return VITEST_DEFAULT_PATTERNS;
|
|
9782
|
+
}
|
|
9783
|
+
if (!profile) return VITEST_DEFAULT_PATTERNS;
|
|
9784
|
+
const project = profile["project"];
|
|
9785
|
+
if (!project) return VITEST_DEFAULT_PATTERNS;
|
|
9786
|
+
const testCommand = (project["testCommand"] ?? "").toLowerCase();
|
|
9787
|
+
if (testCommand) {
|
|
9788
|
+
if (testCommand.includes("cargo test")) return CARGO_DEFAULT_PATTERNS;
|
|
9789
|
+
if (testCommand.includes("go test")) return GO_DEFAULT_PATTERNS;
|
|
9790
|
+
if (testCommand.includes("gradlew") || testCommand.includes("gradle")) return GRADLE_DEFAULT_PATTERNS;
|
|
9791
|
+
if (testCommand.includes("mvn")) return MAVEN_DEFAULT_PATTERNS;
|
|
9792
|
+
if (testCommand.includes("pytest")) return PYTEST_DEFAULT_PATTERNS;
|
|
9793
|
+
if (testCommand.includes("vitest") || testCommand.includes("jest") || testCommand.includes("mocha") || testCommand.includes("npm")) return VITEST_DEFAULT_PATTERNS;
|
|
9794
|
+
}
|
|
9795
|
+
const language = (project["language"] ?? "").toLowerCase();
|
|
9796
|
+
if (language === "go") return GO_DEFAULT_PATTERNS;
|
|
9797
|
+
if (language === "kotlin" || language === "java") return GRADLE_DEFAULT_PATTERNS;
|
|
9798
|
+
if (language === "rust") return CARGO_DEFAULT_PATTERNS;
|
|
9799
|
+
if (language === "python") return PYTEST_DEFAULT_PATTERNS;
|
|
9800
|
+
if (language === "typescript" || language === "javascript") return VITEST_DEFAULT_PATTERNS;
|
|
9801
|
+
return VITEST_DEFAULT_PATTERNS;
|
|
9802
|
+
}
|
|
9803
|
+
|
|
9804
|
+
//#endregion
|
|
9805
|
+
//#region src/modules/compiled-workflows/dev-story.ts
|
|
9806
|
+
const logger$16 = createLogger("compiled-workflows:dev-story");
|
|
9807
|
+
/** Default timeout for dev-story dispatches in milliseconds (30 min) */
|
|
9808
|
+
const DEFAULT_TIMEOUT_MS$1 = 18e5;
|
|
9543
9809
|
/**
|
|
9544
9810
|
* Execute the compiled dev-story workflow.
|
|
9545
9811
|
*
|
|
@@ -9654,8 +9920,8 @@ async function runDevStory(deps, params) {
|
|
|
9654
9920
|
count: testPatternDecisions.length
|
|
9655
9921
|
}, "Loaded test patterns from decision store");
|
|
9656
9922
|
} else {
|
|
9657
|
-
testPatternsContent =
|
|
9658
|
-
logger$16.debug({ storyKey }, "No test-pattern decisions
|
|
9923
|
+
testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
|
|
9924
|
+
logger$16.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
|
|
9659
9925
|
}
|
|
9660
9926
|
} catch (err) {
|
|
9661
9927
|
const error = err instanceof Error ? err.message : String(err);
|
|
@@ -9663,7 +9929,7 @@ async function runDevStory(deps, params) {
|
|
|
9663
9929
|
storyKey,
|
|
9664
9930
|
error
|
|
9665
9931
|
}, "Failed to load test patterns — using defaults");
|
|
9666
|
-
testPatternsContent =
|
|
9932
|
+
testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
|
|
9667
9933
|
}
|
|
9668
9934
|
const taskScopeContent = taskScope !== void 0 && taskScope.trim().length > 0 ? `## Task Scope for This Batch\n\nImplement ONLY the following tasks from the story:\n\n${taskScope}\n\nDo NOT implement tasks outside this list. Other tasks will be handled in separate batch dispatches.` : "";
|
|
9669
9935
|
const priorFilesContent = priorFiles !== void 0 && priorFiles.length > 0 ? `## Files Modified by Previous Batches\n\nThe following files were created or modified by prior batch dispatches. Review them for context before implementing:\n\n${priorFiles.map((f) => `- ${f}`).join("\n")}` : "";
|
|
@@ -10362,15 +10628,40 @@ async function runTestPlan(deps, params) {
|
|
|
10362
10628
|
return makeTestPlanFailureResult(`story_file_read_error: ${error}`);
|
|
10363
10629
|
}
|
|
10364
10630
|
const archConstraintsContent = await getArchConstraints$1(deps);
|
|
10365
|
-
|
|
10366
|
-
|
|
10367
|
-
|
|
10368
|
-
|
|
10369
|
-
|
|
10370
|
-
|
|
10371
|
-
|
|
10372
|
-
|
|
10373
|
-
|
|
10631
|
+
let testPatternsContent = "";
|
|
10632
|
+
try {
|
|
10633
|
+
const solutioningDecisions = await getDecisionsByPhase(deps.db, "solutioning");
|
|
10634
|
+
const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
|
|
10635
|
+
if (testPatternDecisions.length > 0) {
|
|
10636
|
+
testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
|
|
10637
|
+
logger$14.debug({
|
|
10638
|
+
storyKey,
|
|
10639
|
+
count: testPatternDecisions.length
|
|
10640
|
+
}, "Loaded test patterns from decision store");
|
|
10641
|
+
} else {
|
|
10642
|
+
testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
|
|
10643
|
+
logger$14.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
|
|
10644
|
+
}
|
|
10645
|
+
} catch {
|
|
10646
|
+
testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
|
|
10647
|
+
}
|
|
10648
|
+
const { prompt, tokenCount, truncated } = assemblePrompt(template, [
|
|
10649
|
+
{
|
|
10650
|
+
name: "story_content",
|
|
10651
|
+
content: storyContent,
|
|
10652
|
+
priority: "required"
|
|
10653
|
+
},
|
|
10654
|
+
{
|
|
10655
|
+
name: "architecture_constraints",
|
|
10656
|
+
content: archConstraintsContent,
|
|
10657
|
+
priority: "optional"
|
|
10658
|
+
},
|
|
10659
|
+
{
|
|
10660
|
+
name: "test_patterns",
|
|
10661
|
+
content: testPatternsContent,
|
|
10662
|
+
priority: "optional"
|
|
10663
|
+
}
|
|
10664
|
+
], TOKEN_CEILING);
|
|
10374
10665
|
logger$14.info({
|
|
10375
10666
|
storyKey,
|
|
10376
10667
|
tokenCount,
|
|
@@ -10575,6 +10866,23 @@ async function runTestExpansion(deps, params) {
|
|
|
10575
10866
|
});
|
|
10576
10867
|
}
|
|
10577
10868
|
const archConstraintsContent = await getArchConstraints(deps);
|
|
10869
|
+
let testPatternsContent = "";
|
|
10870
|
+
try {
|
|
10871
|
+
const solutioningDecisions = await getDecisionsByPhase(deps.db, "solutioning");
|
|
10872
|
+
const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
|
|
10873
|
+
if (testPatternDecisions.length > 0) {
|
|
10874
|
+
testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
|
|
10875
|
+
logger$13.debug({
|
|
10876
|
+
storyKey,
|
|
10877
|
+
count: testPatternDecisions.length
|
|
10878
|
+
}, "Loaded test patterns from decision store");
|
|
10879
|
+
} else {
|
|
10880
|
+
testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
|
|
10881
|
+
logger$13.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
|
|
10882
|
+
}
|
|
10883
|
+
} catch {
|
|
10884
|
+
testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
|
|
10885
|
+
}
|
|
10578
10886
|
let gitDiffContent = "";
|
|
10579
10887
|
if (filesModified && filesModified.length > 0) try {
|
|
10580
10888
|
const templateTokens = countTokens(template);
|
|
@@ -10611,6 +10919,11 @@ async function runTestExpansion(deps, params) {
|
|
|
10611
10919
|
content: gitDiffContent,
|
|
10612
10920
|
priority: "important"
|
|
10613
10921
|
},
|
|
10922
|
+
{
|
|
10923
|
+
name: "test_patterns",
|
|
10924
|
+
content: testPatternsContent,
|
|
10925
|
+
priority: "optional"
|
|
10926
|
+
},
|
|
10614
10927
|
{
|
|
10615
10928
|
name: "arch_constraints",
|
|
10616
10929
|
content: archConstraintsContent,
|
|
@@ -11517,7 +11830,7 @@ function registerHealthCommand(program, _version = "0.0.0", projectRoot = proces
|
|
|
11517
11830
|
const logger$11 = createLogger("implementation-orchestrator:seed");
|
|
11518
11831
|
/** Max chars for the architecture summary seeded into decisions */
|
|
11519
11832
|
const MAX_ARCH_CHARS = 6e3;
|
|
11520
|
-
/** Max chars per epic
|
|
11833
|
+
/** Max chars per epic-shard decision value (per-story or per-epic fallback) */
|
|
11521
11834
|
const MAX_EPIC_SHARD_CHARS = 12e3;
|
|
11522
11835
|
/** Max chars for test patterns */
|
|
11523
11836
|
const MAX_TEST_PATTERNS_CHARS = 2e3;
|
|
@@ -11634,15 +11947,18 @@ async function seedEpicShards(db, projectRoot) {
|
|
|
11634
11947
|
const shards = parseEpicShards(content);
|
|
11635
11948
|
let count = 0;
|
|
11636
11949
|
for (const shard of shards) {
|
|
11637
|
-
|
|
11638
|
-
|
|
11639
|
-
|
|
11640
|
-
|
|
11641
|
-
|
|
11642
|
-
|
|
11643
|
-
|
|
11644
|
-
|
|
11645
|
-
|
|
11950
|
+
const subsections = parseStorySubsections(shard.epicId, shard.content);
|
|
11951
|
+
for (const subsection of subsections) {
|
|
11952
|
+
await createDecision(db, {
|
|
11953
|
+
pipeline_run_id: null,
|
|
11954
|
+
phase: "implementation",
|
|
11955
|
+
category: "epic-shard",
|
|
11956
|
+
key: subsection.key,
|
|
11957
|
+
value: subsection.content.slice(0, MAX_EPIC_SHARD_CHARS),
|
|
11958
|
+
rationale: "Seeded from planning artifacts at orchestrator startup"
|
|
11959
|
+
});
|
|
11960
|
+
count++;
|
|
11961
|
+
}
|
|
11646
11962
|
}
|
|
11647
11963
|
await db.exec("DELETE FROM decisions WHERE phase = 'implementation' AND category = 'epic-shard-hash' AND key = 'epics-file'");
|
|
11648
11964
|
await createDecision(db, {
|
|
@@ -11751,13 +12067,99 @@ function parseEpicShards(content) {
|
|
|
11751
12067
|
return shards;
|
|
11752
12068
|
}
|
|
11753
12069
|
/**
|
|
12070
|
+
* Parse an epic section's content into per-story subsections.
|
|
12071
|
+
*
|
|
12072
|
+
* Matches story headings using three patterns:
|
|
12073
|
+
* - Markdown headings: #{2,6} Story \d+-\d+ (e.g., ### Story 37-1: Title)
|
|
12074
|
+
* - Bold: **Story \d+-\d+** (e.g., **Story 37-1**)
|
|
12075
|
+
* - Bare key: \d+-\d+:\s (e.g., 37-1: Title — must start at line start)
|
|
12076
|
+
*
|
|
12077
|
+
* Each subsection spans from its heading to the next matching heading or EOF.
|
|
12078
|
+
*
|
|
12079
|
+
* AC3: If no story headings are found, returns a single per-epic fallback entry
|
|
12080
|
+
* keyed by epicId — preserving backward-compatible behaviour for unstructured epics.
|
|
12081
|
+
*/
|
|
12082
|
+
function parseStorySubsections(epicId, epicContent) {
|
|
12083
|
+
const storyPattern = /(?:^#{2,6}\s+Story\s+(\d+-\d+)|^\*\*Story\s+(\d+-\d+)\*\*|^(\d+-\d+):\s)/gim;
|
|
12084
|
+
const matches = [];
|
|
12085
|
+
let match$1;
|
|
12086
|
+
while ((match$1 = storyPattern.exec(epicContent)) !== null) {
|
|
12087
|
+
const storyKey = match$1[1] ?? match$1[2] ?? match$1[3];
|
|
12088
|
+
if (storyKey !== void 0) matches.push({
|
|
12089
|
+
storyKey,
|
|
12090
|
+
startIdx: match$1.index
|
|
12091
|
+
});
|
|
12092
|
+
}
|
|
12093
|
+
if (matches.length === 0) return [{
|
|
12094
|
+
key: epicId,
|
|
12095
|
+
content: epicContent
|
|
12096
|
+
}];
|
|
12097
|
+
const result = [];
|
|
12098
|
+
for (let i = 0; i < matches.length; i++) {
|
|
12099
|
+
const entry = matches[i];
|
|
12100
|
+
const nextEntry = matches[i + 1];
|
|
12101
|
+
const start = entry.startIdx;
|
|
12102
|
+
const end = nextEntry !== void 0 ? nextEntry.startIdx : epicContent.length;
|
|
12103
|
+
const sectionContent = epicContent.slice(start, end).trim();
|
|
12104
|
+
if (sectionContent.length > 0) result.push({
|
|
12105
|
+
key: entry.storyKey,
|
|
12106
|
+
content: sectionContent
|
|
12107
|
+
});
|
|
12108
|
+
}
|
|
12109
|
+
return result;
|
|
12110
|
+
}
|
|
12111
|
+
/**
|
|
12112
|
+
* Read the project profile YAML synchronously.
|
|
12113
|
+
* Returns null on missing file, parse error, or unexpected shape.
|
|
12114
|
+
* Does NOT import from src/modules/project-profile/ — inline parse only.
|
|
12115
|
+
*
|
|
12116
|
+
* @internal
|
|
12117
|
+
*/
|
|
12118
|
+
function readProfileSync(projectRoot) {
|
|
12119
|
+
const profilePath = join$1(projectRoot, ".substrate", "project-profile.yaml");
|
|
12120
|
+
if (!existsSync$1(profilePath)) return null;
|
|
12121
|
+
try {
|
|
12122
|
+
const content = readFileSync$1(profilePath, "utf-8");
|
|
12123
|
+
const parsed = yaml.load(content);
|
|
12124
|
+
if (parsed !== null && typeof parsed === "object" && !Array.isArray(parsed)) return parsed;
|
|
12125
|
+
return null;
|
|
12126
|
+
} catch {
|
|
12127
|
+
return null;
|
|
12128
|
+
}
|
|
12129
|
+
}
|
|
12130
|
+
/**
|
|
11754
12131
|
* Detect test framework and patterns from project configuration.
|
|
11755
|
-
*
|
|
12132
|
+
*
|
|
12133
|
+
* Detection priority:
|
|
12134
|
+
* 1. Profile present + packages[] non-empty → buildMonorepoTestPatterns(packages)
|
|
12135
|
+
* 2. Profile present + testCommand non-empty → mapTestCommandToPatterns(testCommand)
|
|
12136
|
+
* 3. No profile / profile fallthrough:
|
|
12137
|
+
* a. package.json present → existing vitest/jest/mocha detection
|
|
12138
|
+
* b. go.mod present → buildGoTestPatterns(projectRoot)
|
|
12139
|
+
* c. build.gradle.kts or build.gradle → buildGradleTestPatterns(projectRoot)
|
|
12140
|
+
* d. pom.xml → buildMavenTestPatterns()
|
|
12141
|
+
* e. Cargo.toml → buildCargoTestPatterns()
|
|
12142
|
+
* f. pyproject.toml OR conftest.py → buildPytestPatterns(projectRoot)
|
|
12143
|
+
* 4. Nothing matched → undefined
|
|
12144
|
+
*
|
|
12145
|
+
* @internal exported for direct unit testing
|
|
11756
12146
|
*/
|
|
11757
12147
|
function detectTestPatterns(projectRoot) {
|
|
12148
|
+
const profile = readProfileSync(projectRoot);
|
|
12149
|
+
if (profile !== null) {
|
|
12150
|
+
const project = profile["project"];
|
|
12151
|
+
if (project !== void 0) {
|
|
12152
|
+
const packages = project["packages"];
|
|
12153
|
+
if (Array.isArray(packages) && packages.length > 0) return buildMonorepoTestPatterns(packages);
|
|
12154
|
+
const testCommand = project["testCommand"];
|
|
12155
|
+
if (typeof testCommand === "string" && testCommand.length > 0) {
|
|
12156
|
+
const mapped = mapTestCommandToPatterns(testCommand);
|
|
12157
|
+
if (mapped !== void 0) return mapped;
|
|
12158
|
+
}
|
|
12159
|
+
}
|
|
12160
|
+
}
|
|
11758
12161
|
const pkgPath = join$1(projectRoot, "package.json");
|
|
11759
|
-
if (
|
|
11760
|
-
try {
|
|
12162
|
+
if (existsSync$1(pkgPath)) try {
|
|
11761
12163
|
const pkg = JSON.parse(readFileSync$1(pkgPath, "utf-8"));
|
|
11762
12164
|
const allDeps = {
|
|
11763
12165
|
...pkg.dependencies,
|
|
@@ -11777,9 +12179,17 @@ function detectTestPatterns(projectRoot) {
|
|
|
11777
12179
|
if (firstTest.includes("@jest") || firstTest.includes("jest.mock")) return buildJestPatterns(testScript);
|
|
11778
12180
|
}
|
|
11779
12181
|
return void 0;
|
|
11780
|
-
} catch {
|
|
11781
|
-
|
|
11782
|
-
|
|
12182
|
+
} catch {}
|
|
12183
|
+
if (existsSync$1(join$1(projectRoot, "go.mod"))) return buildGoTestPatterns(projectRoot);
|
|
12184
|
+
if (existsSync$1(join$1(projectRoot, "build.gradle.kts")) || existsSync$1(join$1(projectRoot, "build.gradle"))) return buildGradleTestPatterns(projectRoot);
|
|
12185
|
+
if (existsSync$1(join$1(projectRoot, "pom.xml"))) return buildMavenTestPatterns();
|
|
12186
|
+
if (existsSync$1(join$1(projectRoot, "Cargo.toml"))) return buildCargoTestPatterns();
|
|
12187
|
+
if (existsSync$1(join$1(projectRoot, "conftest.py"))) return buildPytestPatterns(projectRoot);
|
|
12188
|
+
if (existsSync$1(join$1(projectRoot, "pyproject.toml"))) try {
|
|
12189
|
+
const pyprojectContent = readFileSync$1(join$1(projectRoot, "pyproject.toml"), "utf-8");
|
|
12190
|
+
if (pyprojectContent.includes("[tool.pytest")) return buildPytestPatterns(projectRoot);
|
|
12191
|
+
} catch {}
|
|
12192
|
+
return void 0;
|
|
11783
12193
|
}
|
|
11784
12194
|
function buildVitestPatterns(testScript) {
|
|
11785
12195
|
const runCmd = testScript || "npx vitest run";
|
|
@@ -11816,6 +12226,196 @@ function buildMochaPatterns() {
|
|
|
11816
12226
|
].join("\n");
|
|
11817
12227
|
}
|
|
11818
12228
|
/**
|
|
12229
|
+
* Build Go test patterns.
|
|
12230
|
+
* Optionally detects testify from go.mod if projectRoot is non-empty.
|
|
12231
|
+
*
|
|
12232
|
+
* @internal
|
|
12233
|
+
*/
|
|
12234
|
+
function buildGoTestPatterns(projectRoot) {
|
|
12235
|
+
let hasTestify = false;
|
|
12236
|
+
if (projectRoot.length > 0) try {
|
|
12237
|
+
const goModPath = join$1(projectRoot, "go.mod");
|
|
12238
|
+
if (existsSync$1(goModPath)) {
|
|
12239
|
+
const content = readFileSync$1(goModPath, "utf-8");
|
|
12240
|
+
hasTestify = content.includes("github.com/stretchr/testify");
|
|
12241
|
+
}
|
|
12242
|
+
} catch {}
|
|
12243
|
+
return [
|
|
12244
|
+
"## Test Patterns",
|
|
12245
|
+
"- Framework: Go test (stdlib)",
|
|
12246
|
+
"- Test file naming: <module>_test.go alongside source files",
|
|
12247
|
+
"- Test structure: table-driven tests with t.Run() subtests",
|
|
12248
|
+
"- Run all tests: go test ./...",
|
|
12249
|
+
"- Run specific test: go test ./... -v -run TestFunctionName",
|
|
12250
|
+
"- Assertion style: t.Errorf(), t.Fatalf()",
|
|
12251
|
+
hasTestify ? "- testify available: use require.Equal(), assert.NoError(), etc." : ""
|
|
12252
|
+
].filter(Boolean).join("\n");
|
|
12253
|
+
}
|
|
12254
|
+
/**
|
|
12255
|
+
* Build Gradle (JVM) test patterns.
|
|
12256
|
+
* Detects JUnit5 vs JUnit4 if projectRoot is non-empty.
|
|
12257
|
+
*
|
|
12258
|
+
* @internal
|
|
12259
|
+
*/
|
|
12260
|
+
function buildGradleTestPatterns(projectRoot) {
|
|
12261
|
+
let hasJunit5 = false;
|
|
12262
|
+
if (projectRoot.length > 0) try {
|
|
12263
|
+
const ktsPath = join$1(projectRoot, "build.gradle.kts");
|
|
12264
|
+
const groovyPath = join$1(projectRoot, "build.gradle");
|
|
12265
|
+
const buildFilePath = existsSync$1(ktsPath) ? ktsPath : groovyPath;
|
|
12266
|
+
if (existsSync$1(buildFilePath)) {
|
|
12267
|
+
const content = readFileSync$1(buildFilePath, "utf-8");
|
|
12268
|
+
hasJunit5 = content.includes("junit-jupiter");
|
|
12269
|
+
}
|
|
12270
|
+
} catch {}
|
|
12271
|
+
return [
|
|
12272
|
+
"## Test Patterns",
|
|
12273
|
+
`- Framework: ${hasJunit5 ? "JUnit 5" : "JUnit"}`,
|
|
12274
|
+
"- Run all tests: ./gradlew test",
|
|
12275
|
+
"- Run specific test: ./gradlew test --tests \"com.example.ClassName\"",
|
|
12276
|
+
"- Test annotation: @Test",
|
|
12277
|
+
hasJunit5 ? "- Assertion style: assertThat() (AssertJ), assertEquals()" : "- Assertion style: assertEquals(), assertThat()"
|
|
12278
|
+
].join("\n");
|
|
12279
|
+
}
|
|
12280
|
+
/**
|
|
12281
|
+
* Build Maven (JVM) test patterns.
|
|
12282
|
+
*
|
|
12283
|
+
* @internal
|
|
12284
|
+
*/
|
|
12285
|
+
function buildMavenTestPatterns() {
|
|
12286
|
+
return [
|
|
12287
|
+
"## Test Patterns",
|
|
12288
|
+
"- Framework: JUnit (Maven)",
|
|
12289
|
+
"- Run all tests: mvn test",
|
|
12290
|
+
"- Run specific test: mvn test -Dtest=ClassName",
|
|
12291
|
+
"- Test annotation: @Test",
|
|
12292
|
+
"- Assertion style: assertEquals(), assertThat()"
|
|
12293
|
+
].join("\n");
|
|
12294
|
+
}
|
|
12295
|
+
/**
|
|
12296
|
+
* Build Cargo/Rust test patterns.
|
|
12297
|
+
*
|
|
12298
|
+
* @internal
|
|
12299
|
+
*/
|
|
12300
|
+
function buildCargoTestPatterns() {
|
|
12301
|
+
return [
|
|
12302
|
+
"## Test Patterns",
|
|
12303
|
+
"- Framework: Rust built-in test harness (cargo test)",
|
|
12304
|
+
"- Run all tests: cargo test",
|
|
12305
|
+
"- Run specific test: cargo test module_name",
|
|
12306
|
+
"- Test annotation: #[test]",
|
|
12307
|
+
"- Assertion macros: assert_eq!(), assert!(), assert_ne!()",
|
|
12308
|
+
"- Test module structure: #[cfg(test)] mod tests { ... }"
|
|
12309
|
+
].join("\n");
|
|
12310
|
+
}
|
|
12311
|
+
/**
|
|
12312
|
+
* Build pytest (Python) test patterns.
|
|
12313
|
+
* Checks for conftest.py and pyproject.toml for context.
|
|
12314
|
+
*
|
|
12315
|
+
* @internal
|
|
12316
|
+
*/
|
|
12317
|
+
function buildPytestPatterns(projectRoot) {
|
|
12318
|
+
let hasConftest = false;
|
|
12319
|
+
if (projectRoot.length > 0) try {
|
|
12320
|
+
hasConftest = existsSync$1(join$1(projectRoot, "conftest.py"));
|
|
12321
|
+
} catch {}
|
|
12322
|
+
return [
|
|
12323
|
+
"## Test Patterns",
|
|
12324
|
+
"- Framework: pytest",
|
|
12325
|
+
"- Run all tests: pytest",
|
|
12326
|
+
"- Run specific test: pytest tests/test_file.py -v -k \"test_name\"",
|
|
12327
|
+
"- Fixture pattern: @pytest.fixture (define in conftest.py for sharing)",
|
|
12328
|
+
"- Assertion style: assert statement (plain Python)",
|
|
12329
|
+
hasConftest ? "- conftest.py detected: shared fixtures available" : ""
|
|
12330
|
+
].filter(Boolean).join("\n");
|
|
12331
|
+
}
|
|
12332
|
+
/**
|
|
12333
|
+
* Map a profile testCommand string to appropriate pattern builder output.
|
|
12334
|
+
* Returns undefined for unrecognized commands.
|
|
12335
|
+
*
|
|
12336
|
+
* @internal
|
|
12337
|
+
*/
|
|
12338
|
+
function mapTestCommandToPatterns(testCommand) {
|
|
12339
|
+
if (testCommand.includes("go test")) return buildGoTestPatterns("");
|
|
12340
|
+
if (testCommand.includes("gradlew") || testCommand.includes("gradle")) return buildGradleTestPatterns("");
|
|
12341
|
+
if (testCommand.includes("mvn")) return buildMavenTestPatterns();
|
|
12342
|
+
if (testCommand.includes("cargo test")) return buildCargoTestPatterns();
|
|
12343
|
+
if (testCommand.includes("pytest")) return buildPytestPatterns("");
|
|
12344
|
+
if (testCommand.includes("vitest")) return buildVitestPatterns(testCommand);
|
|
12345
|
+
if (testCommand.includes("jest")) return buildJestPatterns(testCommand);
|
|
12346
|
+
if (testCommand.includes("mocha")) return buildMochaPatterns();
|
|
12347
|
+
return void 0;
|
|
12348
|
+
}
|
|
12349
|
+
/**
|
|
12350
|
+
* Build combined test patterns for a monorepo with multiple language packages.
|
|
12351
|
+
* Emits a concise per-language block for each distinct language, prefixed with package path.
|
|
12352
|
+
*
|
|
12353
|
+
* @internal
|
|
12354
|
+
*/
|
|
12355
|
+
function buildMonorepoTestPatterns(packages) {
|
|
12356
|
+
const seen = new Set();
|
|
12357
|
+
const entries = [];
|
|
12358
|
+
for (const pkg of packages) if (typeof pkg.language === "string" && pkg.language.length > 0 && !seen.has(pkg.language)) {
|
|
12359
|
+
seen.add(pkg.language);
|
|
12360
|
+
entries.push({
|
|
12361
|
+
language: pkg.language,
|
|
12362
|
+
path: pkg.path ?? ""
|
|
12363
|
+
});
|
|
12364
|
+
}
|
|
12365
|
+
const blocks = [];
|
|
12366
|
+
for (const entry of entries) {
|
|
12367
|
+
const header = entry.path.length > 0 ? `# ${entry.path} (${entry.language})` : `# ${entry.language}`;
|
|
12368
|
+
let block;
|
|
12369
|
+
switch (entry.language) {
|
|
12370
|
+
case "go":
|
|
12371
|
+
block = [
|
|
12372
|
+
header,
|
|
12373
|
+
"- go test ./...",
|
|
12374
|
+
"- go test ./... -v -run TestName",
|
|
12375
|
+
"- File naming: _test.go"
|
|
12376
|
+
].join("\n");
|
|
12377
|
+
break;
|
|
12378
|
+
case "typescript":
|
|
12379
|
+
case "javascript":
|
|
12380
|
+
block = [
|
|
12381
|
+
header,
|
|
12382
|
+
"- npx vitest run (or npm test)",
|
|
12383
|
+
"- vi.mock() for mocking",
|
|
12384
|
+
"- describe/it structure"
|
|
12385
|
+
].join("\n");
|
|
12386
|
+
break;
|
|
12387
|
+
case "java":
|
|
12388
|
+
case "kotlin":
|
|
12389
|
+
block = [
|
|
12390
|
+
header,
|
|
12391
|
+
"- ./gradlew test",
|
|
12392
|
+
"- @Test annotation",
|
|
12393
|
+
"- assertEquals() / assertThat()"
|
|
12394
|
+
].join("\n");
|
|
12395
|
+
break;
|
|
12396
|
+
case "rust":
|
|
12397
|
+
block = [
|
|
12398
|
+
header,
|
|
12399
|
+
"- cargo test",
|
|
12400
|
+
"- #[test] attribute",
|
|
12401
|
+
"- assert_eq!() / assert!()"
|
|
12402
|
+
].join("\n");
|
|
12403
|
+
break;
|
|
12404
|
+
case "python":
|
|
12405
|
+
block = [
|
|
12406
|
+
header,
|
|
12407
|
+
"- pytest",
|
|
12408
|
+
"- @pytest.fixture",
|
|
12409
|
+
"- assert statement style"
|
|
12410
|
+
].join("\n");
|
|
12411
|
+
break;
|
|
12412
|
+
default: block = [header, `- Run tests for ${entry.language} package`].join("\n");
|
|
12413
|
+
}
|
|
12414
|
+
blocks.push(block);
|
|
12415
|
+
}
|
|
12416
|
+
return ["## Test Patterns", ...blocks].join("\n\n");
|
|
12417
|
+
}
|
|
12418
|
+
/**
|
|
11819
12419
|
* Find a few test files in the project to help detect the test framework.
|
|
11820
12420
|
*/
|
|
11821
12421
|
function findTestFiles(projectRoot) {
|
|
@@ -11992,6 +12592,47 @@ function parseInterfaceContracts(storyContent, storyKey) {
|
|
|
11992
12592
|
//#endregion
|
|
11993
12593
|
//#region src/modules/implementation-orchestrator/contract-verifier.ts
|
|
11994
12594
|
/**
|
|
12595
|
+
* Reads .substrate/project-profile.yaml (Story 37-1) and determines whether
|
|
12596
|
+
* TypeScript type-checking is appropriate for this project.
|
|
12597
|
+
*
|
|
12598
|
+
* Detection order:
|
|
12599
|
+
* 1. No profile → true (preserve pre-37-4 behavior)
|
|
12600
|
+
* 2. `packages` array non-empty → true iff any package is typescript/javascript
|
|
12601
|
+
* 3. `packages` empty/absent → infer from `buildCommand` — true for npm/pnpm/yarn/bun/turbo/tsc
|
|
12602
|
+
* 4. Parse error → true (conservative, allow tsc)
|
|
12603
|
+
*
|
|
12604
|
+
* Uses synchronous I/O to avoid making verifyContracts async (Story 37-3 pattern).
|
|
12605
|
+
* Does NOT import from src/modules/project-profile/ to avoid circular-dependency risk.
|
|
12606
|
+
*/
|
|
12607
|
+
function shouldRunTscCheck(projectRoot) {
|
|
12608
|
+
const profilePath = join$1(projectRoot, ".substrate", "project-profile.yaml");
|
|
12609
|
+
if (!existsSync$1(profilePath)) return true;
|
|
12610
|
+
try {
|
|
12611
|
+
const raw = readFileSync$1(profilePath, "utf-8");
|
|
12612
|
+
const parsed = yaml.load(raw);
|
|
12613
|
+
if (!parsed) return true;
|
|
12614
|
+
const project = parsed?.project;
|
|
12615
|
+
if (!project) return true;
|
|
12616
|
+
const packages = project["packages"];
|
|
12617
|
+
if (Array.isArray(packages) && packages.length > 0) return packages.some((p) => p.language === "typescript" || p.language === "javascript");
|
|
12618
|
+
const buildCommand = project["buildCommand"];
|
|
12619
|
+
if (typeof buildCommand === "string" && buildCommand.length > 0) {
|
|
12620
|
+
const tsIndicators = [
|
|
12621
|
+
"npm",
|
|
12622
|
+
"pnpm",
|
|
12623
|
+
"yarn",
|
|
12624
|
+
"bun",
|
|
12625
|
+
"tsc",
|
|
12626
|
+
"turbo"
|
|
12627
|
+
];
|
|
12628
|
+
return tsIndicators.some((ind) => buildCommand.includes(ind));
|
|
12629
|
+
}
|
|
12630
|
+
return true;
|
|
12631
|
+
} catch {
|
|
12632
|
+
return true;
|
|
12633
|
+
}
|
|
12634
|
+
}
|
|
12635
|
+
/**
|
|
11995
12636
|
* Verify all declared contract export/import pairs after sprint completion.
|
|
11996
12637
|
*
|
|
11997
12638
|
* @param declarations - All ContractDeclaration entries from the decision store
|
|
@@ -12023,80 +12664,82 @@ function verifyContracts(declarations, projectRoot) {
|
|
|
12023
12664
|
});
|
|
12024
12665
|
}
|
|
12025
12666
|
}
|
|
12026
|
-
|
|
12027
|
-
|
|
12028
|
-
|
|
12029
|
-
|
|
12030
|
-
|
|
12031
|
-
|
|
12032
|
-
|
|
12033
|
-
|
|
12034
|
-
|
|
12035
|
-
|
|
12036
|
-
|
|
12037
|
-
|
|
12038
|
-
|
|
12039
|
-
|
|
12040
|
-
|
|
12041
|
-
|
|
12042
|
-
|
|
12043
|
-
|
|
12044
|
-
|
|
12045
|
-
|
|
12046
|
-
|
|
12047
|
-
|
|
12048
|
-
|
|
12049
|
-
|
|
12050
|
-
|
|
12051
|
-
}
|
|
12052
|
-
if (tscFailed) {
|
|
12053
|
-
const truncatedOutput = tscOutput.slice(0, 1e3);
|
|
12054
|
-
const matchedExports = new Set();
|
|
12055
|
-
for (const exp of exports) {
|
|
12056
|
-
if (!exp.filePath) continue;
|
|
12057
|
-
if (tscOutput.includes(exp.filePath)) {
|
|
12058
|
-
matchedExports.add(exp.contractName);
|
|
12059
|
-
const importers = imports.filter((i) => i.contractName === exp.contractName);
|
|
12060
|
-
if (importers.length > 0) for (const imp of importers) mismatches.push({
|
|
12061
|
-
exporter: exp.storyKey,
|
|
12062
|
-
importer: imp.storyKey,
|
|
12063
|
-
contractName: exp.contractName,
|
|
12064
|
-
mismatchDescription: `TypeScript type-check failed for ${exp.filePath}: ${truncatedOutput}`
|
|
12065
|
-
});
|
|
12066
|
-
else mismatches.push({
|
|
12067
|
-
exporter: exp.storyKey,
|
|
12068
|
-
importer: null,
|
|
12069
|
-
contractName: exp.contractName,
|
|
12070
|
-
mismatchDescription: `TypeScript type-check failed for ${exp.filePath}: ${truncatedOutput}`
|
|
12071
|
-
});
|
|
12667
|
+
if (shouldRunTscCheck(projectRoot)) {
|
|
12668
|
+
const tsconfigPath = join$1(projectRoot, "tsconfig.json");
|
|
12669
|
+
const tscBinPath = join$1(projectRoot, "node_modules", ".bin", "tsc");
|
|
12670
|
+
if (existsSync$1(tsconfigPath) && existsSync$1(tscBinPath)) {
|
|
12671
|
+
let tscOutput = "";
|
|
12672
|
+
let tscFailed = false;
|
|
12673
|
+
try {
|
|
12674
|
+
execSync(`"${tscBinPath}" --noEmit`, {
|
|
12675
|
+
cwd: projectRoot,
|
|
12676
|
+
timeout: 12e4,
|
|
12677
|
+
encoding: "utf-8",
|
|
12678
|
+
stdio: [
|
|
12679
|
+
"pipe",
|
|
12680
|
+
"pipe",
|
|
12681
|
+
"pipe"
|
|
12682
|
+
]
|
|
12683
|
+
});
|
|
12684
|
+
} catch (err) {
|
|
12685
|
+
tscFailed = true;
|
|
12686
|
+
if (err != null && typeof err === "object") {
|
|
12687
|
+
const e = err;
|
|
12688
|
+
const stdoutStr = typeof e.stdout === "string" ? e.stdout : Buffer.isBuffer(e.stdout) ? e.stdout.toString("utf-8") : "";
|
|
12689
|
+
const stderrStr = typeof e.stderr === "string" ? e.stderr : Buffer.isBuffer(e.stderr) ? e.stderr.toString("utf-8") : "";
|
|
12690
|
+
tscOutput = [stdoutStr, stderrStr].filter((s) => s.length > 0).join("\n");
|
|
12691
|
+
if (!tscOutput && e.message) tscOutput = e.message;
|
|
12072
12692
|
}
|
|
12073
12693
|
}
|
|
12074
|
-
if (
|
|
12075
|
-
const
|
|
12694
|
+
if (tscFailed) {
|
|
12695
|
+
const truncatedOutput = tscOutput.slice(0, 1e3);
|
|
12696
|
+
const matchedExports = new Set();
|
|
12076
12697
|
for (const exp of exports) {
|
|
12077
|
-
|
|
12078
|
-
if (
|
|
12079
|
-
|
|
12080
|
-
|
|
12081
|
-
|
|
12082
|
-
|
|
12083
|
-
|
|
12084
|
-
|
|
12085
|
-
|
|
12086
|
-
|
|
12087
|
-
|
|
12088
|
-
|
|
12698
|
+
if (!exp.filePath) continue;
|
|
12699
|
+
if (tscOutput.includes(exp.filePath)) {
|
|
12700
|
+
matchedExports.add(exp.contractName);
|
|
12701
|
+
const importers = imports.filter((i) => i.contractName === exp.contractName);
|
|
12702
|
+
if (importers.length > 0) for (const imp of importers) mismatches.push({
|
|
12703
|
+
exporter: exp.storyKey,
|
|
12704
|
+
importer: imp.storyKey,
|
|
12705
|
+
contractName: exp.contractName,
|
|
12706
|
+
mismatchDescription: `TypeScript type-check failed for ${exp.filePath}: ${truncatedOutput}`
|
|
12707
|
+
});
|
|
12708
|
+
else mismatches.push({
|
|
12709
|
+
exporter: exp.storyKey,
|
|
12710
|
+
importer: null,
|
|
12711
|
+
contractName: exp.contractName,
|
|
12712
|
+
mismatchDescription: `TypeScript type-check failed for ${exp.filePath}: ${truncatedOutput}`
|
|
12713
|
+
});
|
|
12089
12714
|
}
|
|
12090
|
-
|
|
12091
|
-
|
|
12092
|
-
|
|
12093
|
-
|
|
12094
|
-
|
|
12095
|
-
|
|
12096
|
-
|
|
12097
|
-
|
|
12098
|
-
|
|
12099
|
-
|
|
12715
|
+
}
|
|
12716
|
+
if (matchedExports.size === 0) {
|
|
12717
|
+
const reportedPairs = new Set();
|
|
12718
|
+
for (const exp of exports) {
|
|
12719
|
+
const importers = imports.filter((i) => i.contractName === exp.contractName);
|
|
12720
|
+
if (importers.length > 0) for (const imp of importers) {
|
|
12721
|
+
const pairKey = `${exp.storyKey}:${imp.storyKey}:${exp.contractName}`;
|
|
12722
|
+
if (!reportedPairs.has(pairKey)) {
|
|
12723
|
+
reportedPairs.add(pairKey);
|
|
12724
|
+
mismatches.push({
|
|
12725
|
+
exporter: exp.storyKey,
|
|
12726
|
+
importer: imp.storyKey,
|
|
12727
|
+
contractName: exp.contractName,
|
|
12728
|
+
mismatchDescription: `TypeScript type-check failed: ${truncatedOutput}`
|
|
12729
|
+
});
|
|
12730
|
+
}
|
|
12731
|
+
}
|
|
12732
|
+
else {
|
|
12733
|
+
const pairKey = `${exp.storyKey}:null:${exp.contractName}`;
|
|
12734
|
+
if (!reportedPairs.has(pairKey)) {
|
|
12735
|
+
reportedPairs.add(pairKey);
|
|
12736
|
+
mismatches.push({
|
|
12737
|
+
exporter: exp.storyKey,
|
|
12738
|
+
importer: null,
|
|
12739
|
+
contractName: exp.contractName,
|
|
12740
|
+
mismatchDescription: `TypeScript type-check failed: ${truncatedOutput}`
|
|
12741
|
+
});
|
|
12742
|
+
}
|
|
12100
12743
|
}
|
|
12101
12744
|
}
|
|
12102
12745
|
}
|
|
@@ -15393,6 +16036,69 @@ function buildTargetedFilesContent(issueList) {
|
|
|
15393
16036
|
return lines.join("\n");
|
|
15394
16037
|
}
|
|
15395
16038
|
/**
|
|
16039
|
+
* Normalize a title string into a set of meaningful words for comparison.
|
|
16040
|
+
* Strips punctuation, lowercases, and filters out very short words (<=2 chars)
|
|
16041
|
+
* and common stop words to focus on content-bearing terms.
|
|
16042
|
+
*/
|
|
16043
|
+
function titleToWordSet(title) {
|
|
16044
|
+
const stopWords = new Set([
|
|
16045
|
+
"the",
|
|
16046
|
+
"and",
|
|
16047
|
+
"for",
|
|
16048
|
+
"with",
|
|
16049
|
+
"from",
|
|
16050
|
+
"into",
|
|
16051
|
+
"that",
|
|
16052
|
+
"this",
|
|
16053
|
+
"via"
|
|
16054
|
+
]);
|
|
16055
|
+
return new Set(title.toLowerCase().replace(/[^a-z0-9\s-]/g, " ").split(/[\s-]+/).filter((w) => w.length > 2 && !stopWords.has(w)));
|
|
16056
|
+
}
|
|
16057
|
+
/**
|
|
16058
|
+
* Compute the word overlap ratio between two titles.
|
|
16059
|
+
* Returns a value between 0 and 1, where 1 means all words in the smaller set
|
|
16060
|
+
* are present in the larger set.
|
|
16061
|
+
*
|
|
16062
|
+
* Uses the smaller set as the denominator so that a generated title that is a
|
|
16063
|
+
* reasonable subset or superset of the expected title still scores well.
|
|
16064
|
+
*/
|
|
16065
|
+
function computeTitleOverlap(titleA, titleB) {
|
|
16066
|
+
const wordsA = titleToWordSet(titleA);
|
|
16067
|
+
const wordsB = titleToWordSet(titleB);
|
|
16068
|
+
if (wordsA.size === 0 || wordsB.size === 0) return 0;
|
|
16069
|
+
let shared = 0;
|
|
16070
|
+
for (const w of wordsA) if (wordsB.has(w)) shared++;
|
|
16071
|
+
const denominator = Math.min(wordsA.size, wordsB.size);
|
|
16072
|
+
return shared / denominator;
|
|
16073
|
+
}
|
|
16074
|
+
/**
|
|
16075
|
+
* Extract the expected story title from the epic shard content.
|
|
16076
|
+
*
|
|
16077
|
+
* Looks for patterns like:
|
|
16078
|
+
* - "### Story 37-1: Turborepo monorepo scaffold"
|
|
16079
|
+
* - "Story 37-1: Turborepo monorepo scaffold"
|
|
16080
|
+
* - "**37-1**: Turborepo monorepo scaffold"
|
|
16081
|
+
* - "37-1: Turborepo monorepo scaffold"
|
|
16082
|
+
*
|
|
16083
|
+
* Returns the title portion after the story key, or null if no match.
|
|
16084
|
+
*/
|
|
16085
|
+
function extractExpectedStoryTitle(shardContent, storyKey) {
|
|
16086
|
+
if (!shardContent || !storyKey) return null;
|
|
16087
|
+
const escaped = storyKey.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
|
|
16088
|
+
const patterns = [
|
|
16089
|
+
new RegExp(`^#{2,4}\\s+Story\\s+${escaped}[:\\s]+\\s*(.+)$`, "mi"),
|
|
16090
|
+
new RegExp(`^Story\\s+${escaped}[:\\s]+\\s*(.+)$`, "mi"),
|
|
16091
|
+
new RegExp(`^\\*\\*${escaped}\\*\\*[:\\s]+\\s*(.+)$`, "mi"),
|
|
16092
|
+
new RegExp(`^${escaped}[:\\s]+\\s*(.+)$`, "mi")
|
|
16093
|
+
];
|
|
16094
|
+
for (const pattern of patterns) {
|
|
16095
|
+
const match$1 = pattern.exec(shardContent);
|
|
16096
|
+
if (match$1?.[1]) return match$1[1].replace(/\*+$/, "").trim();
|
|
16097
|
+
}
|
|
16098
|
+
return null;
|
|
16099
|
+
}
|
|
16100
|
+
const TITLE_OVERLAP_WARNING_THRESHOLD = .3;
|
|
16101
|
+
/**
|
|
15396
16102
|
* Map a StoryPhase to the corresponding WgStoryStatus for wg_stories writes.
|
|
15397
16103
|
* Returns null for PENDING (no write needed).
|
|
15398
16104
|
*/
|
|
@@ -15409,6 +16115,55 @@ function wgStatusForPhase(phase) {
|
|
|
15409
16115
|
}
|
|
15410
16116
|
}
|
|
15411
16117
|
/**
|
|
16118
|
+
* Check whether `.substrate/project-profile.yaml` is stale relative to
|
|
16119
|
+
* the actual project structure.
|
|
16120
|
+
*
|
|
16121
|
+
* Returns an array of human-readable indicator strings. An empty array
|
|
16122
|
+
* means the profile appears current (or doesn't exist).
|
|
16123
|
+
*
|
|
16124
|
+
* Staleness indicators checked:
|
|
16125
|
+
* - Profile says `type: single` but `turbo.json` exists (should be monorepo)
|
|
16126
|
+
* - Profile has no Go language but `go.mod` exists
|
|
16127
|
+
* - Profile has no Python language but `pyproject.toml` exists
|
|
16128
|
+
* - Profile has no Rust language but `Cargo.toml` exists
|
|
16129
|
+
*/
|
|
16130
|
+
function checkProfileStaleness(projectRoot) {
|
|
16131
|
+
const profilePath = join$1(projectRoot, ".substrate", "project-profile.yaml");
|
|
16132
|
+
if (!existsSync$1(profilePath)) return [];
|
|
16133
|
+
let profile;
|
|
16134
|
+
try {
|
|
16135
|
+
const raw = readFileSync$1(profilePath, "utf-8");
|
|
16136
|
+
profile = yaml.load(raw) ?? {};
|
|
16137
|
+
} catch {
|
|
16138
|
+
return [];
|
|
16139
|
+
}
|
|
16140
|
+
const project = profile.project;
|
|
16141
|
+
if (project === void 0) return [];
|
|
16142
|
+
const indicators = [];
|
|
16143
|
+
const declaredLanguages = new Set();
|
|
16144
|
+
if (typeof project.language === "string") declaredLanguages.add(project.language);
|
|
16145
|
+
if (Array.isArray(project.packages)) {
|
|
16146
|
+
for (const pkg of project.packages) if (typeof pkg.language === "string") declaredLanguages.add(pkg.language);
|
|
16147
|
+
}
|
|
16148
|
+
if (project.type === "single" && existsSync$1(join$1(projectRoot, "turbo.json"))) indicators.push("turbo.json exists but profile says type: single (should be monorepo)");
|
|
16149
|
+
const languageMarkers = [
|
|
16150
|
+
{
|
|
16151
|
+
file: "go.mod",
|
|
16152
|
+
language: "go"
|
|
16153
|
+
},
|
|
16154
|
+
{
|
|
16155
|
+
file: "pyproject.toml",
|
|
16156
|
+
language: "python"
|
|
16157
|
+
},
|
|
16158
|
+
{
|
|
16159
|
+
file: "Cargo.toml",
|
|
16160
|
+
language: "rust"
|
|
16161
|
+
}
|
|
16162
|
+
];
|
|
16163
|
+
for (const marker of languageMarkers) if (existsSync$1(join$1(projectRoot, marker.file)) && !declaredLanguages.has(marker.language)) indicators.push(`${marker.file} exists but profile does not declare ${marker.language}`);
|
|
16164
|
+
return indicators;
|
|
16165
|
+
}
|
|
16166
|
+
/**
|
|
15412
16167
|
* Factory function that creates an ImplementationOrchestrator instance.
|
|
15413
16168
|
*
|
|
15414
16169
|
* @param deps - Injected dependencies (db, pack, contextCompiler, dispatcher,
|
|
@@ -15765,8 +16520,7 @@ function createImplementationOrchestrator(deps) {
|
|
|
15765
16520
|
for (const s of _stories.values()) if (s.phase === "COMPLETE" || s.phase === "ESCALATED") completed++;
|
|
15766
16521
|
else if (s.phase === "PENDING") queued++;
|
|
15767
16522
|
else active++;
|
|
15768
|
-
|
|
15769
|
-
if (timeSinceProgress >= HEARTBEAT_INTERVAL_MS) eventBus.emit("orchestrator:heartbeat", {
|
|
16523
|
+
eventBus.emit("orchestrator:heartbeat", {
|
|
15770
16524
|
runId: config.pipelineRunId ?? "",
|
|
15771
16525
|
activeDispatches: active,
|
|
15772
16526
|
completedDispatches: completed,
|
|
@@ -16010,6 +16764,46 @@ function createImplementationOrchestrator(deps) {
|
|
|
16010
16764
|
return;
|
|
16011
16765
|
}
|
|
16012
16766
|
storyFilePath = createResult.story_file;
|
|
16767
|
+
if (createResult.story_title) try {
|
|
16768
|
+
const epicId = storyKey.split("-")[0] ?? storyKey;
|
|
16769
|
+
const implDecisions = await getDecisionsByPhase(db, "implementation");
|
|
16770
|
+
let shardContent;
|
|
16771
|
+
const perStoryShard = implDecisions.find((d) => d.category === "epic-shard" && d.key === storyKey);
|
|
16772
|
+
if (perStoryShard?.value) shardContent = perStoryShard.value;
|
|
16773
|
+
else {
|
|
16774
|
+
const epicShard = implDecisions.find((d) => d.category === "epic-shard" && d.key === epicId);
|
|
16775
|
+
if (epicShard?.value) shardContent = extractStorySection(epicShard.value, storyKey) ?? epicShard.value;
|
|
16776
|
+
}
|
|
16777
|
+
if (shardContent) {
|
|
16778
|
+
const expectedTitle = extractExpectedStoryTitle(shardContent, storyKey);
|
|
16779
|
+
if (expectedTitle) {
|
|
16780
|
+
const overlap = computeTitleOverlap(expectedTitle, createResult.story_title);
|
|
16781
|
+
if (overlap < TITLE_OVERLAP_WARNING_THRESHOLD) {
|
|
16782
|
+
const msg = `Story title mismatch: expected "${expectedTitle}" but got "${createResult.story_title}" (word overlap: ${Math.round(overlap * 100)}%). This may indicate the create-story agent received truncated context.`;
|
|
16783
|
+
logger$27.warn({
|
|
16784
|
+
storyKey,
|
|
16785
|
+
expectedTitle,
|
|
16786
|
+
generatedTitle: createResult.story_title,
|
|
16787
|
+
overlap
|
|
16788
|
+
}, msg);
|
|
16789
|
+
eventBus.emit("orchestrator:story-warn", {
|
|
16790
|
+
storyKey,
|
|
16791
|
+
msg
|
|
16792
|
+
});
|
|
16793
|
+
} else logger$27.debug({
|
|
16794
|
+
storyKey,
|
|
16795
|
+
expectedTitle,
|
|
16796
|
+
generatedTitle: createResult.story_title,
|
|
16797
|
+
overlap
|
|
16798
|
+
}, "Story title validation passed");
|
|
16799
|
+
}
|
|
16800
|
+
}
|
|
16801
|
+
} catch (titleValidationErr) {
|
|
16802
|
+
logger$27.debug({
|
|
16803
|
+
storyKey,
|
|
16804
|
+
err: titleValidationErr
|
|
16805
|
+
}, "Story title validation skipped due to error");
|
|
16806
|
+
}
|
|
16013
16807
|
} catch (err) {
|
|
16014
16808
|
const errMsg = err instanceof Error ? err.message : String(err);
|
|
16015
16809
|
endPhase(storyKey, "create-story");
|
|
@@ -16532,6 +17326,28 @@ function createImplementationOrchestrator(deps) {
|
|
|
16532
17326
|
}, "Phantom review detected (0 issues + error) — retrying review once");
|
|
16533
17327
|
continue;
|
|
16534
17328
|
}
|
|
17329
|
+
if (isPhantomReview && timeoutRetried) {
|
|
17330
|
+
logger$27.warn({
|
|
17331
|
+
storyKey,
|
|
17332
|
+
reviewCycles,
|
|
17333
|
+
error: reviewResult.error
|
|
17334
|
+
}, "Consecutive review timeouts detected (original + retry both failed) — escalating immediately");
|
|
17335
|
+
endPhase(storyKey, "code-review");
|
|
17336
|
+
updateStory(storyKey, {
|
|
17337
|
+
phase: "ESCALATED",
|
|
17338
|
+
error: "consecutive-review-timeouts",
|
|
17339
|
+
completedAt: new Date().toISOString()
|
|
17340
|
+
});
|
|
17341
|
+
await writeStoryMetricsBestEffort(storyKey, "escalated", reviewCycles + 1);
|
|
17342
|
+
await emitEscalation({
|
|
17343
|
+
storyKey,
|
|
17344
|
+
lastVerdict: "consecutive-review-timeouts",
|
|
17345
|
+
reviewCycles: reviewCycles + 1,
|
|
17346
|
+
issues: ["Review dispatch failed twice consecutively (original + phantom-retry). Likely resource-constrained or diff too large for reviewer."]
|
|
17347
|
+
});
|
|
17348
|
+
await persistState();
|
|
17349
|
+
return;
|
|
17350
|
+
}
|
|
16535
17351
|
verdict = reviewResult.verdict;
|
|
16536
17352
|
issueList = reviewResult.issue_list ?? [];
|
|
16537
17353
|
if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
|
|
@@ -17327,6 +18143,19 @@ function createImplementationOrchestrator(deps) {
|
|
|
17327
18143
|
} catch (err) {
|
|
17328
18144
|
logger$27.error({ err }, "Post-sprint contract verification threw an error — skipping");
|
|
17329
18145
|
}
|
|
18146
|
+
if (projectRoot !== void 0) try {
|
|
18147
|
+
const indicators = checkProfileStaleness(projectRoot);
|
|
18148
|
+
if (indicators.length > 0) {
|
|
18149
|
+
const message = "Project profile may be outdated — consider running `substrate init --force` to re-detect";
|
|
18150
|
+
eventBus.emit("pipeline:profile-stale", {
|
|
18151
|
+
message,
|
|
18152
|
+
indicators
|
|
18153
|
+
});
|
|
18154
|
+
logger$27.warn({ indicators }, message);
|
|
18155
|
+
}
|
|
18156
|
+
} catch (err) {
|
|
18157
|
+
logger$27.debug({ err }, "Profile staleness check failed (best-effort)");
|
|
18158
|
+
}
|
|
17330
18159
|
let completed = 0;
|
|
17331
18160
|
let escalated = 0;
|
|
17332
18161
|
let failed = 0;
|
|
@@ -17457,14 +18286,14 @@ async function resolveStoryKeys(db, projectRoot, opts) {
|
|
|
17457
18286
|
/**
 * Extract story keys (e.g. "1-2", "NEW-26") from epic markdown content.
 * Recognizes four forms: explicit "**Story key:** `X-Y`" annotations,
 * "### Story X.Y" / "### Story X-Y" headings, inline "Story X-Y:" mentions,
 * and implementation-artifact file paths. Keys are de-duplicated and
 * returned in sortStoryKeys order; empty content yields an empty array.
 */
function parseStoryKeysFromEpics(content) {
	if (content.length === 0) return [];
	const found = new Set();
	const explicitKeyPattern = /\*\*Story key:\*\*\s*`?([A-Za-z0-9]+-[A-Za-z0-9]+)(?:-[^`\s]*)?`?/g;
	for (const hit of content.matchAll(explicitKeyPattern)) if (hit[1] !== void 0) found.add(hit[1]);
	const headingPattern = /^###\s+Story\s+([A-Za-z0-9]+)[.\-]([A-Za-z0-9]+)/gm;
	for (const hit of content.matchAll(headingPattern)) if (hit[1] !== void 0 && hit[2] !== void 0) found.add(`${hit[1]}-${hit[2]}`);
	const inlineStoryPattern = /Story\s+([A-Za-z0-9]+)-([A-Za-z0-9]+)[:\s]/g;
	for (const hit of content.matchAll(inlineStoryPattern)) if (hit[1] !== void 0 && hit[2] !== void 0) found.add(`${hit[1]}-${hit[2]}`);
	const filePathPattern = /_bmad-output\/implementation-artifacts\/([A-Za-z0-9]+-[A-Za-z0-9]+)-/g;
	for (const hit of content.matchAll(filePathPattern)) if (hit[1] !== void 0) found.add(hit[1]);
	return sortStoryKeys(Array.from(found));
}
|
|
@@ -17510,6 +18339,12 @@ function discoverPendingStoryKeys(projectRoot, epicNumber) {
|
|
|
17510
18339
|
allKeys = sortStoryKeys([...new Set(allKeys)]);
|
|
17511
18340
|
}
|
|
17512
18341
|
}
|
|
18342
|
+
const sprintKeys = parseStoryKeysFromSprintStatus(projectRoot);
|
|
18343
|
+
if (sprintKeys.length > 0) {
|
|
18344
|
+
const merged = new Set(allKeys);
|
|
18345
|
+
for (const k of sprintKeys) merged.add(k);
|
|
18346
|
+
allKeys = sortStoryKeys([...merged]);
|
|
18347
|
+
}
|
|
17513
18348
|
if (allKeys.length === 0) return [];
|
|
17514
18349
|
const existingKeys = collectExistingStoryKeys(projectRoot);
|
|
17515
18350
|
return allKeys.filter((k) => !existingKeys.has(k));
|
|
@@ -17560,7 +18395,7 @@ function collectExistingStoryKeys(projectRoot) {
|
|
|
17560
18395
|
} catch {
|
|
17561
18396
|
return existing;
|
|
17562
18397
|
}
|
|
17563
|
-
const filePattern = /^(
|
|
18398
|
+
const filePattern = /^([A-Za-z0-9]+-[A-Za-z0-9]+)-/;
|
|
17564
18399
|
for (const entry of entries) {
|
|
17565
18400
|
if (!entry.endsWith(".md")) continue;
|
|
17566
18401
|
const m = filePattern.exec(entry);
|
|
@@ -17569,6 +18404,33 @@ function collectExistingStoryKeys(projectRoot) {
|
|
|
17569
18404
|
return existing;
|
|
17570
18405
|
}
|
|
17571
18406
|
/**
 * Parse story keys from sprint-status.yaml.
 * Scans two-space-indented map entries (the development_status section) and
 * extracts keys that match the alphanumeric story key pattern (e.g., 1-1a,
 * NEW-26, E5-accessibility), normalized to their first two segments.
 * Epic status entries (epic-N) and retrospective entries are filtered out.
 * Best-effort: any read error yields an empty list.
 */
function parseStoryKeysFromSprintStatus(projectRoot) {
	const locations = [join$1(projectRoot, "_bmad-output", "implementation-artifacts", "sprint-status.yaml"), join$1(projectRoot, "_bmad-output", "sprint-status.yaml")];
	const found = locations.find((candidate) => existsSync$1(candidate));
	if (!found) return [];
	try {
		const yamlText = readFileSync$1(found, "utf-8");
		const entryPattern = /^\s{2}([A-Za-z0-9]+-[A-Za-z0-9]+(?:-[A-Za-z0-9-]*)?)\s*:/gm;
		const collected = new Set();
		for (const hit of yamlText.matchAll(entryPattern)) {
			const rawKey = hit[1];
			if (/^epic-\d+$/.test(rawKey)) continue;
			if (rawKey.includes("retrospective")) continue;
			const segments = rawKey.split("-");
			if (segments.length >= 2) collected.add(`${segments[0]}-${segments[1]}`);
		}
		return [...collected];
	} catch {
		return [];
	}
}
|
|
18433
|
+
/**
|
|
17572
18434
|
* Collect story keys already completed in previous pipeline runs.
|
|
17573
18435
|
* Scans pipeline_runs with status='completed' and extracts story keys
|
|
17574
18436
|
* with phase='COMPLETE' from their token_usage_json state.
|
|
@@ -17587,16 +18449,26 @@ async function getCompletedStoryKeys(db) {
|
|
|
17587
18449
|
return completed;
|
|
17588
18450
|
}
|
|
17589
18451
|
/**
 * Sort story keys: numeric keys first (ordered by epic number, then story
 * number), then alphabetic-prefix keys (NEW-*, E-*) sorted lexicographically.
 * Returns a new array; the input is not mutated.
 *
 * E.g. ["10-1", "1-2a", "1-2", "NEW-26", "E5-acc"] → ["1-2", "1-2a", "10-1", "E5-acc", "NEW-26"]
 *
 * @param {string[]} keys - Story keys of the form `<epic>-<story>`.
 * @returns {string[]} A sorted copy of `keys`.
 */
function sortStoryKeys(keys) {
	return keys.slice().sort((a, b) => {
		const aParts = a.split("-");
		const bParts = b.split("-");
		const aNum = Number(aParts[0]);
		const bNum = Number(bParts[0]);
		// Both epic segments numeric: order by epic, then by story number when
		// both story segments parse as numbers, otherwise lexicographically
		// ("2" sorts before "2a"). Number.isNaN is used instead of the coercing
		// global isNaN; the operands are already numbers so behavior is exact.
		if (!Number.isNaN(aNum) && !Number.isNaN(bNum)) {
			if (aNum !== bNum) return aNum - bNum;
			const aStory = Number(aParts[1]);
			const bStory = Number(bParts[1]);
			if (!Number.isNaN(aStory) && !Number.isNaN(bStory) && aStory !== bStory) return aStory - bStory;
			return (aParts[1] ?? "").localeCompare(bParts[1] ?? "");
		}
		// Numeric-epic keys always sort before alphabetic-prefix keys.
		if (!Number.isNaN(aNum)) return -1;
		if (!Number.isNaN(bNum)) return 1;
		return a.localeCompare(b);
	});
}
|
|
17602
18474
|
|
|
@@ -21687,7 +22559,7 @@ async function runRunAction(options) {
|
|
|
21687
22559
|
if (storiesArg !== void 0 && storiesArg !== "") {
|
|
21688
22560
|
parsedStoryKeys = storiesArg.split(",").map((k) => k.trim()).filter((k) => k.length > 0);
|
|
21689
22561
|
for (const key of parsedStoryKeys) if (!validateStoryKey(key)) {
|
|
21690
|
-
const errorMsg = `Story key '${key}' is not a valid format. Expected: <epic>-<story> (e.g., 10-1)`;
|
|
22562
|
+
const errorMsg = `Story key '${key}' is not a valid format. Expected: <epic>-<story> (e.g., 10-1, 1-1a, NEW-26)`;
|
|
21691
22563
|
if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
|
|
21692
22564
|
else process.stderr.write(`Error: ${errorMsg}\n`);
|
|
21693
22565
|
return 1;
|
|
@@ -22268,6 +23140,14 @@ async function runRunAction(options) {
|
|
|
22268
23140
|
verdict: payload.verdict
|
|
22269
23141
|
});
|
|
22270
23142
|
});
|
|
23143
|
+
eventBus.on("pipeline:profile-stale", (payload) => {
|
|
23144
|
+
ndjsonEmitter.emit({
|
|
23145
|
+
type: "pipeline:profile-stale",
|
|
23146
|
+
ts: new Date().toISOString(),
|
|
23147
|
+
message: payload.message,
|
|
23148
|
+
indicators: payload.indicators
|
|
23149
|
+
});
|
|
23150
|
+
});
|
|
22271
23151
|
}
|
|
22272
23152
|
const ingestionServer = telemetryEnabled ? new IngestionServer({ port: telemetryPort }) : void 0;
|
|
22273
23153
|
if (telemetryPersistence !== void 0) {
|
|
@@ -22792,4 +23672,4 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
|
|
|
22792
23672
|
|
|
22793
23673
|
//#endregion
|
|
22794
23674
|
export { AdapterTelemetryPersistence, AppError, DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DoltClient, DoltNotInstalled, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, FileStateStore, GitClient, GrammarLoader, IngestionServer, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SUBSTRATE_OWNED_SETTINGS_KEYS, SymbolParser, VALID_PHASES, WorkGraphRepository, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDatabaseAdapter, createDispatcher, createDoltClient, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, createTelemetryAdvisor, detectCycles, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initSchema, initializeDolt, isSyncAdapter, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
|
|
22795
|
-
//# sourceMappingURL=run-
|
|
23675
|
+
//# sourceMappingURL=run-DCmne2q6.js.map
|