@jaabiri/bmad-studio 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99) hide show
  1. package/dist/index.js +1996 -0
  2. package/package.json +53 -0
  3. package/ui-dist/assets/_baseUniq-B_n4QFRL.js +1 -0
  4. package/ui-dist/assets/_workflowId.lazy-FP56r40x.js +12 -0
  5. package/ui-dist/assets/arc-D8SlLs2L.js +1 -0
  6. package/ui-dist/assets/architectureDiagram-Q4EWVU46-BhKy9qBN.js +36 -0
  7. package/ui-dist/assets/blockDiagram-DXYQGD6D-DHQ9TuqQ.js +132 -0
  8. package/ui-dist/assets/c4Diagram-AHTNJAMY-BTPifryG.js +10 -0
  9. package/ui-dist/assets/channel-qsfw27Pz.js +1 -0
  10. package/ui-dist/assets/chevron-right-C3gMQbwP.js +1 -0
  11. package/ui-dist/assets/chunk-4BX2VUAB-DJJtjuLz.js +1 -0
  12. package/ui-dist/assets/chunk-4TB4RGXK-ONYWO0S_.js +206 -0
  13. package/ui-dist/assets/chunk-55IACEB6-CKub8aEM.js +1 -0
  14. package/ui-dist/assets/chunk-EDXVE4YY-c2zVzCwr.js +1 -0
  15. package/ui-dist/assets/chunk-FMBD7UC4-CnUcBH4x.js +15 -0
  16. package/ui-dist/assets/chunk-OYMX7WX6-DI7UEGCn.js +231 -0
  17. package/ui-dist/assets/chunk-QZHKN3VN-Dlq9g4FR.js +1 -0
  18. package/ui-dist/assets/chunk-YZCP3GAM-CpE3yTZs.js +1 -0
  19. package/ui-dist/assets/classDiagram-6PBFFD2Q-C6zNtups.js +1 -0
  20. package/ui-dist/assets/classDiagram-v2-HSJHXN6E-C6zNtups.js +1 -0
  21. package/ui-dist/assets/clone-lFRGLeBs.js +1 -0
  22. package/ui-dist/assets/cose-bilkent-S5V4N54A-BcwRulTx.js +1 -0
  23. package/ui-dist/assets/cytoscape.esm-5J0xJHOV.js +321 -0
  24. package/ui-dist/assets/dagre-KV5264BT-kD6LwyTY.js +4 -0
  25. package/ui-dist/assets/defaultLocale-DX6XiGOO.js +1 -0
  26. package/ui-dist/assets/diagram-5BDNPKRD-7BtLyPf7.js +10 -0
  27. package/ui-dist/assets/diagram-G4DWMVQ6-BeU21gzV.js +24 -0
  28. package/ui-dist/assets/diagram-MMDJMWI5-BcZe7DAu.js +43 -0
  29. package/ui-dist/assets/diagram-TYMM5635-30qZErT6.js +24 -0
  30. package/ui-dist/assets/erDiagram-SMLLAGMA-DZzYsGd-.js +85 -0
  31. package/ui-dist/assets/flowDiagram-DWJPFMVM-CEyo_5fY.js +162 -0
  32. package/ui-dist/assets/ganttDiagram-T4ZO3ILL-CdxjNHta.js +292 -0
  33. package/ui-dist/assets/gitGraphDiagram-UUTBAWPF-OI6nqhRA.js +106 -0
  34. package/ui-dist/assets/graph-CJuDDeAK.js +1 -0
  35. package/ui-dist/assets/index-1QFQcLya.js +56 -0
  36. package/ui-dist/assets/index-BvVxlmMK.js +29 -0
  37. package/ui-dist/assets/index-DPA4A8ah.js +9 -0
  38. package/ui-dist/assets/index-DbkfLjQe.js +1 -0
  39. package/ui-dist/assets/index-cvXQBc8Y.css +1 -0
  40. package/ui-dist/assets/infoDiagram-42DDH7IO-Bgvk_VTG.js +2 -0
  41. package/ui-dist/assets/init-Gi6I4Gst.js +1 -0
  42. package/ui-dist/assets/inter-cyrillic-400-normal-HOLc17fK.woff +0 -0
  43. package/ui-dist/assets/inter-cyrillic-400-normal-obahsSVq.woff2 +0 -0
  44. package/ui-dist/assets/inter-cyrillic-ext-400-normal-BQZuk6qB.woff2 +0 -0
  45. package/ui-dist/assets/inter-cyrillic-ext-400-normal-DQukG94-.woff +0 -0
  46. package/ui-dist/assets/inter-greek-400-normal-B4URO6DV.woff2 +0 -0
  47. package/ui-dist/assets/inter-greek-400-normal-q2sYcFCs.woff +0 -0
  48. package/ui-dist/assets/inter-greek-ext-400-normal-DGGRlc-M.woff2 +0 -0
  49. package/ui-dist/assets/inter-greek-ext-400-normal-KugGGMne.woff +0 -0
  50. package/ui-dist/assets/inter-latin-400-normal-C38fXH4l.woff2 +0 -0
  51. package/ui-dist/assets/inter-latin-400-normal-CyCys3Eg.woff +0 -0
  52. package/ui-dist/assets/inter-latin-ext-400-normal-77YHD8bZ.woff +0 -0
  53. package/ui-dist/assets/inter-latin-ext-400-normal-C1nco2VV.woff2 +0 -0
  54. package/ui-dist/assets/inter-vietnamese-400-normal-Bbgyi5SW.woff +0 -0
  55. package/ui-dist/assets/inter-vietnamese-400-normal-DMkecbls.woff2 +0 -0
  56. package/ui-dist/assets/ishikawaDiagram-UXIWVN3A-Cz2e8g4P.js +70 -0
  57. package/ui-dist/assets/jetbrains-mono-cyrillic-400-normal-BEIGL1Tu.woff2 +0 -0
  58. package/ui-dist/assets/jetbrains-mono-cyrillic-400-normal-ugxPyKxw.woff +0 -0
  59. package/ui-dist/assets/jetbrains-mono-greek-400-normal-B9oWc5Lo.woff +0 -0
  60. package/ui-dist/assets/jetbrains-mono-greek-400-normal-C190GLew.woff2 +0 -0
  61. package/ui-dist/assets/jetbrains-mono-latin-400-normal-6-qcROiO.woff +0 -0
  62. package/ui-dist/assets/jetbrains-mono-latin-400-normal-V6pRDFza.woff2 +0 -0
  63. package/ui-dist/assets/jetbrains-mono-latin-ext-400-normal-Bc8Ftmh3.woff2 +0 -0
  64. package/ui-dist/assets/jetbrains-mono-latin-ext-400-normal-fXTG6kC5.woff +0 -0
  65. package/ui-dist/assets/jetbrains-mono-vietnamese-400-normal-CqNFfHCs.woff +0 -0
  66. package/ui-dist/assets/journeyDiagram-VCZTEJTY-B7EdpiRX.js +139 -0
  67. package/ui-dist/assets/kanban-definition-6JOO6SKY-DiUf0qIs.js +89 -0
  68. package/ui-dist/assets/katex-BzTkCl_B.js +265 -0
  69. package/ui-dist/assets/layout-BhnDrrIT.js +1 -0
  70. package/ui-dist/assets/linear-BDwQpyTv.js +1 -0
  71. package/ui-dist/assets/mermaid.core-BR4XzyQe.js +303 -0
  72. package/ui-dist/assets/min-BJhF9j1-.js +1 -0
  73. package/ui-dist/assets/mindmap-definition-QFDTVHPH-D05ylhL6.js +96 -0
  74. package/ui-dist/assets/minus-M76p8_Mr.js +1 -0
  75. package/ui-dist/assets/ordinal-Cboi1Yqb.js +1 -0
  76. package/ui-dist/assets/pieDiagram-DEJITSTG-Bn-HlZLK.js +30 -0
  77. package/ui-dist/assets/proxy-B7DZsNQK.js +1 -0
  78. package/ui-dist/assets/quadrantDiagram-34T5L4WZ-BElRpUWb.js +7 -0
  79. package/ui-dist/assets/requirementDiagram-MS252O5E-CHPWxs3l.js +84 -0
  80. package/ui-dist/assets/sankeyDiagram-XADWPNL6-tlvafq0R.js +10 -0
  81. package/ui-dist/assets/sequenceDiagram-FGHM5R23-FgarYdCI.js +157 -0
  82. package/ui-dist/assets/sprint.lazy-CPHJj3lM.js +1 -0
  83. package/ui-dist/assets/stateDiagram-FHFEXIEX-BMEGKFsF.js +1 -0
  84. package/ui-dist/assets/stateDiagram-v2-QKLJ7IA2-BzoNePd4.js +1 -0
  85. package/ui-dist/assets/timeline-definition-GMOUNBTQ-REImLpEb.js +120 -0
  86. package/ui-dist/assets/timeline.lazy-BY5ZUkY-.js +1 -0
  87. package/ui-dist/assets/tooltip-DA8LKiqR.js +1 -0
  88. package/ui-dist/assets/validators-2LNt2eLU.js +1 -0
  89. package/ui-dist/assets/vennDiagram-DHZGUBPP-DcFGfj6d.js +34 -0
  90. package/ui-dist/assets/wardley-RL74JXVD-B9TLQgzI.js +162 -0
  91. package/ui-dist/assets/wardleyDiagram-NUSXRM2D-Hcgh3jSX.js +20 -0
  92. package/ui-dist/assets/x-CPRWsjef.js +1 -0
  93. package/ui-dist/assets/xychartDiagram-5P7HB3ND-BTe2ICiH.js +7 -0
  94. package/ui-dist/favicon.ico +0 -0
  95. package/ui-dist/index.html +32 -0
  96. package/ui-dist/logo192.png +0 -0
  97. package/ui-dist/logo512.png +0 -0
  98. package/ui-dist/manifest.json +25 -0
  99. package/ui-dist/robots.txt +3 -0
package/dist/index.js ADDED
@@ -0,0 +1,1996 @@
1
+ #!/usr/bin/env node
2
+
3
+ // src/index.ts
4
+ import { readFileSync as readFileSync2 } from "fs";
5
+ import { Command } from "commander";
6
+
7
+ // src/instances.ts
8
+ import { mkdirSync, readFileSync, writeFileSync } from "fs";
9
+ import { createServer as createNetServer } from "net";
10
+ import { homedir } from "os";
11
+ import { join, resolve } from "path";
12
// Directory holding bmad-studio's per-user state (the instance lockfile).
function getLockfileDir() {
  const home = homedir();
  return join(home, ".bmad-studio");
}
15
// Full path of the JSON file that registers running instances.
function getLockfilePath() {
  const dir = getLockfileDir();
  return join(dir, "instances.json");
}
18
// Ensure the state directory exists, then return the lockfile path.
function lockfilePath() {
  mkdirSync(getLockfileDir(), { recursive: true });
  return getLockfilePath();
}
23
// Type guard: true when `entry` looks like a persisted instance record
// (numeric port/pid, string projectPath/startTime).
function isValidEntry(entry) {
  if (entry === null || typeof entry !== "object") {
    return false;
  }
  const record = entry;
  const checks = [
    typeof record.port === "number",
    typeof record.pid === "number",
    typeof record.projectPath === "string",
    typeof record.startTime === "string",
  ];
  return checks.every(Boolean);
}
28
// Load the instance registry. A missing, unreadable, or malformed file
// (or a non-array payload) yields an empty list; invalid entries are dropped.
function readInstances() {
  try {
    const parsed = JSON.parse(readFileSync(lockfilePath(), "utf-8"));
    return Array.isArray(parsed) ? parsed.filter(isValidEntry) : [];
  } catch {
    return [];
  }
}
38
// Persist the registry as pretty-printed JSON (trailing newline),
// creating the state directory if needed.
function writeInstances(entries) {
  mkdirSync(getLockfileDir(), { recursive: true });
  const payload = JSON.stringify(entries, null, 2) + "\n";
  writeFileSync(getLockfilePath(), payload, "utf-8");
}
43
// Signal 0 probes for process existence without actually signalling it;
// kill() throws (ESRCH/EPERM/ERANGE) when the probe fails.
function isProcessAlive(pid) {
  try {
    process.kill(pid, 0);
  } catch {
    return false;
  }
  return true;
}
51
// Keep only entries whose recorded process is still running.
function cleanStaleEntries(entries) {
  const alive = [];
  for (const entry of entries) {
    if (isProcessAlive(entry.pid)) {
      alive.push(entry);
    }
  }
  return alive;
}
54
// Append a new instance record, dropping stale entries as a side effect.
function registerInstance(entry) {
  const live = cleanStaleEntries(readInstances());
  writeInstances([...live, entry]);
}
60
// Remove the record bound to `port` (no-op when no such entry exists).
function unregisterInstance(port) {
  const remaining = readInstances().filter((entry) => entry.port !== port);
  writeInstances(remaining);
}
65
// Look up a live instance serving `projectPath` (compared after
// resolving to an absolute path). Returns undefined when none matches.
function findInstanceByProject(projectPath) {
  const target = resolve(projectPath);
  const live = cleanStaleEntries(readInstances());
  return live.find((entry) => entry.projectPath === target);
}
71
// Resolve true when a listener can bind 127.0.0.1:`port`; the probe
// server is closed before resolving. Never rejects.
function isPortAvailable(port) {
  return new Promise((done) => {
    const probe = createNetServer();
    probe.once("error", () => done(false));
    probe.once("listening", () => {
      probe.close(() => done(true));
    });
    probe.listen(port, "127.0.0.1");
  });
}
83
// Inclusive port range reserved for bmad-studio instances (100 slots).
var PORT_MIN = 5400;
var PORT_MAX = 5499;
85
// Probe ports from `startPort` (default PORT_MIN) up to PORT_MAX,
// skipping ports already claimed by live registered instances, and
// return the first one that can actually be bound.
// Throws when the whole range is exhausted.
async function findAvailablePort(startPort) {
  const live = cleanStaleEntries(readInstances());
  const claimed = new Set(live.map((entry) => entry.port));
  const first = startPort ?? PORT_MIN;
  for (let candidate = first; candidate <= PORT_MAX; candidate++) {
    if (claimed.has(candidate)) {
      continue;
    }
    if (await isPortAvailable(candidate)) {
      return candidate;
    }
  }
  throw new Error(`All ports in range ${PORT_MIN}-${PORT_MAX} are occupied`);
}
97
+
98
+ // src/launcher.ts
99
+ import { resolve as resolve3, dirname as dirname3 } from "path";
100
+ import { fileURLToPath } from "url";
101
+ import { existsSync as existsSync2 } from "fs";
102
+
103
+ // ../server/dist/index.js
104
+ import http from "http";
105
+ import { existsSync } from "fs";
106
+ import { join as join8, dirname as dirname2 } from "path";
107
+ import express from "express";
108
+ import debugFactory6 from "debug";
109
+
110
+ // ../server/dist/routes/api.js
111
+ import { Router } from "express";
112
+ import { stat as stat2, readdir, readFile as readFile3 } from "fs/promises";
113
+ import { join as join4, isAbsolute, normalize } from "path";
114
+
115
+ // ../server/dist/config-file.js
116
+ import { readFile, writeFile } from "fs/promises";
117
+ import { join as join2 } from "path";
118
+ import debugFactory from "debug";
119
// Namespaced logger; enable with DEBUG=bmad-studio:config-file.
var debug = debugFactory("bmad-studio:config-file");
// Per-project configuration file written at the project root.
var CONFIG_FILENAME = ".bmad-studio.json";
121
// Absolute path of the project's .bmad-studio.json config file.
function configPath(projectRoot) {
  const filename = CONFIG_FILENAME;
  return join2(projectRoot, filename);
}
124
// Read and validate the project config. Returns null both when the file
// is absent/unreadable and when its contents fail schema validation.
async function readConfig(projectRoot) {
  const filePath = configPath(projectRoot);
  let parsed;
  try {
    parsed = JSON.parse(await readFile(filePath, "utf-8"));
  } catch {
    debug("No config file at %s", filePath);
    return null;
  }
  if (!isValidConfig(parsed)) {
    debug("Invalid config at %s", filePath);
    return null;
  }
  return parsed;
}
139
// Serialize `config` to the project's config file, pretty-printed with a
// trailing newline. Propagates write errors to the caller.
async function writeConfig(projectRoot, config) {
  const filePath = configPath(projectRoot);
  const serialized = JSON.stringify(config, null, 2) + "\n";
  await writeFile(filePath, serialized, "utf-8");
  debug("Wrote config to %s", filePath);
}
145
// Schema check: accepts only `{ version: 1, outputDir: string }`-shaped
// objects (extra keys are tolerated).
function isValidConfig(value) {
  if (value === null || typeof value !== "object") {
    return false;
  }
  const candidate = value;
  if (candidate["version"] !== 1) {
    return false;
  }
  return typeof candidate["outputDir"] === "string";
}
151
+
152
+ // ../server/dist/sprint-parser.js
153
+ import { readFile as readFile2, stat } from "fs/promises";
154
+ import { join as join3 } from "path";
155
+ import { parse as parseYaml } from "yaml";
156
// Key shapes found in sprint-status.yaml's development_status map:
// "epic-3", "epic-3-retrospective", and "3-2-some-story-slug".
var EPIC_STATUS_REGEX = /^epic-(\d+)$/;
var RETRO_REGEX = /^epic-(\d+)-retrospective$/;
var STORY_REGEX = /^(\d+)-(\d+)-(.+)$/;
// NOTE: /g regexes are stateful — exec() advances lastIndex between
// calls; extractEpicTitles resets lastIndex after each full scan.
var EPIC_TITLE_REGEX = /# Epic (\d+): (.+)/g;
160
// Convert a hyphenated slug to a title: "user-auth-flow" -> "User Auth Flow".
function slugToTitle(slug) {
  const words = slug.split("-");
  const capitalized = words.map((word) => word.charAt(0).toUpperCase() + word.slice(1));
  return capitalized.join(" ");
}
163
// True when `filePath` exists (any entry type stat() accepts); false on
// any stat error.
async function fileExists(filePath) {
  try {
    await stat(filePath);
  } catch {
    return false;
  }
  return true;
}
171
// Locate sprint-status.yaml, preferring the copy under
// implementation-artifacts/ over one at the output-dir root.
async function findSprintFile(outputDir) {
  const candidates = [
    join3(outputDir, "implementation-artifacts", "sprint-status.yaml"),
    join3(outputDir, "sprint-status.yaml")
  ];
  for (const candidate of candidates) {
    const present = await fileExists(candidate);
    if (present) {
      return candidate;
    }
  }
  return null;
}
183
// Extract "# Epic N: Title" headings (YAML comments in the sprint file)
// into a Map of epic number -> trimmed title.
//
// Fix: uses a fresh local regex with matchAll() instead of the shared
// module-level EPIC_TITLE_REGEX — a /g regex is stateful (lastIndex
// persists across exec calls), so the original was correct only because
// it manually reset lastIndex after every scan. Also pins the parseInt
// radix to 10.
function extractEpicTitles(rawText) {
  const titles = new Map();
  for (const match of rawText.matchAll(/# Epic (\d+): (.+)/g)) {
    titles.set(Number.parseInt(match[1], 10), match[2].trim());
  }
  return titles;
}
192
// Aggregate story and epic status counts plus an overall completion
// percentage (rounded; 0 when there are no stories).
//
// Fix: the original did `byStoryStatus[story.status]++` unconditionally,
// so any unexpected status string NaN-poisoned the counter (undefined++
// === NaN). Unknown statuses are now ignored instead.
function computeStats(epics) {
  const allStories = epics.flatMap((e) => e.stories);
  const totalStories = allStories.length;
  const totalEpics = epics.length;
  const byStoryStatus = {
    backlog: 0,
    "ready-for-dev": 0,
    "in-progress": 0,
    review: 0,
    done: 0
  };
  for (const story of allStories) {
    if (Object.hasOwn(byStoryStatus, story.status)) {
      byStoryStatus[story.status]++;
    }
  }
  const byEpicStatus = {
    backlog: 0,
    "in-progress": 0,
    done: 0
  };
  for (const epic of epics) {
    if (Object.hasOwn(byEpicStatus, epic.status)) {
      byEpicStatus[epic.status]++;
    }
  }
  const doneCount = byStoryStatus["done"];
  const overallProgress = totalStories > 0 ? Math.round(doneCount / totalStories * 100) : 0;
  return { totalStories, totalEpics, byStoryStatus, byEpicStatus, overallProgress };
}
218
// Parse sprint-status.yaml under `outputDir` into { metadata, epics, stats }.
// Returns null when no sprint file exists. Epic titles come from "# Epic N:"
// comment headings (YAML comments are invisible to the YAML parser, so the
// raw text is scanned separately).
async function parseSprintStatus(outputDir) {
  const filePath = await findSprintFile(outputDir);
  if (!filePath) {
    return null;
  }
  const rawText = await readFile2(filePath, "utf-8");
  // Titles are scraped from comments in the raw text, not the YAML tree.
  const epicTitles = extractEpicTitles(rawText);
  const parsed = parseYaml(rawText);
  const developmentStatus = parsed.development_status ?? {};
  const storyLocation = String(parsed.story_location ?? "");
  // Top-level YAML fields, stringified defensively (missing -> "").
  const metadata = {
    generated: String(parsed.generated ?? ""),
    lastUpdated: String(parsed.last_updated ?? ""),
    project: String(parsed.project ?? ""),
    projectKey: String(parsed.project_key ?? ""),
    storyLocation
  };
  // epicNum -> accumulating epic record; created lazily so epics appear
  // whether the first key seen is the epic itself, a retro, or a story.
  const epicMap = /* @__PURE__ */ new Map();
  const ensureEpic = (epicNum) => {
    if (!epicMap.has(epicNum)) {
      epicMap.set(epicNum, { status: "backlog", retrospective: "optional", stories: [] });
    }
    return epicMap.get(epicNum);
  };
  // story_location may be project-relative ("_bmad-output/...") or
  // relative to the project root (hence the "..").
  const storyBasePath = storyLocation.startsWith("_bmad-output/") ? join3(outputDir, storyLocation.replace(/^_bmad-output\//, "")) : join3(outputDir, "..", storyLocation);
  // Classify each development_status key as epic / retrospective / story.
  for (const [key, value] of Object.entries(developmentStatus)) {
    let match;
    match = EPIC_STATUS_REGEX.exec(key);
    if (match) {
      const epicNum = parseInt(match[1]);
      ensureEpic(epicNum).status = value;
      continue;
    }
    match = RETRO_REGEX.exec(key);
    if (match) {
      const epicNum = parseInt(match[1]);
      ensureEpic(epicNum).retrospective = value;
      continue;
    }
    match = STORY_REGEX.exec(key);
    if (match) {
      const epicNum = parseInt(match[1]);
      const storyNum = parseInt(match[2]);
      const remainingSlug = match[3];
      const id = `${epicNum}-${storyNum}`;
      const epicId = `epic-${epicNum}`;
      const title = slugToTitle(remainingSlug);
      // hasFile: whether a matching story markdown file exists on disk.
      const hasFile = await fileExists(join3(storyBasePath, `${key}.md`));
      ensureEpic(epicNum).stories.push({
        id,
        slug: key,
        title,
        status: value,
        hasFile,
        epicId
      });
    }
    // Keys matching none of the three shapes are silently ignored.
  }
  // Emit epics in numeric order, each with its stories sorted by
  // (epic, story) number parsed from the "E-S" id.
  const epicNumbers = Array.from(epicMap.keys()).sort((a, b) => a - b);
  const epics = epicNumbers.map((num) => {
    const data = epicMap.get(num);
    data.stories.sort((a, b) => {
      const [aEpic, aStory] = a.id.split("-").map(Number);
      const [bEpic, bStory] = b.id.split("-").map(Number);
      return aEpic !== bEpic ? aEpic - bEpic : aStory - bStory;
    });
    return {
      id: `epic-${num}`,
      number: num,
      // Fall back to a generated title when no "# Epic N:" heading exists.
      title: epicTitles.get(num) ?? `Epic ${num}`,
      status: data.status,
      stories: data.stories,
      retrospective: data.retrospective
    };
  });
  const stats = computeStats(epics);
  return { metadata, epics, stats };
}
296
+
297
+ // ../server/dist/routes/api.js
298
// Build the Express API router. `currentOutputDir` is closure state:
// it starts from options.outputDir and is updated by POST /configure,
// so all routes see reconfiguration without a restart.
function createApiRouter(options) {
  const router = Router();
  let currentOutputDir = options.outputDir;
  // GET /capabilities — static feature flags; git/pty not implemented yet.
  router.get("/capabilities", (_req, res) => {
    const capabilities = {
      git: false,
      pty: false,
      version: "0.1.0"
    };
    res.json(capabilities);
  });
  // GET /state — project state; "unconfigured" + suggestions when no
  // output dir, otherwise cached state or a fresh scan.
  router.get("/state", async (_req, res) => {
    if (!currentOutputDir) {
      const suggestions = await findSuggestions(options.rootDir);
      res.json({
        status: "unconfigured",
        rootDir: options.rootDir,
        suggestions
      });
      return;
    }
    const cached = options.getCachedState();
    if (cached) {
      res.json(cached);
      return;
    }
    try {
      const state = await options.scanner.scan(currentOutputDir, options.rootDir);
      res.json(state);
    } catch {
      res.status(500).json({
        error: { code: "SCAN_FAILED", message: "Failed to scan project" }
      });
    }
  });
  // POST /configure — validate body.outputDir (relative to rootDir),
  // require it to be an existing directory, persist to .bmad-studio.json,
  // and update the closure state.
  router.post("/configure", async (req, res) => {
    const { outputDir } = req.body;
    if (!outputDir || typeof outputDir !== "string") {
      res.status(400).json({
        error: { code: "INVALID_INPUT", message: "outputDir is required" }
      });
      return;
    }
    const resolvedDir = join4(options.rootDir, outputDir);
    try {
      const s = await stat2(resolvedDir);
      if (!s.isDirectory()) {
        res.status(400).json({
          error: {
            code: "NOT_A_DIRECTORY",
            message: `${outputDir} is not a directory`
          }
        });
        return;
      }
    } catch {
      res.status(404).json({
        error: {
          code: "NOT_FOUND",
          message: `Directory not found: ${outputDir}`
        }
      });
      return;
    }
    try {
      await writeConfig(options.rootDir, { version: 1, outputDir });
      currentOutputDir = resolvedDir;
      res.json({ success: true, outputDir: resolvedDir });
    } catch {
      res.status(500).json({
        error: {
          code: "WRITE_FAILED",
          message: "Failed to write configuration file"
        }
      });
    }
  });
  // GET /document/*path — serve a file from the output dir. Rejects
  // ".." segments and absolute paths, then re-checks that the normalized
  // path stays inside the output dir (defense in depth against traversal).
  router.get("/document/*path", async (req, res) => {
    if (!currentOutputDir) {
      res.status(400).json({
        error: { code: "NOT_CONFIGURED", message: "Output directory not configured" }
      });
      return;
    }
    // Wildcard params may arrive as an array of segments.
    const rawPath = req.params.path;
    const docPath = decodeURIComponent(Array.isArray(rawPath) ? rawPath.join("/") : rawPath);
    if (docPath.includes("..") || isAbsolute(docPath)) {
      res.status(400).json({
        error: { code: "INVALID_PATH", message: "Path must be relative and cannot contain .." }
      });
      return;
    }
    const fullPath = normalize(join4(currentOutputDir, docPath));
    if (!fullPath.startsWith(normalize(currentOutputDir))) {
      res.status(400).json({
        error: { code: "INVALID_PATH", message: "Path escapes output directory" }
      });
      return;
    }
    try {
      const fileStat = await stat2(fullPath);
      if (!fileStat.isFile()) {
        res.status(404).json({
          error: { code: "DOCUMENT_NOT_FOUND", message: "Path is not a file" }
        });
        return;
      }
      const content = await readFile3(fullPath, "utf-8");
      res.setHeader("Content-Type", "application/json");
      res.json({
        content,
        size: fileStat.size,
        lastModified: fileStat.mtime.toISOString()
      });
    } catch {
      res.status(404).json({
        error: { code: "DOCUMENT_NOT_FOUND", message: `Document not found: ${docPath}` }
      });
    }
  });
  // GET /sprint — parsed sprint status, or a no-sprint-data marker when
  // no sprint-status.yaml is found.
  router.get("/sprint", async (_req, res) => {
    if (!currentOutputDir) {
      res.status(400).json({
        error: { code: "NOT_CONFIGURED", message: "Output directory not configured" }
      });
      return;
    }
    try {
      const sprintStatus = await parseSprintStatus(currentOutputDir);
      if (!sprintStatus) {
        res.json({ status: "no-sprint-data", message: "No sprint-status.yaml found" });
        return;
      }
      res.json(sprintStatus);
    } catch {
      res.status(500).json({
        error: { code: "SPRINT_PARSE_FAILED", message: "Failed to parse sprint status" }
      });
    }
  });
  // Git endpoints are stubs (capabilities reports git: false).
  router.get("/git/history/:path", (_req, res) => {
    res.status(501).json({ error: "Not implemented" });
  });
  router.get("/git/diff/:path/:hash", (_req, res) => {
    res.status(501).json({ error: "Not implemented" });
  });
  return router;
}
446
// Suggest candidate output directories: non-hidden subdirectories of
// `rootDir` containing at least one ".md" file. Best effort — unreadable
// directories (or an unreadable root) are silently skipped.
async function findSuggestions(rootDir) {
  let dirents;
  try {
    dirents = await readdir(rootDir, { withFileTypes: true });
  } catch {
    return [];
  }
  const suggestions = [];
  for (const dirent of dirents) {
    const hidden = dirent.name.startsWith(".");
    if (hidden || !dirent.isDirectory()) {
      continue;
    }
    try {
      const files = await readdir(join4(rootDir, dirent.name));
      const hasMarkdown = files.some((f) => f.endsWith(".md"));
      if (hasMarkdown) {
        suggestions.push(dirent.name);
      }
    } catch {
      // unreadable subdirectory — skip it
    }
  }
  return suggestions;
}
466
+
467
+ // ../server/dist/detection.js
468
+ import { readFile as readFile4, stat as stat3, glob } from "fs/promises";
469
+ import { join as join5, resolve as resolve2, dirname } from "path";
470
+ import debugFactory2 from "debug";
471
// Namespaced logger; enable with DEBUG=bmad-studio:detection.
var debug2 = debugFactory2("bmad-studio:detection");
// Filenames whose presence marks a directory tree as BMAD output; used
// by the glob fallback in scanForArtifacts (detection step 6).
var BMAD_ARTIFACT_PATTERNS = [
  "**/prd.md",
  "**/PRD.md",
  "**/architecture.md",
  "**/epics.md",
  "**/sprint-status.yaml",
  "**/product-brief.md"
];
480
// Resolve the BMAD output directory via a priority cascade:
//   1. explicit CLI flag          2. .bmad-studio.json config
//   3. AGENTS.md references       4. _bmad/*/config.yaml
//   5. default _bmad-output/      6. artifact glob scan
//   7. give up (null)
// Steps 1-2 trust their value without checking it exists on disk.
async function detectOutputDir(cwd, cliFlag) {
  if (cliFlag) {
    const fromFlag = resolve2(cwd, cliFlag);
    debug2("Step 1: CLI flag \u2192 %s", fromFlag);
    return fromFlag;
  }
  const config = await readConfig(cwd);
  if (config?.outputDir) {
    const fromConfig = resolve2(cwd, config.outputDir);
    debug2("Step 2: .bmad-studio.json \u2192 %s", fromConfig);
    return fromConfig;
  }
  const fromAgents = await parseAgentsMd(cwd);
  if (fromAgents) {
    debug2("Step 3: AGENTS.md \u2192 %s", fromAgents);
    return fromAgents;
  }
  const fromBmadConfig = await parseBmadConfigYaml(cwd);
  if (fromBmadConfig) {
    debug2("Step 4: _bmad/core/config.yaml \u2192 %s", fromBmadConfig);
    return fromBmadConfig;
  }
  const defaultDir = join5(cwd, "_bmad-output");
  if (await directoryExists(defaultDir)) {
    debug2("Step 5: _bmad-output/ exists \u2192 %s", defaultDir);
    return defaultDir;
  }
  const fromGlob = await scanForArtifacts(cwd);
  if (fromGlob) {
    debug2("Step 6: Glob scan \u2192 %s", fromGlob);
    return fromGlob;
  }
  debug2("Step 7: No BMAD output directory detected");
  return null;
}
515
// Detect the output directory and, when found, best-effort persist it to
// .bmad-studio.json (persistence failures are logged, not raised).
async function resolveOutputDir(cwd, cliFlag) {
  const outputDir = await detectOutputDir(cwd, cliFlag);
  if (!outputDir) {
    return outputDir;
  }
  try {
    await writeConfig(cwd, { version: 1, outputDir });
    debug2("Persisted detected outputDir to .bmad-studio.json");
  } catch (err) {
    debug2("Failed to persist config: %O", err);
  }
  return outputDir;
}
527
// Try to derive the output folder from AGENTS.md: first an explicit
// `output_folder:`/`output_folder=` setting, then the first path segment
// of any `{project-root}/...` reference. Candidates count only when the
// directory actually exists.
async function parseAgentsMd(cwd) {
  let content;
  try {
    content = await readFile4(join5(cwd, "AGENTS.md"), "utf-8");
  } catch {
    return null;
  }
  const explicit = content.match(/output_folder\s*[:=]\s*["']?([^\s"']+)["']?/);
  if (explicit) {
    const candidate = resolve2(cwd, explicit[1]);
    if (await directoryExists(candidate)) {
      return candidate;
    }
  }
  const pathRef = content.match(/\{project-root\}\/([^\s"'}\]]+)/);
  if (pathRef) {
    const topSegment = pathRef[1].split("/")[0];
    const candidate = resolve2(cwd, topSegment);
    if (await directoryExists(candidate)) {
      return candidate;
    }
  }
  return null;
}
549
// Probe the standard _bmad config locations in priority order
// (core before bmm) and return the first output folder found.
async function parseBmadConfigYaml(cwd) {
  const candidates = [
    join5(cwd, "_bmad", "core", "config.yaml"),
    join5(cwd, "_bmad", "bmm", "config.yaml")
  ];
  for (const candidate of candidates) {
    const found = await tryParseBmadConfig(cwd, candidate);
    if (found) {
      return found;
    }
  }
  return null;
}
561
// Extract output_folder from a single _bmad config.yaml. The primary
// branch expands any {project-root} placeholder against `cwd`; the
// fallback branch instead takes only the segment after {project-root}/.
// Either candidate counts only when the directory exists.
async function tryParseBmadConfig(cwd, configPath2) {
  let content;
  try {
    content = await readFile4(configPath2, "utf-8");
  } catch {
    return null;
  }
  const match = content.match(/output_folder\s*:\s*["']?([^\s"'#]+)["']?/);
  if (match) {
    const expanded = match[1].replace(/\{project-root\}/g, cwd);
    const candidate = resolve2(cwd, expanded);
    if (await directoryExists(candidate)) {
      return candidate;
    }
  }
  const altMatch = content.match(/output_folder\s*:\s*["']?\{project-root\}\/([^\s"'#]+)["']?/);
  if (altMatch) {
    const candidate = resolve2(cwd, altMatch[1]);
    if (await directoryExists(candidate)) {
      return candidate;
    }
  }
  return null;
}
582
// Last-resort detection: glob for known BMAD artifact files under `cwd`
// and return the directory of the first match. Patterns that error are
// skipped.
async function scanForArtifacts(cwd) {
  for (const pattern of BMAD_ARTIFACT_PATTERNS) {
    try {
      for await (const hit of glob(pattern, { cwd })) {
        return dirname(resolve2(cwd, hit));
      }
    } catch {
      // pattern failed — move on to the next one
    }
  }
  return null;
}
595
// True only when `dirPath` exists AND is a directory; false for files,
// missing paths, and stat errors.
async function directoryExists(dirPath) {
  try {
    const info = await stat3(dirPath);
    return info.isDirectory();
  } catch {
    return false;
  }
}
603
+
604
+ // ../server/dist/scanner.js
605
+ import { readdir as readdir2, stat as stat4, readFile as readFile5 } from "fs/promises";
606
+ import { join as join6, relative } from "path";
607
+ import picomatch from "picomatch";
608
+ import debugFactory3 from "debug";
609
+
610
+ // ../shared/dist/definitions/phases.js
611
// The four BMAD lifecycle phases, in workflow order, with the accent
// color the UI uses for each phase's stations.
var phaseDefinitions = [
  {
    id: "analysis",
    label: "Analysis",
    accentColor: "#F59E0B"
  },
  {
    id: "planning",
    label: "Planning",
    accentColor: "#3B82F6"
  },
  {
    id: "solutioning",
    label: "Solutioning",
    accentColor: "#8B5CF6"
  },
  {
    id: "implementation",
    label: "Implementation",
    accentColor: "#10B981"
  }
];
633
+
634
+ // ../shared/dist/definitions/workflows.js
635
+ var workflowDefinitions = [
636
+ // Phase 1 — Analysis (6 stations)
637
+ {
638
+ id: "brainstorming",
639
+ label: "Brainstorming",
640
+ phase: "analysis",
641
+ command: "bmad-brainstorming",
642
+ triggerCode: "brainstorm",
643
+ agent: { name: "Mary", role: "Business Analyst", skillId: "bmad-brainstorming" },
644
+ outputs: [{ glob: "**/brainstorming-report.md", description: "Brainstorming report" }],
645
+ inputs: [],
646
+ learn: {
647
+ title: "Brainstorming",
648
+ whatItDoes: "Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods.",
649
+ whenToUse: "At the start of a project when exploring ideas, or when you need fresh perspectives on a problem.",
650
+ whatItNeeds: "A topic or problem statement to brainstorm around. No prior documents required.",
651
+ whatItProduces: "A brainstorming report with organized ideas, themes, and potential directions.",
652
+ proTip: "Run multiple brainstorming sessions with different techniques for richer results."
653
+ }
654
+ },
655
+ {
656
+ id: "domain-research",
657
+ label: "Domain Research",
658
+ phase: "analysis",
659
+ command: "bmad-domain-research",
660
+ triggerCode: "domain",
661
+ agent: { name: "Mary", role: "Business Analyst", skillId: "bmad-domain-research" },
662
+ outputs: [{ glob: "**/domain-research*.md", description: "Domain research findings" }],
663
+ inputs: [],
664
+ learn: {
665
+ title: "Domain Research",
666
+ whatItDoes: "Conduct domain and industry research to understand the problem space and key concepts.",
667
+ whenToUse: "When entering an unfamiliar domain or needing to validate assumptions about an industry.",
668
+ whatItNeeds: "A domain or industry topic to research. No prior documents required.",
669
+ whatItProduces: "A domain research report with key findings, terminology, and industry context.",
670
+ proTip: "Combine with market research for a complete picture of the competitive landscape."
671
+ }
672
+ },
673
+ {
674
+ id: "market-research",
675
+ label: "Market Research",
676
+ phase: "analysis",
677
+ command: "bmad-market-research",
678
+ triggerCode: "market",
679
+ agent: { name: "Mary", role: "Business Analyst", skillId: "bmad-market-research" },
680
+ outputs: [{ glob: "**/market-research*.md", description: "Market research findings" }],
681
+ inputs: [],
682
+ learn: {
683
+ title: "Market Research",
684
+ whatItDoes: "Conduct market research on competition, customers, and market positioning.",
685
+ whenToUse: "When you need to understand competitors, target audience, or market opportunities.",
686
+ whatItNeeds: "A market or product category to research. No prior documents required.",
687
+ whatItProduces: "A market research report covering competitors, customer segments, and opportunities.",
688
+ proTip: "Feed market research findings into your product brief for stronger positioning."
689
+ }
690
+ },
691
+ {
692
+ id: "technical-research",
693
+ label: "Technical Research",
694
+ phase: "analysis",
695
+ command: "bmad-technical-research",
696
+ triggerCode: "techresearch",
697
+ agent: { name: "Winston", role: "Architect", skillId: "bmad-technical-research" },
698
+ outputs: [{ glob: "**/technical-research*.md", description: "Technical research report" }],
699
+ inputs: [],
700
+ learn: {
701
+ title: "Technical Research",
702
+ whatItDoes: "Conduct technical research on technologies, frameworks, and architecture options.",
703
+ whenToUse: "When evaluating technology choices or exploring technical approaches for your solution.",
704
+ whatItNeeds: "A technical topic or set of technologies to evaluate. No prior documents required.",
705
+ whatItProduces: "A technical research report with comparisons, recommendations, and trade-offs.",
706
+ proTip: "Run this before architecture design to make informed technology decisions."
707
+ }
708
+ },
709
+ {
710
+ id: "product-brief",
711
+ label: "Product Brief",
712
+ phase: "analysis",
713
+ command: "bmad-product-brief",
714
+ triggerCode: "brief",
715
+ agent: { name: "Mary", role: "Business Analyst", skillId: "bmad-product-brief" },
716
+ outputs: [{ glob: "**/product-brief.md", description: "Product brief document" }],
717
+ inputs: [],
718
+ learn: {
719
+ title: "Product Brief",
720
+ whatItDoes: "Create or update product briefs through guided or autonomous discovery.",
721
+ whenToUse: "When starting a new product or feature and need to capture the core vision and goals.",
722
+ whatItNeeds: "Your product idea or concept. Optionally, prior research documents for richer context.",
723
+ whatItProduces: "A structured product brief defining the problem, solution, target users, and success metrics.",
724
+ proTip: "The product brief is the foundation for everything \u2014 invest time getting it right."
725
+ }
726
+ },
727
+ {
728
+ id: "prfaq",
729
+ label: "PRFAQ",
730
+ phase: "analysis",
731
+ command: "bmad-prfaq",
732
+ triggerCode: "prfaq",
733
+ agent: { name: "Mary", role: "Business Analyst", skillId: "bmad-prfaq" },
734
+ outputs: [{ glob: "**/prfaq-*.md", description: "PRFAQ document" }],
735
+ inputs: [],
736
+ learn: {
737
+ title: "PRFAQ",
738
+ whatItDoes: "Create a press release / FAQ document to crystallize the product vision.",
739
+ whenToUse: "When you want to work backwards from the customer experience to define what to build.",
740
+ whatItNeeds: "A product concept or product brief. Works best after initial research is complete.",
741
+ whatItProduces: "A PRFAQ document with a mock press release and frequently asked questions.",
742
+ proTip: "The PRFAQ format forces clarity \u2014 if you cannot write a compelling press release, refine your vision."
743
+ }
744
+ },
745
+ // Phase 2 — Planning (2 stations)
746
+ {
747
+ id: "create-prd",
748
+ label: "Create PRD",
749
+ phase: "planning",
750
+ command: "bmad-create-prd",
751
+ triggerCode: "prd",
752
+ agent: { name: "John", role: "Product Manager", skillId: "bmad-create-prd" },
753
+ outputs: [{ glob: "**/{PRD,prd}.md", description: "Product Requirements Document" }],
754
+ inputs: [{ workflowId: "product-brief", outputPattern: "**/product-brief.md", required: true }],
755
+ validation: "prd-validator",
756
+ learn: {
757
+ title: "Product Requirements",
758
+ whatItDoes: "Create a comprehensive PRD from the product brief with detailed requirements.",
759
+ whenToUse: "After the product brief is finalized and you are ready to define detailed requirements.",
760
+ whatItNeeds: "A completed product brief document.",
761
+ whatItProduces: "A Product Requirements Document with functional requirements, user stories, and constraints.",
762
+ proTip: "Run PRD validation afterward to catch gaps before moving to design and architecture."
763
+ }
764
+ },
765
+ {
766
+ id: "ux-design",
767
+ label: "UX Design",
768
+ phase: "planning",
769
+ command: "bmad-create-ux-design",
770
+ triggerCode: "ux",
771
+ agent: { name: "Sally", role: "UX Designer", skillId: "bmad-create-ux-design" },
772
+ outputs: [{ glob: "**/{ux-spec,ux-design-specification}.md", description: "UX design specification" }],
773
+ inputs: [{ workflowId: "create-prd", outputPattern: "**/{PRD,prd}.md", required: true }],
774
+ learn: {
775
+ title: "UX Design",
776
+ whatItDoes: "Plan UX patterns, wireframes, and design specifications from the PRD.",
777
+ whenToUse: "After the PRD is complete and you need to define the user experience and interface.",
778
+ whatItNeeds: "A completed PRD document.",
779
+ whatItProduces: "A UX design specification with component patterns, layouts, and interaction flows.",
780
+ proTip: "UX specs inform both architecture decisions and developer implementation \u2014 do not skip this step."
781
+ }
782
+ },
783
+ // Phase 3 — Solutioning (3 stations)
784
+ {
785
+ id: "architecture",
786
+ label: "Architecture",
787
+ phase: "solutioning",
788
+ command: "bmad-create-architecture",
789
+ triggerCode: "arch",
790
+ agent: { name: "Winston", role: "Architect", skillId: "bmad-create-architecture" },
791
+ outputs: [{ glob: "**/architecture.md", description: "Architecture design document" }],
792
+ inputs: [
793
+ { workflowId: "create-prd", outputPattern: "**/{PRD,prd}.md", required: true },
794
+ { workflowId: "ux-design", outputPattern: "**/{ux-spec,ux-design-specification}.md", required: false }
795
+ ],
796
+ learn: {
797
+ title: "Architecture",
798
+ whatItDoes: "Create architecture and solution design decisions for consistent AI agent implementation.",
799
+ whenToUse: "After PRD and optionally UX design are complete, before breaking work into epics.",
800
+ whatItNeeds: "A completed PRD. UX design specification is optional but recommended.",
801
+ whatItProduces: "An architecture document with ADRs, system design, and technical specifications.",
802
+ proTip: "Architecture decisions recorded here guide every developer agent in the implementation phase."
803
+ }
804
+ },
805
+ {
806
+ id: "epics-and-stories",
807
+ label: "Epics & Stories",
808
+ phase: "solutioning",
809
+ command: "bmad-create-epics-and-stories",
810
+ triggerCode: "epics",
811
+ agent: { name: "John", role: "Product Manager", skillId: "bmad-create-epics-and-stories" },
812
+ outputs: [{ glob: "**/epics.md", description: "Epic and story breakdown" }],
813
+ inputs: [
814
+ { workflowId: "create-prd", outputPattern: "**/{PRD,prd}.md", required: true },
815
+ { workflowId: "architecture", outputPattern: "**/architecture.md", required: true }
816
+ ],
817
+ learn: {
818
+ title: "Epics & Stories",
819
+ whatItDoes: "Break requirements into epics and user stories with acceptance criteria.",
820
+ whenToUse: "After architecture is defined and you are ready to plan the implementation work.",
821
+ whatItNeeds: "Completed PRD and architecture documents.",
822
+ whatItProduces: "An epics document with stories, acceptance criteria, and dependency ordering.",
823
+ proTip: "Well-written stories with clear ACs are the single biggest factor in implementation quality."
824
+ }
825
+ },
826
+ {
827
+ id: "implementation-readiness",
828
+ label: "Implementation Readiness",
829
+ phase: "solutioning",
830
+ command: "bmad-check-implementation-readiness",
831
+ triggerCode: "ready",
832
+ agent: { name: "Bob", role: "Scrum Master", skillId: "bmad-check-implementation-readiness" },
833
+ outputs: [{ glob: "**/readiness-check*.md", description: "Readiness check results" }],
834
+ inputs: [
835
+ { workflowId: "create-prd", outputPattern: "**/{PRD,prd}.md", required: true },
836
+ { workflowId: "architecture", outputPattern: "**/architecture.md", required: true },
837
+ { workflowId: "epics-and-stories", outputPattern: "**/epics.md", required: true }
838
+ ],
839
+ validation: "readiness-validator",
840
+ learn: {
841
+ title: "Implementation Readiness",
842
+ whatItDoes: "Validate that PRD, UX, Architecture, and Epics are complete and consistent.",
843
+ whenToUse: "Before starting implementation \u2014 this is your final quality gate.",
844
+ whatItNeeds: "Completed PRD, architecture, and epics documents.",
845
+ whatItProduces: "A readiness check report identifying gaps, inconsistencies, and blockers.",
846
+ proTip: "Catching issues here saves 10x the effort compared to discovering them during development."
847
+ }
848
+ },
849
+ // Phase 4 — Implementation (7 stations)
850
+ {
851
+ id: "sprint-planning",
852
+ label: "Sprint Planning",
853
+ phase: "implementation",
854
+ command: "bmad-sprint-planning",
855
+ triggerCode: "sprint",
856
+ agent: { name: "Bob", role: "Scrum Master", skillId: "bmad-sprint-planning" },
857
+ outputs: [{ glob: "**/sprint-status.yaml", description: "Sprint status tracking" }],
858
+ inputs: [{ workflowId: "epics-and-stories", outputPattern: "**/epics.md", required: true }],
859
+ learn: {
860
+ title: "Sprint Planning",
861
+ whatItDoes: "Generate sprint status tracking from epics to organize implementation work.",
862
+ whenToUse: "After implementation readiness is confirmed and you are ready to start building.",
863
+ whatItNeeds: "A completed epics document with story definitions.",
864
+ whatItProduces: "A sprint-status.yaml file tracking story progress through the sprint.",
865
+ proTip: "Sprint planning creates the backbone for all dev-story and status tracking workflows."
866
+ }
867
+ },
868
+ {
869
+ id: "create-story",
870
+ label: "Create Story",
871
+ phase: "implementation",
872
+ command: "bmad-create-story",
873
+ triggerCode: "story",
874
+ agent: { name: "Bob", role: "Scrum Master", skillId: "bmad-create-story" },
875
+ outputs: [{ glob: "**/story-*.md", description: "Story specification file" }],
876
+ inputs: [
877
+ { workflowId: "sprint-planning", outputPattern: "**/sprint-status.yaml", required: true },
878
+ { workflowId: "epics-and-stories", outputPattern: "**/epics.md", required: true }
879
+ ],
880
+ learn: {
881
+ title: "Create Story",
882
+ whatItDoes: "Create a dedicated story file with all the context an agent needs for implementation.",
883
+ whenToUse: "When the next story in the sprint is ready to be developed.",
884
+ whatItNeeds: "Sprint status file and epics document with the story to be created.",
885
+ whatItProduces: "A detailed story spec file with tasks, subtasks, acceptance criteria, and dev notes.",
886
+ proTip: "Stories created here are self-contained \u2014 the dev agent should not need to look elsewhere."
887
+ }
888
+ },
889
+ {
890
+ id: "dev-story",
891
+ label: "Dev Story",
892
+ phase: "implementation",
893
+ command: "bmad-dev-story",
894
+ triggerCode: "dev",
895
+ agent: { name: "Amelia", role: "Developer", skillId: "bmad-dev-story" },
896
+ outputs: [{ glob: "**/*.{ts,tsx,js,jsx}", description: "Working code and tests" }],
897
+ inputs: [{ workflowId: "create-story", outputPattern: "**/story-*.md", required: true }],
898
+ learn: {
899
+ title: "Dev Story",
900
+ whatItDoes: "Execute story implementation following a context-filled story spec file.",
901
+ whenToUse: "When a story file is created and marked ready-for-dev in the sprint status.",
902
+ whatItNeeds: "A completed story specification file with tasks and acceptance criteria.",
903
+ whatItProduces: "Working code, tests, and an updated story file with completion notes.",
904
+ proTip: "Use a different LLM for code review than the one that implemented the story."
905
+ }
906
+ },
907
+ {
908
+ id: "code-review",
909
+ label: "Code Review",
910
+ phase: "implementation",
911
+ command: "bmad-code-review",
912
+ triggerCode: "review",
913
+ agent: { name: "Amelia", role: "Developer", skillId: "bmad-code-review" },
914
+ outputs: [{ glob: "**/review-*.md", description: "Code review results" }],
915
+ inputs: [{ workflowId: "dev-story", outputPattern: "**/*.{ts,tsx}", required: true }],
916
+ learn: {
917
+ title: "Code Review",
918
+ whatItDoes: "Review code changes adversarially using parallel review layers for thorough analysis.",
919
+ whenToUse: "After dev-story completes and the story is in review status.",
920
+ whatItNeeds: "Completed story implementation with code changes and tests.",
921
+ whatItProduces: "A code review report with findings categorized by severity and actionable items.",
922
+ proTip: "Run code review with a different LLM than the one that wrote the code for unbiased findings."
923
+ }
924
+ },
925
+ {
926
+ id: "correct-course",
927
+ label: "Correct Course",
928
+ phase: "implementation",
929
+ command: "bmad-correct-course",
930
+ triggerCode: "course",
931
+ agent: { name: "Bob", role: "Scrum Master", skillId: "bmad-correct-course" },
932
+ outputs: [{ glob: "**/course-correction*.md", description: "Course correction plan" }],
933
+ inputs: [
934
+ { workflowId: "sprint-planning", outputPattern: "**/sprint-status.yaml", required: true }
935
+ ],
936
+ learn: {
937
+ title: "Correct Course",
938
+ whatItDoes: "Manage significant changes during sprint execution when priorities shift.",
939
+ whenToUse: "When requirements change, blockers arise, or sprint priorities need adjustment.",
940
+ whatItNeeds: "Current sprint status file and description of the change needed.",
941
+ whatItProduces: "A course correction plan with updated priorities and adjusted sprint scope.",
942
+ proTip: "Course corrections are normal \u2014 it is better to adjust early than to deliver the wrong thing."
943
+ }
944
+ },
945
+ {
946
+ id: "sprint-status",
947
+ label: "Sprint Status",
948
+ phase: "implementation",
949
+ command: "bmad-sprint-status",
950
+ triggerCode: "status",
951
+ agent: { name: "Bob", role: "Scrum Master", skillId: "bmad-sprint-status" },
952
+ outputs: [{ glob: "**/sprint-status.yaml", description: "Updated sprint status" }],
953
+ inputs: [
954
+ { workflowId: "sprint-planning", outputPattern: "**/sprint-status.yaml", required: true }
955
+ ],
956
+ learn: {
957
+ title: "Sprint Status",
958
+ whatItDoes: "Summarize sprint status and surface risks across all in-flight stories.",
959
+ whenToUse: "When you want a quick overview of sprint progress or need to identify blockers.",
960
+ whatItNeeds: "Current sprint status file.",
961
+ whatItProduces: "An updated sprint status summary with risk indicators and progress metrics.",
962
+ proTip: "Check sprint status regularly to catch drift before it becomes a problem."
963
+ }
964
+ },
965
+ {
966
+ id: "retrospective",
967
+ label: "Retrospective",
968
+ phase: "implementation",
969
+ command: "bmad-retrospective",
970
+ triggerCode: "retro",
971
+ agent: { name: "Bob", role: "Scrum Master", skillId: "bmad-retrospective" },
972
+ outputs: [{ glob: "**/retrospective*.md", description: "Retrospective findings" }],
973
+ inputs: [
974
+ { workflowId: "sprint-planning", outputPattern: "**/sprint-status.yaml", required: true }
975
+ ],
976
+ learn: {
977
+ title: "Retrospective",
978
+ whatItDoes: "Post-epic review to extract lessons learned and assess what went well or poorly.",
979
+ whenToUse: "After completing an epic to capture insights before starting the next one.",
980
+ whatItNeeds: "Sprint status file and completed epic stories for review.",
981
+ whatItProduces: "A retrospective report with lessons learned, improvements, and team insights.",
982
+ proTip: "Retrospectives improve every subsequent epic \u2014 do not skip them even when time is tight."
983
+ }
984
+ },
985
+ // Express Line (1 station)
986
+ {
987
+ id: "quick-dev",
988
+ label: "Quick Dev",
989
+ phase: "implementation",
990
+ command: "bmad-quick-dev",
991
+ triggerCode: "quick",
992
+ agent: { name: "Barry", role: "Quick Flow Solo Dev", skillId: "bmad-quick-dev" },
993
+ outputs: [
994
+ { glob: "**/spec-*.md", description: "Quick spec document" },
995
+ { glob: "**/*.{ts,tsx,js,jsx}", description: "Implementation code" }
996
+ ],
997
+ inputs: [],
998
+ isExpress: true,
999
+ learn: {
1000
+ title: "Quick Dev",
1001
+ whatItDoes: "Rapid spec and implementation for smaller tasks following existing project patterns.",
1002
+ whenToUse: "For quick fixes, small features, or changes that do not warrant full sprint planning.",
1003
+ whatItNeeds: "A description of what to build or fix. No prior documents required.",
1004
+ whatItProduces: "A quick spec document and working implementation code with tests.",
1005
+ proTip: "Quick Dev is for tasks that fit in a single session \u2014 use the full pipeline for larger work."
1006
+ }
1007
+ }
1008
+ ];
1009
+
1010
+ // ../shared/dist/definitions/dependencies.js
1011
// Maps each workflow id to the upstream outputs it consumes. An empty list
// means the workflow can start at any time; entries with required: true gate
// the workflow until the upstream output exists.
var inputDependencyGraph = {
  // Phase 1 — analysis workflows have no prerequisites.
  brainstorming: [],
  "domain-research": [],
  "market-research": [],
  "technical-research": [],
  "product-brief": [],
  prfaq: [],
  // Phase 2 — planning builds on the analysis documents.
  "create-prd": [{ workflowId: "product-brief", outputPattern: "**/product-brief.md", required: true }],
  "ux-design": [{ workflowId: "create-prd", outputPattern: "**/{PRD,prd}.md", required: true }],
  // Phase 3 — solutioning builds on the planning documents.
  architecture: [
    { workflowId: "create-prd", outputPattern: "**/{PRD,prd}.md", required: true },
    { workflowId: "ux-design", outputPattern: "**/{ux-spec,ux-design-specification}.md", required: false }
  ],
  "epics-and-stories": [
    { workflowId: "create-prd", outputPattern: "**/{PRD,prd}.md", required: true },
    { workflowId: "architecture", outputPattern: "**/architecture.md", required: true }
  ],
  "implementation-readiness": [
    { workflowId: "create-prd", outputPattern: "**/{PRD,prd}.md", required: true },
    { workflowId: "architecture", outputPattern: "**/architecture.md", required: true },
    { workflowId: "epics-and-stories", outputPattern: "**/epics.md", required: true }
  ],
  // Phase 4 — implementation builds on the solutioning documents.
  "sprint-planning": [{ workflowId: "epics-and-stories", outputPattern: "**/epics.md", required: true }],
  "create-story": [
    { workflowId: "sprint-planning", outputPattern: "**/sprint-status.yaml", required: true },
    { workflowId: "epics-and-stories", outputPattern: "**/epics.md", required: true }
  ],
  "dev-story": [{ workflowId: "create-story", outputPattern: "**/story-*.md", required: true }],
  // NOTE(review): this pattern covers only ts/tsx while the dev-story outputs
  // elsewhere also list js/jsx — confirm whether {ts,tsx,js,jsx} was intended.
  "code-review": [{ workflowId: "dev-story", outputPattern: "**/*.{ts,tsx}", required: true }],
  "correct-course": [{ workflowId: "sprint-planning", outputPattern: "**/sprint-status.yaml", required: true }],
  "sprint-status": [{ workflowId: "sprint-planning", outputPattern: "**/sprint-status.yaml", required: true }],
  retrospective: [{ workflowId: "sprint-planning", outputPattern: "**/sprint-status.yaml", required: true }],
  // Express lane — intentionally unconstrained.
  "quick-dev": []
};
1060
+
1061
+ // ../shared/dist/definitions/validators.js
1062
/**
 * Build a document validator from a workflow id and a list of rules.
 *
 * Each rule is { id, description, severity, test(content) -> boolean }.
 * The returned validator exposes:
 *  - workflowId: the id passed in,
 *  - rules: severity/description summaries (test functions stripped),
 *  - validate(content): runs every rule against the document text and returns
 *    { documentPath, workflowId, checks, passed, timestamp } where `passed`
 *    is true only when every check passed, regardless of severity.
 * documentPath is always "" here; callers fill it in if needed.
 */
function createValidator(workflowId, rules) {
  const ruleSummaries = [];
  for (const { id, description, severity } of rules) {
    ruleSummaries.push({ id, description, severity });
  }
  const validate = (content) => {
    const checks = [];
    let allPassed = true;
    for (const rule of rules) {
      const passed = rule.test(content);
      if (!passed) {
        allPassed = false;
      }
      checks.push({
        ruleId: rule.id,
        passed,
        message: passed ? rule.description : `Missing: ${rule.description}`,
        severity: rule.severity
      });
    }
    return {
      documentPath: "",
      workflowId,
      checks,
      passed: allPassed,
      timestamp: new Date().toISOString()
    };
  };
  return { workflowId, rules: ruleSummaries, validate };
}
1086
// Build a case-insensitive, multiline matcher for a level-2 markdown heading,
// tolerating an optional numeric prefix such as "## 3. Title". `text` is
// interpolated as a regex fragment, so callers may pass alternations like
// "(Foo|Bar)".
var heading = (text) => {
  const source = "^##\\s+(?:\\d+\\.?\\s+)?" + text;
  return new RegExp(source, "mi");
};
1087
// PRD structural rules: error-severity rules gate required sections, warning
// rules flag recommended ones. Each test probes for a level-2 heading via the
// shared `heading` helper; alternations accept common section-name variants.
var prdRules = [
  { id: "prd-exec-summary", description: "Executive summary present", severity: "error", test: (content) => heading("Executive Summary").test(content) },
  { id: "prd-functional-reqs", description: "Functional requirements section", severity: "error", test: (content) => heading("Functional Requirements").test(content) },
  { id: "prd-nonfunctional-reqs", description: "Non-functional requirements section", severity: "error", test: (content) => heading("Non-?[Ff]unctional Requirements").test(content) },
  { id: "prd-user-stories", description: "User stories or use cases", severity: "error", test: (content) => heading("(User Stories|Use Cases|Acceptance Criteria|Stakeholders)").test(content) },
  { id: "prd-success-metrics", description: "Success metrics", severity: "warning", test: (content) => heading("(Success Metrics|Acceptance Criteria|Constraints)").test(content) },
  { id: "prd-out-of-scope", description: "Out of scope section", severity: "warning", test: (content) => heading("(Out of Scope|Non-?Goals|Constraints|Open Questions)").test(content) }
];
1125
// PRD validator: four required sections (errors) plus two recommended ones
// (warnings); overall pass requires every rule to pass.
var prdValidator = createValidator("prd-validator", prdRules);
1126
// Architecture document rules. Several rules accept either a matching
// heading or an inline mention (e.g. an ADR entry), since bundled
// architecture docs vary in layout.
var architectureRules = [
  { id: "arch-tech-stack", description: "Technology stack", severity: "error", test: (content) => heading("(Technology Stack|Tech Stack|Architecture.*Tech)").test(content) || /technology\s+stack/i.test(content) },
  { id: "arch-adrs", description: "ADRs present", severity: "error", test: (content) => /^###?\s+ADR-\d/mi.test(content) || heading("(Architecture Decision Records|Architectural Decisions|Core Architectural Decisions)").test(content) },
  { id: "arch-system-components", description: "System components", severity: "error", test: (content) => heading("(System Components?|Package Structure|Monorepo Structure)").test(content) },
  { id: "arch-data-model", description: "Data model", severity: "warning", test: (content) => heading("(Data Model|Data Flow|State Management)").test(content) },
  { id: "arch-api-contracts", description: "API contracts", severity: "warning", test: (content) => heading("(API Contracts?|API Design|Communication)").test(content) || /^###?\s+ADR-\d+.*API/mi.test(content) },
  { id: "arch-deployment", description: "Deployment architecture", severity: "warning", test: (content) => heading("(Deployment|Distribution|Packaging|Infrastructure)").test(content) || /^###?\s+ADR-\d+.*Packag/mi.test(content) }
];
1164
// Architecture validator: tech stack, ADRs, and components are required;
// data model, API contracts, and deployment are advisory warnings.
var architectureValidator = createValidator("architecture-validator", architectureRules);
1165
// Story-file rules: acceptance criteria are mandatory; dependency and
// story-point metadata are advisory. Inline forms ("dependencies:",
// "story points: 3") are accepted alongside headings.
var storyRules = [
  { id: "story-acceptance-criteria", description: "Acceptance criteria", severity: "error", test: (content) => heading("Acceptance Criteria").test(content) },
  { id: "story-dependencies", description: "Dependencies", severity: "warning", test: (content) => heading("Dependencies").test(content) || /dependencies:/mi.test(content) },
  { id: "story-points", description: "Story points", severity: "info", test: (content) => /story\s*points?\s*[:=]\s*\d/mi.test(content) || heading("Story Points").test(content) }
];
1185
// Story validator: only acceptance criteria block a pass; dependencies and
// points surface as warning/info checks.
var storyValidator = createValidator("story-validator", storyRules);
1186
// Readiness-report rules: each requires both a mention of the artifact
// (PRD / architecture / epics) and a pass/complete/review keyword. These are
// loose heuristics over free-form report text, not strict parsing.
var readinessRules = [
  { id: "readiness-prd-check", description: "PRD review completed", severity: "error", test: (content) => /PRD/i.test(content) && /\b(pass|complete|review)/i.test(content) },
  { id: "readiness-arch-check", description: "Architecture review completed", severity: "error", test: (content) => /architecture/i.test(content) && /\b(pass|complete|review)/i.test(content) },
  { id: "readiness-epics-check", description: "Epics review completed", severity: "error", test: (content) => /epics?/i.test(content) && /\b(pass|complete|review)/i.test(content) }
];
1206
// Readiness-gate validator: all three rules are error-severity, so the report
// passes only when PRD, architecture, and epics reviews are all evidenced.
var readinessValidator = createValidator("readiness-validator", readinessRules);
1207
// Registry of every document validator, queried by id via getValidator.
var documentValidators = [prdValidator, architectureValidator, storyValidator, readinessValidator];
/**
 * Look up a validator by its id (the `validation` field on a workflow
 * definition). Returns undefined when no validator is registered.
 */
function getValidator(validatorId) {
  for (const validator of documentValidators) {
    if (validator.workflowId === validatorId) {
      return validator;
    }
  }
  return undefined;
}
1216
+
1217
+ // ../server/dist/scanner.js
1218
// Namespaced debug logger for the scanner (enable with DEBUG=bmad-studio:scanner).
var debug3 = debugFactory3("bmad-studio:scanner");
1219
var ProjectScanner = class {
  // Cache of file content keyed by absolute path; entries are invalidated
  // when the file's mtime changes (see readFileCached).
  mtimeCache = /* @__PURE__ */ new Map();
  /**
   * Full project scan — iterates all workflow definitions, computes each
   * workflow's status from files under outputDir, resolves dependency
   * satisfaction, and assembles the complete project state snapshot.
   */
  async scan(outputDir, rootDir) {
    debug3("Starting full scan: outputDir=%s rootDir=%s", outputDir, rootDir);
    const allFiles = await this.walkDir(outputDir, outputDir);
    debug3("Found %d files in output directory", allFiles.length);
    const workflows = {};
    for (const wf of workflowDefinitions) {
      workflows[wf.id] = await this.computeWorkflowState(wf, allFiles, outputDir);
    }
    // Dependencies can only be resolved once every workflow state exists.
    for (const wf of workflowDefinitions) {
      workflows[wf.id].dependenciesSatisfied = this.checkDependencies(wf.id, workflows);
    }
    const expressLine = workflowDefinitions.filter((wf) => wf.isExpress).map((wf) => wf.id);
    const gitAvailable = await this.checkGitAvailable(rootDir);
    const recommendation = this.getNextRecommendation(workflows);
    // Display name = last path segment; tolerate both separator styles and
    // trailing separators (the original split("/") broke on Windows paths).
    const projectName = rootDir.split(/[\\/]/).filter(Boolean).pop() ?? "unknown";
    const state = {
      projectName,
      projectRoot: rootDir,
      outputDir,
      workflows,
      phases: [...phaseDefinitions],
      expressLine,
      gitAvailable,
      recommendation,
      lastScanned: new Date().toISOString()
    };
    debug3("Scan complete: %d workflows evaluated", Object.keys(workflows).length);
    return state;
  }
  /**
   * Incremental rescan of a single workflow. Recomputes that workflow's
   * state, then refreshes dependenciesSatisfied on every workflow that lists
   * it as an input. Returns currentState unchanged for unknown ids.
   */
  async scanWorkflow(workflowId, outputDir, currentState) {
    const wfDef = workflowDefinitions.find((w) => w.id === workflowId);
    if (!wfDef) {
      debug3("Unknown workflow: %s", workflowId);
      return currentState;
    }
    const allFiles = await this.walkDir(outputDir, outputDir);
    const newState = await this.computeWorkflowState(wfDef, allFiles, outputDir);
    newState.dependenciesSatisfied = this.checkDependencies(workflowId, {
      ...currentState.workflows,
      [workflowId]: newState
    });
    const workflows = { ...currentState.workflows, [workflowId]: newState };
    // Downstream consumers of this workflow's output need their dependency
    // flags re-evaluated against the updated state.
    for (const wf of workflowDefinitions) {
      const deps = inputDependencyGraph[wf.id] ?? [];
      if (deps.some((d) => d.workflowId === workflowId)) {
        workflows[wf.id] = {
          ...workflows[wf.id],
          dependenciesSatisfied: this.checkDependencies(wf.id, workflows)
        };
      }
    }
    return {
      ...currentState,
      workflows,
      recommendation: this.getNextRecommendation(workflows),
      lastScanned: new Date().toISOString()
    };
  }
  /**
   * Compute the status of a single workflow based on its output glob
   * patterns. Status is "not-started" when no pattern matched, "completed"
   * when every pattern matched, and "in-progress" otherwise; progress is the
   * percentage of matched patterns. When the workflow declares a validator,
   * the first existing output file is validated and the results attached.
   */
  async computeWorkflowState(wfDef, allFiles, outputDir) {
    const outputs = [];
    let matchedCount = 0;
    const totalPatterns = wfDef.outputs.length;
    for (const outputPattern of wfDef.outputs) {
      const matcher = picomatch(outputPattern.glob);
      let patternMatched = false;
      for (const filePath of allFiles) {
        if (!matcher(filePath)) {
          continue;
        }
        patternMatched = true;
        const fileStat = await this.statFile(join6(outputDir, filePath));
        outputs.push({
          path: filePath,
          exists: true,
          lastModified: fileStat?.mtime,
          sizeBytes: fileStat?.size
        });
      }
      if (patternMatched) {
        matchedCount++;
      }
    }
    // This computation was previously duplicated across two byte-identical
    // branches on wfDef.validation; collapsed into one.
    let status;
    let progress;
    if (matchedCount === 0) {
      status = "not-started";
      progress = 0;
    } else if (matchedCount === totalPatterns) {
      status = "completed";
      progress = 100;
    } else {
      status = "in-progress";
      progress = Math.round(matchedCount / totalPatterns * 100);
    }
    let validation;
    if (wfDef.validation && outputs.length > 0) {
      const validator = getValidator(wfDef.validation);
      const firstOutput = validator ? outputs.find((o) => o.exists) : void 0;
      if (validator && firstOutput) {
        const content = await this.readFileCached(join6(outputDir, firstOutput.path));
        if (content) {
          const result = validator.validate(content);
          const totalChecks = result.checks.length;
          const passingChecks = result.checks.filter((c) => c.passed).length;
          const score = totalChecks > 0 ? Math.round(passingChecks / totalChecks * 100) : 0;
          validation = { checks: result.checks, score };
        }
      }
    }
    return {
      workflowId: wfDef.id,
      status,
      outputs,
      dependenciesSatisfied: true,
      // resolved by the caller once all workflow states exist
      progress,
      validation
    };
  }
  /**
   * True when every required upstream dependency of `workflowId` has at
   * least been started. Optional dependencies never block.
   */
  checkDependencies(workflowId, workflows) {
    const deps = inputDependencyGraph[workflowId] ?? [];
    return deps.every((dep) => {
      if (!dep.required) {
        return true;
      }
      const upstream = workflows[dep.workflowId];
      return Boolean(upstream) && upstream.status !== "not-started";
    });
  }
  /**
   * Deterministic next-step recommendation. Prefers resuming in-progress
   * work over starting new work, walks phases in planning → solutioning →
   * implementation order (analysis is never recommended), then falls back to
   * the express line. Only workflows whose required dependencies are
   * satisfied are eligible. Returns a workflow id or null.
   */
  getNextRecommendation(workflows) {
    const phaseOrder = ["planning", "solutioning", "implementation"];
    // Two passes, unified from previously duplicated loops: resume
    // in-progress work first, then suggest the next unstarted workflow.
    for (const wantedStatus of ["in-progress", "not-started"]) {
      for (const phaseId of phaseOrder) {
        for (const wfDef of workflowDefinitions) {
          if (wfDef.phase !== phaseId || wfDef.isExpress) {
            continue;
          }
          const state = workflows[wfDef.id];
          if (state && state.status === wantedStatus && state.dependenciesSatisfied) {
            return wfDef.id;
          }
        }
      }
    }
    for (const wfDef of workflowDefinitions) {
      if (!wfDef.isExpress) {
        continue;
      }
      const state = workflows[wfDef.id];
      if (!state) {
        continue;
      }
      if ((state.status === "in-progress" || state.status === "not-started") && state.dependenciesSatisfied) {
        return wfDef.id;
      }
    }
    return null;
  }
  /**
   * Recursively collect file paths under `dir`, relative to `baseDir`.
   * Dot-directories are skipped (dot-files are included); unreadable
   * directories are logged and contribute no entries rather than throwing.
   */
  async walkDir(dir, baseDir) {
    const files = [];
    try {
      const entries = await readdir2(dir, { withFileTypes: true });
      for (const entry of entries) {
        const fullPath = join6(dir, entry.name);
        if (entry.isDirectory()) {
          if (entry.name.startsWith(".")) {
            continue;
          }
          files.push(...await this.walkDir(fullPath, baseDir));
        } else if (entry.isFile()) {
          files.push(relative(baseDir, fullPath));
        }
      }
    } catch (err) {
      debug3("Error walking directory %s: %O", dir, err);
    }
    return files;
  }
  /**
   * Stat a file, returning { mtime: ISO-8601 string, size: bytes } or null
   * when the file is missing or unreadable.
   */
  async statFile(absPath) {
    try {
      const s = await stat4(absPath);
      return { mtime: s.mtime.toISOString(), size: s.size };
    } catch {
      return null;
    }
  }
  /**
   * Read a file with mtime-based caching: returns cached content when the
   * mtime is unchanged, otherwise re-reads and refreshes the cache.
   * Returns null when the file cannot be statted or read.
   */
  async readFileCached(absPath) {
    try {
      const s = await stat4(absPath);
      const mtime = s.mtime.toISOString();
      const cached = this.mtimeCache.get(absPath);
      if (cached && cached.mtime === mtime) {
        debug3("Cache hit: %s", absPath);
        return cached.content;
      }
      debug3("Cache miss: %s", absPath);
      const content = await readFile5(absPath, "utf-8");
      this.mtimeCache.set(absPath, { mtime, content });
      return content;
    } catch {
      return null;
    }
  }
  /**
   * True when a .git directory exists at the project root. Worktrees and
   * submodules (which use a .git *file*) report false here.
   */
  async checkGitAvailable(rootDir) {
    try {
      const s = await stat4(join6(rootDir, ".git"));
      return s.isDirectory();
    } catch {
      return false;
    }
  }
};
1491
+
1492
// ../server/dist/websocket.js
import { WebSocketServer, WebSocket } from "ws";
import debugFactory4 from "debug";
// Namespaced logger; enable with DEBUG=bmad-studio:websocket.
var debug4 = debugFactory4("bmad-studio:websocket");
1496
var WebSocketBroadcaster = class {
  scanner;
  outputDir;
  rootDir;
  getState;
  wss;
  /**
   * Attach a WebSocket endpoint at /ws to an existing HTTP server and push
   * project state to clients as it changes.
   * @param server - HTTP server to attach the WebSocket server to.
   * @param scanner - ProjectScanner used for on-demand scans.
   * @param outputDir - BMAD output directory.
   * @param rootDir - Project root directory.
   * @param getState - Returns the current cached project state (or null).
   */
  constructor(server, scanner, outputDir, rootDir, getState) {
    this.scanner = scanner;
    this.outputDir = outputDir;
    this.rootDir = rootDir;
    this.getState = getState;
    this.wss = new WebSocketServer({ server, path: "/ws" });
    this.wss.on("connection", (ws) => {
      debug4("Client connected (total: %d)", this.wss.clients.size);
      // Fire-and-forget; sendInitialState catches its own errors.
      void this.sendInitialState(ws);
      ws.on("error", (err) => debug4("Client error: %O", err));
      ws.on("close", () => debug4("Client disconnected (remaining: %d)", this.wss.clients.size));
    });
    this.wss.on("error", (err) => debug4("WebSocketServer error: %O", err));
  }
  /**
   * Send the full project state to a newly connected client, running a
   * fresh scan when no cached state exists yet. Errors are logged only.
   */
  async sendInitialState(ws) {
    try {
      let state = this.getState();
      if (!state) {
        state = await this.scanner.scan(this.outputDir, this.rootDir);
      }
      // The socket may have closed while we were scanning — re-check.
      if (ws.readyState === WebSocket.OPEN) {
        ws.send(JSON.stringify({ type: "project:state", payload: state }));
        debug4("Sent project:state to new client");
      }
    } catch (err) {
      debug4("Error sending initial state: %O", err);
    }
  }
  /** Serialize an event once, then deliver it to every OPEN client. */
  broadcast(event) {
    const payload = JSON.stringify(event);
    let delivered = 0;
    for (const client of this.wss.clients) {
      if (client.readyState !== WebSocket.OPEN) continue;
      client.send(payload);
      delivered++;
    }
    debug4("Broadcast %s to %d clients", event.type, delivered);
  }
  /** Notify clients that a single workflow's state changed. */
  broadcastWorkflowUpdate(workflowState) {
    this.broadcast({ type: "workflow:updated", payload: workflowState });
  }
  /** Notify clients that the whole workflow map should be refreshed. */
  broadcastMapRefresh(workflows) {
    this.broadcast({ type: "map:refresh", payload: workflows });
  }
  /** Notify clients of an updated sprint status. */
  broadcastSprintUpdate(sprintStatus) {
    this.broadcast({ type: "sprint:updated", payload: sprintStatus });
  }
  /** Close every client connection, then shut down the server itself. */
  close() {
    debug4("Closing WebSocket server");
    for (const client of this.wss.clients) {
      client.close();
    }
    this.wss.close();
  }
};
1565
+
1566
// ../server/dist/watcher.js
import { join as join7, relative as relative2 } from "path";
import picomatch2 from "picomatch";
import debugFactory5 from "debug";
// Namespaced logger; enable with DEBUG=bmad-studio:watcher.
var debug5 = debugFactory5("bmad-studio:watcher");
1571
/**
 * Lazily load chokidar and start a persistent recursive watch on `path`.
 * Dotfiles are ignored, as are events for files already present at startup.
 * @param {string} path - Directory to watch.
 * @returns {Promise<object>} The chokidar FSWatcher instance.
 */
async function watchDir(path) {
  // Dynamic import keeps chokidar off the CLI's cold-start path.
  const chokidar = await import("chokidar");
  const options = {
    ignoreInitial: true,
    ignored: /(^|[/\\])\../,
    // ignore dotfiles
    persistent: true
  };
  return chokidar.watch(path, options);
}
1580
var FileWatcher = class {
  outputDir;
  rootDir;
  scanner;
  broadcaster;
  watcher = null;
  // Per-workflow debounce timers: collapse bursts of file events into one rescan.
  workflowDebounceTimers = /* @__PURE__ */ new Map();
  mapDebounceTimer = null;
  heartbeatTimer = null;
  sprintDebounceTimer = null;
  // Precompiled picomatch matchers per workflow, built from workflowDefinitions.
  workflowMatchers;
  currentState = null;
  /**
   * Watches the BMAD output directory and drives debounced rescans plus
   * WebSocket broadcasts when workflow outputs change.
   * @param outputDir - BMAD output directory to watch.
   * @param rootDir - Project root directory.
   * @param scanner - ProjectScanner used for full and per-workflow rescans.
   * @param broadcaster - WebSocketBroadcaster notified of changes.
   */
  constructor(outputDir, rootDir, scanner, broadcaster) {
    this.outputDir = outputDir;
    this.rootDir = rootDir;
    this.scanner = scanner;
    this.broadcaster = broadcaster;
    this.workflowMatchers = workflowDefinitions.map((wf) => ({
      id: wf.id,
      matchers: wf.outputs.map((o) => picomatch2(o.glob))
    }));
  }
  /** Run the initial scan, attach file-event handlers, start the heartbeat. */
  async start() {
    debug5("Starting file watcher on %s", this.outputDir);
    this.currentState = await this.scanner.scan(this.outputDir, this.rootDir);
    debug5("Initial scan complete: %d workflows", Object.keys(this.currentState.workflows).length);
    this.watcher = await watchDir(this.outputDir);
    this.watcher.on("add", (path) => this.handleFileEvent(path));
    this.watcher.on("change", (path) => this.handleFileEvent(path));
    this.watcher.on("unlink", (path) => this.handleFileEvent(path));
    this.watcher.on("error", (err) => debug5("Watcher error: %O", err));
    this.startHeartbeat();
  }
  /** Cancel all pending timers and close the underlying watcher. */
  async stop() {
    debug5("Stopping file watcher");
    for (const timer of this.workflowDebounceTimers.values()) {
      clearTimeout(timer);
    }
    this.workflowDebounceTimers.clear();
    if (this.mapDebounceTimer) {
      clearTimeout(this.mapDebounceTimer);
      this.mapDebounceTimer = null;
    }
    if (this.sprintDebounceTimer) {
      clearTimeout(this.sprintDebounceTimer);
      this.sprintDebounceTimer = null;
    }
    if (this.heartbeatTimer) {
      clearInterval(this.heartbeatTimer);
      this.heartbeatTimer = null;
    }
    if (this.watcher) {
      await this.watcher.close();
      this.watcher = null;
    }
  }
  /** @returns The most recent project state, or null before the first scan. */
  getState() {
    return this.currentState;
  }
  /** Route a raw file event to sprint and/or per-workflow debounced rescans. */
  handleFileEvent(absPath) {
    const relPath = relative2(this.outputDir, absPath);
    debug5("File event: %s", relPath);
    // sprint-status.yaml may live at the output root or under implementation-artifacts.
    if (relPath === join7("implementation-artifacts", "sprint-status.yaml") || relPath === "sprint-status.yaml") {
      this.debounceSprintUpdate();
    }
    const affectedWorkflows = this.identifyWorkflows(relPath);
    if (affectedWorkflows.length === 0) {
      debug5("No workflow matched for %s", relPath);
      return;
    }
    debug5("Affected workflows: %o", affectedWorkflows);
    for (const workflowId of affectedWorkflows) {
      this.debounceWorkflow(workflowId);
    }
  }
  /** @returns IDs of workflows whose output globs match relPath. */
  identifyWorkflows(relPath) {
    return this.workflowMatchers.filter((wm) => wm.matchers.some((m) => m(relPath))).map((wm) => wm.id);
  }
  /** Schedule (or re-schedule) a workflow rescan 500 ms after the last event. */
  debounceWorkflow(workflowId) {
    const existing = this.workflowDebounceTimers.get(workflowId);
    if (existing) {
      clearTimeout(existing);
    }
    const timer = setTimeout(() => {
      this.workflowDebounceTimers.delete(workflowId);
      void this.rescanWorkflow(workflowId);
    }, 500);
    this.workflowDebounceTimers.set(workflowId, timer);
  }
  /**
   * Rescan a single workflow and broadcast when its status changed.
   * Errors are caught here: this runs fire-and-forget from a timer, so a
   * rejection would otherwise surface as an unhandled promise rejection.
   */
  async rescanWorkflow(workflowId) {
    if (!this.currentState)
      return;
    debug5("Rescanning workflow: %s", workflowId);
    try {
      const previousStatus = this.currentState.workflows[workflowId]?.status;
      this.currentState = await this.scanner.scanWorkflow(workflowId, this.outputDir, this.currentState);
      const newStatus = this.currentState.workflows[workflowId]?.status;
      if (newStatus !== previousStatus) {
        debug5("Workflow %s status changed: %s \u2192 %s", workflowId, previousStatus, newStatus);
        this.broadcaster.broadcastWorkflowUpdate(this.currentState.workflows[workflowId]);
      }
    } catch (err) {
      debug5("Workflow rescan failed for %s: %O", workflowId, err);
      return;
    }
    this.resetHeartbeat();
    this.debounceMapRefresh();
  }
  /** Schedule (or re-schedule) a sprint rescan 500 ms after the last event. */
  debounceSprintUpdate() {
    if (this.sprintDebounceTimer) {
      clearTimeout(this.sprintDebounceTimer);
    }
    this.sprintDebounceTimer = setTimeout(() => {
      this.sprintDebounceTimer = null;
      void this.rescanSprint();
    }, 500);
  }
  /** Re-parse sprint-status.yaml and broadcast it; failures are logged only. */
  async rescanSprint() {
    debug5("Rescanning sprint status");
    try {
      const sprintStatus = await parseSprintStatus(this.outputDir);
      if (sprintStatus) {
        this.broadcaster.broadcastSprintUpdate(sprintStatus);
      }
    } catch (err) {
      debug5("Sprint status rescan failed: %O", err);
    }
  }
  /** Coalesce map-wide refresh broadcasts: fire 2 s after the last rescan. */
  debounceMapRefresh() {
    if (this.mapDebounceTimer) {
      clearTimeout(this.mapDebounceTimer);
    }
    this.mapDebounceTimer = setTimeout(() => {
      this.mapDebounceTimer = null;
      if (this.currentState) {
        debug5("Map-wide debounce fired, broadcasting map:refresh");
        this.broadcaster.broadcastMapRefresh(this.currentState.workflows);
      }
    }, 2e3);
  }
  /** Begin the 30 s full-scan heartbeat interval. */
  startHeartbeat() {
    this.heartbeatTimer = setInterval(() => {
      void this.heartbeatScan();
    }, 3e4);
  }
  /** Restart the heartbeat so it fires 30 s after the most recent activity. */
  resetHeartbeat() {
    if (this.heartbeatTimer) {
      clearInterval(this.heartbeatTimer);
    }
    this.startHeartbeat();
  }
  /**
   * Full rescan on the heartbeat interval. Errors are logged rather than
   * allowed to become unhandled rejections — this is invoked fire-and-forget
   * from setInterval.
   */
  async heartbeatScan() {
    debug5("Heartbeat: running full scan");
    try {
      this.currentState = await this.scanner.scan(this.outputDir, this.rootDir);
      this.broadcaster.broadcastMapRefresh(this.currentState.workflows);
    } catch (err) {
      debug5("Heartbeat scan failed: %O", err);
    }
  }
};
1732
+
1733
// ../server/dist/index.js
// Namespaced logger; enable with DEBUG=bmad-studio:server.
var debug6 = debugFactory6("bmad-studio:server");
1735
/**
 * Walk upward from startDir looking for a project root.
 * A directory containing .git wins immediately; otherwise the first (deepest)
 * ancestor containing _bmad-output is remembered as a fallback.
 * @param {string} startDir - Directory to start searching from.
 * @returns {string} The detected root, or startDir when nothing matched.
 */
function findProjectRoot(startDir) {
  let bmadOutputCandidate = null;
  // dirname(dir) === dir only at the filesystem root — that's the stop condition.
  for (let dir = startDir; dir !== dirname2(dir); dir = dirname2(dir)) {
    if (existsSync(join8(dir, ".git"))) {
      return dir;
    }
    if (bmadOutputCandidate === null && existsSync(join8(dir, "_bmad-output"))) {
      bmadOutputCandidate = dir;
    }
  }
  return bmadOutputCandidate ?? startDir;
}
1749
/**
 * Create and start the BMAD Studio HTTP/WebSocket server.
 * Serves the JSON API under /api, optionally serves the built UI (with an
 * SPA fallback), and — when an output directory is configured — starts the
 * file watcher and WebSocket broadcaster after the port is bound.
 * @param config - Server configuration ({ port?, outputDir?, uiDistPath?, ... }).
 * @returns Resolves with { server, watcher, broadcaster, close } once listening;
 *   rejects if the port cannot be bound.
 */
async function createServer(config) {
  const port = config.port ?? 5400;
  const rootDir = findProjectRoot(process.cwd());
  debug6("Project root: %s", rootDir);
  const outputDir = await resolveOutputDir(rootDir, config.outputDir);
  debug6("Detected outputDir: %s", outputDir ?? "(unconfigured)");
  const scanner = new ProjectScanner();
  let cachedState = null;
  const app = express();
  app.use(express.json());
  app.use("/api", createApiRouter({
    outputDir,
    rootDir,
    scanner,
    getCachedState: () => cachedState
  }));
  if (config.uiDistPath && existsSync(config.uiDistPath)) {
    debug6("Serving UI from %s", config.uiDistPath);
    app.use(express.static(config.uiDistPath));
    // SPA fallback: any route not matched above serves the UI entry point.
    app.use((_req, res) => {
      res.sendFile(join8(config.uiDistPath, "index.html"));
    });
  }
  const server = http.createServer(app);
  let watcher;
  let broadcaster;
  // new Promise is warranted here: server.listen is a callback API.
  return new Promise((resolve4, reject) => {
    server.on("error", reject);
    server.listen(port, "127.0.0.1", async () => {
      debug6("Server listening on http://127.0.0.1:%d", port);
      if (outputDir) {
        try {
          cachedState = await scanner.scan(outputDir, rootDir);
          debug6("Initial scan complete: %d workflows", Object.keys(cachedState.workflows).length);
        } catch (err) {
          debug6("Initial scan failed: %O", err);
        }
        broadcaster = new WebSocketBroadcaster(server, scanner, outputDir, rootDir, () => cachedState);
        watcher = new FileWatcher(outputDir, rootDir, scanner, broadcaster);
        // FIX: previously watcher.start() was a floating promise — a startup
        // failure became an unhandled rejection. Await and log instead.
        try {
          await watcher.start();
        } catch (err) {
          debug6("FileWatcher failed to start: %O", err);
        }
        debug6("FileWatcher and WebSocketBroadcaster started");
      }
      resolve4({
        server,
        watcher,
        broadcaster,
        close: async () => {
          // FIX: await stop() so chokidar is fully closed before we report
          // the server as shut down (previously fire-and-forget).
          await watcher?.stop();
          broadcaster?.close();
          return new Promise((res, rej) => {
            server.close((err) => err ? rej(err) : res());
          });
        }
      });
    });
  });
}
1806
+
1807
+ // src/cleanup.ts
1808
/**
 * Register process-level handlers that unregister this instance from the
 * shared registry exactly once, however the process terminates.
 * @param {number} port - Port this instance registered under.
 */
function registerCleanupHandlers(port) {
  let done = false;
  const cleanup = () => {
    if (done) return;
    done = true;
    unregisterInstance(port);
  };
  // Conventional exit codes: 128 + signal number (SIGINT=2, SIGTERM=15).
  const exitWith = (code) => {
    cleanup();
    process.exit(code);
  };
  process.on("SIGINT", () => exitWith(130));
  process.on("SIGTERM", () => exitWith(143));
  process.on("exit", cleanup);
  process.on("uncaughtException", (err) => {
    console.error("Uncaught exception:", err);
    exitWith(1);
  });
}
1830
+
1831
+ // src/launcher.ts
1832
/**
 * Probe a local BMAD Studio instance's /api/capabilities endpoint.
 * Aborts after one second so a dead port cannot hang startup.
 * @param {number} port - Port to probe on 127.0.0.1.
 * @returns {Promise<boolean>} True only for an HTTP 2xx response.
 */
async function pingInstance(port) {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), 1e3);
  try {
    const url = `http://127.0.0.1:${port}/api/capabilities`;
    const response = await fetch(url, { signal: controller.signal });
    return response.ok;
  } catch {
    // Connection refused, DNS failure, abort — all mean "not alive".
    return false;
  } finally {
    clearTimeout(timer);
  }
}
1846
/**
 * Best-effort SIGTERM to a registered instance, then remove it from the
 * registry whether or not the process was still alive.
 * @param {{port: number, pid: number}} entry - Registry entry to terminate.
 */
function killInstance(entry) {
  try {
    process.kill(entry.pid, "SIGTERM");
  } catch {
    // Process already gone — still clean up the stale registry entry below.
  }
  unregisterInstance(entry.port);
}
1853
/**
 * Open the default browser at the local BMAD Studio URL.
 * The `open` package is imported lazily to keep CLI startup fast.
 * @param {number} port - Port the studio is listening on.
 */
async function openBrowser(port) {
  const openModule = await import("open");
  await openModule.default(`http://localhost:${port}`);
}
1857
/**
 * Start (or reuse) a BMAD Studio instance for the current project.
 * Reuses a live registered instance unless --new was given; otherwise
 * starts a fresh server, registers it, and optionally opens the browser.
 * @param opts - Parsed CLI options (port, open, dark, light, outputDir, new).
 */
async function launch(opts) {
  const cwd = resolve3(process.cwd());
  const theme = opts.dark ? "dark" : opts.light ? "light" : void 0;
  // Reuse-or-kill any instance already registered for this project.
  const existing = findInstanceByProject(cwd);
  if (existing) {
    if (opts.new) {
      killInstance(existing);
    } else {
      const alive = await pingInstance(existing.port);
      if (alive) {
        console.log(`Reusing existing BMAD Studio instance on port ${existing.port}`);
        if (opts.open) {
          await openBrowser(existing.port);
        } else {
          console.log(`BMAD Studio: http://localhost:${existing.port}`);
        }
        return;
      } else {
        // Stale registry entry — drop it and fall through to a fresh start.
        unregisterInstance(existing.port);
      }
    }
  }
  // Choose the port only after the reuse path is ruled out (previously a
  // port was probed even when an existing instance was reused).
  let port;
  if (opts.port) {
    port = Number(opts.port);
    // FIX: previously "--port abc" produced NaN and was handed to listen().
    if (!Number.isInteger(port) || port < 1 || port > 65535) {
      console.error(`Invalid port: ${opts.port}`);
      process.exit(1);
    }
  } else {
    port = await findAvailablePort();
  }
  // Serve the bundled UI when it shipped alongside this file.
  const __dirname = dirname3(fileURLToPath(import.meta.url));
  const uiDistCandidate = resolve3(__dirname, "..", "ui-dist");
  const uiDistPath = existsSync2(uiDistCandidate) ? uiDistCandidate : void 0;
  const config = {
    port,
    outputDir: opts.outputDir,
    noOpen: !opts.open,
    theme,
    isNew: opts.new,
    uiDistPath
  };
  try {
    await createServer(config);
    registerInstance({
      port,
      pid: process.pid,
      projectPath: cwd,
      startTime: (/* @__PURE__ */ new Date()).toISOString()
    });
    registerCleanupHandlers(port);
    if (opts.open) {
      await openBrowser(port);
    } else {
      console.log(`BMAD Studio: http://localhost:${port}`);
    }
  } catch (err) {
    console.error("Failed to start BMAD Studio:", err);
    process.exit(1);
  }
}
1915
+
1916
// src/index.ts
// Load our own package.json (relative to this bundle) to report the CLI version.
var pkg = JSON.parse(
  readFileSync2(new URL("../package.json", import.meta.url), "utf-8")
);
1920
/**
 * Render elapsed time since startTime as "<1m", "Nm", or "Nh Mm".
 * @param {string} startTime - ISO timestamp of instance start.
 * @returns {string} Human-readable uptime.
 */
function formatUptime(startTime) {
  const elapsedMs = Date.now() - new Date(startTime).getTime();
  const totalMinutes = Math.floor(elapsedMs / 6e4);
  if (totalMinutes < 1) return "<1m";
  const hours = Math.floor(totalMinutes / 60);
  if (hours < 1) return `${totalMinutes}m`;
  return `${hours}h ${totalMinutes % 60}m`;
}
1929
// CLI definition. The default action launches the studio; the `list` and
// `stop` subcommands manage instances recorded in the shared registry.
var program = new Command();
program
  .name("bmad-studio")
  .version(pkg.version)
  .description("BMAD Studio \u2014 Visual workflow dashboard for the BMAD Method")
  .option("--port <number>", "server port")
  .option("--no-open", "skip opening the browser")
  .option("--dark", "use dark theme")
  .option("--light", "use light theme")
  .option("--output-dir <path>", "output directory path")
  .option("--new", "kill existing instance and start fresh")
  .action(async (opts) => {
    await launch(opts);
  });
program
  .command("list")
  .description("List running BMAD Studio instances")
  .action(() => {
    const entries = cleanStaleEntries(readInstances());
    if (entries.length === 0) {
      console.log("No running BMAD Studio instances");
      return;
    }
    console.log("\nBMAD Studio Instances:\n");
    console.log(" PORT PID PROJECT UPTIME");
    for (const entry of entries) {
      const cols = [
        String(entry.port).padEnd(5),
        String(entry.pid).padEnd(8),
        entry.projectPath.padEnd(40),
        formatUptime(entry.startTime)
      ];
      console.log(` ${cols[0]} ${cols[1]} ${cols[2]} ${cols[3]}`);
    }
    console.log(`
Total: ${entries.length} running instance${entries.length === 1 ? "" : "s"}`);
  });
program
  .command("stop [port]")
  .description('Stop a running BMAD Studio instance (use "all" to stop all)')
  .action((portArg) => {
    // Shared kill-log-unregister sequence used by both branches below.
    const terminate = (entry) => {
      try {
        process.kill(entry.pid, "SIGTERM");
        console.log(`Stopped instance on port ${entry.port} (PID ${entry.pid})`);
      } catch {
        console.log(`Instance on port ${entry.port} (PID ${entry.pid}) already stopped`);
      }
      unregisterInstance(entry.port);
    };
    if (portArg === "all") {
      const running = cleanStaleEntries(readInstances());
      if (running.length === 0) {
        console.log("No running BMAD Studio instances to stop");
        return;
      }
      for (const entry of running) {
        terminate(entry);
      }
      console.log(`Stopped ${running.length} instance${running.length === 1 ? "" : "s"}`);
      return;
    }
    if (!portArg) {
      console.error("Usage: bmad-studio stop <port> or bmad-studio stop all");
      process.exit(1);
    }
    const port = Number(portArg);
    if (Number.isNaN(port)) {
      console.error(`Invalid port: ${portArg}`);
      process.exit(1);
    }
    const target = cleanStaleEntries(readInstances()).find((e) => e.port === port);
    if (!target) {
      console.log(`No instance found on port ${port}`);
      return;
    }
    terminate(target);
  });
program.parse();
export {
  program
};