@cubis/foundry 0.3.81 → 0.3.82

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/dist/cli/core.js +229 -52
  2. package/dist/cli/core.js.map +1 -1
  3. package/package.json +1 -1
  4. package/src/cli/core.ts +314 -89
  5. package/workflows/workflows/agent-environment-setup/platforms/antigravity/workflows/architecture.md +22 -5
  6. package/workflows/workflows/agent-environment-setup/platforms/antigravity/workflows/implement-track.md +1 -1
  7. package/workflows/workflows/agent-environment-setup/platforms/antigravity/workflows/migrate.md +1 -1
  8. package/workflows/workflows/agent-environment-setup/platforms/antigravity/workflows/mobile.md +1 -1
  9. package/workflows/workflows/agent-environment-setup/platforms/antigravity/workflows/onboard.md +1 -1
  10. package/workflows/workflows/agent-environment-setup/platforms/antigravity/workflows/orchestrate.md +1 -1
  11. package/workflows/workflows/agent-environment-setup/platforms/antigravity/workflows/plan.md +1 -1
  12. package/workflows/workflows/agent-environment-setup/platforms/antigravity/workflows/refactor.md +1 -1
  13. package/workflows/workflows/agent-environment-setup/platforms/antigravity/workflows/release.md +1 -1
  14. package/workflows/workflows/agent-environment-setup/platforms/antigravity/workflows/spec.md +1 -1
  15. package/workflows/workflows/agent-environment-setup/platforms/claude/workflows/architecture.md +22 -5
  16. package/workflows/workflows/agent-environment-setup/platforms/claude/workflows/implement-track.md +1 -1
  17. package/workflows/workflows/agent-environment-setup/platforms/claude/workflows/migrate.md +1 -1
  18. package/workflows/workflows/agent-environment-setup/platforms/claude/workflows/mobile.md +1 -1
  19. package/workflows/workflows/agent-environment-setup/platforms/claude/workflows/onboard.md +1 -1
  20. package/workflows/workflows/agent-environment-setup/platforms/claude/workflows/orchestrate.md +1 -1
  21. package/workflows/workflows/agent-environment-setup/platforms/claude/workflows/plan.md +1 -1
  22. package/workflows/workflows/agent-environment-setup/platforms/claude/workflows/refactor.md +1 -1
  23. package/workflows/workflows/agent-environment-setup/platforms/claude/workflows/release.md +1 -1
  24. package/workflows/workflows/agent-environment-setup/platforms/claude/workflows/spec.md +1 -1
  25. package/workflows/workflows/agent-environment-setup/platforms/codex/workflows/architecture.md +22 -5
  26. package/workflows/workflows/agent-environment-setup/platforms/codex/workflows/implement-track.md +1 -1
  27. package/workflows/workflows/agent-environment-setup/platforms/codex/workflows/migrate.md +1 -1
  28. package/workflows/workflows/agent-environment-setup/platforms/codex/workflows/mobile.md +1 -1
  29. package/workflows/workflows/agent-environment-setup/platforms/codex/workflows/onboard.md +1 -1
  30. package/workflows/workflows/agent-environment-setup/platforms/codex/workflows/orchestrate.md +1 -1
  31. package/workflows/workflows/agent-environment-setup/platforms/codex/workflows/plan.md +1 -1
  32. package/workflows/workflows/agent-environment-setup/platforms/codex/workflows/refactor.md +1 -1
  33. package/workflows/workflows/agent-environment-setup/platforms/codex/workflows/release.md +1 -1
  34. package/workflows/workflows/agent-environment-setup/platforms/codex/workflows/spec.md +1 -1
  35. package/workflows/workflows/agent-environment-setup/platforms/copilot/workflows/architecture.md +22 -5
  36. package/workflows/workflows/agent-environment-setup/platforms/copilot/workflows/implement-track.md +1 -1
  37. package/workflows/workflows/agent-environment-setup/platforms/copilot/workflows/migrate.md +1 -1
  38. package/workflows/workflows/agent-environment-setup/platforms/copilot/workflows/mobile.md +1 -1
  39. package/workflows/workflows/agent-environment-setup/platforms/copilot/workflows/onboard.md +1 -1
  40. package/workflows/workflows/agent-environment-setup/platforms/copilot/workflows/orchestrate.md +1 -1
  41. package/workflows/workflows/agent-environment-setup/platforms/copilot/workflows/plan.md +1 -1
  42. package/workflows/workflows/agent-environment-setup/platforms/copilot/workflows/refactor.md +1 -1
  43. package/workflows/workflows/agent-environment-setup/platforms/copilot/workflows/release.md +1 -1
  44. package/workflows/workflows/agent-environment-setup/platforms/copilot/workflows/spec.md +1 -1
  45. package/workflows/workflows/agent-environment-setup/platforms/gemini/workflows/architecture.md +22 -5
  46. package/workflows/workflows/agent-environment-setup/platforms/gemini/workflows/implement-track.md +1 -1
  47. package/workflows/workflows/agent-environment-setup/platforms/gemini/workflows/migrate.md +1 -1
  48. package/workflows/workflows/agent-environment-setup/platforms/gemini/workflows/mobile.md +1 -1
  49. package/workflows/workflows/agent-environment-setup/platforms/gemini/workflows/onboard.md +1 -1
  50. package/workflows/workflows/agent-environment-setup/platforms/gemini/workflows/orchestrate.md +1 -1
  51. package/workflows/workflows/agent-environment-setup/platforms/gemini/workflows/plan.md +1 -1
  52. package/workflows/workflows/agent-environment-setup/platforms/gemini/workflows/refactor.md +1 -1
  53. package/workflows/workflows/agent-environment-setup/platforms/gemini/workflows/release.md +1 -1
  54. package/workflows/workflows/agent-environment-setup/platforms/gemini/workflows/spec.md +1 -1
  55. package/workflows/workflows/agent-environment-setup/shared/workflows/architecture.md +22 -5
  56. package/workflows/workflows/agent-environment-setup/shared/workflows/implement-track.md +1 -1
  57. package/workflows/workflows/agent-environment-setup/shared/workflows/migrate.md +1 -1
  58. package/workflows/workflows/agent-environment-setup/shared/workflows/mobile.md +1 -1
  59. package/workflows/workflows/agent-environment-setup/shared/workflows/onboard.md +1 -1
  60. package/workflows/workflows/agent-environment-setup/shared/workflows/orchestrate.md +1 -1
  61. package/workflows/workflows/agent-environment-setup/shared/workflows/plan.md +1 -1
  62. package/workflows/workflows/agent-environment-setup/shared/workflows/refactor.md +1 -1
  63. package/workflows/workflows/agent-environment-setup/shared/workflows/release.md +1 -1
  64. package/workflows/workflows/agent-environment-setup/shared/workflows/spec.md +1 -1
package/dist/cli/core.js CHANGED
@@ -911,7 +911,8 @@ function inferArchitectureContractProfile(snapshot) {
911
911
  snapshot.topDirs.includes("app")
912
912
  ? "App-level UI patterns should be centralized and reused across screens."
913
913
  : null,
914
- ]) || "No dedicated design-system directory detected; infer shared UI rules from current components and screens.";
914
+ ]) ||
915
+ "No dedicated design-system directory detected; infer shared UI rules from current components and screens.";
915
916
  const testingStrategy = [];
916
917
  if (snapshot.keyScripts.some((script) => script.name === "lint")) {
917
918
  testingStrategy.push("Linting is part of the baseline quality gate.");
@@ -976,7 +977,8 @@ function inferProductFoundationProfile(snapshot, specRoots = []) {
976
977
  snapshot.topDirs.includes("web")) {
977
978
  userPersonas.push("Browser users and internal operators using web-facing flows");
978
979
  }
979
- if (snapshot.frameworks.includes("NestJS") || snapshot.topDirs.includes("api")) {
980
+ if (snapshot.frameworks.includes("NestJS") ||
981
+ snapshot.topDirs.includes("api")) {
980
982
  userPersonas.push("Internal services, operators, or partner systems consuming API boundaries");
981
983
  }
982
984
  if (userPersonas.length === 0) {
@@ -1083,12 +1085,16 @@ function buildArchitectureDocSection(snapshot, specRoots = []) {
1083
1085
  "### Bounded Contexts and Module Boundaries",
1084
1086
  ...(profile.moduleBoundaries.length > 0
1085
1087
  ? profile.moduleBoundaries.map((item) => `- ${item}`)
1086
- : ["- No strong top-level module boundaries were detected automatically."]),
1088
+ : [
1089
+ "- No strong top-level module boundaries were detected automatically.",
1090
+ ]),
1087
1091
  "",
1088
1092
  "### Architecture Signals by Surface",
1089
1093
  ...(profile.architectureSignals.length > 0
1090
1094
  ? profile.architectureSignals.map((item) => `- ${item}`)
1091
- : ["- No app-level architecture signals were inferred from the repo scan."]),
1095
+ : [
1096
+ "- No app-level architecture signals were inferred from the repo scan.",
1097
+ ]),
1092
1098
  "",
1093
1099
  "### Decision Areas to Preserve",
1094
1100
  ...profile.decisionAreas.map((item) => `- ${item}`),
@@ -1124,7 +1130,9 @@ function buildEngineeringArchitectureSection(snapshot) {
1124
1130
  "- Module and package boundaries to preserve:",
1125
1131
  ...(profile.moduleBoundaries.length > 0
1126
1132
  ? profile.moduleBoundaries.map((rule) => ` - ${rule}`)
1127
- : [" - No strong module boundary was detected automatically; keep new boundaries explicit in specs and ADRs."]),
1133
+ : [
1134
+ " - No strong module boundary was detected automatically; keep new boundaries explicit in specs and ADRs.",
1135
+ ]),
1128
1136
  "- Testability expectations:",
1129
1137
  ...profile.testingStrategy.map((rule) => ` - ${rule}`),
1130
1138
  "- Doc refresh policy:",
@@ -1185,7 +1193,9 @@ function buildTechArchitectureSection(snapshot) {
1185
1193
  "### Module / App Topology",
1186
1194
  ...(profile.moduleBoundaries.length > 0
1187
1195
  ? profile.moduleBoundaries.map((item) => `- ${item}`)
1188
- : ["- No significant top-level module boundaries detected automatically."]),
1196
+ : [
1197
+ "- No significant top-level module boundaries detected automatically.",
1198
+ ]),
1189
1199
  "",
1190
1200
  "### Flow Narratives",
1191
1201
  "- Describe the primary request, data, and background-job flows here when architecture generation runs.",
@@ -1471,6 +1481,20 @@ function buildEngineeringRulesManagedBlock({ platform, productFilePath, architec
1471
1481
  const techRef = toPosixPath(path.resolve(techMdFilePath));
1472
1482
  const roadmapRef = toPosixPath(path.resolve(roadmapFilePath));
1473
1483
  const ruleRef = toPosixPath(path.resolve(ruleFilePath));
1484
+ const supportsAtImport = platform === "claude" || platform === "gemini";
1485
+ const ruleFileDir = path.dirname(path.resolve(ruleFilePath));
1486
+ const relProduct = toPosixPath(path.relative(ruleFileDir, path.resolve(productFilePath)));
1487
+ const relArchitecture = toPosixPath(path.relative(ruleFileDir, path.resolve(architectureFilePath)));
1488
+ const relTech = toPosixPath(path.relative(ruleFileDir, path.resolve(techMdFilePath)));
1489
+ const importLines = supportsAtImport
1490
+ ? [
1491
+ "",
1492
+ "Foundation docs (auto-imported into context):",
1493
+ `@${relProduct}`,
1494
+ `@${relArchitecture}`,
1495
+ `@${relTech}`,
1496
+ ]
1497
+ : [];
1474
1498
  return [
1475
1499
  `<!-- cbx:engineering:auto:start platform=${platform} version=1 -->`,
1476
1500
  "## Engineering Guardrails (auto-managed)",
@@ -1482,13 +1506,14 @@ function buildEngineeringRulesManagedBlock({ platform, productFilePath, architec
1482
1506
  `- Project tech map: \`${techRef}\``,
1483
1507
  `- Delivery roadmap: \`${roadmapRef}\``,
1484
1508
  `- Active platform rule file: \`${ruleRef}\``,
1509
+ ...importLines,
1485
1510
  "",
1486
1511
  "Hard policy:",
1487
1512
  "1. Start from product outcomes and ship the smallest valuable slice.",
1488
1513
  "2. Keep architecture simple (KISS) and avoid speculative work (YAGNI).",
1489
1514
  "3. Apply SOLID pragmatically to reduce change risk, not add ceremony.",
1490
1515
  "4. Use clear naming with focused responsibilities and explicit boundaries.",
1491
- `5. For non-trivial work, read ${FOUNDATION_DOCS_DIR}/PRODUCT.md, ENGINEERING_RULES.md, ${FOUNDATION_DOCS_DIR}/ARCHITECTURE.md, and ${FOUNDATION_DOCS_DIR}/TECH.md in that order when they exist before planning or implementation.`,
1516
+ `5. For non-trivial work, read ${FOUNDATION_DOCS_DIR}/PRODUCT.md, ENGINEERING_RULES.md, ${FOUNDATION_DOCS_DIR}/ARCHITECTURE.md, and ${FOUNDATION_DOCS_DIR}/TECH.md in that order when they exist before planning or implementation. Check ${FOUNDATION_DOCS_DIR}/PRODUCT.md for domain glossary and ${FOUNDATION_DOCS_DIR}/TECH.md for build/validation commands.`,
1492
1517
  "6. Require validation evidence (lint/types/tests) before merge.",
1493
1518
  "7. Use Decision Log response style.",
1494
1519
  "8. Every Decision Log must include a `Skills Used` section listing skill, workflow, or agent names.",
@@ -3053,7 +3078,7 @@ function targetStateKey(platform, scope) {
3053
3078
  }
3054
3079
  function getStateFilePath(scope, cwd = process.cwd()) {
3055
3080
  if (scope === "global")
3056
- return path.join(os.homedir(), ".cbx", "state.json");
3081
+ return path.join(resolveManagedHomeDir(), ".cbx", "state.json");
3057
3082
  return path.join(cwd, ".cbx", "workflows-state.json");
3058
3083
  }
3059
3084
  async function pathExists(targetPath) {
@@ -4433,14 +4458,14 @@ async function writeGeneratedArtifact({ destination, content, dryRun = false, })
4433
4458
  }
4434
4459
  function resolveLegacyPostmanConfigPath({ scope, cwd = process.cwd() }) {
4435
4460
  if (scope === "global") {
4436
- return path.join(os.homedir(), ".cbx", LEGACY_POSTMAN_CONFIG_FILENAME);
4461
+ return path.join(resolveManagedHomeDir(), ".cbx", LEGACY_POSTMAN_CONFIG_FILENAME);
4437
4462
  }
4438
4463
  const workspaceRoot = findWorkspaceRoot(cwd);
4439
4464
  return path.join(workspaceRoot, LEGACY_POSTMAN_CONFIG_FILENAME);
4440
4465
  }
4441
4466
  function resolveCbxConfigPath({ scope, cwd = process.cwd() }) {
4442
4467
  if (scope === "global") {
4443
- return path.join(os.homedir(), ".cbx", CBX_CONFIG_FILENAME);
4468
+ return path.join(resolveManagedHomeDir(), ".cbx", CBX_CONFIG_FILENAME);
4444
4469
  }
4445
4470
  const workspaceRoot = findWorkspaceRoot(cwd);
4446
4471
  return path.join(workspaceRoot, CBX_CONFIG_FILENAME);
@@ -4456,13 +4481,19 @@ async function assertNoLegacyOnlyPostmanConfig({ scope, cwd = process.cwd() }) {
4456
4481
  }
4457
4482
  function resolveMcpRootPath({ scope, cwd = process.cwd() }) {
4458
4483
  if (scope === "global") {
4459
- return path.join(os.homedir(), ".cbx", "mcp");
4484
+ return path.join(resolveManagedHomeDir(), ".cbx", "mcp");
4460
4485
  }
4461
4486
  const workspaceRoot = findWorkspaceRoot(cwd);
4462
4487
  return path.join(workspaceRoot, ".cbx", "mcp");
4463
4488
  }
4489
+ function resolveManagedHomeDir() {
4490
+ const override = String(process.env.HOME ||
4491
+ process.env.USERPROFILE ||
4492
+ "").trim();
4493
+ return override || os.homedir();
4494
+ }
4464
4495
  function resolveManagedCredentialsEnvPath() {
4465
- return path.join(os.homedir(), ".cbx", CBX_CREDENTIALS_ENV_FILENAME);
4496
+ return path.join(resolveManagedHomeDir(), ".cbx", CBX_CREDENTIALS_ENV_FILENAME);
4466
4497
  }
4467
4498
  function parseShellEnvValue(rawValue) {
4468
4499
  const value = String(rawValue || "").trim();
@@ -5887,7 +5918,9 @@ async function configurePostmanInstallArtifacts({ platform, scope, profilePaths,
5887
5918
  })
5888
5919
  : null;
5889
5920
  const credentialEnvVarNames = [];
5890
- if (persistCredentials && shouldInstallPostman && effectiveApiKeySource === "env") {
5921
+ if (persistCredentials &&
5922
+ shouldInstallPostman &&
5923
+ effectiveApiKeySource === "env") {
5891
5924
  credentialEnvVarNames.push(effectiveApiKeyEnvVar || POSTMAN_API_KEY_ENV_VAR);
5892
5925
  }
5893
5926
  if (persistCredentials &&
@@ -6842,7 +6875,8 @@ function printPostmanSetupSummary({ postmanSetup }) {
6842
6875
  for (const ignoreResult of postmanSetup.gitIgnoreResults || []) {
6843
6876
  console.log(`- .gitignore (${ignoreResult.filePath}): ${ignoreResult.action}`);
6844
6877
  }
6845
- for (const cleanupResult of postmanSetup.legacyDefinitionCleanupResults || []) {
6878
+ for (const cleanupResult of postmanSetup.legacyDefinitionCleanupResults ||
6879
+ []) {
6846
6880
  console.log(`- Legacy direct MCP cleanup (${cleanupResult.path}): ${cleanupResult.action}`);
6847
6881
  }
6848
6882
  if (postmanSetup.mcpRuntimeResult) {
@@ -8835,7 +8869,7 @@ async function runWorkflowConfigKeysList(options) {
8835
8869
  scope,
8836
8870
  cwd,
8837
8871
  });
8838
- console.log(`Config file: ${configPath}`);
8872
+ console.log(`Config file: ${toPosixPath(configPath)}`);
8839
8873
  if (!existing.exists) {
8840
8874
  console.log("Status: missing");
8841
8875
  return;
@@ -9094,13 +9128,14 @@ async function runWorkflowConfigKeysMigrateInline(options) {
9094
9128
  }
9095
9129
  console.log(`Legacy direct MCP cleanup actions: ${cleanupResults.length}`);
9096
9130
  for (const cleanup of cleanupResults) {
9097
- console.log(`- ${cleanup.action} ${cleanup.path}`);
9131
+ console.log(`- ${cleanup.action} ${toPosixPath(cleanup.path)}`);
9098
9132
  }
9099
9133
  if (secureArtifacts?.mcpRuntimeResult) {
9100
- console.log(`Secure platform MCP target: ${secureArtifacts.mcpRuntimeResult.action} (${secureArtifacts.mcpRuntimeResult.path || "n/a"})`);
9134
+ console.log(`Secure platform MCP target: ${secureArtifacts.mcpRuntimeResult.action} (${secureArtifacts.mcpRuntimeResult.path ? toPosixPath(secureArtifacts.mcpRuntimeResult.path) : "n/a"})`);
9101
9135
  }
9102
- for (const cleanup of secureArtifacts?.legacyDefinitionCleanupResults || []) {
9103
- console.log(`- ${cleanup.action} ${cleanup.path}`);
9136
+ for (const cleanup of secureArtifacts?.legacyDefinitionCleanupResults ||
9137
+ []) {
9138
+ console.log(`- ${cleanup.action} ${toPosixPath(cleanup.path)}`);
9104
9139
  }
9105
9140
  for (const warning of secureArtifacts?.warnings || []) {
9106
9141
  console.log(`Warning: ${warning}`);
@@ -9126,13 +9161,16 @@ async function runWorkflowConfigKeysDoctor(options) {
9126
9161
  scope,
9127
9162
  cwd,
9128
9163
  });
9129
- console.log(`Config file: ${configPath}`);
9164
+ console.log(`Config file: ${toPosixPath(configPath)}`);
9130
9165
  if (!existing.exists) {
9131
9166
  console.log("Status: missing");
9132
9167
  return;
9133
9168
  }
9134
9169
  const configFindings = collectInlineCredentialFindings(existingValue);
9135
- const artifactFindings = await collectCredentialLeakFindings({ scope, cwd });
9170
+ const artifactFindings = await collectCredentialLeakFindings({
9171
+ scope,
9172
+ cwd,
9173
+ });
9136
9174
  const migrationPreview = migrateInlineCredentialsInConfig(existingValue);
9137
9175
  console.log(`Inline key findings: ${configFindings.length}`);
9138
9176
  for (const finding of configFindings) {
@@ -9140,7 +9178,7 @@ async function runWorkflowConfigKeysDoctor(options) {
9140
9178
  }
9141
9179
  console.log(`Credential leak findings: ${artifactFindings.length}`);
9142
9180
  for (const finding of artifactFindings) {
9143
- console.log(`- ${finding.filePath} [${finding.matches.join(", ")}]`);
9181
+ console.log(`- ${toPosixPath(finding.filePath)} [${finding.matches.join(", ")}]`);
9144
9182
  }
9145
9183
  if (migrationPreview.requiredEnvVars.length > 0) {
9146
9184
  console.log("Expected env vars:");
@@ -9928,7 +9966,7 @@ async function runMcpServe(options) {
9928
9966
  }
9929
9967
  function resolveCbxRootPath({ scope, cwd = process.cwd() }) {
9930
9968
  if (scope === "global") {
9931
- return path.join(os.homedir(), ".cbx");
9969
+ return path.join(resolveManagedHomeDir(), ".cbx");
9932
9970
  }
9933
9971
  const workspaceRoot = findWorkspaceRoot(cwd);
9934
9972
  return path.join(workspaceRoot, ".cbx");
@@ -10626,13 +10664,21 @@ function buildArchitecturePrompt({ platform, workspaceRoot, snapshot, specRoots,
10626
10664
  const label = item.rootPath === "." ? "repo root" : item.rootPath;
10627
10665
  return `${label}: ${item.architectureSignals.join(", ")}`;
10628
10666
  });
10667
+ const platformCapabilities = {
10668
+ codex: "You can read, write, and execute shell commands. Use `codex exec` mode.",
10669
+ claude: "You can read, write files, and run bash commands. Use non-interactive mode.",
10670
+ gemini: "You can read, write files, and run commands within your sandbox. Follow Gemini CLI conventions.",
10671
+ copilot: "You can read, write files, and use terminal commands. Follow Copilot agent conventions.",
10672
+ };
10629
10673
  return [
10630
10674
  `You are running inside ${platform}.`,
10675
+ platformCapabilities[platform] || "",
10631
10676
  "",
10632
10677
  "Objective:",
10633
10678
  `- Inspect the repository at ${toPosixPath(workspaceRoot)} and author or refresh the core foundation docs in ${productPath}, ${architecturePath}, ${techPath}, ${adrReadmePath}, and ${adrTemplatePath}.`,
10634
10679
  "- The content should be primarily AI-authored from repository inspection, not copied from placeholder scaffolding.",
10635
10680
  "- Preserve manual content outside the managed `cbx:*` markers.",
10681
+ "- The output docs must be immediately useful to any AI agent (Copilot, Claude, Gemini, Codex) inspecting this repo for the first time, reducing search and exploration time.",
10636
10682
  "",
10637
10683
  "Required skill bundle:",
10638
10684
  `- Load these exact skill IDs first: ${coreSkills.map((skillId) => `\`${skillId}\``).join(", ")}`,
@@ -10652,34 +10698,107 @@ function buildArchitecturePrompt({ platform, workspaceRoot, snapshot, specRoots,
10652
10698
  ? `- Architecture signals: ${architectureSignals.join(" | ")}`
10653
10699
  : "- Architecture signals: none confidently inferred from the repo scan",
10654
10700
  `- Entry points: ${snapshot.entryPoints.length > 0 ? snapshot.entryPoints.slice(0, 8).join(" | ") : "none detected"}`,
10655
- `- Key scripts: ${snapshot.keyScripts.length > 0 ? snapshot.keyScripts.slice(0, 8).map((item) => `${item.name}=${item.command}`).join(" | ") : "none detected"}`,
10701
+ `- Key scripts: ${snapshot.keyScripts.length > 0
10702
+ ? snapshot.keyScripts
10703
+ .slice(0, 8)
10704
+ .map((item) => `${item.name}=${item.command}`)
10705
+ .join(" | ")
10706
+ : "none detected"}`,
10656
10707
  `- Inspection anchors: ${inspectionAnchors.length > 0 ? inspectionAnchors.join(", ") : "no concrete anchors detected; inspect the repo root, main source trees, and manifest files manually"}`,
10657
10708
  "",
10709
+ "Markdown formatting rules (apply to all generated docs):",
10710
+ "- Start each file with a single `# Title` heading. Never use more than one H1 per file.",
10711
+ "- Use `## Heading` for major sections, `### Heading` for subsections. Never skip heading levels (e.g., do not jump from `##` to `####`).",
10712
+ "- Separate headings from surrounding content with exactly one blank line above and below.",
10713
+ "- Use fenced code blocks with triple backticks and a language identifier (```bash, ```typescript, ```json, ```yaml, ```mermaid) for all code, commands, and diagrams. Never use indented code blocks.",
10714
+ "- Use `-` for unordered lists. Use `1.` for ordered lists. Indent nested lists by 2 spaces.",
10715
+ "- Use `inline code` backticks for file paths, command names, env vars, config keys, and identifiers.",
10716
+ "- Use Mermaid fenced blocks (```mermaid) for diagrams. Validate that diagram syntax is correct: `graph TD`, `sequenceDiagram`, `flowchart LR`, or `C4Context` style. Every node and edge must be syntactically valid.",
10717
+ "- Tables must have a header row, a separator row with dashes and pipes, and aligned columns. Example:",
10718
+ " ```",
10719
+ " | Column A | Column B |",
10720
+ " | -------- | -------- |",
10721
+ " | value | value |",
10722
+ " ```",
10723
+ "- Use `> blockquote` only for callouts or important notes, prefixed with **Note:** or **Warning:**.",
10724
+ "- Relative links to other repo files should use repo-relative paths: `[ARCHITECTURE.md](docs/foundation/ARCHITECTURE.md)`.",
10725
+ "- End every file with a single trailing newline. No trailing whitespace on lines.",
10726
+ "",
10658
10727
  "Execution contract:",
10659
10728
  "1. Inspect the repository first before writing any backbone doc content. Derive structure, product surfaces, runtime boundaries, and technical constraints from the actual codebase.",
10660
10729
  "2. Complete a real inspection pass before drafting. At minimum, inspect the concrete anchors listed above, plus any adjacent directories needed to understand the main execution paths, data boundaries, and integration surfaces.",
10661
10730
  "3. Do not infer architecture from filenames alone when you can open representative files. Read enough source to validate the main app boundaries, runtime flows, and persistence/integration patterns.",
10662
10731
  `4. Then read ${productPath}, ${architecturePath}, and ${techPath} in that order when they exist so you can preserve useful manual context and update existing managed sections cleanly.`,
10663
10732
  `5. Replace or update only the content between the existing managed markers in ${productPath}, ${architecturePath}, and ${techPath}. Do not append a second marker block.`,
10664
- `6. In ${productPath}, write a concrete product foundation: product purpose, primary users/operators, main journeys, business capabilities, operational constraints, and what future contributors must preserve.`,
10665
- `7. In ${architecturePath}, write a lean but detailed architecture backbone in a pragmatic arc42/C4 style: system purpose and constraints, explicit architecture classification, bounded contexts, major building blocks, dependency rules, data and integration boundaries, runtime flows, deployment/operability notes, testing/debugging strategy, and only the diagram levels that add real value.`,
10666
- `8. ${architecturePath} must include a dedicated folder-structure guide that lists the important apps/packages/directories, what each owns, and how contributors should treat those boundaries when editing code.`,
10667
- `9. In ${techPath}, write the developer-facing technical map: stack, repo layout, key commands, entrypoints, data stores, external services, environment/config surfaces, MCP/tooling footprint, and change hotspots future agents should inspect before editing code.`,
10668
- `10. ${techPath} should complement ${architecturePath}; do not repeat the same structure prose unless it helps a developer act faster.`,
10669
- `11. Use exact required headings in ${productPath}: \`## Product Scope\`, \`## Product Purpose\`, \`## Primary Users And Operators\`, \`## Main Journeys\`, \`## Business Capabilities That Matter\`, \`## Operational Constraints\`, \`## Preservation Rules For Future Contributors\`.`,
10670
- `12. Use exact required headings in ${architecturePath}: \`## Architecture Type\`, \`## System Purpose\`, \`## Constraints And Architectural Drivers\`, \`## Repository Structure Guide\`, \`## Bounded Contexts\`, \`## Major Building Blocks\`, \`## Dependency Rules\`, \`## Data Boundaries\`, \`## Integration Boundaries\`, \`## Runtime Flows\`, \`## Deployment And Operability\`, \`## Testing And Debugging Strategy\`, \`## Architectural Guidance\`.`,
10671
- `13. Use exact required headings in ${techPath}: \`## Stack Snapshot\`, \`## Repository Layout\`, \`## Entrypoints\`, \`## Key Commands\`, \`## Runtime Data Stores\`, \`## External Services And Integration Surfaces\`, \`## Environment And Config Surfaces\`, \`## Generated Artifacts To Respect\`, \`## Change Hotspots\`, \`## Practical Editing Notes\`.`,
10672
- "14. Every major claim should be grounded in repository evidence. Mention concrete repo paths in the docs when a structural claim would otherwise be ambiguous.",
10673
- "15. Avoid placeholder filler, generic checklists, and duplicated content across files. Each doc should have a clear job.",
10674
- "16. Do not create ROADMAP.md, ENGINEERING_RULES.md, or other extra docs unless the prompt explicitly asks for them.",
10733
+ "",
10734
+ `6. In ${productPath}, write a concrete product foundation:`,
10735
+ " - Product purpose with a one-sentence elevator pitch an AI agent can use as context.",
10736
+ " - Primary users/operators with their key goals.",
10737
+ " - Main journeys as numbered sequences an agent can follow to understand the happy path.",
10738
+ " - Business capabilities that matter, linked to repo paths that implement them.",
10739
+ " - Operational constraints and SLA/uptime expectations if evident.",
10740
+ " - What future contributors must preserve (invariants, contracts, compatibility guarantees).",
10741
+ " - A domain glossary defining project-specific terms, abbreviations, and bounded-context language so AI agents use consistent vocabulary.",
10742
+ "",
10743
+ `7. In ${architecturePath}, write a lean but detailed architecture backbone in a pragmatic arc42/C4 style:`,
10744
+ " - Architecture classification (monolith, modular monolith, microservices, serverless, hybrid) with evidence.",
10745
+ " - System purpose, constraints, and architectural drivers.",
10746
+ " - Bounded contexts with ownership boundaries mapped to directories.",
10747
+ " - Major building blocks as a table or Mermaid C4 diagram with responsibilities.",
10748
+ " - Dependency rules: what can import what, forbidden coupling, and layering policy.",
10749
+ " - Data and integration boundaries with protocol/format details.",
10750
+ " - Runtime flows as Mermaid sequence diagrams for the top 2-3 critical paths.",
10751
+ " - Crosscutting concerns: logging, auth, error handling, i18n patterns with repo-path evidence.",
10752
+ " - Quality requirements derived from the codebase (performance budgets, test coverage, accessibility).",
10753
+ " - Known risks and tech debt visible in the codebase (TODOs, deprecated deps, missing tests).",
10754
+ " - Deployment/operability notes.",
10755
+ " - Testing/debugging strategy with concrete test commands and coverage tooling.",
10756
+ " - A dedicated folder-structure guide listing every important directory, what it owns, and contributor rules.",
10757
+ "",
10758
+ `8. In ${techPath}, write the developer-facing technical map that an AI agent can use to start working immediately:`,
10759
+ " - Stack snapshot as a table (runtime, language, framework, version if discoverable).",
10760
+ " - Repository layout: directory tree with one-line purpose per directory.",
10761
+ " - Entrypoints: the exact files that bootstrap each app/service/CLI.",
10762
+ " - Key commands: the exact shell commands for bootstrap, build, test, lint, format, run, and deploy. Validate that these commands actually exist in the project manifests. Document required order and preconditions.",
10763
+ " - Build and validation: the validated sequence of commands to go from clean clone to passing CI locally, including environment prerequisites (Node version, Python version, Docker, etc.).",
10764
+ " - CI/CD pipeline: describe the CI/CD workflow files, their triggers, and what checks must pass before merge.",
10765
+ " - Runtime data stores and migration commands.",
10766
+ " - External services and integration surfaces with protocol details.",
10767
+ " - Environment and config surfaces: list every env var the app reads, its purpose, default value if any, and whether it is required or optional.",
10768
+ " - MCP/tooling footprint if present.",
10769
+ " - Generated artifacts: files that are auto-generated and must not be hand-edited.",
10770
+ " - Error patterns and debugging: common error messages, their causes, and resolution steps discovered during inspection.",
10771
+ " - Change hotspots: files/directories that change most often or have the most coupling, so agents know where to look first.",
10772
+ " - Practical editing notes: conventions for naming, imports, test file placement, and PR hygiene.",
10773
+ "",
10774
+ `9. ${techPath} should complement ${architecturePath}; do not repeat the same structure prose unless it helps a developer act faster.`,
10775
+ "",
10776
+ `10. Use exact required headings in ${productPath}: \`## Product Scope\`, \`## Product Purpose\`, \`## Primary Users And Operators\`, \`## Main Journeys\`, \`## Business Capabilities That Matter\`, \`## Operational Constraints\`, \`## Preservation Rules For Future Contributors\`, \`## Domain Glossary\`.`,
10777
+ `11. Use exact required headings in ${architecturePath}: \`## Architecture Type\`, \`## System Purpose\`, \`## Constraints And Architectural Drivers\`, \`## Repository Structure Guide\`, \`## Bounded Contexts\`, \`## Major Building Blocks\`, \`## Dependency Rules\`, \`## Data Boundaries\`, \`## Integration Boundaries\`, \`## Runtime Flows\`, \`## Crosscutting Concerns\`, \`## Quality Requirements\`, \`## Risks And Tech Debt\`, \`## Deployment And Operability\`, \`## Testing And Debugging Strategy\`, \`## Architectural Guidance\`.`,
10778
+ `12. Use exact required headings in ${techPath}: \`## Stack Snapshot\`, \`## Repository Layout\`, \`## Entrypoints\`, \`## Key Commands\`, \`## Build And Validation\`, \`## CI CD Pipeline\`, \`## Runtime Data Stores\`, \`## External Services And Integration Surfaces\`, \`## Environment And Config Surfaces\`, \`## Generated Artifacts To Respect\`, \`## Error Patterns And Debugging\`, \`## Change Hotspots\`, \`## Practical Editing Notes\`.`,
10779
+ "",
10780
+ "13. Every major claim should be grounded in repository evidence. Mention concrete repo paths in the docs when a structural claim would otherwise be ambiguous.",
10781
+ "14. Avoid placeholder filler, generic checklists, and duplicated content across files. Each doc should have a clear job.",
10782
+ "15. Do not create ROADMAP.md, ENGINEERING_RULES.md, or other extra docs unless the prompt explicitly asks for them.",
10675
10783
  researchMode === "never"
10676
- ? "17. Stay repo-only. Do not use outside research."
10677
- : "17. Use repo evidence first. Use official docs when needed. Treat Reddit or community sources only as labeled secondary evidence.",
10784
+ ? "16. Stay repo-only. Do not use outside research."
10785
+ : "16. Use repo evidence first. Use official docs when needed. Treat Reddit or community sources only as labeled secondary evidence.",
10678
10786
  researchMode === "always"
10679
- ? `18. Include an external research evidence subsection in ${techPath} with clearly labeled primary and secondary evidence.`
10680
- : "18. Include external research notes only if they materially informed the architecture update.",
10681
- `19. If the project clearly follows Clean Architecture, feature-first modules, DDD, modular monolith, or another stable structure, make that explicit in ${architecturePath} with evidence from the repo.`,
10682
- `20. Ensure ${adrReadmePath} and ${adrTemplatePath} exist as ADR entrypoints, but keep them lean.`,
10787
+ ? `17. Include an external research evidence subsection in ${techPath} with clearly labeled primary and secondary evidence.`
10788
+ : "17. Include external research notes only if they materially informed the architecture update.",
10789
+ `18. If the project clearly follows Clean Architecture, feature-first modules, DDD, modular monolith, or another stable structure, make that explicit in ${architecturePath} with evidence from the repo.`,
10790
+ `19. Ensure ${adrReadmePath} and ${adrTemplatePath} exist as ADR entrypoints, but keep them lean.`,
10791
+ "20. In all docs, when referencing other foundation docs, use relative markdown links: `[ARCHITECTURE.md](docs/foundation/ARCHITECTURE.md)`. This lets AI agents and humans navigate between docs.",
10792
+ "21. Validate all Mermaid diagram syntax before writing. Each diagram must render without errors. Use simple node IDs (alphanumeric, no special characters) and quote labels containing spaces.",
10793
+ "22. For each key command documented, note the expected exit code (0 for success) and any common failure modes. This helps AI agents validate their own changes.",
10794
+ "",
10795
+ "Platform context-loading awareness (these docs will be @imported into agent rule files):",
10796
+ "- Claude loads CLAUDE.md at session start via @file imports; each imported doc should be concise and self-contained.",
10797
+ "- Gemini loads GEMINI.md hierarchically with JIT context; structure docs with clear H2 headings so sections are independently useful.",
10798
+ "- Codex concatenates AGENTS.md files root-to-CWD with a default 32 KiB combined limit; keep total foundation doc prose lean.",
10799
+ "- Copilot loads copilot-instructions.md automatically; headings and inline code markers aid discoverability.",
10800
+ "- Target each individual foundation doc under 300 lines so it stays effective when imported into any platform's context window.",
10801
+ "- Front-load the most actionable information (commands, paths, constraints) in each doc; put supplementary detail later.",
10683
10802
  "",
10684
10803
  "Return one JSON object on the last line with this shape:",
10685
10804
  `{"files_written":["${productPath}","${architecturePath}","${techPath}","${adrReadmePath}","${adrTemplatePath}"],"research_used":false,"gaps":[],"next_actions":[]}`,
@@ -10687,9 +10806,34 @@ function buildArchitecturePrompt({ platform, workspaceRoot, snapshot, specRoots,
10687
10806
  "Do not emit placeholder TODOs in the managed sections.",
10688
10807
  ].join("\n");
10689
10808
  }
10809
+ let architectureExecFileCaptureOverride = null;
10810
+ let architectureSpawnCaptureOverride = null;
10811
+ export function __setArchitectureCommandCaptureForTests(overrides = {}) {
10812
+ architectureExecFileCaptureOverride =
10813
+ overrides.execFileCapture || architectureExecFileCaptureOverride;
10814
+ architectureSpawnCaptureOverride =
10815
+ overrides.spawnCapture || architectureSpawnCaptureOverride;
10816
+ }
10817
+ export function __resetArchitectureCommandCaptureForTests() {
10818
+ architectureExecFileCaptureOverride = null;
10819
+ architectureSpawnCaptureOverride = null;
10820
+ }
10690
10821
  async function execFileCapture(command, args, options = {}) {
10822
+ if (architectureExecFileCaptureOverride) {
10823
+ return await architectureExecFileCaptureOverride(command, args, options);
10824
+ }
10825
+ const resolvedCommand = process.platform === "win32"
10826
+ ? await resolveWindowsCommand(command)
10827
+ : command;
10828
+ if (process.platform === "win32" &&
10829
+ /\.(cmd|bat)$/i.test(resolvedCommand)) {
10830
+ return await spawnCapture(resolvedCommand, args, {
10831
+ ...options,
10832
+ useShell: true,
10833
+ });
10834
+ }
10691
10835
  try {
10692
- const result = await execFile(command, args, {
10836
+ const result = await execFile(resolvedCommand, args, {
10693
10837
  ...options,
10694
10838
  maxBuffer: 8 * 1024 * 1024,
10695
10839
  });
@@ -10711,14 +10855,50 @@ async function execFileCapture(command, args, options = {}) {
10711
10855
  };
10712
10856
  }
10713
10857
  }
10858
+ async function resolveWindowsCommand(command) {
10859
+ if (process.platform !== "win32")
10860
+ return command;
10861
+ if (path.isAbsolute(command) || /[\\/]/.test(command))
10862
+ return command;
10863
+ try {
10864
+ const result = await execFile("where.exe", [command], {
10865
+ windowsHide: true,
10866
+ maxBuffer: 1024 * 1024,
10867
+ });
10868
+ const resolved = String(result.stdout || "")
10869
+ .split(/\r?\n/)
10870
+ .map((line) => line.trim())
10871
+ .find(Boolean);
10872
+ if (resolved)
10873
+ return resolved;
10874
+ }
10875
+ catch (error) {
10876
+ if (error?.code === "ENOENT") {
10877
+ throw new Error(`Required CLI '${command}' is not installed or not on PATH.`);
10878
+ }
10879
+ }
10880
+ const missingError = new Error(`Required CLI '${command}' is not installed or not on PATH.`);
10881
+ missingError.code = "ENOENT";
10882
+ throw missingError;
10883
+ }
10714
10884
  async function spawnCapture(command, args, options = {}) {
10715
- const { cwd, env, streamOutput = false } = options;
10885
+ if (architectureSpawnCaptureOverride) {
10886
+ return await architectureSpawnCaptureOverride(command, args, options);
10887
+ }
10888
+ const { cwd, env, streamOutput = false, useShell } = options;
10889
+ const resolvedCommand = process.platform === "win32"
10890
+ ? await resolveWindowsCommand(command)
10891
+ : command;
10892
+ const shell = typeof useShell === "boolean"
10893
+ ? useShell
10894
+ : process.platform === "win32" && /\.(cmd|bat)$/i.test(resolvedCommand);
10716
10895
  return await new Promise((resolve, reject) => {
10717
10896
  let stdout = "";
10718
10897
  let stderr = "";
10719
- const child = spawn(command, args, {
10898
+ const child = spawn(resolvedCommand, args, {
10720
10899
  cwd,
10721
10900
  env,
10901
+ shell,
10722
10902
  stdio: ["ignore", "pipe", "pipe"],
10723
10903
  });
10724
10904
  child.stdout.on("data", (chunk) => {
@@ -11147,8 +11327,10 @@ async function runBuildArchitecture(options) {
11147
11327
  const changedFiles = managedFilePaths
11148
11328
  .filter((filePath) => filesBefore[filePath] !== filesAfter[filePath])
11149
11329
  .map((filePath) => toPosixPath(path.relative(workspaceRoot, filePath)));
11150
- const techContent = filesAfter[scaffold.techMdPath] ?? (await readFile(scaffold.techMdPath, "utf8"));
11151
- const productContent = filesAfter[scaffold.productPath] ?? (await readFile(scaffold.productPath, "utf8"));
11330
+ const techContent = filesAfter[scaffold.techMdPath] ??
11331
+ (await readFile(scaffold.techMdPath, "utf8"));
11332
+ const productContent = filesAfter[scaffold.productPath] ??
11333
+ (await readFile(scaffold.productPath, "utf8"));
11152
11334
  const architectureContent = filesAfter[scaffold.architectureDocPath] ??
11153
11335
  (await readFile(scaffold.architectureDocPath, "utf8"));
11154
11336
  const metadataPath = path.join(workspaceRoot, ".cbx", ARCHITECTURE_BUILD_METADATA_FILENAME);
@@ -11219,12 +11401,7 @@ function normalizeInitPlatforms(value) {
11219
11401
  return normalized;
11220
11402
  }
11221
11403
  function normalizeInitMcpSelections(value) {
11222
- const allowed = new Set([
11223
- "cubis-foundry",
11224
- "postman",
11225
- "stitch",
11226
- "playwright",
11227
- ]);
11404
+ const allowed = new Set(["cubis-foundry", "postman", "stitch", "playwright"]);
11228
11405
  const items = Array.isArray(value) ? value : parseCsvOption(value);
11229
11406
  const normalized = [];
11230
11407
  for (const item of items) {