@llm-dev-ops/agentics-cli 1.7.2 → 1.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -221,6 +221,102 @@ function mergeRufloCacheIntoPhase(runDir, phaseNum) {
221
221
  * Phase 1 artifacts must already exist at ~/.agentics/runs/<traceId>/.
222
222
  * Each subsequent phase reads the prior phase's output from the same run directory.
223
223
  */
224
/**
 * ADR-PIPELINE-028: Nuclear ADR guarantee. Call before returning from executeAutoChain.
 * Checks if ADRs exist anywhere in the run directory. If not, generates them from
 * the scenario query using the template builder (no LLM required).
 *
 * Best-effort by design: every failure path logs and returns rather than throwing,
 * so a failed guarantee never masks the pipeline's own result.
 *
 * @param {string} runDir - Run directory (~/.agentics/runs/<traceId>).
 * @param {string} traceId - Pipeline trace ID, embedded in the fallback dossier.
 * @param {string} scenarioQuery - Scenario query from the Phase 1 manifest; when
 *   empty there is nothing to derive ADRs from, so the function is a no-op.
 * @returns {Promise<void>}
 */
async function ensureAdrsExist(runDir, traceId, scenarioQuery) {
    if (!scenarioQuery)
        return;
    // Either location satisfies the guarantee — downstream consumers check both.
    const adrLocations = [
        path.join(runDir, 'phase4', 'adrs', 'adr-index.json'),
        path.join(runDir, 'phase2', 'adrs', 'adr-index.json'),
    ];
    const hasAdrs = adrLocations.some(p => {
        try {
            if (!fs.existsSync(p))
                return false;
            const content = JSON.parse(fs.readFileSync(p, 'utf-8'));
            // An empty or malformed index does not count as "ADRs exist".
            return Array.isArray(content) && content.length > 0;
        }
        catch {
            return false;
        }
    });
    if (hasAdrs)
        return;
    console.error('');
    console.error(' [ADR-028] No ADRs found — generating guaranteed set');
    try {
        // Lazy imports: the generators are only needed on this fallback path.
        const { buildPhase2ADRs } = await import('./phase2/phases/adr-generator.js');
        const { buildPhase2DDD } = await import('./phase2/phases/ddd-generator.js');
        // Load SPARC/dossier from any available location, or use empty defaults
        let sparc = { architecture: { services: [] }, specification: { requirements: [], constraints: [] } };
        let dossier = { items: [], crossReferences: [], summary: { totalItems: 0, bySource: {}, byCategory: {}, keyRisks: [], criticalConstraints: [] }, id: 'fallback', traceId, generatedFrom: 'adr-028-guarantee' };
        // Prefer the most-refined SPARC artifact (phase3 combined) and fall back.
        for (const sub of ['phase3/sparc/sparc-combined.json', 'phase2/sparc/sparc-combined.json', 'phase2/sparc-specification.json']) {
            const p = path.join(runDir, sub);
            if (fs.existsSync(p)) {
                try {
                    sparc = JSON.parse(fs.readFileSync(p, 'utf-8'));
                    break;
                }
                catch { /* skip unparseable candidate, try the next location */ }
            }
        }
        for (const sub of ['phase2/research-dossier.json', 'research-dossier.json']) {
            const p = path.join(runDir, sub);
            if (fs.existsSync(p)) {
                try {
                    dossier = JSON.parse(fs.readFileSync(p, 'utf-8'));
                    break;
                }
                catch { /* skip unparseable candidate, try the next location */ }
            }
        }
        const adrs = buildPhase2ADRs(sparc, dossier, scenarioQuery);
        if (adrs.length === 0)
            return;
        // Write to BOTH phase4 and phase2 so downstream consumers find them regardless of which path they check
        for (const targetDir of [path.join(runDir, 'phase4'), path.join(runDir, 'phase2')]) {
            const adrDir = path.join(targetDir, 'adrs');
            fs.mkdirSync(adrDir, { recursive: true });
            fs.writeFileSync(path.join(adrDir, 'adr-index.json'), JSON.stringify(adrs, null, 2) + '\n', 'utf-8');
            for (const adr of adrs) {
                // FIX: strip leading/trailing hyphens (and guard the all-punctuation
                // case) so filenames never come out as "ADR-001--foo-.md" — matches
                // the slug convention used for ADR-derived phases elsewhere in this file.
                const slug = (adr.title || 'untitled').toLowerCase().replace(/[^a-z0-9]+/g, '-').slice(0, 60).replace(/^-+|-+$/g, '') || 'untitled';
                const md = [
                    `# ${adr.id}: ${adr.title}`,
                    `\n**Status:** ${adr.status}`,
                    `**Date:** ${adr.date}`,
                    `\n## Context\n${adr.context}`,
                    `\n## Decision\n${adr.decision}`,
                    adr.alternatives?.length > 0 ? `\n## Alternatives Considered\n${adr.alternatives.map((a) => `- **${a.option}** ${a.rejected ? '(rejected)' : '(selected)'}: ${a.rationale}`).join('\n')}` : '',
                    adr.consequences?.length > 0 ? `\n## Consequences\n${adr.consequences.map((c) => `- [${c.type === 'positive' ? '+' : c.type === 'negative' ? '-' : '~'}] ${c.description}`).join('\n')}` : '',
                ].filter(Boolean).join('\n');
                fs.writeFileSync(path.join(adrDir, `${adr.id}-${slug}.md`), md + '\n', 'utf-8');
            }
        }
        console.error(` [ADR-028] Generated ${adrs.length} ADRs (phase4/adrs/ + phase2/adrs/)`);
        // Also generate DDD model
        try {
            const dddModel = buildPhase2DDD(sparc, adrs, dossier, scenarioQuery);
            if (dddModel?.contexts?.length > 0) {
                for (const targetDir of [path.join(runDir, 'phase4'), path.join(runDir, 'phase2')]) {
                    const dddDir = path.join(targetDir, 'ddd');
                    fs.mkdirSync(dddDir, { recursive: true });
                    fs.writeFileSync(path.join(dddDir, 'domain-model.json'), JSON.stringify(dddModel, null, 2) + '\n', 'utf-8');
                }
                console.error(` [ADR-028] Generated DDD model (${dddModel.contexts.length} bounded contexts)`);
            }
        }
        catch {
            // DDD is best-effort
        }
    }
    catch (err) {
        console.error(` [ADR-028] ADR guarantee failed: ${err instanceof Error ? err.message : String(err)}`);
    }
}
224
320
  export async function executeAutoChain(traceId, options = {}) {
225
321
  const pipelineStart = Date.now();
226
322
  const homeDir = process.env['HOME'] ?? '/tmp';
@@ -234,13 +330,15 @@ export async function executeAutoChain(traceId, options = {}) {
234
330
  let scenarioBranch;
235
331
  let originalBranch;
236
332
  let scenarioQuery = '';
237
- // Read scenario query from Phase 1 manifest (needed for branch naming)
333
+ // Read scenario query and simulation ID from Phase 1 manifest
334
+ let simulationId = '';
238
335
  try {
239
336
  const manifestPath = path.join(runDir, 'manifest.json');
240
337
  if (fs.existsSync(manifestPath)) {
241
338
  const raw = fs.readFileSync(manifestPath, 'utf-8');
242
339
  const manifest = JSON.parse(raw);
243
340
  scenarioQuery = manifest.query ?? '';
341
+ simulationId = manifest.simulation_id ?? manifest.execution_id ?? '';
244
342
  }
245
343
  }
246
344
  catch {
@@ -343,6 +441,7 @@ export async function executeAutoChain(traceId, options = {}) {
343
441
  recordPhaseFailure(PHASE_AGENTS[2], traceId, errMsg);
344
442
  phases.push({ phase: 2, label: 'Deep Research', status: 'failed', timing: Date.now() - phaseStart, artifacts: [], outputDir: phaseDir, error: errMsg });
345
443
  copyPlanningArtifacts(runDir, projectRoot);
444
+ await ensureAdrsExist(runDir, traceId, scenarioQuery);
346
445
  return buildResult(traceId, runDir, phases, pipelineStart, mode);
347
446
  }
348
447
  }
@@ -439,6 +538,7 @@ export async function executeAutoChain(traceId, options = {}) {
439
538
  console.error(' [ABORT] No Phase 2 SPARC artifacts to recover from — pipeline cannot continue');
440
539
  phases.push({ phase: 3, label: 'SPARC + London TDD', status: 'failed', timing: Date.now() - phaseStart, artifacts: [], outputDir: phaseDir, error: errMsg });
441
540
  copyPlanningArtifacts(runDir, projectRoot);
541
+ await ensureAdrsExist(runDir, traceId, scenarioQuery);
442
542
  return buildResult(traceId, runDir, phases, pipelineStart, mode);
443
543
  }
444
544
  }
@@ -869,23 +969,34 @@ The prompts must be production-grade — they will be given directly to a coding
869
969
  derivedPhases.push({ title: heading, slug, content: section, folder, adrIds: matchedAdrIds });
870
970
  }
871
971
  }
872
- // If SPARC didn't produce enough sections, fall back to ADR-derived phases
873
- if (derivedPhases.length < 3) {
874
- // Group ADRs into logical clusters
972
+ // ADR-PIPELINE-031: Enrich with ADR-derived phases when SPARC sections alone are insufficient.
973
+ // Each ADR that isn't already covered by a SPARC-derived phase becomes its own build step.
974
+ if (derivedPhases.length < 8 && adrs.length > 0) {
975
+ const existingSlugs = new Set(derivedPhases.map(p => p.slug));
976
+ const existingTitleWords = new Set(derivedPhases.flatMap(p => p.title.toLowerCase().split(/\W+/).filter(w => w.length > 3)));
875
977
  for (const adr of adrs) {
876
978
  const slug = adr.title.toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/^-|-$/g, '').slice(0, 50);
979
+ // Skip if a phase with similar slug or overlapping title already exists
980
+ if (existingSlugs.has(slug))
981
+ continue;
982
+ const adrKeywords = adr.title.toLowerCase().split(/\W+/).filter(w => w.length > 3);
983
+ const overlap = adrKeywords.filter(w => existingTitleWords.has(w)).length;
984
+ if (overlap >= 2)
985
+ continue; // sufficiently covered by an existing phase
877
986
  const adrLower = `${adr.title} ${adr.context} ${adr.decision}`.toLowerCase();
878
987
  let folder = 'backend';
879
988
  if (/frontend|ui|dashboard/.test(adrLower))
880
989
  folder = 'frontend';
881
- else if (/erp|netsuite|sap|dynamics/.test(adrLower))
990
+ else if (/erp|netsuite|sap|dynamics|coupa|workday|maximo|oracle/.test(adrLower))
882
991
  folder = 'erp';
883
- else if (/integration|connector/.test(adrLower))
992
+ else if (/integration|connector|webhook/.test(adrLower))
884
993
  folder = 'integrations';
885
994
  else if (/deploy|infra|docker/.test(adrLower))
886
995
  folder = 'src';
887
996
  else if (/test|quality/.test(adrLower))
888
997
  folder = 'tests';
998
+ else if (/audit|governance|approval/.test(adrLower))
999
+ folder = 'backend';
889
1000
  derivedPhases.push({
890
1001
  title: adr.title,
891
1002
  slug,
@@ -893,6 +1004,9 @@ The prompts must be production-grade — they will be given directly to a coding
893
1004
  folder,
894
1005
  adrIds: [adr.id],
895
1006
  });
1007
+ existingSlugs.add(slug);
1008
+ for (const w of adrKeywords)
1009
+ existingTitleWords.add(w);
896
1010
  }
897
1011
  }
898
1012
  // Always add foundation (first) and testing/deployment (last) if not present
@@ -934,6 +1048,186 @@ The prompts must be production-grade — they will be given directly to a coding
934
1048
  adrIds: [],
935
1049
  });
936
1050
  }
1051
+ // ADR-PIPELINE-031: Guarantee ≥10 prompts by adding standard cross-cutting phases
1052
+ // when domain-specific + ADR-derived phases don't reach the minimum.
1053
+ const crossCutting = [
1054
+ { title: 'Observability & Structured Logging', slug: 'observability', folder: 'backend',
1055
+ content: `ADR-PIPELINE-034: Implement complete observability infrastructure.
1056
+
1057
+ ## 1. Structured Logger (src/logger.ts)
1058
+
1059
+ Create a logger factory that outputs JSON to stderr:
1060
+
1061
+ \`\`\`typescript
1062
+ export interface Logger {
1063
+ info(event: string, data?: Record<string, unknown>): void;
1064
+ warn(event: string, data?: Record<string, unknown>): void;
1065
+ error(event: string, error: Error, data?: Record<string, unknown>): void;
1066
+ }
1067
+
1068
+ export function createLogger(service: string): Logger {
1069
+ // Each log entry: { timestamp, level, service, event, correlationId?, ...data }
1070
+ // Output to stderr as single-line JSON
1071
+ }
1072
+
1073
+ // Correlation ID context — thread through entire request lifecycle
1074
+ export function withCorrelationId<T>(correlationId: string, fn: () => T): T;
1075
+ export function getCorrelationId(): string | undefined;
1076
+ \`\`\`
1077
+
1078
+ ## 2. Correlation ID Threading
1079
+
1080
+ Every analysis run generates a correlationId (UUID). This ID flows through:
1081
+ - analysis.started → analysis.excess_detected → analysis.completed
1082
+ - decision.created → erp.sync.attempted → erp.sync.succeeded/failed
1083
+
1084
+ Pass correlationId as a parameter through service functions or use AsyncLocalStorage.
1085
+
1086
+ ## 3. Instrumentation Points (minimum)
1087
+
1088
+ | Event | When | Data |
1089
+ |-------|------|------|
1090
+ | analysis.started | Analysis begins | { correlationId, supplierCount, recordCount } |
1091
+ | analysis.completed | Analysis finishes | { correlationId, durationMs, flagCount, opportunityCount } |
1092
+ | decision.created | Decision recorded | { correlationId, decisionId, opportunityId, status } |
1093
+ | erp.sync.attempted | ERP write starts | { correlationId, transactionId, supplierId, action } |
1094
+ | erp.sync.succeeded | ERP write succeeds | { correlationId, transactionId, responseTime } |
1095
+ | erp.sync.failed | ERP write fails | { correlationId, transactionId, error, retryCount } |
1096
+
1097
+ ## 4. ERP Error Handling
1098
+
1099
+ The ERP connector must handle:
1100
+ - Network timeout: configurable (default 30s), log erp.sync.failed with timeout error
1101
+ - Rate limiting (429): exponential backoff (1s, 2s, 4s, max 3 retries), log each retry
1102
+ - Schema validation: log and quarantine invalid records, don't crash the pipeline
1103
+ - Idempotency: generate idempotency key per transaction to prevent duplicate writes on retry
1104
+
1105
+ ## 5. Health Check
1106
+
1107
+ If the project includes an API layer, add GET /health returning:
1108
+ \`\`\`json
1109
+ { "status": "ok", "service": "packaging-waste-optimizer", "version": "0.1.0", "uptime": 12345, "lastAnalysisRun": "2026-04-09T..." }
1110
+ \`\`\`
1111
+
1112
+ ## 6. Tests
1113
+
1114
+ - Test that logger outputs valid JSON to stderr
1115
+ - Test that correlationId flows through analysis → decision → ERP sync
1116
+ - Test that ERP error handling retries on 429 and logs failures` },
1117
+ { title: 'Configuration & Environment Management', slug: 'configuration', folder: 'src',
1118
+ content: `ADR-PIPELINE-034: Configuration module for environment-based settings.
1119
+
1120
+ Create src/config.ts with a typed configuration object:
1121
+
1122
+ \`\`\`typescript
1123
+ export interface AppConfig {
1124
+ env: 'development' | 'staging' | 'production';
1125
+ logLevel: 'debug' | 'info' | 'warn' | 'error';
1126
+ erp: {
1127
+ baseUrl: string;
1128
+ apiVersion: string;
1129
+ clientId: string;
1130
+ timeoutMs: number; // default 30000
1131
+ maxRetries: number; // default 3
1132
+ };
1133
+ analysis: {
1134
+ // Domain-specific thresholds — externalized, not hardcoded
1135
+ [key: string]: number | string | boolean;
1136
+ };
1137
+ }
1138
+ \`\`\`
1139
+
1140
+ Requirements:
1141
+ - Read all values from environment variables (never hardcode secrets)
1142
+ - Validate at startup — fail fast with clear error messages for missing required vars
1143
+ - Provide sensible defaults for optional values (timeoutMs, maxRetries, logLevel)
1144
+ - Export a singleton config object used by all services
1145
+ - Include tests that verify validation catches missing required vars` },
1146
+ { title: 'API Layer & Request Handling', slug: 'api-layer', folder: 'backend',
1147
+ content: 'Build REST API endpoints exposing core domain operations. Input validation at API boundary (Zod or equivalent). Authentication and authorization middleware. Structured error responses with correlation IDs. Rate limiting.' },
1148
+ { title: 'Persistence & Data Access Layer', slug: 'persistence', folder: 'backend',
1149
+ content: 'Implement repository interfaces per aggregate (ports pattern). Database adapter implementations. Audit trail persistence (append-only, tamper-evident). Transaction management for multi-aggregate operations.' },
1150
+ { title: 'Demo Script & CLI Entry Point', slug: 'demo', folder: 'src',
1151
+ content: `ADR-PIPELINE-037: User-facing entry points for the prototype.
1152
+
1153
+ ## 1. Demo Script (src/demo.ts) — REQUIRED
1154
+
1155
+ Create a runnable demo (\`npx tsx src/demo.ts\`) that:
1156
+ 1. Loads all seed data
1157
+ 2. Runs the complete analysis pipeline end-to-end
1158
+ 3. Prints formatted results to stdout using tables (not raw JSON):
1159
+ - Summary table: key metrics, top findings
1160
+ - Per-entity breakdown (e.g., per-supplier, per-zone, per-employee)
1161
+ - Scenario comparison table with tradeoff columns
1162
+ 4. Demonstrates the decision/approval workflow:
1163
+ - Create a draft decision for the top opportunity
1164
+ - Approve it with a test approver
1165
+ 5. Shows the ERP sync payload:
1166
+ - Print the structured payload that would be sent to the ERP
1167
+ - Mark as "dry run" — do not actually sync
1168
+
1169
+ The demo must run without any external dependencies (no database, no API keys, no network).
1170
+
1171
+ ## 2. CLI Interface (src/cli.ts) — OPTIONAL
1172
+
1173
+ If time permits, create a minimal CLI:
1174
+ \`\`\`
1175
+ npx tsx src/cli.ts --analyze # Run analysis, print results
1176
+ npx tsx src/cli.ts --scenarios # Show scenario comparison
1177
+ npx tsx src/cli.ts --decide <id> # Create decision for opportunity
1178
+ npx tsx src/cli.ts --sync <id> # Dry-run ERP sync for decision
1179
+ npx tsx src/cli.ts --help # Usage examples
1180
+ \`\`\`
1181
+
1182
+ ## 3. Architecture Divergence ADR — REQUIRED
1183
+
1184
+ Create \`docs/ADR-001-monolithic-prototype.md\` in the generated project:
1185
+
1186
+ \`\`\`markdown
1187
+ # ADR-001: Monolithic Library for Prototype Phase
1188
+
1189
+ ## Status
1190
+ Accepted
1191
+
1192
+ ## Context
1193
+ The SPARC architecture specification describes multiple microservices with event-driven
1194
+ communication. For the prototype evaluation period (3-7 days), deploying and operating
1195
+ microservices adds unnecessary complexity without delivering proportional value.
1196
+
1197
+ ## Decision
1198
+ Build as a single TypeScript library with clean module boundaries:
1199
+ - analysis/ — Pure computation, no side effects
1200
+ - decisions/ — State management with audit trail
1201
+ - erp/ — External system integration (adapter pattern)
1202
+ - data/ — Seed data and data access
1203
+
1204
+ Each module exports a barrel (index.ts) and communicates via typed interfaces,
1205
+ not shared mutable state. This structure enables decomposition into independent
1206
+ services when the pilot validates the approach.
1207
+
1208
+ ## Consequences
1209
+ - Prototype can be built, tested, and demonstrated in days, not weeks
1210
+ - Module boundaries enforce separation of concerns without deployment overhead
1211
+ - Migration to microservices requires extracting modules into packages — not rewriting
1212
+ \`\`\`
1213
+
1214
+ ## Tests
1215
+ - Test that demo.ts runs without errors (import and execute main function)
1216
+ - Test that it produces non-empty stdout output` },
1217
+ ];
1218
+ const existingSlugSet = new Set(derivedPhases.map(p => p.slug));
1219
+ for (const cc of crossCutting) {
1220
+ if (derivedPhases.length >= 12)
1221
+ break;
1222
+ if (existingSlugSet.has(cc.slug))
1223
+ continue;
1224
+ // Don't add if a phase already covers this topic
1225
+ if (derivedPhases.some(p => p.title.toLowerCase().includes(cc.slug.split('-')[0])))
1226
+ continue;
1227
+ derivedPhases.push({ ...cc, adrIds: [] });
1228
+ existingSlugSet.add(cc.slug);
1229
+ }
1230
+ console.error(` [PROMPTS] ${derivedPhases.length} phases derived (SPARC + ADRs + cross-cutting, minimum 10 per ADR-PIPELINE-031)`);
937
1231
  // ── Helper: extract DDD context names and summaries relevant to a phase ──
938
1232
  function getDddSummaryForPhase(phaseTitle, phaseContent) {
939
1233
  if (!dddContent)
@@ -1024,6 +1318,14 @@ The prompts must be production-grade — they will be given directly to a coding
1024
1318
  `**Target folder:** \`${phase.folder}/\``,
1025
1319
  '',
1026
1320
  ];
1321
+ // ADR-PIPELINE-033: Simulation lineage in every prompt
1322
+ if (simulationId || traceId) {
1323
+ lines.push('## Simulation Lineage', '');
1324
+ if (simulationId)
1325
+ lines.push(`Originating simulation: \`${simulationId}\``);
1326
+ lines.push(`Trace ID: \`${traceId}\``);
1327
+ lines.push('');
1328
+ }
1027
1329
  // ── Narrative description — the core of each prompt ──
1028
1330
  lines.push('## Overview', '', narrative, '');
1029
1331
  // Previously completed phases (brief list)
@@ -1075,8 +1377,8 @@ The prompts must be production-grade — they will be given directly to a coding
1075
1377
  if (dddSummary) {
1076
1378
  lines.push('## Domain Model Reference', '', dddSummary, '');
1077
1379
  }
1078
- // Implementation instructions
1079
- lines.push('## Implementation Requirements', '', '- All code must be CUSTOM to this project — implement the specific business logic described above', '- Follow the technology decisions from the ADRs referenced in the overview', '- Write production-quality code with proper error handling', '- Include unit tests (London School TDD — mock at module boundaries)', '- Export public interfaces so later build phases can import them', `- Place all files in the \`${phase.folder}/\` directory`, '');
1380
+ // Implementation instructions — ADR-PIPELINE-033: include traceability requirements
1381
+ lines.push('## Implementation Requirements', '', '- All code must be CUSTOM to this project — implement the specific business logic described above', '- Follow the technology decisions from the ADRs referenced in the overview', '- Write production-quality code with proper error handling', '- Include unit tests (London School TDD — mock at module boundaries)', '- Export public interfaces so later build phases can import them', `- Place all files in the \`${phase.folder}/\` directory`, '', '## Traceability Requirements (ADR-PIPELINE-033)', '', '- All domain types that represent decisions, recommendations, or ERP actions must include optional `simulationId?: string` and `traceId?: string` fields', '- Audit trail entries must include these IDs when available', '- ERP transaction payloads must carry `simulationId` and `traceId` for end-to-end lineage', `${simulationId ? `- Use simulation ID \`${simulationId}\` as the default value when creating records in this build step` : '- Simulation ID will be injected at runtime from the pipeline context'}`, '');
1080
1382
  fs.writeFileSync(path.join(promptsDir, filename), lines.join('\n'), { mode: 0o600, encoding: 'utf-8' });
1081
1383
  completedPhases.push(`${order}. ${phase.title}`);
1082
1384
  }
@@ -1087,7 +1389,17 @@ The prompts must be production-grade — they will be given directly to a coding
1087
1389
  folder: phase.folder,
1088
1390
  adrs: phase.adrIds,
1089
1391
  }));
1090
- fs.writeFileSync(path.join(promptsDir, 'execution-plan.json'), JSON.stringify({ totalSteps, prompts: planItems }, null, 2), { mode: 0o600, encoding: 'utf-8' });
1392
+ // ADR-PIPELINE-033: Include simulation lineage in execution plan
1393
+ const plan = {
1394
+ totalSteps,
1395
+ prompts: planItems,
1396
+ lineage: {
1397
+ simulationId: simulationId || undefined,
1398
+ traceId: traceId || undefined,
1399
+ pipelineVersion: '1.7.3',
1400
+ },
1401
+ };
1402
+ fs.writeFileSync(path.join(promptsDir, 'execution-plan.json'), JSON.stringify(plan, null, 2), { mode: 0o600, encoding: 'utf-8' });
1091
1403
  console.error(` [PROMPTS] Wrote ${totalSteps} implementation prompts derived from SPARC architecture + ADRs`);
1092
1404
  }
1093
1405
  copyPlanningArtifacts(runDir, projectRoot);
@@ -1217,6 +1529,7 @@ The prompts must be production-grade — they will be given directly to a coding
1217
1529
  // Check if phase5-manifest.json exists on disk — if so, allow Phase 6 to attempt degraded mode
1218
1530
  const phase5ManifestPath = path.join(phaseDir, 'phase5-manifest.json');
1219
1531
  if (!fs.existsSync(phase5ManifestPath)) {
1532
+ await ensureAdrsExist(runDir, traceId, scenarioQuery);
1220
1533
  return buildResult(traceId, runDir, phases, pipelineStart, mode);
1221
1534
  }
1222
1535
  console.error(' [INFO] Phase 5 manifest written — Phase 6 will attempt degraded mode.');
@@ -1273,6 +1586,7 @@ The prompts must be production-grade — they will be given directly to a coding
1273
1586
  console.error(` [FAIL] Phase 6 failed: ${errMsg}`);
1274
1587
  recordPhaseFailure(PHASE_AGENTS[6], traceId, errMsg);
1275
1588
  phases.push({ phase: 6, label: 'ERP Surface Push', status: 'failed', timing: Date.now() - phaseStart, artifacts: [], outputDir: phaseDir, error: errMsg });
1589
+ await ensureAdrsExist(runDir, traceId, scenarioQuery);
1276
1590
  return buildResult(traceId, runDir, phases, pipelineStart, mode);
1277
1591
  }
1278
1592
  }
@@ -1305,6 +1619,8 @@ The prompts must be production-grade — they will be given directly to a coding
1305
1619
  console.error(` [WARN] Materialization failed: ${errMsg}`);
1306
1620
  }
1307
1621
  }
1622
+ // ADR-028: Final ADR guarantee before returning
1623
+ await ensureAdrsExist(runDir, traceId, scenarioQuery);
1308
1624
  // ── Shutdown swarm and persist metrics ──
1309
1625
  const totalTiming = Date.now() - pipelineStart;
1310
1626
  const pipelineSuccess = phases.every(p => p.status === 'completed' || p.status === 'skipped');