@atlashub/smartstack-cli 3.0.0 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/.documentation/agents.html +1 -371
  2. package/.documentation/cli-commands.html +1 -1
  3. package/.documentation/commands.html +1 -1
  4. package/.documentation/efcore.html +1 -1
  5. package/.documentation/gitflow.html +1 -1
  6. package/.documentation/hooks.html +27 -66
  7. package/.documentation/index.html +166 -166
  8. package/.documentation/init.html +6 -7
  9. package/.documentation/installation.html +1 -1
  10. package/.documentation/prd-json-v2.0.0.md +396 -0
  11. package/.documentation/ralph-loop.html +1 -9
  12. package/.documentation/test-web.html +15 -39
  13. package/.documentation/testing-ba-e2e.md +462 -0
  14. package/dist/index.js +23 -16
  15. package/dist/index.js.map +1 -1
  16. package/package.json +6 -2
  17. package/templates/agents/gitflow/merge.md +56 -6
  18. package/templates/agents/gitflow/pr.md +70 -9
  19. package/templates/project/appsettings.json.template +8 -2
  20. package/templates/skills/business-analyse/SKILL.md +34 -17
  21. package/templates/skills/business-analyse/html/ba-interactive.html +147 -84
  22. package/templates/skills/business-analyse/questionnaire.md +20 -15
  23. package/templates/skills/business-analyse/steps/step-00-init.md +80 -57
  24. package/templates/skills/business-analyse/steps/step-03-specify.md +57 -0
  25. package/templates/skills/business-analyse/steps/step-05-handoff.md +480 -14
  26. package/templates/skills/business-analyse/steps/step-06-extract.md +131 -3
  27. package/templates/skills/gitflow/steps/step-pr.md +17 -5
  28. package/templates/skills/ralph-loop/SKILL.md +158 -33
  29. package/templates/skills/ralph-loop/steps/step-01-task.md +160 -18
  30. package/templates/skills/ralph-loop/steps/step-02-execute.md +408 -23
  31. package/templates/skills/ralph-loop/steps/step-03-commit.md +82 -0
  32. package/templates/skills/ralph-loop/steps/step-04-check.md +305 -9
  33. package/templates/skills/ralph-loop/steps/step-05-report.md +115 -0
@@ -34,6 +34,97 @@ const hasBlocked = tasksBlocked > 0;
  const hasPending = tasksPending > 0;
  ```

+ ### 1.5. REGRESSION CHECK (MANDATORY AFTER EACH ITERATION)
+
+ **CRITICAL: After EVERY task completion, run full test suite to detect regressions.**
+
+ ```bash
+ PROJECT_NAME=$(basename $(pwd))
+ TEST_PROJECT="tests/${PROJECT_NAME}.Tests.Unit"
+
+ if [ -d "$TEST_PROJECT" ]; then
+ echo "🔍 Running regression check (full test suite)..."
+
+ # Run ALL tests to ensure nothing broke
+ dotnet test "$TEST_PROJECT" --no-build --verbosity minimal --logger "console;verbosity=minimal"
+
+ REGRESSION_EXIT_CODE=$?
+
+ if [ $REGRESSION_EXIT_CODE -ne 0 ]; then
+ echo "╔════════════════════════════════════════════════════════════╗"
+ echo "║ ⚠️ REGRESSION DETECTED ║"
+ echo "╠════════════════════════════════════════════════════════════╣"
+ echo "║ A previously passing test is now failing. ║"
+ echo "║ This indicates the last change broke existing code. ║"
+ echo "╠════════════════════════════════════════════════════════════╣"
+ echo "║ CORRECTIVE ACTION: ║"
+ echo "║ 1. Identify which test(s) started failing ║"
+ echo "║ 2. Analyze what changed in last commit ║"
+ echo "║ 3. Fix the regression ║"
+ echo "║ 4. Commit the fix ║"
+ echo "║ 5. Ralph will continue automatically ║"
+ echo "╚════════════════════════════════════════════════════════════╝"
+
+ // Parse test output to identify which tests failed
+ const regressionTests = parseFailedTests(testOutput);
+
+ // Log regression details to progress.txt
+ const progressEntry = `
+ [REGRESSION DETECTED - Iteration ${prd.config.current_iteration}]
+ Failed tests: ${regressionTests.join(', ')}
+ Last completed task: ${prd.tasks.find(t => t.id === lastCompletedTaskId).description}
+ Commit: ${lastCommitHash}
+
+ ACTION REQUIRED: Fix regression before continuing.
+ `;
+ appendToFile('.ralph/progress.txt', progressEntry);
+
+ // Mark current state as having regression
+ prd.regression_detected = {
+ iteration: prd.config.current_iteration,
+ failed_tests: regressionTests,
+ last_task: lastCompletedTaskId,
+ commit_hash: lastCommitHash
+ };
+ writeJSON('.ralph/prd.json', prd);
+
+ // Create a new task to fix the regression
+ const fixTask = {
+ id: prd.tasks.length + 1,
+ description: `Fix regression: ${regressionTests.length} test(s) failing`,
+ status: 'pending',
+ category: 'validation',
+ dependencies: [],
+ acceptance_criteria: `All tests pass: ${regressionTests.join(', ')}`,
+ started_at: null,
+ completed_at: null,
+ iteration: null,
+ commit_hash: null,
+ files_changed: { created: [], modified: [] },
+ validation: null,
+ error: null
+ };
+ prd.tasks.push(fixTask);
+ prd.updated_at = new Date().toISOString();
+ writeJSON('.ralph/prd.json', prd);
+
+ // DO NOT STOP - continue to next task (which is the fix task)
+ echo "📋 Created task ${fixTask.id}: Fix regression";
+ } else {
+ echo "✅ Regression check passed. No tests broken.";
+
+ // Update test metrics in progress.txt
+ const testStats = parseTestStats(testOutput);
+ const metricsEntry = `
+ [Test Metrics - Iteration ${prd.config.current_iteration}]
+ Total: ${testStats.total} | Passed: ${testStats.passed} | Failed: ${testStats.failed} | Skipped: ${testStats.skipped}
+ Duration: ${testStats.duration}
+ `;
+ appendToFile('.ralph/progress.txt', metricsEntry);
+ }
+ fi
+ ```
+
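The helpers `parseFailedTests` and `parseTestStats` called in the added block are not defined anywhere in this diff. A minimal JavaScript sketch of what they could look like, assuming the console summary recent `dotnet test` versions print (e.g. `Failed! - Failed: 1, Passed: 10, Skipped: 0, Total: 11, Duration: 2 s`); the exact format varies by SDK version and logger:

```javascript
// Hypothetical helpers (not part of the package) for the regression check above.
// They assume plain `dotnet test` console output; adjust the patterns to your runner.

function parseFailedTests(testOutput) {
  // Failing tests are listed as lines like "  Failed Namespace.Class.Method [12 ms]"
  return [...testOutput.matchAll(/^\s*Failed\s+(\S+)/gm)].map(m => m[1]);
}

function parseTestStats(testOutput) {
  // Summary line: "Failed! - Failed: 1, Passed: 10, Skipped: 0, Total: 11, Duration: 2 s"
  const m = testOutput.match(
    /Failed:\s*(\d+),\s*Passed:\s*(\d+),\s*Skipped:\s*(\d+),\s*Total:\s*(\d+),\s*Duration:\s*([\d.]+\s*\w+)/
  );
  if (!m) return { total: 0, passed: 0, failed: 0, skipped: 0, duration: 'unknown' };
  return {
    failed: Number(m[1]),
    passed: Number(m[2]),
    skipped: Number(m[3]),
    total: Number(m[4]),
    duration: m[5]
  };
}
```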
  ### 2. Check Iteration Limit

  **If `prd.config.current_iteration` > `prd.config.max_iterations`:**
@@ -90,13 +181,25 @@ if (hasQueue) {
  queue.modules[nextIndex].status = 'in-progress';
  writeJSON(queuePath, queue);

+ // ✅ FIX #1: Create module transition marker
+ // This signals to step-01 that it MUST reload even on iteration > 1
+ writeJSON('.ralph/module-changed.json', {
+ fromModule: currentModule.code,
+ toModule: queue.modules[nextIndex].code,
+ timestamp: new Date().toISOString()
+ });
+
  // Mark current prd.json as completed
  prd.status = 'completed';
  prd.updated_at = new Date().toISOString();
  writeJSON('.ralph/prd.json', prd);

- // Reset iteration counter for next module
- // (preserve max_iterations from config)
+ // FIX #2: Load next module's PRD NOW and reset iteration counter
+ const nextModulePrd = readJSON(queue.modules[nextIndex].prdFile);
+ nextModulePrd.config.current_iteration = 1;
+ nextModulePrd.config.max_iterations = prd.config.max_iterations; // Preserve
+ nextModulePrd.updated_at = new Date().toISOString();
+ writeJSON('.ralph/prd.json', nextModulePrd);

  console.log(`
  ╔══════════════════════════════════════════════════════════════════╗
@@ -106,10 +209,13 @@ if (hasQueue) {
  ║ Modules: ${queue.completedModules} / ${queue.totalModules} ║
  ╠══════════════════════════════════════════════════════════════════╣
  ║ ADVANCING TO NEXT MODULE: ${queue.modules[nextIndex].code} ║
+ ║ Next module PRD loaded: ${queue.modules[nextIndex].prdFile} ║
+ ║ Iteration counter reset: 1 ║
  ╚══════════════════════════════════════════════════════════════════╝
  `);

- // Loop back to step-01 which will load next module's prd
+ // Loop back to step-01 which will detect module-changed.json
+ // and skip the "Only Read Once" rule for this transition
  // -> Proceed to step-01-task.md
  return;
  }
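The `module-changed.json` marker written above is consumed by step-01, which is not part of this hunk. A hedged sketch of the detection it implies, reusing the template's `readJSON` pseudo-helper plus assumed `fileExists`/`deleteFile` helpers (the real step-01 template may phrase this differently):

```javascript
// Hypothetical step-01 side of the transition (not shown in this diff):
// if a transition marker exists, reload prd.json even though iteration > 1,
// then remove the marker so later iterations fall back to the normal rule.
if (fileExists('.ralph/module-changed.json')) {
  const marker = readJSON('.ralph/module-changed.json');
  console.log(`Module transition detected: ${marker.fromModule} -> ${marker.toModule}`);
  prd = readJSON('.ralph/prd.json');   // bypass the "Only Read Once" rule for this iteration
  deleteFile('.ralph/module-changed.json');
}
```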
@@ -191,20 +297,200 @@ writeJSON('.ralph/prd.json', prd);

  **Proceed to step-05-report.md**

- ### 5. More Tasks Remaining
+ ### 5. More Tasks Remaining — COMPACT LOOP (CRITICAL)
+
+ > **MANDATORY RULE: DO NOT STOP. DO NOT WAIT FOR USER INPUT. DO NOT RE-READ STEP FILES.**
+ > After the first full iteration (step-01 → step-02 → step-03 → step-04), all subsequent iterations
+ > MUST use this COMPACT LOOP inline. Re-reading step files wastes context and causes the loop to stall.
+ > **STOPPING THE LOOP = BUG.** Only completion, max iterations, or dead-end stop the loop.

  **If hasPending AND iteration < max_iterations:**

+ Display compact progress (ONE line):
+ ```
+ [{current_iteration}/{max_iterations}] {tasksCompleted}/{tasksTotal} done | Next: finding eligible task...
  ```
- Progress: {tasksCompleted} / {tasksTotal} tasks
- Failed: {tasksFailed} | Blocked: {tasksBlocked} | Pending: {tasksPending}

- Next eligible task will be determined by dependency resolution.
+ **IMMEDIATELY execute the following inline loop. DO NOT re-read step files:**
+
+ #### 5a. Find Next Eligible Task (inline step-01)
+
+ ```javascript
+ const prd = readJSON('.ralph/prd.json');
+
+ // Block tasks whose dependencies failed
+ for (const task of prd.tasks) {
+ if (task.status !== 'pending') continue;
+ const depsBlocked = task.dependencies.some(depId => {
+ const dep = prd.tasks.find(t => t.id === depId);
+ return dep && (dep.status === 'failed' || dep.status === 'blocked');
+ });
+ if (depsBlocked) { task.status = 'blocked'; task.error = 'Blocked by failed dependency'; }
+ }

- Continuing to next iteration...
+ // Find ALL eligible tasks (dependencies met)
+ const eligible = prd.tasks.filter(task => {
+ if (task.status !== 'pending') return false;
+ return task.dependencies.every(depId => {
+ const dep = prd.tasks.find(t => t.id === depId);
+ return dep && dep.status === 'completed';
+ });
+ });
+
+ if (eligible.length === 0) {
+ // Dead-end or all done — re-run sections 2-4 above
+ goto CHECK_COMPLETION;
+ }
+
+ // BATCH MODE: group eligible tasks by category, take the first group
+ const firstCategory = eligible[0].category;
+ const batch = eligible.filter(t => t.category === firstCategory);
+ // Cap batch at 5 tasks max to keep atomic
+ const tasksToExecute = batch.slice(0, 5);
+ ```
+
+ Display:
+ ```
+ Batch: {tasksToExecute.length} task(s) [{firstCategory}]
+ {tasksToExecute.map(t => `[${t.id}] ${t.description}`).join('\n ')}
+ ```
+
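As a worked example of the batch selection above (sample task data, not taken from the package): the first eligible task's category determines the batch, and the batch is capped at five tasks.

```javascript
// Illustration only: four eligible tasks in two categories.
const eligible = [
  { id: 7,  category: 'api',      description: 'Create OrdersController' },
  { id: 8,  category: 'api',      description: 'Create InvoicesController' },
  { id: 9,  category: 'frontend', description: 'Orders list page' },
  { id: 10, category: 'api',      description: 'Create CustomersController' }
];
const firstCategory = eligible[0].category;                        // 'api'
const batch = eligible.filter(t => t.category === firstCategory);  // tasks 7, 8, 10
const tasksToExecute = batch.slice(0, 5);                          // still 3 tasks (cap is 5)
// Task 9 ('frontend') waits for a later iteration, keeping each commit single-category.
```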
+ #### 5b. Execute Batch (inline step-02)
+
+ **For EACH task in tasksToExecute:**
+
+ 1. Mark `task.status = 'in_progress'`, `task.started_at = now`
+ 2. ULTRA THINK: implement the task following SmartStack conventions
+ - Track files_created and files_modified per task
+ 3. **MANDATORY TEST-DRIVEN CYCLE (per task):**
+ ```
+ ┌─────────────────────────────────────────────────────────┐
+ │ TASK EXECUTION CYCLE (100% tests pass required) │
+ ├─────────────────────────────────────────────────────────┤
+ │ │
+ │ 1. GENERATE CODE │
+ │ ├─ Backend: Entity, Service, Repository, Controller│
+ │ ├─ Frontend: Page, Component, Hook │
+ │ ├─ Tests: Unit tests + Integration tests │
+ │ └─ Docs: Inline comments + tooltips + i18n │
+ │ │
+ │ 2. COMPILE CODE │
+ │ ├─ Backend: dotnet build --no-restore │
+ │ └─ Frontend: npm run typecheck │
+ │ │
+ │ 3. RUN TESTS (MANDATORY) │
+ │ ├─ Backend: dotnet test {TestProject} │
+ │ ├─ Frontend: npm test (if applicable) │
+ │ └─ Security: tenant isolation + RBAC checks │
+ │ │
+ │ 4. CHECK RESULT │
+ │ ├─ IF 100% PASS → Mark task completed, next task │
+ │ └─ IF ANY FAIL → Go to step 5 │
+ │ │
+ │ 5. FIX ERRORS (loop until pass) │
+ │ ├─ Parse test output → identify failing tests │
+ │ ├─ Analyze root cause (stack trace, logs) │
+ │ ├─ Fix code (NOT test - fix implementation) │
+ │ ├─ Re-compile (step 2) │
+ │ └─ Re-run tests (step 3) → loop until 100% pass │
+ │ │
+ │ 🎯 Goal: ZERO test failures before next task │
+ │ 📊 Coverage: 95-100% (tests auto-generated per task) │
+ │ │
+ └─────────────────────────────────────────────────────────┘
+ ```
+
+ **Implementation per task:**
+ a. Generate code + tests
+ b. Compile: `dotnet build` or `npm run typecheck`
+ c. Run tests: `dotnet test {TestProject}` or `npm test`
+ d. Parse test output → count passed/failed/skipped
+ e. IF failed > 0:
+ - Log failure details to progress.txt
+ - Analyze error (stack trace, assertion message)
+ - Fix code (modify implementation, NOT test)
+ - Re-compile
+ - Re-run tests
+ - Repeat until passed = total (100%)
+ f. IF passed = total → task.status = 'completed'
+
+ 4. Verify acceptance criteria
+ 5. If task execution failed (not tests, but code generation): set `task.status = 'failed'`, `task.error = reason`, continue to next task in batch
+
+ **CATEGORY-SPECIFIC EXECUTION RULES:**
+
+ **IF category = "frontend":** Follow MCP-FIRST protocol (MANDATORY):
+ 1. Call `mcp__smartstack__scaffold_api_client` → generates API client + types
+ 2. Call `mcp__smartstack__scaffold_routes` → updates routes inside Layout wrapper
+ 3. Create pages using SmartStack components (SmartTable, EntityCard, SmartForm, SmartFilter)
+ 4. CSS variables ONLY (NO `bg-blue-600`, use `bg-[var(--color-accent-600)]`)
+ 5. `EntityCard` for grids, `SmartTable` for lists (NO HTML `<table>` or custom `<div>` cards)
+ 6. All pages MUST have loading/error/empty states
+ 7. API client uses `@/services/api/apiClient` (NOT axios)
+ 8. Generate 4-language i18n (fr, en, it, de)
+ 9. `npm run typecheck` MUST pass
+
+ **IF category = "infrastructure":** Seed data in `Infrastructure/Persistence/Seeding/Data/{Module}/`
+
+ **IF category = "api":** Controllers in `Api/Controllers/{Context}/{App}/{Entity}Controller.cs`
+
+ After ALL tasks in batch executed:
+ - Run `mcp__smartstack__validate_conventions` ONCE for the whole batch
+ - Quick build check: `dotnet build --no-restore` (backend) or `npm run typecheck` (frontend)
+
+ #### 5c. Commit Batch (inline step-03)
+
+ ```bash
+ # Stage all changed files
+ git add {all files from batch}
+ git add .ralph/prd.json
+
+ # Single commit for the batch
+ git commit -m "$(cat <<'EOF'
+ feat({scope}): [{firstCategory}] {tasksToExecute.length} tasks — {short summary}
+
+ Tasks: {tasksToExecute.map(t => t.id).join(', ')} / {tasks_total}
+ Iteration: {current_iteration}
+ {current_module ? "Module: " + current_module : ""}
+
+ Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
+ EOF
+ )"
+
+ COMMIT_HASH=$(git rev-parse --short HEAD)
+ ```
+
+ **Finalize each task in prd.json:**
+ ```javascript
+ for (const task of tasksToExecute) {
+ if (task.status !== 'failed') task.status = 'completed';
+ task.completed_at = now;
+ task.iteration = prd.config.current_iteration;
+ task.commit_hash = COMMIT_HASH;
+ }
+ prd.history.push({
+ iteration: prd.config.current_iteration,
+ task_ids: tasksToExecute.map(t => t.id),
+ action: 'batch-completed',
+ timestamp: now,
+ commit_hash: COMMIT_HASH,
+ notes: "{What was accomplished}"
+ });
+ prd.config.current_iteration++;
+ prd.updated_at = now;
+ writeJSON('.ralph/prd.json', prd);
  ```

- **Loop back to step-01-task.md**
+ ```bash
+ git add .ralph/prd.json .ralph/progress.txt
+ [ -f .ralph/modules-queue.json ] && git add .ralph/modules-queue.json
+ git commit -m "chore(ralph): progress — iteration {current_iteration}"
+ ```
+
+ #### 5d. Re-check Completion (loop back to section 1)
+
+ **IMMEDIATELY go back to section 1 (Read Current State) of THIS step.**
+ **DO NOT stop. DO NOT wait for user. DO NOT re-read step files.**

  ---

@@ -300,6 +586,16 @@ Completion Check:

  **Always update `prd.status` and `prd.updated_at` before proceeding.**

+ **LOOP CONTINUATION IS MANDATORY:**
+ - After the first full iteration (step-01→02→03→04), ALL subsequent iterations use the COMPACT LOOP in section 5.
+ - DO NOT re-read step-01, step-02, step-03 files. You already know the instructions.
+ - DO NOT stop and wait for user input between iterations.
+ - DO NOT output a summary and pause. The loop is AUTONOMOUS.
+ - The ONLY reasons to stop: completion, max iterations, dead-end, or user interruption.
+ - Stopping for any other reason is a **BUG** that wastes user time and context.
+ - **BATCH tasks of the same category** to reduce iterations (max 5 per batch).
+ - Prefer compact output (1-2 lines per task) over verbose output during the loop.
+
  ---

  ## NEXT STEP:
@@ -123,6 +123,86 @@ if (hasQueue) {
  }
  ```

+ ### 1c. Extract Test Metrics (from progress.txt and test execution)
+
+ **CRITICAL: Test metrics MUST be included in the final report.**
+
+ ```bash
+ PROJECT_NAME=$(basename $(pwd))
+ TEST_PROJECT="tests/${PROJECT_NAME}.Tests.Unit"
+
+ testMetrics={
+ projectExists: false,
+ testsExecuted: false,
+ lastRunStatus: "unknown",
+ totalTests: 0,
+ passed: 0,
+ failed: 0,
+ skipped: 0,
+ coverage: 0,
+ duration: 0
+ };
+
+ if [ -d "$TEST_PROJECT" ]; then
+ testMetrics.projectExists = true;
+
+ # Extract latest test metrics from progress.txt
+ if [ -f ".ralph/progress.txt" ]; then
+ # Parse last "Test Metrics" entry
+ LAST_METRICS=$(grep -A 3 "\[Test Metrics" .ralph/progress.txt | tail -4)
+
+ # Extract values using regex
+ TOTAL=$(echo "$LAST_METRICS" | grep -oP "Total: \K\d+")
+ PASSED=$(echo "$LAST_METRICS" | grep -oP "Passed: \K\d+")
+ FAILED=$(echo "$LAST_METRICS" | grep -oP "Failed: \K\d+")
+ SKIPPED=$(echo "$LAST_METRICS" | grep -oP "Skipped: \K\d+")
+ DURATION=$(echo "$LAST_METRICS" | grep -oP "Duration: \K[\d\.]+")
+
+ if [ -n "$TOTAL" ]; then
+ testMetrics.testsExecuted = true;
+ testMetrics.totalTests = $TOTAL;
+ testMetrics.passed = $PASSED;
+ testMetrics.failed = $FAILED;
+ testMetrics.skipped = $SKIPPED;
+ testMetrics.duration = $DURATION;
+ testMetrics.lastRunStatus = [ $FAILED -eq 0 ] ? "passed" : "failed";
+ fi
+ fi
+
+ # If no metrics in progress.txt, run tests now to get final stats
+ if [ "$testMetrics.testsExecuted" = false ]; then
+ echo "Running final test suite to collect metrics...";
+ TEST_OUTPUT=$(dotnet test "$TEST_PROJECT" --no-build --verbosity minimal 2>&1);
+ TEST_EXIT_CODE=$?;
+
+ testMetrics.testsExecuted = true;
+ testMetrics.lastRunStatus = [ $TEST_EXIT_CODE -eq 0 ] ? "passed" : "failed";
+
+ # Parse test output
+ testMetrics.totalTests = parseTestCount(TEST_OUTPUT);
+ testMetrics.passed = parsePassedCount(TEST_OUTPUT);
+ testMetrics.failed = parseFailedCount(TEST_OUTPUT);
+ testMetrics.skipped = parseSkippedCount(TEST_OUTPUT);
+ testMetrics.duration = parseDuration(TEST_OUTPUT);
+ fi
+
+ # Get coverage using MCP
+ const coverageResult = mcp__smartstack__analyze_test_coverage({
+ project_path: process.cwd()
+ });
+
+ if (coverageResult && coverageResult.percentage) {
+ testMetrics.coverage = coverageResult.percentage;
+ }
+ fi
+ ```
+
+ **Store metrics for report generation:**
+
+ ```javascript
+ stats.tests = testMetrics;
+ ```
+
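The block above is pseudo-code that mixes shell and JavaScript notation. A JavaScript-only sketch of the progress.txt fallback path, assuming the `[Test Metrics - Iteration N]` entry format appended by the regression check in step-04 (Node's `fs` module is an assumption; the template's own file helpers could be used instead):

```javascript
// Sketch (not part of the package): read the most recent "[Test Metrics ...]" entry.
const fs = require('fs');

function readLastTestMetrics(progressPath = '.ralph/progress.txt') {
  if (!fs.existsSync(progressPath)) return null;
  const text = fs.readFileSync(progressPath, 'utf8');
  // Each entry spans a header line, a counts line, and a duration line.
  const entries = text.match(/\[Test Metrics[^\]]*\][\s\S]*?Duration:[^\n]*/g);
  if (!entries || entries.length === 0) return null;
  const last = entries[entries.length - 1];
  const num = (label) => {
    const m = last.match(new RegExp(label + ':\\s*(\\d+)'));
    return m ? Number(m[1]) : 0;
  };
  return {
    totalTests: num('Total'),
    passed: num('Passed'),
    failed: num('Failed'),
    skipped: num('Skipped'),
    duration: (last.match(/Duration:\s*(\S+)/) || [])[1] || '0'
  };
}
```

Coverage is not recoverable from progress.txt in this sketch; it would still come from the coverage call shown above or a separate coverage tool.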
  ### 2. Collect MCP Usage (from logs if available)

  **Parse from verbose logs:**
@@ -187,6 +267,41 @@ const validationStats = {

  **Modules: {moduleStats.completedModules}/{moduleStats.totalModules} completed**
  {end if}
+ ## Test Metrics
+
+ {if testMetrics.projectExists:}
+ | Metric | Value |
+ |--------|-------|
+ | **Test Project** | ✅ `{TEST_PROJECT}` |
+ | **Tests Executed** | {testMetrics.testsExecuted ? '✅ Yes' : '❌ No'} |
+ | **Last Run Status** | {testMetrics.lastRunStatus === 'passed' ? '✅ PASSED' : testMetrics.lastRunStatus === 'failed' ? '❌ FAILED' : '⚠️ UNKNOWN'} |
+ | **Total Tests** | {testMetrics.totalTests} |
+ | **Passed** | ✅ {testMetrics.passed} |
+ | **Failed** | ❌ {testMetrics.failed} |
+ | **Skipped** | ⏭️ {testMetrics.skipped} |
+ | **Coverage** | {testMetrics.coverage}% {testMetrics.coverage >= 80 ? '✅' : testMetrics.coverage >= 60 ? '⚠️' : '❌'} |
+ | **Duration** | {testMetrics.duration}s |
+
+ {if testMetrics.failed > 0:}
+ ⚠️ **WARNING:** Some tests are failing. The module is NOT production-ready.
+ {end if}
+
+ {if testMetrics.coverage < 80:}
+ ⚠️ **WARNING:** Test coverage is below the 80% minimum. Consider adding more tests.
+ {end if}
+
+ {else:}
+ ❌ **No test project found.** Tests were not created for this module.
+
+ **RECOMMENDATION:** Create a test project and add comprehensive tests before deploying to production.
+
+ Suggested command:
+ ```bash
+ dotnet new xunit -n {PROJECT_NAME}.Tests.Unit -o tests/{PROJECT_NAME}.Tests.Unit
+ dotnet sln add tests/{PROJECT_NAME}.Tests.Unit
+ ```
+ {end if}
+
  ## Failed Tasks

  {if any failed tasks:}