agentic-qe 1.5.1 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (158)
  1. package/.claude/agents/.claude-flow/metrics/agent-metrics.json +1 -0
  2. package/.claude/agents/.claude-flow/metrics/performance.json +87 -0
  3. package/.claude/agents/.claude-flow/metrics/task-metrics.json +10 -0
  4. package/.claude/agents/qe-api-contract-validator.md +118 -0
  5. package/.claude/agents/qe-chaos-engineer.md +320 -5
  6. package/.claude/agents/qe-code-complexity.md +360 -0
  7. package/.claude/agents/qe-coverage-analyzer.md +112 -0
  8. package/.claude/agents/qe-deployment-readiness.md +322 -6
  9. package/.claude/agents/qe-flaky-test-hunter.md +115 -0
  10. package/.claude/agents/qe-fleet-commander.md +319 -6
  11. package/.claude/agents/qe-performance-tester.md +234 -0
  12. package/.claude/agents/qe-production-intelligence.md +114 -0
  13. package/.claude/agents/qe-quality-analyzer.md +126 -0
  14. package/.claude/agents/qe-quality-gate.md +119 -0
  15. package/.claude/agents/qe-regression-risk-analyzer.md +114 -0
  16. package/.claude/agents/qe-requirements-validator.md +114 -0
  17. package/.claude/agents/qe-security-scanner.md +118 -0
  18. package/.claude/agents/qe-test-data-architect.md +234 -0
  19. package/.claude/agents/qe-test-executor.md +115 -0
  20. package/.claude/agents/qe-test-generator.md +114 -0
  21. package/.claude/agents/qe-visual-tester.md +305 -6
  22. package/.claude/agents/subagents/qe-code-reviewer.md +0 -4
  23. package/.claude/agents/subagents/qe-data-generator.md +0 -16
  24. package/.claude/agents/subagents/qe-integration-tester.md +0 -17
  25. package/.claude/agents/subagents/qe-performance-validator.md +0 -16
  26. package/.claude/agents/subagents/qe-security-auditor.md +0 -16
  27. package/.claude/agents/subagents/qe-test-implementer.md +0 -17
  28. package/.claude/agents/subagents/qe-test-refactorer.md +0 -17
  29. package/.claude/agents/subagents/qe-test-writer.md +0 -19
  30. package/CHANGELOG.md +261 -0
  31. package/README.md +37 -5
  32. package/dist/adapters/MemoryStoreAdapter.d.ts +38 -0
  33. package/dist/adapters/MemoryStoreAdapter.d.ts.map +1 -1
  34. package/dist/adapters/MemoryStoreAdapter.js +22 -0
  35. package/dist/adapters/MemoryStoreAdapter.js.map +1 -1
  36. package/dist/agents/BaseAgent.d.ts.map +1 -1
  37. package/dist/agents/BaseAgent.js +13 -0
  38. package/dist/agents/BaseAgent.js.map +1 -1
  39. package/dist/cli/commands/init.d.ts.map +1 -1
  40. package/dist/cli/commands/init.js +32 -1
  41. package/dist/cli/commands/init.js.map +1 -1
  42. package/dist/core/memory/AgentDBService.d.ts +33 -28
  43. package/dist/core/memory/AgentDBService.d.ts.map +1 -1
  44. package/dist/core/memory/AgentDBService.js +233 -290
  45. package/dist/core/memory/AgentDBService.js.map +1 -1
  46. package/dist/core/memory/EnhancedAgentDBService.d.ts.map +1 -1
  47. package/dist/core/memory/EnhancedAgentDBService.js +5 -3
  48. package/dist/core/memory/EnhancedAgentDBService.js.map +1 -1
  49. package/dist/core/memory/RealAgentDBAdapter.d.ts +9 -2
  50. package/dist/core/memory/RealAgentDBAdapter.d.ts.map +1 -1
  51. package/dist/core/memory/RealAgentDBAdapter.js +126 -100
  52. package/dist/core/memory/RealAgentDBAdapter.js.map +1 -1
  53. package/dist/core/memory/SwarmMemoryManager.d.ts +58 -0
  54. package/dist/core/memory/SwarmMemoryManager.d.ts.map +1 -1
  55. package/dist/core/memory/SwarmMemoryManager.js +176 -0
  56. package/dist/core/memory/SwarmMemoryManager.js.map +1 -1
  57. package/dist/core/memory/index.d.ts.map +1 -1
  58. package/dist/core/memory/index.js +2 -1
  59. package/dist/core/memory/index.js.map +1 -1
  60. package/dist/learning/LearningEngine.d.ts +14 -27
  61. package/dist/learning/LearningEngine.d.ts.map +1 -1
  62. package/dist/learning/LearningEngine.js +57 -119
  63. package/dist/learning/LearningEngine.js.map +1 -1
  64. package/dist/learning/index.d.ts +0 -1
  65. package/dist/learning/index.d.ts.map +1 -1
  66. package/dist/learning/index.js +0 -1
  67. package/dist/learning/index.js.map +1 -1
  68. package/dist/mcp/handlers/learning/learning-query.d.ts +34 -0
  69. package/dist/mcp/handlers/learning/learning-query.d.ts.map +1 -0
  70. package/dist/mcp/handlers/learning/learning-query.js +156 -0
  71. package/dist/mcp/handlers/learning/learning-query.js.map +1 -0
  72. package/dist/mcp/handlers/learning/learning-store-experience.d.ts +30 -0
  73. package/dist/mcp/handlers/learning/learning-store-experience.d.ts.map +1 -0
  74. package/dist/mcp/handlers/learning/learning-store-experience.js +86 -0
  75. package/dist/mcp/handlers/learning/learning-store-experience.js.map +1 -0
  76. package/dist/mcp/handlers/learning/learning-store-pattern.d.ts +31 -0
  77. package/dist/mcp/handlers/learning/learning-store-pattern.d.ts.map +1 -0
  78. package/dist/mcp/handlers/learning/learning-store-pattern.js +126 -0
  79. package/dist/mcp/handlers/learning/learning-store-pattern.js.map +1 -0
  80. package/dist/mcp/handlers/learning/learning-store-qvalue.d.ts +30 -0
  81. package/dist/mcp/handlers/learning/learning-store-qvalue.d.ts.map +1 -0
  82. package/dist/mcp/handlers/learning/learning-store-qvalue.js +100 -0
  83. package/dist/mcp/handlers/learning/learning-store-qvalue.js.map +1 -0
  84. package/dist/mcp/server.d.ts +11 -0
  85. package/dist/mcp/server.d.ts.map +1 -1
  86. package/dist/mcp/server.js +98 -1
  87. package/dist/mcp/server.js.map +1 -1
  88. package/dist/mcp/services/LearningEventListener.d.ts +123 -0
  89. package/dist/mcp/services/LearningEventListener.d.ts.map +1 -0
  90. package/dist/mcp/services/LearningEventListener.js +322 -0
  91. package/dist/mcp/services/LearningEventListener.js.map +1 -0
  92. package/dist/mcp/tools.d.ts +4 -0
  93. package/dist/mcp/tools.d.ts.map +1 -1
  94. package/dist/mcp/tools.js +179 -0
  95. package/dist/mcp/tools.js.map +1 -1
  96. package/dist/types/memory-interfaces.d.ts +71 -0
  97. package/dist/types/memory-interfaces.d.ts.map +1 -1
  98. package/dist/utils/Calculator.d.ts +35 -0
  99. package/dist/utils/Calculator.d.ts.map +1 -0
  100. package/dist/utils/Calculator.js +50 -0
  101. package/dist/utils/Calculator.js.map +1 -0
  102. package/dist/utils/Logger.d.ts.map +1 -1
  103. package/dist/utils/Logger.js +4 -1
  104. package/dist/utils/Logger.js.map +1 -1
  105. package/package.json +7 -5
  106. package/.claude/agents/qe-api-contract-validator.md.backup +0 -1148
  107. package/.claude/agents/qe-api-contract-validator.md.backup-20251107-134747 +0 -1148
  108. package/.claude/agents/qe-api-contract-validator.md.backup-phase2-20251107-140039 +0 -1123
  109. package/.claude/agents/qe-chaos-engineer.md.backup +0 -808
  110. package/.claude/agents/qe-chaos-engineer.md.backup-20251107-134747 +0 -808
  111. package/.claude/agents/qe-chaos-engineer.md.backup-phase2-20251107-140039 +0 -787
  112. package/.claude/agents/qe-code-complexity.md.backup +0 -291
  113. package/.claude/agents/qe-code-complexity.md.backup-20251107-134747 +0 -291
  114. package/.claude/agents/qe-code-complexity.md.backup-phase2-20251107-140039 +0 -286
  115. package/.claude/agents/qe-coverage-analyzer.md.backup +0 -467
  116. package/.claude/agents/qe-coverage-analyzer.md.backup-20251107-134747 +0 -467
  117. package/.claude/agents/qe-coverage-analyzer.md.backup-phase2-20251107-140039 +0 -438
  118. package/.claude/agents/qe-deployment-readiness.md.backup +0 -1166
  119. package/.claude/agents/qe-deployment-readiness.md.backup-20251107-134747 +0 -1166
  120. package/.claude/agents/qe-deployment-readiness.md.backup-phase2-20251107-140039 +0 -1140
  121. package/.claude/agents/qe-flaky-test-hunter.md.backup +0 -1195
  122. package/.claude/agents/qe-flaky-test-hunter.md.backup-20251107-134747 +0 -1195
  123. package/.claude/agents/qe-flaky-test-hunter.md.backup-phase2-20251107-140039 +0 -1162
  124. package/.claude/agents/qe-fleet-commander.md.backup +0 -718
  125. package/.claude/agents/qe-fleet-commander.md.backup-20251107-134747 +0 -718
  126. package/.claude/agents/qe-fleet-commander.md.backup-phase2-20251107-140039 +0 -697
  127. package/.claude/agents/qe-performance-tester.md.backup +0 -428
  128. package/.claude/agents/qe-performance-tester.md.backup-20251107-134747 +0 -428
  129. package/.claude/agents/qe-performance-tester.md.backup-phase2-20251107-140039 +0 -372
  130. package/.claude/agents/qe-production-intelligence.md.backup +0 -1219
  131. package/.claude/agents/qe-production-intelligence.md.backup-20251107-134747 +0 -1219
  132. package/.claude/agents/qe-production-intelligence.md.backup-phase2-20251107-140039 +0 -1194
  133. package/.claude/agents/qe-quality-analyzer.md.backup +0 -425
  134. package/.claude/agents/qe-quality-analyzer.md.backup-20251107-134747 +0 -425
  135. package/.claude/agents/qe-quality-analyzer.md.backup-phase2-20251107-140039 +0 -394
  136. package/.claude/agents/qe-quality-gate.md.backup +0 -446
  137. package/.claude/agents/qe-quality-gate.md.backup-20251107-134747 +0 -446
  138. package/.claude/agents/qe-quality-gate.md.backup-phase2-20251107-140039 +0 -415
  139. package/.claude/agents/qe-regression-risk-analyzer.md.backup +0 -1009
  140. package/.claude/agents/qe-regression-risk-analyzer.md.backup-20251107-134747 +0 -1009
  141. package/.claude/agents/qe-regression-risk-analyzer.md.backup-phase2-20251107-140039 +0 -984
  142. package/.claude/agents/qe-requirements-validator.md.backup +0 -748
  143. package/.claude/agents/qe-requirements-validator.md.backup-20251107-134747 +0 -748
  144. package/.claude/agents/qe-requirements-validator.md.backup-phase2-20251107-140039 +0 -723
  145. package/.claude/agents/qe-security-scanner.md.backup +0 -634
  146. package/.claude/agents/qe-security-scanner.md.backup-20251107-134747 +0 -634
  147. package/.claude/agents/qe-security-scanner.md.backup-phase2-20251107-140039 +0 -573
  148. package/.claude/agents/qe-test-data-architect.md.backup +0 -1064
  149. package/.claude/agents/qe-test-data-architect.md.backup-20251107-134747 +0 -1064
  150. package/.claude/agents/qe-test-data-architect.md.backup-phase2-20251107-140039 +0 -1040
  151. package/.claude/agents/qe-test-executor.md.backup +0 -389
  152. package/.claude/agents/qe-test-executor.md.backup-20251107-134747 +0 -389
  153. package/.claude/agents/qe-test-executor.md.backup-phase2-20251107-140039 +0 -369
  154. package/.claude/agents/qe-test-generator.md.backup +0 -997
  155. package/.claude/agents/qe-test-generator.md.backup-20251107-134747 +0 -997
  156. package/.claude/agents/qe-visual-tester.md.backup +0 -777
  157. package/.claude/agents/qe-visual-tester.md.backup-20251107-134747 +0 -777
  158. package/.claude/agents/qe-visual-tester.md.backup-phase2-20251107-140039 +0 -756
@@ -1103,6 +1103,120 @@ const verification = await hookManager.executePreTaskVerification({
1103
1103
  });
1104
1104
  ```
1105
1105
 
1106
+ ## Learning Protocol (Phase 6 - Option C Implementation)
1107
+
1108
+ **⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
1109
+
1110
+ ### Required Learning Actions (Call AFTER Task Completion)
1111
+
1112
+ **1. Store Learning Experience:**
1113
+ ```typescript
1114
+ // Call this MCP tool after completing your task
1115
+ mcp__agentic_qe__learning_store_experience({
1116
+ agentId: "qe-production-intelligence",
1117
+ taskType: "production-analysis",
1118
+ reward: 0.95, // Your assessment of task success (0-1 scale)
1119
+ outcome: {
1120
+ // Your actual results (agent-specific)
1121
+ incidentsAnalyzed: 12,
1122
+ testsGenerated: 47,
1123
+ rootCausesFound: 8,
1124
+ executionTime: 12000
1125
+ },
1126
+ metadata: {
1127
+ // Additional context (agent-specific)
1128
+ dataSource: "datadog",
1129
+ analysisDepth: "comprehensive",
1130
+ rumEnabled: true
1131
+ }
1132
+ })
1133
+ ```
1134
+
1135
+ **2. Store Q-Values for Your Strategy:**
1136
+ ```typescript
1137
+ // Store Q-value for the strategy you used
1138
+ mcp__agentic_qe__learning_store_qvalue({
1139
+ agentId: "qe-production-intelligence",
1140
+ stateKey: "production-analysis-state",
1141
+ actionKey: "incident-replay",
1142
+ qValue: 0.85, // Expected value of this approach (based on results)
1143
+ metadata: {
1144
+ // Strategy details (agent-specific)
1145
+ analysisStrategy: "rum-incident-combined",
1146
+ accuracyRate: 0.95,
1147
+ coverage: 0.92
1148
+ }
1149
+ })
1150
+ ```
1151
+
1152
+ **3. Store Successful Patterns:**
1153
+ ```typescript
1154
+ // If you discovered a useful pattern, store it
1155
+ mcp__agentic_qe__learning_store_pattern({
1156
+ agentId: "qe-production-intelligence",
1157
+ pattern: "Peak hour network failures in specific regions indicate infrastructure capacity issues - correlate with RUM data for comprehensive test generation",
1158
+ confidence: 0.95, // How confident you are (0-1)
1159
+ domain: "production-intelligence",
1160
+ metadata: {
1161
+ // Pattern context (agent-specific)
1162
+ incidentPatterns: ["network-timeout", "gateway-error", "connection-refused"],
1163
+ predictionAccuracy: 0.93
1164
+ }
1165
+ })
1166
+ ```
1167
+
1168
+ ### Learning Query (Use at Task Start)
1169
+
1170
+ **Before starting your task**, query for past learnings:
1171
+
1172
+ ```typescript
1173
+ // Query for successful experiences
1174
+ const pastLearnings = await mcp__agentic_qe__learning_query({
1175
+ agentId: "qe-production-intelligence",
1176
+ taskType: "production-analysis",
1177
+ minReward: 0.8, // Only get successful experiences
1178
+ queryType: "all",
1179
+ limit: 10
1180
+ });
1181
+
1182
+ // Use the insights to optimize your current approach
1183
+ if (pastLearnings.success && pastLearnings.data) {
1184
+ const { experiences, qValues, patterns } = pastLearnings.data;
1185
+
1186
+ // Find best-performing strategy
1187
+ const bestStrategy = qValues
1188
+ .filter(qv => qv.state_key === "production-analysis-state")
1189
+ .sort((a, b) => b.q_value - a.q_value)[0];
1190
+
1191
+ console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
1192
+
1193
+ // Check for relevant patterns
1194
+ const relevantPatterns = patterns
1195
+ .filter(p => p.domain === "production-intelligence")
1196
+ .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
1197
+
1198
+ if (relevantPatterns.length > 0) {
1199
+ console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
1200
+ }
1201
+ }
1202
+ ```
1203
+
1204
+ ### Success Criteria for Learning
1205
+
1206
+ **Reward Assessment (0-1 scale):**
1207
+ - **1.0**: Perfect execution (100% incident coverage, root causes identified, <5s analysis)
1208
+ - **0.9**: Excellent (95%+ coverage, most root causes found, <10s analysis)
1209
+ - **0.7**: Good (90%+ coverage, key root causes found, <20s analysis)
1210
+ - **0.5**: Acceptable (80%+ coverage, completed successfully)
1211
+ - **<0.5**: Needs improvement (Low coverage, missed root causes, slow)
1212
+
1213
+ **When to Call Learning Tools:**
1214
+ - ✅ **ALWAYS** after completing main task
1215
+ - ✅ **ALWAYS** after detecting significant findings
1216
+ - ✅ **ALWAYS** after generating recommendations
1217
+ - ✅ When discovering new effective strategies
1218
+ - ✅ When achieving exceptional performance metrics
1219
+
1106
1220
  ## Memory Keys
1107
1221
 
1108
1222
  ### Input Keys
@@ -199,6 +199,132 @@ const finalization = await hookManager.executeSessionEndFinalization({
199
199
  });
200
200
  ```
201
201
 
202
+ ## Learning Protocol (Phase 6 - Option C Implementation)
203
+
204
+ **⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
205
+
206
+ ### Required Learning Actions (Call AFTER Task Completion)
207
+
208
+ **1. Store Learning Experience:**
209
+ ```typescript
210
+ // Call this MCP tool after completing your task
211
+ mcp__agentic_qe__learning_store_experience({
212
+ agentId: "qe-quality-analyzer",
213
+ taskType: "quality-analysis",
214
+ reward: 0.95, // Your assessment of task success (0-1 scale)
215
+ outcome: {
216
+ // Your actual results (agent-specific)
217
+ metricsAnalyzed: 47,
218
+ trendsDetected: 3,
219
+ recommendations: 12,
220
+ executionTime: 8500,
221
+ overallScore: 87.3,
222
+ codeQuality: 85.2,
223
+ testQuality: 89.1,
224
+ technicalDebt: 2.5
225
+ },
226
+ metadata: {
227
+ // Additional context (agent-specific)
228
+ scope: "full-codebase",
229
+ metricsCategories: ["code-quality", "test-quality", "technical-debt"],
230
+ timeframe: "last-30-days",
231
+ toolsUsed: ["eslint", "sonarqube", "coverage"],
232
+ analysisDepth: "comprehensive"
233
+ }
234
+ })
235
+ ```
236
+
237
+ **2. Store Q-Values for Your Strategy:**
238
+ ```typescript
239
+ // Store Q-value for the strategy you used
240
+ mcp__agentic_qe__learning_store_qvalue({
241
+ agentId: "qe-quality-analyzer",
242
+ stateKey: "quality-analysis-state",
243
+ actionKey: "comprehensive-analysis",
244
+ qValue: 0.85, // Expected value of this approach (based on results)
245
+ metadata: {
246
+ // Strategy details (agent-specific)
247
+ analysisDepth: "comprehensive",
248
+ insightQuality: 0.92,
249
+ actionability: 0.88,
250
+ toolCombination: ["eslint", "sonarqube", "coverage"],
251
+ executionTime: 8500
252
+ }
253
+ })
254
+ ```
255
+
256
+ **3. Store Successful Patterns:**
257
+ ```typescript
258
+ // If you discovered a useful pattern, store it
259
+ mcp__agentic_qe__learning_store_pattern({
260
+ agentId: "qe-quality-analyzer",
261
+ pattern: "High complexity with low coverage indicates technical debt hotspot requiring immediate refactoring",
262
+ confidence: 0.95, // How confident you are (0-1)
263
+ domain: "quality-metrics",
264
+ metadata: {
265
+ // Pattern context (agent-specific)
266
+ qualityPatterns: ["complexity-coverage-correlation", "debt-hotspot-detection"],
267
+ predictiveAccuracy: 0.93,
268
+ detectedIn: "payment.service.ts",
269
+ complexity: 18.4,
270
+ coverage: 45.2,
271
+ recommendation: "Increase coverage and refactor"
272
+ }
273
+ })
274
+ ```
275
+
276
+ ### Learning Query (Use at Task Start)
277
+
278
+ **Before starting your task**, query for past learnings:
279
+
280
+ ```typescript
281
+ // Query for successful experiences
282
+ const pastLearnings = await mcp__agentic_qe__learning_query({
283
+ agentId: "qe-quality-analyzer",
284
+ taskType: "quality-analysis",
285
+ minReward: 0.8, // Only get successful experiences
286
+ queryType: "all",
287
+ limit: 10
288
+ });
289
+
290
+ // Use the insights to optimize your current approach
291
+ if (pastLearnings.success && pastLearnings.data) {
292
+ const { experiences, qValues, patterns } = pastLearnings.data;
293
+
294
+ // Find best-performing strategy
295
+ const bestStrategy = qValues
296
+ .filter(qv => qv.state_key === "quality-analysis-state")
297
+ .sort((a, b) => b.q_value - a.q_value)[0];
298
+
299
+ console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
300
+
301
+ // Check for relevant patterns
302
+ const relevantPatterns = patterns
303
+ .filter(p => p.domain === "quality-metrics")
304
+ .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
305
+
306
+ if (relevantPatterns.length > 0) {
307
+ console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
308
+ }
309
+ }
310
+ ```
311
+
312
+ ### Success Criteria for Learning
313
+
314
+ **Reward Assessment (0-1 scale):**
315
+ - **1.0**: Perfect execution (All metrics analyzed, actionable insights, <5s analysis)
316
+ - **0.9**: Excellent (95%+ metrics covered, high-quality insights, <10s)
317
+ - **0.7**: Good (90%+ metrics covered, useful insights, <20s)
318
+ - **0.5**: Acceptable (80%+ metrics covered, completed successfully)
319
+ - **<0.5**: Needs improvement (Limited coverage, low-quality insights, slow)
320
+
321
+ **When to Call Learning Tools:**
322
+ - ✅ **ALWAYS** after completing main task
323
+ - ✅ **ALWAYS** after detecting significant findings
324
+ - ✅ **ALWAYS** after generating recommendations
325
+ - ✅ When discovering new effective strategies
326
+ - ✅ When achieving exceptional performance metrics
327
+
202
328
  ## Analysis Workflow
203
329
 
204
330
  ### Phase 1: Data Collection
@@ -558,4 +558,123 @@ node -e "import('agentic-qe/tools/qe/quality-gates').then(m => console.log(m.ava
558
558
  // aqe quality gate merge-decision --environment production
559
559
  ```
560
560
 
561
+ ## Learning Protocol (Phase 6 - Option C Implementation)
562
+
563
+ **⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
564
+
565
+ ### Required Learning Actions (Call AFTER Task Completion)
566
+
567
+ **1. Store Learning Experience:**
568
+ ```typescript
569
+ // Call this MCP tool after completing your task
570
+ mcp__agentic_qe__learning_store_experience({
571
+ agentId: "qe-quality-gate",
572
+ taskType: "quality-gate-evaluation",
573
+ reward: 0.95, // Your assessment of task success (0-1 scale)
574
+ outcome: {
575
+ // Your actual results (agent-specific)
576
+ gateResult: "pass",
577
+ riskLevel: "low",
578
+ metricsValidated: 15,
579
+ decisionsBlocked: 0,
580
+ executionTime: 2500
581
+ },
582
+ metadata: {
583
+ // Additional context (agent-specific)
584
+ environment: "production",
585
+ policyApplied: "strict",
586
+ thresholds: {
587
+ coverage: 90,
588
+ complexity: 15,
589
+ security: 0
590
+ }
591
+ }
592
+ })
593
+ ```
594
+
595
+ **2. Store Q-Values for Your Strategy:**
596
+ ```typescript
597
+ // Store Q-value for the strategy you used
598
+ mcp__agentic_qe__learning_store_qvalue({
599
+ agentId: "qe-quality-gate",
600
+ stateKey: "quality-gate-state",
601
+ actionKey: "risk-based-evaluation",
602
+ qValue: 0.85, // Expected value of this approach (based on results)
603
+ metadata: {
604
+ // Strategy details (agent-specific)
605
+ evaluationStrategy: "risk-based-ml",
606
+ accuracy: 0.98,
607
+ falsePositiveRate: 0.02
608
+ }
609
+ })
610
+ ```
611
+
612
+ **3. Store Successful Patterns:**
613
+ ```typescript
614
+ // If you discovered a useful pattern, store it
615
+ mcp__agentic_qe__learning_store_pattern({
616
+ agentId: "qe-quality-gate",
617
+ pattern: "Risk-based evaluation with ML scoring reduces false positives by 40% while maintaining 98% accuracy for quality gate decisions",
618
+ confidence: 0.95, // How confident you are (0-1)
619
+ domain: "quality-gate",
620
+ metadata: {
621
+ // Pattern context (agent-specific)
622
+ riskPatterns: ["coverage-trend-negative", "security-scan-new-vulns"],
623
+ decisionAccuracy: 0.98
624
+ }
625
+ })
626
+ ```
627
+
628
+ ### Learning Query (Use at Task Start)
629
+
630
+ **Before starting your task**, query for past learnings:
631
+
632
+ ```typescript
633
+ // Query for successful experiences
634
+ const pastLearnings = await mcp__agentic_qe__learning_query({
635
+ agentId: "qe-quality-gate",
636
+ taskType: "quality-gate-evaluation",
637
+ minReward: 0.8, // Only get successful experiences
638
+ queryType: "all",
639
+ limit: 10
640
+ });
641
+
642
+ // Use the insights to optimize your current approach
643
+ if (pastLearnings.success && pastLearnings.data) {
644
+ const { experiences, qValues, patterns } = pastLearnings.data;
645
+
646
+ // Find best-performing strategy
647
+ const bestStrategy = qValues
648
+ .filter(qv => qv.state_key === "quality-gate-state")
649
+ .sort((a, b) => b.q_value - a.q_value)[0];
650
+
651
+ console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
652
+
653
+ // Check for relevant patterns
654
+ const relevantPatterns = patterns
655
+ .filter(p => p.domain === "quality-gate")
656
+ .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
657
+
658
+ if (relevantPatterns.length > 0) {
659
+ console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
660
+ }
661
+ }
662
+ ```
663
+
664
+ ### Success Criteria for Learning
665
+
666
+ **Reward Assessment (0-1 scale):**
667
+ - **1.0**: Perfect execution (100% accurate decisions, 0 false positives, <2s evaluation)
668
+ - **0.9**: Excellent (98%+ accuracy, <1% false positives, <5s evaluation)
669
+ - **0.7**: Good (95%+ accuracy, <3% false positives, <10s evaluation)
670
+ - **0.5**: Acceptable (90%+ accuracy, completed successfully)
671
+ - **<0.5**: Needs improvement (Low accuracy, many false positives, slow)
672
+
673
+ **When to Call Learning Tools:**
674
+ - ✅ **ALWAYS** after completing main task
675
+ - ✅ **ALWAYS** after detecting significant findings
676
+ - ✅ **ALWAYS** after generating recommendations
677
+ - ✅ When discovering new effective strategies
678
+ - ✅ When achieving exceptional performance metrics
679
+
561
680
 
@@ -783,6 +783,120 @@ const verification = await hookManager.executePreTaskVerification({
783
783
  });
784
784
  ```
785
785
 
786
+ ## Learning Protocol (Phase 6 - Option C Implementation)
787
+
788
+ **⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
789
+
790
+ ### Required Learning Actions (Call AFTER Task Completion)
791
+
792
+ **1. Store Learning Experience:**
793
+ ```typescript
794
+ // Call this MCP tool after completing your task
795
+ mcp__agentic_qe__learning_store_experience({
796
+ agentId: "qe-regression-risk-analyzer",
797
+ taskType: "regression-risk-analysis",
798
+ reward: 0.95, // Your assessment of task success (0-1 scale)
799
+ outcome: {
800
+ // Your actual results (agent-specific)
801
+ riskScore: 78.3,
802
+ testsSelected: 47,
803
+ executionTimeReduction: 0.963,
804
+ accuracy: 0.95
805
+ },
806
+ metadata: {
807
+ // Additional context (agent-specific)
808
+ algorithm: "ml-enhanced-selection",
809
+ blastRadiusAnalyzed: true,
810
+ coverageImpact: 0.90
811
+ }
812
+ })
813
+ ```
814
+
815
+ **2. Store Q-Values for Your Strategy:**
816
+ ```typescript
817
+ // Store Q-value for the strategy you used
818
+ mcp__agentic_qe__learning_store_qvalue({
819
+ agentId: "qe-regression-risk-analyzer",
820
+ stateKey: "regression-risk-state",
821
+ actionKey: "ml-risk-prediction",
822
+ qValue: 0.85, // Expected value of this approach (based on results)
823
+ metadata: {
824
+ // Strategy details (agent-specific)
825
+ selectionStrategy: "hybrid-coverage-ml",
826
+ accuracyRate: 0.95,
827
+ timeReduction: 0.963
828
+ }
829
+ })
830
+ ```
831
+
832
+ **3. Store Successful Patterns:**
833
+ ```typescript
834
+ // If you discovered a useful pattern, store it
835
+ mcp__agentic_qe__learning_store_pattern({
836
+ agentId: "qe-regression-risk-analyzer",
837
+ pattern: "Changes to payment service after 5PM require full integration suite",
838
+ confidence: 0.95, // How confident you are (0-1)
839
+ domain: "regression-analysis",
840
+ metadata: {
841
+ // Pattern context (agent-specific)
842
+ riskPatterns: ["late-day-commits", "payment-critical"],
843
+ predictionAccuracy: 0.95
844
+ }
845
+ })
846
+ ```
847
+
848
+ ### Learning Query (Use at Task Start)
849
+
850
+ **Before starting your task**, query for past learnings:
851
+
852
+ ```typescript
853
+ // Query for successful experiences
854
+ const pastLearnings = await mcp__agentic_qe__learning_query({
855
+ agentId: "qe-regression-risk-analyzer",
856
+ taskType: "regression-risk-analysis",
857
+ minReward: 0.8, // Only get successful experiences
858
+ queryType: "all",
859
+ limit: 10
860
+ });
861
+
862
+ // Use the insights to optimize your current approach
863
+ if (pastLearnings.success && pastLearnings.data) {
864
+ const { experiences, qValues, patterns } = pastLearnings.data;
865
+
866
+ // Find best-performing strategy
867
+ const bestStrategy = qValues
868
+ .filter(qv => qv.state_key === "regression-risk-state")
869
+ .sort((a, b) => b.q_value - a.q_value)[0];
870
+
871
+ console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
872
+
873
+ // Check for relevant patterns
874
+ const relevantPatterns = patterns
875
+ .filter(p => p.domain === "regression-analysis")
876
+ .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
877
+
878
+ if (relevantPatterns.length > 0) {
879
+ console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
880
+ }
881
+ }
882
+ ```
883
+
884
+ ### Success Criteria for Learning
885
+
886
+ **Reward Assessment (0-1 scale):**
887
+ - **1.0**: Perfect execution (99%+ accuracy, 70%+ time reduction, 0 false negatives)
888
+ - **0.9**: Excellent (95%+ accuracy, 60%+ time reduction, <1% false negatives)
889
+ - **0.7**: Good (90%+ accuracy, 50%+ time reduction, <5% false negatives)
890
+ - **0.5**: Acceptable (85%+ accuracy, 40%+ time reduction)
891
+ - **<0.5**: Needs improvement (Low accuracy, minimal time savings, false negatives)
892
+
893
+ **When to Call Learning Tools:**
894
+ - ✅ **ALWAYS** after completing main task
895
+ - ✅ **ALWAYS** after detecting significant findings
896
+ - ✅ **ALWAYS** after generating recommendations
897
+ - ✅ When discovering new effective strategies
898
+ - ✅ When achieving exceptional performance metrics
899
+
786
900
  ## Integration Points
787
901
 
788
902
  ### Upstream Dependencies
@@ -293,6 +293,120 @@ this.registerEventHandler({
293
293
  });
294
294
  ```
295
295
 
296
+ ## Learning Protocol (Phase 6 - Option C Implementation)
297
+
298
+ **⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
299
+
300
+ ### Required Learning Actions (Call AFTER Task Completion)
301
+
302
+ **1. Store Learning Experience:**
303
+ ```typescript
304
+ // Call this MCP tool after completing your task
305
+ mcp__agentic_qe__learning_store_experience({
306
+ agentId: "qe-requirements-validator",
307
+ taskType: "requirements-validation",
308
+ reward: 0.95, // Your assessment of task success (0-1 scale)
309
+ outcome: {
310
+ // Your actual results
311
+ requirementsValidated: 12,
312
+ testabilityScore: 8.5,
313
+ bddScenariosGenerated: 32,
314
+ executionTime: 4200
315
+ },
316
+ metadata: {
317
+ // Additional context
318
+ validationFramework: "invest-smart",
319
+ strictMode: true,
320
+ criteriaChecked: ["invest", "smart", "5w"]
321
+ }
322
+ })
323
+ ```
324
+
325
+ **2. Store Q-Values for Your Strategy:**
326
+ ```typescript
327
+ // Store Q-value for the strategy you used
328
+ mcp__agentic_qe__learning_store_qvalue({
329
+ agentId: "qe-requirements-validator",
330
+ stateKey: "requirements-validation-state",
331
+ actionKey: "invest-analysis",
332
+ qValue: 0.85, // Expected value of this approach (based on results)
333
+ metadata: {
334
+ // Strategy details
335
+ validationStrategy: "invest-smart-combined",
336
+ accuracy: 0.95,
337
+ completeness: 0.92
338
+ }
339
+ })
340
+ ```
341
+
342
+ **3. Store Successful Patterns:**
343
+ ```typescript
344
+ // If you discovered a useful pattern, store it
345
+ mcp__agentic_qe__learning_store_pattern({
346
+ agentId: "qe-requirements-validator",
347
+ pattern: "Vague performance requirements converted to specific percentile-based metrics",
348
+ confidence: 0.95, // How confident you are (0-1)
349
+ domain: "requirements",
350
+ metadata: {
351
+ // Pattern context
352
+ requirementPatterns: ["vague-nfr", "missing-metrics", "unclear-sla"],
353
+ testabilityPrediction: 0.92
354
+ }
355
+ })
356
+ ```
357
+
358
+ ### Learning Query (Use at Task Start)
359
+
360
+ **Before starting your task**, query for past learnings:
361
+
362
+ ```typescript
363
+ // Query for successful experiences
364
+ const pastLearnings = await mcp__agentic_qe__learning_query({
365
+ agentId: "qe-requirements-validator",
366
+ taskType: "requirements-validation",
367
+ minReward: 0.8, // Only get successful experiences
368
+ queryType: "all",
369
+ limit: 10
370
+ });
371
+
372
+ // Use the insights to optimize your current approach
373
+ if (pastLearnings.success && pastLearnings.data) {
374
+ const { experiences, qValues, patterns } = pastLearnings.data;
375
+
376
+ // Find best-performing strategy
377
+ const bestStrategy = qValues
378
+ .filter(qv => qv.state_key === "requirements-validation-state")
379
+ .sort((a, b) => b.q_value - a.q_value)[0];
380
+
381
+ console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
382
+
383
+ // Check for relevant patterns
384
+ const relevantPatterns = patterns
385
+ .filter(p => p.domain === "requirements")
386
+ .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
387
+
388
+ if (relevantPatterns.length > 0) {
389
+ console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
390
+ }
391
+ }
392
+ ```
393
+
394
+ ### Success Criteria for Learning
395
+
396
+ **Reward Assessment (0-1 scale):**
397
+ - **1.0**: Perfect execution (All requirements testable, 100% INVEST compliance, <3s validation)
398
+ - **0.9**: Excellent (95%+ testable, 95%+ INVEST compliance, <5s validation)
399
+ - **0.7**: Good (90%+ testable, 90%+ INVEST compliance, <10s validation)
400
+ - **0.5**: Acceptable (80%+ testable, 80%+ INVEST compliance)
401
+ - **<0.5**: Needs improvement (Low testability, poor INVEST compliance)
402
+
403
+ **When to Call Learning Tools:**
404
+ - ✅ **ALWAYS** after completing main task
405
+ - ✅ **ALWAYS** after detecting significant findings
406
+ - ✅ **ALWAYS** after generating recommendations
407
+ - ✅ When discovering new effective strategies
408
+ - ✅ When achieving exceptional performance metrics
409
+
296
410
  ## Integration Points
297
411
 
298
412
  ### Upstream Dependencies