agentic-qe 1.5.1 → 1.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (188) hide show
  1. package/.claude/agents/qe-api-contract-validator.md +118 -0
  2. package/.claude/agents/qe-chaos-engineer.md +320 -5
  3. package/.claude/agents/qe-code-complexity.md +360 -0
  4. package/.claude/agents/qe-coverage-analyzer.md +112 -0
  5. package/.claude/agents/qe-deployment-readiness.md +322 -6
  6. package/.claude/agents/qe-flaky-test-hunter.md +115 -0
  7. package/.claude/agents/qe-fleet-commander.md +319 -6
  8. package/.claude/agents/qe-performance-tester.md +234 -0
  9. package/.claude/agents/qe-production-intelligence.md +114 -0
  10. package/.claude/agents/qe-quality-analyzer.md +126 -0
  11. package/.claude/agents/qe-quality-gate.md +119 -0
  12. package/.claude/agents/qe-regression-risk-analyzer.md +114 -0
  13. package/.claude/agents/qe-requirements-validator.md +114 -0
  14. package/.claude/agents/qe-security-scanner.md +118 -0
  15. package/.claude/agents/qe-test-data-architect.md +234 -0
  16. package/.claude/agents/qe-test-executor.md +115 -0
  17. package/.claude/agents/qe-test-generator.md +114 -0
  18. package/.claude/agents/qe-visual-tester.md +305 -6
  19. package/.claude/agents/subagents/qe-code-reviewer.md +0 -4
  20. package/.claude/agents/subagents/qe-data-generator.md +0 -16
  21. package/.claude/agents/subagents/qe-integration-tester.md +0 -17
  22. package/.claude/agents/subagents/qe-performance-validator.md +0 -16
  23. package/.claude/agents/subagents/qe-security-auditor.md +0 -16
  24. package/.claude/agents/subagents/qe-test-implementer.md +0 -17
  25. package/.claude/agents/subagents/qe-test-refactorer.md +0 -17
  26. package/.claude/agents/subagents/qe-test-writer.md +0 -19
  27. package/.claude/skills/brutal-honesty-review/README.md +218 -0
  28. package/.claude/skills/brutal-honesty-review/SKILL.md +725 -0
  29. package/.claude/skills/brutal-honesty-review/resources/assessment-rubrics.md +295 -0
  30. package/.claude/skills/brutal-honesty-review/resources/review-template.md +102 -0
  31. package/.claude/skills/brutal-honesty-review/scripts/assess-code.sh +179 -0
  32. package/.claude/skills/brutal-honesty-review/scripts/assess-tests.sh +223 -0
  33. package/.claude/skills/cicd-pipeline-qe-orchestrator/README.md +301 -0
  34. package/.claude/skills/cicd-pipeline-qe-orchestrator/SKILL.md +510 -0
  35. package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/microservice-pipeline.md +239 -0
  36. package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/mobile-pipeline.md +375 -0
  37. package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/monolith-pipeline.md +268 -0
  38. package/.claude/skills/six-thinking-hats/README.md +190 -0
  39. package/.claude/skills/six-thinking-hats/SKILL.md +1215 -0
  40. package/.claude/skills/six-thinking-hats/resources/examples/api-testing-example.md +345 -0
  41. package/.claude/skills/six-thinking-hats/resources/templates/solo-session-template.md +167 -0
  42. package/.claude/skills/six-thinking-hats/resources/templates/team-session-template.md +336 -0
  43. package/CHANGELOG.md +2472 -2129
  44. package/README.md +48 -10
  45. package/dist/adapters/MemoryStoreAdapter.d.ts +38 -0
  46. package/dist/adapters/MemoryStoreAdapter.d.ts.map +1 -1
  47. package/dist/adapters/MemoryStoreAdapter.js +22 -0
  48. package/dist/adapters/MemoryStoreAdapter.js.map +1 -1
  49. package/dist/agents/BaseAgent.d.ts.map +1 -1
  50. package/dist/agents/BaseAgent.js +13 -0
  51. package/dist/agents/BaseAgent.js.map +1 -1
  52. package/dist/cli/commands/init-claude-md-template.d.ts +16 -0
  53. package/dist/cli/commands/init-claude-md-template.d.ts.map +1 -0
  54. package/dist/cli/commands/init-claude-md-template.js +69 -0
  55. package/dist/cli/commands/init-claude-md-template.js.map +1 -0
  56. package/dist/cli/commands/init.d.ts +1 -1
  57. package/dist/cli/commands/init.d.ts.map +1 -1
  58. package/dist/cli/commands/init.js +509 -460
  59. package/dist/cli/commands/init.js.map +1 -1
  60. package/dist/core/memory/AgentDBService.d.ts +33 -28
  61. package/dist/core/memory/AgentDBService.d.ts.map +1 -1
  62. package/dist/core/memory/AgentDBService.js +233 -290
  63. package/dist/core/memory/AgentDBService.js.map +1 -1
  64. package/dist/core/memory/EnhancedAgentDBService.d.ts.map +1 -1
  65. package/dist/core/memory/EnhancedAgentDBService.js +5 -3
  66. package/dist/core/memory/EnhancedAgentDBService.js.map +1 -1
  67. package/dist/core/memory/RealAgentDBAdapter.d.ts +9 -2
  68. package/dist/core/memory/RealAgentDBAdapter.d.ts.map +1 -1
  69. package/dist/core/memory/RealAgentDBAdapter.js +126 -100
  70. package/dist/core/memory/RealAgentDBAdapter.js.map +1 -1
  71. package/dist/core/memory/SwarmMemoryManager.d.ts +58 -0
  72. package/dist/core/memory/SwarmMemoryManager.d.ts.map +1 -1
  73. package/dist/core/memory/SwarmMemoryManager.js +176 -0
  74. package/dist/core/memory/SwarmMemoryManager.js.map +1 -1
  75. package/dist/core/memory/index.d.ts.map +1 -1
  76. package/dist/core/memory/index.js +2 -1
  77. package/dist/core/memory/index.js.map +1 -1
  78. package/dist/learning/LearningEngine.d.ts +14 -27
  79. package/dist/learning/LearningEngine.d.ts.map +1 -1
  80. package/dist/learning/LearningEngine.js +57 -119
  81. package/dist/learning/LearningEngine.js.map +1 -1
  82. package/dist/learning/index.d.ts +0 -1
  83. package/dist/learning/index.d.ts.map +1 -1
  84. package/dist/learning/index.js +0 -1
  85. package/dist/learning/index.js.map +1 -1
  86. package/dist/mcp/handlers/learning/learning-query.d.ts +34 -0
  87. package/dist/mcp/handlers/learning/learning-query.d.ts.map +1 -0
  88. package/dist/mcp/handlers/learning/learning-query.js +156 -0
  89. package/dist/mcp/handlers/learning/learning-query.js.map +1 -0
  90. package/dist/mcp/handlers/learning/learning-store-experience.d.ts +30 -0
  91. package/dist/mcp/handlers/learning/learning-store-experience.d.ts.map +1 -0
  92. package/dist/mcp/handlers/learning/learning-store-experience.js +86 -0
  93. package/dist/mcp/handlers/learning/learning-store-experience.js.map +1 -0
  94. package/dist/mcp/handlers/learning/learning-store-pattern.d.ts +31 -0
  95. package/dist/mcp/handlers/learning/learning-store-pattern.d.ts.map +1 -0
  96. package/dist/mcp/handlers/learning/learning-store-pattern.js +126 -0
  97. package/dist/mcp/handlers/learning/learning-store-pattern.js.map +1 -0
  98. package/dist/mcp/handlers/learning/learning-store-qvalue.d.ts +30 -0
  99. package/dist/mcp/handlers/learning/learning-store-qvalue.d.ts.map +1 -0
  100. package/dist/mcp/handlers/learning/learning-store-qvalue.js +100 -0
  101. package/dist/mcp/handlers/learning/learning-store-qvalue.js.map +1 -0
  102. package/dist/mcp/server.d.ts +11 -0
  103. package/dist/mcp/server.d.ts.map +1 -1
  104. package/dist/mcp/server.js +98 -1
  105. package/dist/mcp/server.js.map +1 -1
  106. package/dist/mcp/services/LearningEventListener.d.ts +123 -0
  107. package/dist/mcp/services/LearningEventListener.d.ts.map +1 -0
  108. package/dist/mcp/services/LearningEventListener.js +322 -0
  109. package/dist/mcp/services/LearningEventListener.js.map +1 -0
  110. package/dist/mcp/tools.d.ts +4 -0
  111. package/dist/mcp/tools.d.ts.map +1 -1
  112. package/dist/mcp/tools.js +179 -0
  113. package/dist/mcp/tools.js.map +1 -1
  114. package/dist/types/memory-interfaces.d.ts +71 -0
  115. package/dist/types/memory-interfaces.d.ts.map +1 -1
  116. package/dist/utils/Calculator.d.ts +35 -0
  117. package/dist/utils/Calculator.d.ts.map +1 -0
  118. package/dist/utils/Calculator.js +50 -0
  119. package/dist/utils/Calculator.js.map +1 -0
  120. package/dist/utils/Logger.d.ts.map +1 -1
  121. package/dist/utils/Logger.js +4 -1
  122. package/dist/utils/Logger.js.map +1 -1
  123. package/package.json +7 -5
  124. package/.claude/agents/qe-api-contract-validator.md.backup +0 -1148
  125. package/.claude/agents/qe-api-contract-validator.md.backup-20251107-134747 +0 -1148
  126. package/.claude/agents/qe-api-contract-validator.md.backup-phase2-20251107-140039 +0 -1123
  127. package/.claude/agents/qe-chaos-engineer.md.backup +0 -808
  128. package/.claude/agents/qe-chaos-engineer.md.backup-20251107-134747 +0 -808
  129. package/.claude/agents/qe-chaos-engineer.md.backup-phase2-20251107-140039 +0 -787
  130. package/.claude/agents/qe-code-complexity.md.backup +0 -291
  131. package/.claude/agents/qe-code-complexity.md.backup-20251107-134747 +0 -291
  132. package/.claude/agents/qe-code-complexity.md.backup-phase2-20251107-140039 +0 -286
  133. package/.claude/agents/qe-coverage-analyzer.md.backup +0 -467
  134. package/.claude/agents/qe-coverage-analyzer.md.backup-20251107-134747 +0 -467
  135. package/.claude/agents/qe-coverage-analyzer.md.backup-phase2-20251107-140039 +0 -438
  136. package/.claude/agents/qe-deployment-readiness.md.backup +0 -1166
  137. package/.claude/agents/qe-deployment-readiness.md.backup-20251107-134747 +0 -1166
  138. package/.claude/agents/qe-deployment-readiness.md.backup-phase2-20251107-140039 +0 -1140
  139. package/.claude/agents/qe-flaky-test-hunter.md.backup +0 -1195
  140. package/.claude/agents/qe-flaky-test-hunter.md.backup-20251107-134747 +0 -1195
  141. package/.claude/agents/qe-flaky-test-hunter.md.backup-phase2-20251107-140039 +0 -1162
  142. package/.claude/agents/qe-fleet-commander.md.backup +0 -718
  143. package/.claude/agents/qe-fleet-commander.md.backup-20251107-134747 +0 -718
  144. package/.claude/agents/qe-fleet-commander.md.backup-phase2-20251107-140039 +0 -697
  145. package/.claude/agents/qe-performance-tester.md.backup +0 -428
  146. package/.claude/agents/qe-performance-tester.md.backup-20251107-134747 +0 -428
  147. package/.claude/agents/qe-performance-tester.md.backup-phase2-20251107-140039 +0 -372
  148. package/.claude/agents/qe-production-intelligence.md.backup +0 -1219
  149. package/.claude/agents/qe-production-intelligence.md.backup-20251107-134747 +0 -1219
  150. package/.claude/agents/qe-production-intelligence.md.backup-phase2-20251107-140039 +0 -1194
  151. package/.claude/agents/qe-quality-analyzer.md.backup +0 -425
  152. package/.claude/agents/qe-quality-analyzer.md.backup-20251107-134747 +0 -425
  153. package/.claude/agents/qe-quality-analyzer.md.backup-phase2-20251107-140039 +0 -394
  154. package/.claude/agents/qe-quality-gate.md.backup +0 -446
  155. package/.claude/agents/qe-quality-gate.md.backup-20251107-134747 +0 -446
  156. package/.claude/agents/qe-quality-gate.md.backup-phase2-20251107-140039 +0 -415
  157. package/.claude/agents/qe-regression-risk-analyzer.md.backup +0 -1009
  158. package/.claude/agents/qe-regression-risk-analyzer.md.backup-20251107-134747 +0 -1009
  159. package/.claude/agents/qe-regression-risk-analyzer.md.backup-phase2-20251107-140039 +0 -984
  160. package/.claude/agents/qe-requirements-validator.md.backup +0 -748
  161. package/.claude/agents/qe-requirements-validator.md.backup-20251107-134747 +0 -748
  162. package/.claude/agents/qe-requirements-validator.md.backup-phase2-20251107-140039 +0 -723
  163. package/.claude/agents/qe-security-scanner.md.backup +0 -634
  164. package/.claude/agents/qe-security-scanner.md.backup-20251107-134747 +0 -634
  165. package/.claude/agents/qe-security-scanner.md.backup-phase2-20251107-140039 +0 -573
  166. package/.claude/agents/qe-test-data-architect.md.backup +0 -1064
  167. package/.claude/agents/qe-test-data-architect.md.backup-20251107-134747 +0 -1064
  168. package/.claude/agents/qe-test-data-architect.md.backup-phase2-20251107-140039 +0 -1040
  169. package/.claude/agents/qe-test-executor.md.backup +0 -389
  170. package/.claude/agents/qe-test-executor.md.backup-20251107-134747 +0 -389
  171. package/.claude/agents/qe-test-executor.md.backup-phase2-20251107-140039 +0 -369
  172. package/.claude/agents/qe-test-generator.md.backup +0 -997
  173. package/.claude/agents/qe-test-generator.md.backup-20251107-134747 +0 -997
  174. package/.claude/agents/qe-visual-tester.md.backup +0 -777
  175. package/.claude/agents/qe-visual-tester.md.backup-20251107-134747 +0 -777
  176. package/.claude/agents/qe-visual-tester.md.backup-phase2-20251107-140039 +0 -756
  177. package/.claude/commands/analysis/COMMAND_COMPLIANCE_REPORT.md +0 -54
  178. package/.claude/commands/analysis/performance-bottlenecks.md +0 -59
  179. package/.claude/commands/flow-nexus/app-store.md +0 -124
  180. package/.claude/commands/flow-nexus/challenges.md +0 -120
  181. package/.claude/commands/flow-nexus/login-registration.md +0 -65
  182. package/.claude/commands/flow-nexus/neural-network.md +0 -134
  183. package/.claude/commands/flow-nexus/payments.md +0 -116
  184. package/.claude/commands/flow-nexus/sandbox.md +0 -83
  185. package/.claude/commands/flow-nexus/swarm.md +0 -87
  186. package/.claude/commands/flow-nexus/user-tools.md +0 -152
  187. package/.claude/commands/flow-nexus/workflow.md +0 -115
  188. package/.claude/commands/memory/usage.md +0 -46
@@ -1103,6 +1103,120 @@ const verification = await hookManager.executePreTaskVerification({
1103
1103
  });
1104
1104
  ```
1105
1105
 
1106
+ ## Learning Protocol (Phase 6 - Option C Implementation)
1107
+
1108
+ **⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
1109
+
1110
+ ### Required Learning Actions (Call AFTER Task Completion)
1111
+
1112
+ **1. Store Learning Experience:**
1113
+ ```typescript
1114
+ // Call this MCP tool after completing your task
1115
+ mcp__agentic_qe__learning_store_experience({
1116
+ agentId: "qe-production-intelligence",
1117
+ taskType: "production-analysis",
1118
+ reward: 0.95, // Your assessment of task success (0-1 scale)
1119
+ outcome: {
1120
+ // Your actual results (agent-specific)
1121
+ incidentsAnalyzed: 12,
1122
+ testsGenerated: 47,
1123
+ rootCausesFound: 8,
1124
+ executionTime: 12000
1125
+ },
1126
+ metadata: {
1127
+ // Additional context (agent-specific)
1128
+ dataSource: "datadog",
1129
+ analysisDepth: "comprehensive",
1130
+ rumEnabled: true
1131
+ }
1132
+ })
1133
+ ```
1134
+
1135
+ **2. Store Q-Values for Your Strategy:**
1136
+ ```typescript
1137
+ // Store Q-value for the strategy you used
1138
+ mcp__agentic_qe__learning_store_qvalue({
1139
+ agentId: "qe-production-intelligence",
1140
+ stateKey: "production-analysis-state",
1141
+ actionKey: "incident-replay",
1142
+ qValue: 0.85, // Expected value of this approach (based on results)
1143
+ metadata: {
1144
+ // Strategy details (agent-specific)
1145
+ analysisStrategy: "rum-incident-combined",
1146
+ accuracyRate: 0.95,
1147
+ coverage: 0.92
1148
+ }
1149
+ })
1150
+ ```
1151
+
1152
+ **3. Store Successful Patterns:**
1153
+ ```typescript
1154
+ // If you discovered a useful pattern, store it
1155
+ mcp__agentic_qe__learning_store_pattern({
1156
+ agentId: "qe-production-intelligence",
1157
+ pattern: "Peak hour network failures in specific regions indicate infrastructure capacity issues - correlate with RUM data for comprehensive test generation",
1158
+ confidence: 0.95, // How confident you are (0-1)
1159
+ domain: "production-intelligence",
1160
+ metadata: {
1161
+ // Pattern context (agent-specific)
1162
+ incidentPatterns: ["network-timeout", "gateway-error", "connection-refused"],
1163
+ predictionAccuracy: 0.93
1164
+ }
1165
+ })
1166
+ ```
1167
+
1168
+ ### Learning Query (Use at Task Start)
1169
+
1170
+ **Before starting your task**, query for past learnings:
1171
+
1172
+ ```typescript
1173
+ // Query for successful experiences
1174
+ const pastLearnings = await mcp__agentic_qe__learning_query({
1175
+ agentId: "qe-production-intelligence",
1176
+ taskType: "production-analysis",
1177
+ minReward: 0.8, // Only get successful experiences
1178
+ queryType: "all",
1179
+ limit: 10
1180
+ });
1181
+
1182
+ // Use the insights to optimize your current approach
1183
+ if (pastLearnings.success && pastLearnings.data) {
1184
+ const { experiences, qValues, patterns } = pastLearnings.data;
1185
+
1186
+ // Find best-performing strategy
1187
+ const bestStrategy = qValues
1188
+ .filter(qv => qv.state_key === "production-analysis-state")
1189
+ .sort((a, b) => b.q_value - a.q_value)[0];
1190
+
1191
+ console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
1192
+
1193
+ // Check for relevant patterns
1194
+ const relevantPatterns = patterns
1195
+ .filter(p => p.domain === "production-intelligence")
1196
+ .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
1197
+
1198
+ if (relevantPatterns.length > 0) {
1199
+ console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
1200
+ }
1201
+ }
1202
+ ```
1203
+
1204
+ ### Success Criteria for Learning
1205
+
1206
+ **Reward Assessment (0-1 scale):**
1207
+ - **1.0**: Perfect execution (100% incident coverage, root causes identified, <5s analysis)
1208
+ - **0.9**: Excellent (95%+ coverage, most root causes found, <10s analysis)
1209
+ - **0.7**: Good (90%+ coverage, key root causes found, <20s analysis)
1210
+ - **0.5**: Acceptable (80%+ coverage, completed successfully)
1211
+ - **<0.5**: Needs improvement (Low coverage, missed root causes, slow)
1212
+
1213
+ **When to Call Learning Tools:**
1214
+ - ✅ **ALWAYS** after completing main task
1215
+ - ✅ **ALWAYS** after detecting significant findings
1216
+ - ✅ **ALWAYS** after generating recommendations
1217
+ - ✅ When discovering new effective strategies
1218
+ - ✅ When achieving exceptional performance metrics
1219
+
1106
1220
  ## Memory Keys
1107
1221
 
1108
1222
  ### Input Keys
@@ -199,6 +199,132 @@ const finalization = await hookManager.executeSessionEndFinalization({
199
199
  });
200
200
  ```
201
201
 
202
+ ## Learning Protocol (Phase 6 - Option C Implementation)
203
+
204
+ **⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
205
+
206
+ ### Required Learning Actions (Call AFTER Task Completion)
207
+
208
+ **1. Store Learning Experience:**
209
+ ```typescript
210
+ // Call this MCP tool after completing your task
211
+ mcp__agentic_qe__learning_store_experience({
212
+ agentId: "qe-quality-analyzer",
213
+ taskType: "quality-analysis",
214
+ reward: 0.95, // Your assessment of task success (0-1 scale)
215
+ outcome: {
216
+ // Your actual results (agent-specific)
217
+ metricsAnalyzed: 47,
218
+ trendsDetected: 3,
219
+ recommendations: 12,
220
+ executionTime: 8500,
221
+ overallScore: 87.3,
222
+ codeQuality: 85.2,
223
+ testQuality: 89.1,
224
+ technicalDebt: 2.5
225
+ },
226
+ metadata: {
227
+ // Additional context (agent-specific)
228
+ scope: "full-codebase",
229
+ metricsCategories: ["code-quality", "test-quality", "technical-debt"],
230
+ timeframe: "last-30-days",
231
+ toolsUsed: ["eslint", "sonarqube", "coverage"],
232
+ analysisDepth: "comprehensive"
233
+ }
234
+ })
235
+ ```
236
+
237
+ **2. Store Q-Values for Your Strategy:**
238
+ ```typescript
239
+ // Store Q-value for the strategy you used
240
+ mcp__agentic_qe__learning_store_qvalue({
241
+ agentId: "qe-quality-analyzer",
242
+ stateKey: "quality-analysis-state",
243
+ actionKey: "comprehensive-analysis",
244
+ qValue: 0.85, // Expected value of this approach (based on results)
245
+ metadata: {
246
+ // Strategy details (agent-specific)
247
+ analysisDepth: "comprehensive",
248
+ insightQuality: 0.92,
249
+ actionability: 0.88,
250
+ toolCombination: ["eslint", "sonarqube", "coverage"],
251
+ executionTime: 8500
252
+ }
253
+ })
254
+ ```
255
+
256
+ **3. Store Successful Patterns:**
257
+ ```typescript
258
+ // If you discovered a useful pattern, store it
259
+ mcp__agentic_qe__learning_store_pattern({
260
+ agentId: "qe-quality-analyzer",
261
+ pattern: "High complexity with low coverage indicates technical debt hotspot requiring immediate refactoring",
262
+ confidence: 0.95, // How confident you are (0-1)
263
+ domain: "quality-metrics",
264
+ metadata: {
265
+ // Pattern context (agent-specific)
266
+ qualityPatterns: ["complexity-coverage-correlation", "debt-hotspot-detection"],
267
+ predictiveAccuracy: 0.93,
268
+ detectedIn: "payment.service.ts",
269
+ complexity: 18.4,
270
+ coverage: 45.2,
271
+ recommendation: "Increase coverage and refactor"
272
+ }
273
+ })
274
+ ```
275
+
276
+ ### Learning Query (Use at Task Start)
277
+
278
+ **Before starting your task**, query for past learnings:
279
+
280
+ ```typescript
281
+ // Query for successful experiences
282
+ const pastLearnings = await mcp__agentic_qe__learning_query({
283
+ agentId: "qe-quality-analyzer",
284
+ taskType: "quality-analysis",
285
+ minReward: 0.8, // Only get successful experiences
286
+ queryType: "all",
287
+ limit: 10
288
+ });
289
+
290
+ // Use the insights to optimize your current approach
291
+ if (pastLearnings.success && pastLearnings.data) {
292
+ const { experiences, qValues, patterns } = pastLearnings.data;
293
+
294
+ // Find best-performing strategy
295
+ const bestStrategy = qValues
296
+ .filter(qv => qv.state_key === "quality-analysis-state")
297
+ .sort((a, b) => b.q_value - a.q_value)[0];
298
+
299
+ console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
300
+
301
+ // Check for relevant patterns
302
+ const relevantPatterns = patterns
303
+ .filter(p => p.domain === "quality-metrics")
304
+ .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
305
+
306
+ if (relevantPatterns.length > 0) {
307
+ console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
308
+ }
309
+ }
310
+ ```
311
+
312
+ ### Success Criteria for Learning
313
+
314
+ **Reward Assessment (0-1 scale):**
315
+ - **1.0**: Perfect execution (All metrics analyzed, actionable insights, <5s analysis)
316
+ - **0.9**: Excellent (95%+ metrics covered, high-quality insights, <10s)
317
+ - **0.7**: Good (90%+ metrics covered, useful insights, <20s)
318
+ - **0.5**: Acceptable (80%+ metrics covered, completed successfully)
319
+ - **<0.5**: Needs improvement (Limited coverage, low-quality insights, slow)
320
+
321
+ **When to Call Learning Tools:**
322
+ - ✅ **ALWAYS** after completing main task
323
+ - ✅ **ALWAYS** after detecting significant findings
324
+ - ✅ **ALWAYS** after generating recommendations
325
+ - ✅ When discovering new effective strategies
326
+ - ✅ When achieving exceptional performance metrics
327
+
202
328
  ## Analysis Workflow
203
329
 
204
330
  ### Phase 1: Data Collection
@@ -558,4 +558,123 @@ node -e "import('agentic-qe/tools/qe/quality-gates').then(m => console.log(m.ava
558
558
  // aqe quality gate merge-decision --environment production
559
559
  ```
560
560
 
561
+ ## Learning Protocol (Phase 6 - Option C Implementation)
562
+
563
+ **⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
564
+
565
+ ### Required Learning Actions (Call AFTER Task Completion)
566
+
567
+ **1. Store Learning Experience:**
568
+ ```typescript
569
+ // Call this MCP tool after completing your task
570
+ mcp__agentic_qe__learning_store_experience({
571
+ agentId: "qe-quality-gate",
572
+ taskType: "quality-gate-evaluation",
573
+ reward: 0.95, // Your assessment of task success (0-1 scale)
574
+ outcome: {
575
+ // Your actual results (agent-specific)
576
+ gateResult: "pass",
577
+ riskLevel: "low",
578
+ metricsValidated: 15,
579
+ decisionsBlocked: 0,
580
+ executionTime: 2500
581
+ },
582
+ metadata: {
583
+ // Additional context (agent-specific)
584
+ environment: "production",
585
+ policyApplied: "strict",
586
+ thresholds: {
587
+ coverage: 90,
588
+ complexity: 15,
589
+ security: 0
590
+ }
591
+ }
592
+ })
593
+ ```
594
+
595
+ **2. Store Q-Values for Your Strategy:**
596
+ ```typescript
597
+ // Store Q-value for the strategy you used
598
+ mcp__agentic_qe__learning_store_qvalue({
599
+ agentId: "qe-quality-gate",
600
+ stateKey: "quality-gate-state",
601
+ actionKey: "risk-based-evaluation",
602
+ qValue: 0.85, // Expected value of this approach (based on results)
603
+ metadata: {
604
+ // Strategy details (agent-specific)
605
+ evaluationStrategy: "risk-based-ml",
606
+ accuracy: 0.98,
607
+ falsePositiveRate: 0.02
608
+ }
609
+ })
610
+ ```
611
+
612
+ **3. Store Successful Patterns:**
613
+ ```typescript
614
+ // If you discovered a useful pattern, store it
615
+ mcp__agentic_qe__learning_store_pattern({
616
+ agentId: "qe-quality-gate",
617
+ pattern: "Risk-based evaluation with ML scoring reduces false positives by 40% while maintaining 98% accuracy for quality gate decisions",
618
+ confidence: 0.95, // How confident you are (0-1)
619
+ domain: "quality-gate",
620
+ metadata: {
621
+ // Pattern context (agent-specific)
622
+ riskPatterns: ["coverage-trend-negative", "security-scan-new-vulns"],
623
+ decisionAccuracy: 0.98
624
+ }
625
+ })
626
+ ```
627
+
628
+ ### Learning Query (Use at Task Start)
629
+
630
+ **Before starting your task**, query for past learnings:
631
+
632
+ ```typescript
633
+ // Query for successful experiences
634
+ const pastLearnings = await mcp__agentic_qe__learning_query({
635
+ agentId: "qe-quality-gate",
636
+ taskType: "quality-gate-evaluation",
637
+ minReward: 0.8, // Only get successful experiences
638
+ queryType: "all",
639
+ limit: 10
640
+ });
641
+
642
+ // Use the insights to optimize your current approach
643
+ if (pastLearnings.success && pastLearnings.data) {
644
+ const { experiences, qValues, patterns } = pastLearnings.data;
645
+
646
+ // Find best-performing strategy
647
+ const bestStrategy = qValues
648
+ .filter(qv => qv.state_key === "quality-gate-state")
649
+ .sort((a, b) => b.q_value - a.q_value)[0];
650
+
651
+ console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
652
+
653
+ // Check for relevant patterns
654
+ const relevantPatterns = patterns
655
+ .filter(p => p.domain === "quality-gate")
656
+ .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
657
+
658
+ if (relevantPatterns.length > 0) {
659
+ console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
660
+ }
661
+ }
662
+ ```
663
+
664
+ ### Success Criteria for Learning
665
+
666
+ **Reward Assessment (0-1 scale):**
667
+ - **1.0**: Perfect execution (100% accurate decisions, 0 false positives, <2s evaluation)
668
+ - **0.9**: Excellent (98%+ accuracy, <1% false positives, <5s evaluation)
669
+ - **0.7**: Good (95%+ accuracy, <3% false positives, <10s evaluation)
670
+ - **0.5**: Acceptable (90%+ accuracy, completed successfully)
671
+ - **<0.5**: Needs improvement (Low accuracy, many false positives, slow)
672
+
673
+ **When to Call Learning Tools:**
674
+ - ✅ **ALWAYS** after completing main task
675
+ - ✅ **ALWAYS** after detecting significant findings
676
+ - ✅ **ALWAYS** after generating recommendations
677
+ - ✅ When discovering new effective strategies
678
+ - ✅ When achieving exceptional performance metrics
679
+
561
680
 
@@ -783,6 +783,120 @@ const verification = await hookManager.executePreTaskVerification({
783
783
  });
784
784
  ```
785
785
 
786
+ ## Learning Protocol (Phase 6 - Option C Implementation)
787
+
788
+ **⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
789
+
790
+ ### Required Learning Actions (Call AFTER Task Completion)
791
+
792
+ **1. Store Learning Experience:**
793
+ ```typescript
794
+ // Call this MCP tool after completing your task
795
+ mcp__agentic_qe__learning_store_experience({
796
+ agentId: "qe-regression-risk-analyzer",
797
+ taskType: "regression-risk-analysis",
798
+ reward: 0.95, // Your assessment of task success (0-1 scale)
799
+ outcome: {
800
+ // Your actual results (agent-specific)
801
+ riskScore: 78.3,
802
+ testsSelected: 47,
803
+ executionTimeReduction: 0.963,
804
+ accuracy: 0.95
805
+ },
806
+ metadata: {
807
+ // Additional context (agent-specific)
808
+ algorithm: "ml-enhanced-selection",
809
+ blastRadiusAnalyzed: true,
810
+ coverageImpact: 0.90
811
+ }
812
+ })
813
+ ```
814
+
815
+ **2. Store Q-Values for Your Strategy:**
816
+ ```typescript
817
+ // Store Q-value for the strategy you used
818
+ mcp__agentic_qe__learning_store_qvalue({
819
+ agentId: "qe-regression-risk-analyzer",
820
+ stateKey: "regression-risk-state",
821
+ actionKey: "ml-risk-prediction",
822
+ qValue: 0.85, // Expected value of this approach (based on results)
823
+ metadata: {
824
+ // Strategy details (agent-specific)
825
+ selectionStrategy: "hybrid-coverage-ml",
826
+ accuracyRate: 0.95,
827
+ timeReduction: 0.963
828
+ }
829
+ })
830
+ ```
831
+
832
+ **3. Store Successful Patterns:**
833
+ ```typescript
834
+ // If you discovered a useful pattern, store it
835
+ mcp__agentic_qe__learning_store_pattern({
836
+ agentId: "qe-regression-risk-analyzer",
837
+ pattern: "Changes to payment service after 5PM require full integration suite",
838
+ confidence: 0.95, // How confident you are (0-1)
839
+ domain: "regression-analysis",
840
+ metadata: {
841
+ // Pattern context (agent-specific)
842
+ riskPatterns: ["late-day-commits", "payment-critical"],
843
+ predictionAccuracy: 0.95
844
+ }
845
+ })
846
+ ```
847
+
848
+ ### Learning Query (Use at Task Start)
849
+
850
+ **Before starting your task**, query for past learnings:
851
+
852
+ ```typescript
853
+ // Query for successful experiences
854
+ const pastLearnings = await mcp__agentic_qe__learning_query({
855
+ agentId: "qe-regression-risk-analyzer",
856
+ taskType: "regression-risk-analysis",
857
+ minReward: 0.8, // Only get successful experiences
858
+ queryType: "all",
859
+ limit: 10
860
+ });
861
+
862
+ // Use the insights to optimize your current approach
863
+ if (pastLearnings.success && pastLearnings.data) {
864
+ const { experiences, qValues, patterns } = pastLearnings.data;
865
+
866
+ // Find best-performing strategy
867
+ const bestStrategy = qValues
868
+ .filter(qv => qv.state_key === "regression-risk-state")
869
+ .sort((a, b) => b.q_value - a.q_value)[0];
870
+
871
+ console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
872
+
873
+ // Check for relevant patterns
874
+ const relevantPatterns = patterns
875
+ .filter(p => p.domain === "regression-analysis")
876
+ .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
877
+
878
+ if (relevantPatterns.length > 0) {
879
+ console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
880
+ }
881
+ }
882
+ ```
883
+
884
+ ### Success Criteria for Learning
885
+
886
+ **Reward Assessment (0-1 scale):**
887
+ - **1.0**: Perfect execution (99%+ accuracy, 70%+ time reduction, 0 false negatives)
888
+ - **0.9**: Excellent (95%+ accuracy, 60%+ time reduction, <1% false negatives)
889
+ - **0.7**: Good (90%+ accuracy, 50%+ time reduction, <5% false negatives)
890
+ - **0.5**: Acceptable (85%+ accuracy, 40%+ time reduction)
891
+ - **<0.5**: Needs improvement (Low accuracy, minimal time savings, false negatives)
892
+
893
+ **When to Call Learning Tools:**
894
+ - ✅ **ALWAYS** after completing main task
895
+ - ✅ **ALWAYS** after detecting significant findings
896
+ - ✅ **ALWAYS** after generating recommendations
897
+ - ✅ When discovering new effective strategies
898
+ - ✅ When achieving exceptional performance metrics
899
+
786
900
  ## Integration Points
787
901
 
788
902
  ### Upstream Dependencies
@@ -293,6 +293,120 @@ this.registerEventHandler({
293
293
  });
294
294
  ```
295
295
 
296
+ ## Learning Protocol (Phase 6 - Option C Implementation)
297
+
298
+ **⚠️ MANDATORY**: When executed via the Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
299
+
300
+ ### Required Learning Actions (Call AFTER Task Completion)
301
+
302
+ **1. Store Learning Experience:**
303
+ ```typescript
304
+ // Call this MCP tool after completing your task
305
+ mcp__agentic_qe__learning_store_experience({
306
+ agentId: "qe-requirements-validator",
307
+ taskType: "requirements-validation",
308
+ reward: 0.95, // Your assessment of task success (0-1 scale)
309
+ outcome: {
310
+ // Your actual results
311
+ requirementsValidated: 12,
312
+ testabilityScore: 8.5,
313
+ bddScenariosGenerated: 32,
314
+ executionTime: 4200
315
+ },
316
+ metadata: {
317
+ // Additional context
318
+ validationFramework: "invest-smart",
319
+ strictMode: true,
320
+ criteriaChecked: ["invest", "smart", "5w"]
321
+ }
322
+ })
323
+ ```
324
+
325
+ **2. Store Q-Values for Your Strategy:**
326
+ ```typescript
327
+ // Store Q-value for the strategy you used
328
+ mcp__agentic_qe__learning_store_qvalue({
329
+ agentId: "qe-requirements-validator",
330
+ stateKey: "requirements-validation-state",
331
+ actionKey: "invest-analysis",
332
+ qValue: 0.85, // Expected value of this approach (based on results)
333
+ metadata: {
334
+ // Strategy details
335
+ validationStrategy: "invest-smart-combined",
336
+ accuracy: 0.95,
337
+ completeness: 0.92
338
+ }
339
+ })
340
+ ```
341
+
342
+ **3. Store Successful Patterns:**
343
+ ```typescript
344
+ // If you discovered a useful pattern, store it
345
+ mcp__agentic_qe__learning_store_pattern({
346
+ agentId: "qe-requirements-validator",
347
+ pattern: "Vague performance requirements converted to specific percentile-based metrics",
348
+ confidence: 0.95, // How confident you are (0-1)
349
+ domain: "requirements",
350
+ metadata: {
351
+ // Pattern context
352
+ requirementPatterns: ["vague-nfr", "missing-metrics", "unclear-sla"],
353
+ testabilityPrediction: 0.92
354
+ }
355
+ })
356
+ ```
357
+
358
+ ### Learning Query (Use at Task Start)
359
+
360
+ **Before starting your task**, query for past learnings:
361
+
362
+ ```typescript
363
+ // Query for successful experiences
364
+ const pastLearnings = await mcp__agentic_qe__learning_query({
365
+ agentId: "qe-requirements-validator",
366
+ taskType: "requirements-validation",
367
+ minReward: 0.8, // Only get successful experiences
368
+ queryType: "all",
369
+ limit: 10
370
+ });
371
+
372
+ // Use the insights to optimize your current approach
373
+ if (pastLearnings.success && pastLearnings.data) {
374
+ const { experiences, qValues, patterns } = pastLearnings.data;
375
+
376
+ // Find best-performing strategy
377
+ const bestStrategy = qValues
378
+ .filter(qv => qv.state_key === "requirements-validation-state")
379
+ .sort((a, b) => b.q_value - a.q_value)[0];
380
+
381
+ if (bestStrategy) console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
382
+
383
+ // Check for relevant patterns
384
+ const relevantPatterns = patterns
385
+ .filter(p => p.domain === "requirements")
386
+ .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
387
+
388
+ if (relevantPatterns.length > 0) {
389
+ console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
390
+ }
391
+ }
392
+ ```
393
+
394
+ ### Success Criteria for Learning
395
+
396
+ **Reward Assessment (0-1 scale):**
397
+ - **1.0**: Perfect execution (All requirements testable, 100% INVEST compliance, <3s validation)
398
+ - **0.9**: Excellent (95%+ testable, 95%+ INVEST compliance, <5s validation)
399
+ - **0.7**: Good (90%+ testable, 90%+ INVEST compliance, <10s validation)
400
+ - **0.5**: Acceptable (80%+ testable, 80%+ INVEST compliance)
401
+ - **<0.5**: Needs improvement (Low testability, poor INVEST compliance)
402
+
403
+ **When to Call Learning Tools:**
404
+ - ✅ **ALWAYS** after completing main task
405
+ - ✅ **ALWAYS** after detecting significant findings
406
+ - ✅ **ALWAYS** after generating recommendations
407
+ - ✅ When discovering new effective strategies
408
+ - ✅ When achieving exceptional performance metrics
409
+
296
410
  ## Integration Points
297
411
 
298
412
  ### Upstream Dependencies