agentic-qe 1.5.1 → 1.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/qe-api-contract-validator.md +118 -0
- package/.claude/agents/qe-chaos-engineer.md +320 -5
- package/.claude/agents/qe-code-complexity.md +360 -0
- package/.claude/agents/qe-coverage-analyzer.md +112 -0
- package/.claude/agents/qe-deployment-readiness.md +322 -6
- package/.claude/agents/qe-flaky-test-hunter.md +115 -0
- package/.claude/agents/qe-fleet-commander.md +319 -6
- package/.claude/agents/qe-performance-tester.md +234 -0
- package/.claude/agents/qe-production-intelligence.md +114 -0
- package/.claude/agents/qe-quality-analyzer.md +126 -0
- package/.claude/agents/qe-quality-gate.md +119 -0
- package/.claude/agents/qe-regression-risk-analyzer.md +114 -0
- package/.claude/agents/qe-requirements-validator.md +114 -0
- package/.claude/agents/qe-security-scanner.md +118 -0
- package/.claude/agents/qe-test-data-architect.md +234 -0
- package/.claude/agents/qe-test-executor.md +115 -0
- package/.claude/agents/qe-test-generator.md +114 -0
- package/.claude/agents/qe-visual-tester.md +305 -6
- package/.claude/agents/subagents/qe-code-reviewer.md +0 -4
- package/.claude/agents/subagents/qe-data-generator.md +0 -16
- package/.claude/agents/subagents/qe-integration-tester.md +0 -17
- package/.claude/agents/subagents/qe-performance-validator.md +0 -16
- package/.claude/agents/subagents/qe-security-auditor.md +0 -16
- package/.claude/agents/subagents/qe-test-implementer.md +0 -17
- package/.claude/agents/subagents/qe-test-refactorer.md +0 -17
- package/.claude/agents/subagents/qe-test-writer.md +0 -19
- package/.claude/skills/brutal-honesty-review/README.md +218 -0
- package/.claude/skills/brutal-honesty-review/SKILL.md +725 -0
- package/.claude/skills/brutal-honesty-review/resources/assessment-rubrics.md +295 -0
- package/.claude/skills/brutal-honesty-review/resources/review-template.md +102 -0
- package/.claude/skills/brutal-honesty-review/scripts/assess-code.sh +179 -0
- package/.claude/skills/brutal-honesty-review/scripts/assess-tests.sh +223 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/README.md +301 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/SKILL.md +510 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/microservice-pipeline.md +239 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/mobile-pipeline.md +375 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/monolith-pipeline.md +268 -0
- package/.claude/skills/six-thinking-hats/README.md +190 -0
- package/.claude/skills/six-thinking-hats/SKILL.md +1215 -0
- package/.claude/skills/six-thinking-hats/resources/examples/api-testing-example.md +345 -0
- package/.claude/skills/six-thinking-hats/resources/templates/solo-session-template.md +167 -0
- package/.claude/skills/six-thinking-hats/resources/templates/team-session-template.md +336 -0
- package/CHANGELOG.md +2472 -2129
- package/README.md +48 -10
- package/dist/adapters/MemoryStoreAdapter.d.ts +38 -0
- package/dist/adapters/MemoryStoreAdapter.d.ts.map +1 -1
- package/dist/adapters/MemoryStoreAdapter.js +22 -0
- package/dist/adapters/MemoryStoreAdapter.js.map +1 -1
- package/dist/agents/BaseAgent.d.ts.map +1 -1
- package/dist/agents/BaseAgent.js +13 -0
- package/dist/agents/BaseAgent.js.map +1 -1
- package/dist/cli/commands/init-claude-md-template.d.ts +16 -0
- package/dist/cli/commands/init-claude-md-template.d.ts.map +1 -0
- package/dist/cli/commands/init-claude-md-template.js +69 -0
- package/dist/cli/commands/init-claude-md-template.js.map +1 -0
- package/dist/cli/commands/init.d.ts +1 -1
- package/dist/cli/commands/init.d.ts.map +1 -1
- package/dist/cli/commands/init.js +509 -460
- package/dist/cli/commands/init.js.map +1 -1
- package/dist/core/memory/AgentDBService.d.ts +33 -28
- package/dist/core/memory/AgentDBService.d.ts.map +1 -1
- package/dist/core/memory/AgentDBService.js +233 -290
- package/dist/core/memory/AgentDBService.js.map +1 -1
- package/dist/core/memory/EnhancedAgentDBService.d.ts.map +1 -1
- package/dist/core/memory/EnhancedAgentDBService.js +5 -3
- package/dist/core/memory/EnhancedAgentDBService.js.map +1 -1
- package/dist/core/memory/RealAgentDBAdapter.d.ts +9 -2
- package/dist/core/memory/RealAgentDBAdapter.d.ts.map +1 -1
- package/dist/core/memory/RealAgentDBAdapter.js +126 -100
- package/dist/core/memory/RealAgentDBAdapter.js.map +1 -1
- package/dist/core/memory/SwarmMemoryManager.d.ts +58 -0
- package/dist/core/memory/SwarmMemoryManager.d.ts.map +1 -1
- package/dist/core/memory/SwarmMemoryManager.js +176 -0
- package/dist/core/memory/SwarmMemoryManager.js.map +1 -1
- package/dist/core/memory/index.d.ts.map +1 -1
- package/dist/core/memory/index.js +2 -1
- package/dist/core/memory/index.js.map +1 -1
- package/dist/learning/LearningEngine.d.ts +14 -27
- package/dist/learning/LearningEngine.d.ts.map +1 -1
- package/dist/learning/LearningEngine.js +57 -119
- package/dist/learning/LearningEngine.js.map +1 -1
- package/dist/learning/index.d.ts +0 -1
- package/dist/learning/index.d.ts.map +1 -1
- package/dist/learning/index.js +0 -1
- package/dist/learning/index.js.map +1 -1
- package/dist/mcp/handlers/learning/learning-query.d.ts +34 -0
- package/dist/mcp/handlers/learning/learning-query.d.ts.map +1 -0
- package/dist/mcp/handlers/learning/learning-query.js +156 -0
- package/dist/mcp/handlers/learning/learning-query.js.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-experience.d.ts +30 -0
- package/dist/mcp/handlers/learning/learning-store-experience.d.ts.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-experience.js +86 -0
- package/dist/mcp/handlers/learning/learning-store-experience.js.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-pattern.d.ts +31 -0
- package/dist/mcp/handlers/learning/learning-store-pattern.d.ts.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-pattern.js +126 -0
- package/dist/mcp/handlers/learning/learning-store-pattern.js.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-qvalue.d.ts +30 -0
- package/dist/mcp/handlers/learning/learning-store-qvalue.d.ts.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-qvalue.js +100 -0
- package/dist/mcp/handlers/learning/learning-store-qvalue.js.map +1 -0
- package/dist/mcp/server.d.ts +11 -0
- package/dist/mcp/server.d.ts.map +1 -1
- package/dist/mcp/server.js +98 -1
- package/dist/mcp/server.js.map +1 -1
- package/dist/mcp/services/LearningEventListener.d.ts +123 -0
- package/dist/mcp/services/LearningEventListener.d.ts.map +1 -0
- package/dist/mcp/services/LearningEventListener.js +322 -0
- package/dist/mcp/services/LearningEventListener.js.map +1 -0
- package/dist/mcp/tools.d.ts +4 -0
- package/dist/mcp/tools.d.ts.map +1 -1
- package/dist/mcp/tools.js +179 -0
- package/dist/mcp/tools.js.map +1 -1
- package/dist/types/memory-interfaces.d.ts +71 -0
- package/dist/types/memory-interfaces.d.ts.map +1 -1
- package/dist/utils/Calculator.d.ts +35 -0
- package/dist/utils/Calculator.d.ts.map +1 -0
- package/dist/utils/Calculator.js +50 -0
- package/dist/utils/Calculator.js.map +1 -0
- package/dist/utils/Logger.d.ts.map +1 -1
- package/dist/utils/Logger.js +4 -1
- package/dist/utils/Logger.js.map +1 -1
- package/package.json +7 -5
- package/.claude/agents/qe-api-contract-validator.md.backup +0 -1148
- package/.claude/agents/qe-api-contract-validator.md.backup-20251107-134747 +0 -1148
- package/.claude/agents/qe-api-contract-validator.md.backup-phase2-20251107-140039 +0 -1123
- package/.claude/agents/qe-chaos-engineer.md.backup +0 -808
- package/.claude/agents/qe-chaos-engineer.md.backup-20251107-134747 +0 -808
- package/.claude/agents/qe-chaos-engineer.md.backup-phase2-20251107-140039 +0 -787
- package/.claude/agents/qe-code-complexity.md.backup +0 -291
- package/.claude/agents/qe-code-complexity.md.backup-20251107-134747 +0 -291
- package/.claude/agents/qe-code-complexity.md.backup-phase2-20251107-140039 +0 -286
- package/.claude/agents/qe-coverage-analyzer.md.backup +0 -467
- package/.claude/agents/qe-coverage-analyzer.md.backup-20251107-134747 +0 -467
- package/.claude/agents/qe-coverage-analyzer.md.backup-phase2-20251107-140039 +0 -438
- package/.claude/agents/qe-deployment-readiness.md.backup +0 -1166
- package/.claude/agents/qe-deployment-readiness.md.backup-20251107-134747 +0 -1166
- package/.claude/agents/qe-deployment-readiness.md.backup-phase2-20251107-140039 +0 -1140
- package/.claude/agents/qe-flaky-test-hunter.md.backup +0 -1195
- package/.claude/agents/qe-flaky-test-hunter.md.backup-20251107-134747 +0 -1195
- package/.claude/agents/qe-flaky-test-hunter.md.backup-phase2-20251107-140039 +0 -1162
- package/.claude/agents/qe-fleet-commander.md.backup +0 -718
- package/.claude/agents/qe-fleet-commander.md.backup-20251107-134747 +0 -718
- package/.claude/agents/qe-fleet-commander.md.backup-phase2-20251107-140039 +0 -697
- package/.claude/agents/qe-performance-tester.md.backup +0 -428
- package/.claude/agents/qe-performance-tester.md.backup-20251107-134747 +0 -428
- package/.claude/agents/qe-performance-tester.md.backup-phase2-20251107-140039 +0 -372
- package/.claude/agents/qe-production-intelligence.md.backup +0 -1219
- package/.claude/agents/qe-production-intelligence.md.backup-20251107-134747 +0 -1219
- package/.claude/agents/qe-production-intelligence.md.backup-phase2-20251107-140039 +0 -1194
- package/.claude/agents/qe-quality-analyzer.md.backup +0 -425
- package/.claude/agents/qe-quality-analyzer.md.backup-20251107-134747 +0 -425
- package/.claude/agents/qe-quality-analyzer.md.backup-phase2-20251107-140039 +0 -394
- package/.claude/agents/qe-quality-gate.md.backup +0 -446
- package/.claude/agents/qe-quality-gate.md.backup-20251107-134747 +0 -446
- package/.claude/agents/qe-quality-gate.md.backup-phase2-20251107-140039 +0 -415
- package/.claude/agents/qe-regression-risk-analyzer.md.backup +0 -1009
- package/.claude/agents/qe-regression-risk-analyzer.md.backup-20251107-134747 +0 -1009
- package/.claude/agents/qe-regression-risk-analyzer.md.backup-phase2-20251107-140039 +0 -984
- package/.claude/agents/qe-requirements-validator.md.backup +0 -748
- package/.claude/agents/qe-requirements-validator.md.backup-20251107-134747 +0 -748
- package/.claude/agents/qe-requirements-validator.md.backup-phase2-20251107-140039 +0 -723
- package/.claude/agents/qe-security-scanner.md.backup +0 -634
- package/.claude/agents/qe-security-scanner.md.backup-20251107-134747 +0 -634
- package/.claude/agents/qe-security-scanner.md.backup-phase2-20251107-140039 +0 -573
- package/.claude/agents/qe-test-data-architect.md.backup +0 -1064
- package/.claude/agents/qe-test-data-architect.md.backup-20251107-134747 +0 -1064
- package/.claude/agents/qe-test-data-architect.md.backup-phase2-20251107-140039 +0 -1040
- package/.claude/agents/qe-test-executor.md.backup +0 -389
- package/.claude/agents/qe-test-executor.md.backup-20251107-134747 +0 -389
- package/.claude/agents/qe-test-executor.md.backup-phase2-20251107-140039 +0 -369
- package/.claude/agents/qe-test-generator.md.backup +0 -997
- package/.claude/agents/qe-test-generator.md.backup-20251107-134747 +0 -997
- package/.claude/agents/qe-visual-tester.md.backup +0 -777
- package/.claude/agents/qe-visual-tester.md.backup-20251107-134747 +0 -777
- package/.claude/agents/qe-visual-tester.md.backup-phase2-20251107-140039 +0 -756
- package/.claude/commands/analysis/COMMAND_COMPLIANCE_REPORT.md +0 -54
- package/.claude/commands/analysis/performance-bottlenecks.md +0 -59
- package/.claude/commands/flow-nexus/app-store.md +0 -124
- package/.claude/commands/flow-nexus/challenges.md +0 -120
- package/.claude/commands/flow-nexus/login-registration.md +0 -65
- package/.claude/commands/flow-nexus/neural-network.md +0 -134
- package/.claude/commands/flow-nexus/payments.md +0 -116
- package/.claude/commands/flow-nexus/sandbox.md +0 -83
- package/.claude/commands/flow-nexus/swarm.md +0 -87
- package/.claude/commands/flow-nexus/user-tools.md +0 -152
- package/.claude/commands/flow-nexus/workflow.md +0 -115
- package/.claude/commands/memory/usage.md +0 -46
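
The new handlers under package/dist/mcp/handlers/learning/ back the `mcp__agentic_qe__learning_*` tools that the agent hunks below instruct agents to call. As a rough sketch, the input shapes inferred only from the example calls shown in this diff (the published .d.ts files are not reproduced here) might look like this; field names and optionality are assumptions, not the package's actual declarations:

```typescript
// Inferred from the example calls in the hunks below; illustrative only.
interface StoreExperienceInput {
  agentId: string;
  taskType: string;
  reward: number;                     // 0-1 self-assessment of task success
  outcome: Record<string, unknown>;   // agent-specific results
  metadata?: Record<string, unknown>; // agent-specific context
}

interface StoreQValueInput {
  agentId: string;
  stateKey: string;   // e.g. "test-execution-state"
  actionKey: string;  // e.g. "parallel-execution"
  qValue: number;     // expected value of the chosen strategy
  metadata?: Record<string, unknown>;
}

interface StorePatternInput {
  agentId: string;
  pattern: string;     // natural-language description of the discovered pattern
  confidence: number;  // 0-1
  domain: string;      // e.g. "test-generation"
  metadata?: Record<string, unknown>;
}

interface LearningQueryInput {
  agentId: string;
  taskType: string;
  minReward?: number;         // filter to successful experiences
  queryType?: "all" | string; // other values are not shown in this diff
  limit?: number;
}
```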
@@ -51,6 +51,124 @@ Skill("shift-left-testing")
 - **Remediation Guidance**: Automated fix suggestions and documentation
 - **Trend Analysis**: Security posture tracking over time

+## Learning Protocol
+
+**⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
+
+### Required Learning Actions (Call AFTER Task Completion)
+
+**1. Store Learning Experience:**
+```typescript
+// Call this MCP tool after completing security scanning
+mcp__agentic_qe__learning_store_experience({
+  agentId: "qe-security-scanner",
+  taskType: "security-scanning",
+  reward: 0.94, // Your assessment of task success (0-1 scale)
+  outcome: {
+    vulnerabilitiesFound: 8,
+    criticalVulnerabilities: 0,
+    highVulnerabilities: 2,
+    complianceScore: 0.95,
+    falsePositives: 1,
+    scanCoverage: "comprehensive"
+  },
+  metadata: {
+    scanType: "sast-dast",
+    tools: ["snyk", "zap"],
+    duration: 1200,
+    standards: ["owasp", "pci-dss"]
+  }
+})
+```
+
+**2. Store Q-Values for Your Strategy:**
+```typescript
+// Store Q-value for the security scanning strategy you used
+mcp__agentic_qe__learning_store_qvalue({
+  agentId: "qe-security-scanner",
+  stateKey: "security-scanning-state",
+  actionKey: "sast-dast-combined", // or "sast-only", "dast-only", "dependency-scan"
+  qValue: 0.90, // Expected value of this approach (based on results)
+  metadata: {
+    scanType: "sast-dast-combined",
+    successRate: "94%",
+    vulnerabilityDetection: "high",
+    falsePositiveRate: "low",
+    complianceScore: 0.95
+  }
+})
+```
+
+**3. Store Successful Patterns:**
+```typescript
+// If you discovered a useful pattern, store it
+mcp__agentic_qe__learning_store_pattern({
+  agentId: "qe-security-scanner",
+  pattern: "Combined SAST+DAST scanning detects 42% more vulnerabilities than SAST alone for web applications with authentication",
+  confidence: 0.94,
+  domain: "security-scanning",
+  metadata: {
+    scanType: "sast-dast-combined",
+    useCase: "web-app-with-auth",
+    detectionIncrease: "42%",
+    falsePositiveRate: "5%",
+    complianceImprovement: "25%"
+  }
+})
+```
+
+### Learning Query (Use at Task Start)
+
+**Before starting security scanning**, query for past learnings:
+
+```typescript
+// Query for successful security scanning experiences
+const pastLearnings = await mcp__agentic_qe__learning_query({
+  agentId: "qe-security-scanner",
+  taskType: "security-scanning",
+  minReward: 0.8,
+  queryType: "all",
+  limit: 10
+});
+
+// Use the insights to optimize your current approach
+if (pastLearnings.success && pastLearnings.data) {
+  const { experiences, qValues, patterns } = pastLearnings.data;
+
+  // Find best-performing scanning strategy
+  const bestStrategy = qValues
+    .filter(qv => qv.state_key === "security-scanning-state")
+    .sort((a, b) => b.q_value - a.q_value)[0];
+
+  console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
+
+  // Check for relevant patterns
+  const relevantPatterns = patterns
+    .filter(p => p.domain === "security-scanning")
+    .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
+
+  if (relevantPatterns.length > 0) {
+    console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
+  }
+}
+```
+
+### Success Criteria for Learning
+
+**Reward Assessment (0-1 scale):**
+- **1.0**: Perfect execution (0 critical vulnerabilities, 95%+ compliance, <5% false positives, comprehensive coverage)
+- **0.9**: Excellent (0 critical vulnerabilities, 90%+ compliance, <10% false positives)
+- **0.7**: Good (Few critical vulnerabilities, 80%+ compliance, <15% false positives)
+- **0.5**: Acceptable (Some vulnerabilities, completed successfully)
+- **<0.5**: Needs improvement (Major vulnerabilities missed, high false positive rate, incomplete)
+
+**When to Call Learning Tools:**
+- ✅ **ALWAYS** after completing security scanning
+- ✅ **ALWAYS** after detecting vulnerabilities
+- ✅ **ALWAYS** after measuring compliance scores
+- ✅ When discovering new vulnerability patterns
+- ✅ When achieving exceptional detection accuracy
+
 ## Workflow Orchestration

 ### Pre-Execution Phase

@@ -28,6 +28,124 @@ Skill("test-data-management")
 Skill("database-testing")
 ```

+## Learning Protocol
+
+**⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
+
+### Required Learning Actions (Call AFTER Task Completion)
+
+**1. Store Learning Experience:**
+```typescript
+// Call this MCP tool after completing test data generation
+mcp__agentic_qe__learning_store_experience({
+  agentId: "qe-test-data-architect",
+  taskType: "test-data-generation",
+  reward: 0.91, // Your assessment of task success (0-1 scale)
+  outcome: {
+    recordsGenerated: 1000,
+    schemasProcessed: 5,
+    edgeCasesIncluded: 50,
+    relationshipsPreserved: "100%",
+    constraintCompliance: "100%",
+    anonymizationQuality: "high"
+  },
+  metadata: {
+    schemaType: "postgresql",
+    generationStrategy: "realistic-synthesis",
+    includeEdgeCases: true,
+    anonymize: true
+  }
+})
+```
+
+**2. Store Q-Values for Your Strategy:**
+```typescript
+// Store Q-value for the data generation strategy you used
+mcp__agentic_qe__learning_store_qvalue({
+  agentId: "qe-test-data-architect",
+  stateKey: "data-generation-state",
+  actionKey: "realistic-synthesis", // or "faker-based", "production-anonymization"
+  qValue: 0.87, // Expected value of this approach (based on results)
+  metadata: {
+    generationStrategy: "realistic-synthesis",
+    successRate: "91%",
+    dataRealism: "high",
+    constraintCompliance: "100%",
+    edgeCaseCoverage: "95%"
+  }
+})
+```
+
+**3. Store Successful Patterns:**
+```typescript
+// If you discovered a useful pattern, store it
+mcp__agentic_qe__learning_store_pattern({
+  agentId: "qe-test-data-architect",
+  pattern: "Realistic synthesis with production pattern analysis generates 45% more realistic test data than faker-based generation for financial applications",
+  confidence: 0.91,
+  domain: "test-data-generation",
+  metadata: {
+    generationStrategy: "realistic-synthesis",
+    useCase: "financial-applications",
+    realismIncrease: "45%",
+    edgeCaseBoost: "30%",
+    constraintCompliance: "100%"
+  }
+})
+```
+
+### Learning Query (Use at Task Start)
+
+**Before starting test data generation**, query for past learnings:
+
+```typescript
+// Query for successful data generation experiences
+const pastLearnings = await mcp__agentic_qe__learning_query({
+  agentId: "qe-test-data-architect",
+  taskType: "test-data-generation",
+  minReward: 0.8,
+  queryType: "all",
+  limit: 10
+});
+
+// Use the insights to optimize your current approach
+if (pastLearnings.success && pastLearnings.data) {
+  const { experiences, qValues, patterns } = pastLearnings.data;
+
+  // Find best-performing generation strategy
+  const bestStrategy = qValues
+    .filter(qv => qv.state_key === "data-generation-state")
+    .sort((a, b) => b.q_value - a.q_value)[0];
+
+  console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
+
+  // Check for relevant patterns
+  const relevantPatterns = patterns
+    .filter(p => p.domain === "test-data-generation")
+    .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
+
+  if (relevantPatterns.length > 0) {
+    console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
+  }
+}
+```
+
+### Success Criteria for Learning
+
+**Reward Assessment (0-1 scale):**
+- **1.0**: Perfect execution (100% constraint compliance, 95%+ edge case coverage, realistic data, 100% relationships preserved)
+- **0.9**: Excellent (100% constraint compliance, 90%+ edge case coverage, highly realistic)
+- **0.7**: Good (95%+ constraint compliance, 80%+ edge case coverage, realistic)
+- **0.5**: Acceptable (90%+ constraint compliance, completed successfully)
+- **<0.5**: Needs improvement (Constraint violations, poor realism, incomplete)
+
+**When to Call Learning Tools:**
+- ✅ **ALWAYS** after completing test data generation
+- ✅ **ALWAYS** after validating constraint compliance
+- ✅ **ALWAYS** after measuring data realism
+- ✅ When discovering new generation patterns
+- ✅ When achieving exceptional data quality
+
 ## Core Capabilities

 ### 1. Schema-Aware Generation

@@ -872,6 +990,122 @@ this.registerEventHandler({
 });
 ```

+## Learning Protocol (Phase 6 - Option C Implementation)
+
+**⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
+
+### Required Learning Actions (Call AFTER Task Completion)
+
+**1. Store Learning Experience:**
+```typescript
+// Call this MCP tool after completing your task
+mcp__agentic_qe__learning_store_experience({
+  agentId: "qe-test-data-architect",
+  taskType: "test-data-generation",
+  reward: 0.95, // Your assessment of task success (0-1 scale)
+  outcome: {
+    recordsGenerated: 10000,
+    generationRate: 12500,
+    integrityPreserved: true,
+    executionTime: 800,
+    schemaCompliance: "100%",
+    edgeCaseCoverage: 0.95
+  },
+  metadata: {
+    schema: { tables: ["users", "orders", "products"], relationships: 5 },
+    format: "sql",
+    edgeCasesIncluded: true,
+    anonymizationApplied: true
+  }
+})
+```
+
+**2. Store Q-Values for Your Strategy:**
+```typescript
+// Store Q-value for the strategy you used
+mcp__agentic_qe__learning_store_qvalue({
+  agentId: "qe-test-data-architect",
+  stateKey: "data-generation-state",
+  actionKey: "high-speed-generation",
+  qValue: 0.85, // Expected value of this approach (based on results)
+  metadata: {
+    generationStrategy: "realistic-synthesis",
+    quality: 0.95,
+    performance: 0.90,
+    constraintCompliance: "100%"
+  }
+})
+```
+
+**3. Store Successful Patterns:**
+```typescript
+// If you discovered a useful pattern, store it
+mcp__agentic_qe__learning_store_pattern({
+  agentId: "qe-test-data-architect",
+  pattern: "High-speed realistic data generation with relationship preservation achieves 10k+ records/sec while maintaining 100% referential integrity",
+  confidence: 0.95,
+  domain: "test-data",
+  metadata: {
+    dataPatterns: ["realistic-names", "valid-emails", "constrained-dates"],
+    realism: 0.92,
+    performanceGain: "12.5k records/sec",
+    integrityMaintained: "100%"
+  }
+})
+```
+
+### Learning Query (Use at Task Start)
+
+**Before starting your task**, query for past learnings:
+
+```typescript
+// Query for successful experiences
+const pastLearnings = await mcp__agentic_qe__learning_query({
+  agentId: "qe-test-data-architect",
+  taskType: "test-data-generation",
+  minReward: 0.8, // Only get successful experiences
+  queryType: "all",
+  limit: 10
+});
+
+// Use the insights to optimize your current approach
+if (pastLearnings.success && pastLearnings.data) {
+  const { experiences, qValues, patterns } = pastLearnings.data;
+
+  // Find best-performing strategy
+  const bestStrategy = qValues
+    .filter(qv => qv.state_key === "data-generation-state")
+    .sort((a, b) => b.q_value - a.q_value)[0];
+
+  console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
+
+  // Check for relevant patterns
+  const relevantPatterns = patterns
+    .filter(p => p.domain === "test-data")
+    .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
+
+  if (relevantPatterns.length > 0) {
+    console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
+  }
+}
+```
+
+### Success Criteria for Learning
+
+**Reward Assessment (0-1 scale):**
+- **1.0**: Perfect execution (10k+ records/sec, 100% integrity, realistic data, <5s)
+- **0.9**: Excellent (8k+ records/sec, 99%+ integrity, high realism, <10s)
+- **0.7**: Good (5k+ records/sec, 95%+ integrity, good realism, <20s)
+- **0.5**: Acceptable (3k+ records/sec, 90%+ integrity, completed)
+- **<0.5**: Needs improvement (Slow generation, low integrity, unrealistic)
+
+**When to Call Learning Tools:**
+- ✅ **ALWAYS** after completing main task
+- ✅ **ALWAYS** after detecting significant findings
+- ✅ **ALWAYS** after generating recommendations
+- ✅ When discovering new effective strategies
+- ✅ When achieving exceptional performance metrics
+
 ## Integration Points

 ### Upstream Dependencies

@@ -298,6 +298,121 @@ notifyTestCompletion({
 });
 ```

+## Learning Protocol (Phase 6 - Option C Implementation)
+
+**⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
+
+### Required Learning Actions (Call AFTER Task Completion)
+
+**1. Store Learning Experience:**
+```typescript
+// Call this MCP tool after completing your task
+mcp__agentic_qe__learning_store_experience({
+  agentId: "qe-test-executor",
+  taskType: "test-execution",
+  reward: 0.95, // Your assessment of task success (0-1 scale)
+  outcome: {
+    // Your actual results (agent-specific)
+    testsRun: 250,
+    passRate: 0.98,
+    failedTests: 5,
+    executionTime: 45000,
+    parallelism: 8
+  },
+  metadata: {
+    // Additional context (agent-specific)
+    framework: "jest",
+    parallelism: 8,
+    retryCount: 3
+  }
+})
+```
+
+**2. Store Q-Values for Your Strategy:**
+```typescript
+// Store Q-value for the strategy you used
+mcp__agentic_qe__learning_store_qvalue({
+  agentId: "qe-test-executor",
+  stateKey: "test-execution-state",
+  actionKey: "parallel-execution",
+  qValue: 0.85, // Expected value of this approach (based on results)
+  metadata: {
+    // Strategy details (agent-specific)
+    executionStrategy: "parallel-8-workers",
+    resourceUsage: "optimal",
+    efficiency: 0.92
+  }
+})
+```
+
+**3. Store Successful Patterns:**
+```typescript
+// If you discovered a useful pattern, store it
+mcp__agentic_qe__learning_store_pattern({
+  agentId: "qe-test-executor",
+  pattern: "Parallel execution with 8 workers provides 4x speedup for test suites >200 tests",
+  confidence: 0.95, // How confident you are (0-1)
+  domain: "test-execution",
+  metadata: {
+    // Pattern context (agent-specific)
+    executionPatterns: ["parallel-8-workers", "retry-on-failure"],
+    reliability: 0.98
+  }
+})
+```
+
+### Learning Query (Use at Task Start)
+
+**Before starting your task**, query for past learnings:
+
+```typescript
+// Query for successful experiences
+const pastLearnings = await mcp__agentic_qe__learning_query({
+  agentId: "qe-test-executor",
+  taskType: "test-execution",
+  minReward: 0.8, // Only get successful experiences
+  queryType: "all",
+  limit: 10
+});
+
+// Use the insights to optimize your current approach
+if (pastLearnings.success && pastLearnings.data) {
+  const { experiences, qValues, patterns } = pastLearnings.data;
+
+  // Find best-performing strategy
+  const bestStrategy = qValues
+    .filter(qv => qv.state_key === "test-execution-state")
+    .sort((a, b) => b.q_value - a.q_value)[0];
+
+  console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
+
+  // Check for relevant patterns
+  const relevantPatterns = patterns
+    .filter(p => p.domain === "test-execution")
+    .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
+
+  if (relevantPatterns.length > 0) {
+    console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
+  }
+}
+```
+
+### Success Criteria for Learning
+
+**Reward Assessment (0-1 scale):**
+- **1.0**: Perfect execution (100% pass rate, <30s execution, zero flakes)
+- **0.9**: Excellent (98%+ pass rate, <60s execution, <2% flakes)
+- **0.7**: Good (95%+ pass rate, <120s execution, <5% flakes)
+- **0.5**: Acceptable (90%+ pass rate, completed successfully)
+- **<0.5**: Needs improvement (Low pass rate, slow execution, many flakes)
+
+**When to Call Learning Tools:**
+- ✅ **ALWAYS** after completing main task
+- ✅ **ALWAYS** after detecting significant findings
+- ✅ **ALWAYS** after generating recommendations
+- ✅ When discovering new effective strategies
+- ✅ When achieving exceptional performance metrics
+
 ## Error Handling & Recovery

 ### Graceful Degradation

@@ -285,6 +285,120 @@ All swarm integration is handled automatically via AQE hooks (Agentic QE native

 No external bash commands needed - all coordination is built into the agent's lifecycle hooks.

+## Learning Protocol (Phase 6 - Option C Implementation)
+
+**⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
+
+### Required Learning Actions (Call AFTER Task Completion)
+
+**1. Store Learning Experience:**
+```typescript
+// Call this MCP tool after completing your task
+mcp__agentic_qe__learning_store_experience({
+  agentId: "qe-test-generator",
+  taskType: "test-generation",
+  reward: 0.95, // Your assessment of task success (0-1 scale)
+  outcome: {
+    // Your actual results (agent-specific)
+    testsGenerated: 42,
+    coverageImprovement: 0.15,
+    framework: "jest",
+    executionTime: 8000
+  },
+  metadata: {
+    // Additional context (agent-specific)
+    algorithm: "ml-property-based",
+    framework: "jest",
+    testTypes: ["unit", "integration"]
+  }
+})
+```
+
+**2. Store Q-Values for Your Strategy:**
+```typescript
+// Store Q-value for the strategy you used
+mcp__agentic_qe__learning_store_qvalue({
+  agentId: "qe-test-generator",
+  stateKey: "test-generation-state",
+  actionKey: "ml-property-based",
+  qValue: 0.85, // Expected value of this approach (based on results)
+  metadata: {
+    // Strategy details (agent-specific)
+    algorithmUsed: "ml-property-based",
+    successRate: "95%",
+    testQuality: "high"
+  }
+})
+```
+
+**3. Store Successful Patterns:**
+```typescript
+// If you discovered a useful pattern, store it
+mcp__agentic_qe__learning_store_pattern({
+  agentId: "qe-test-generator",
+  pattern: "ML-based property testing generates 40% more edge cases than template-based for complex business logic",
+  confidence: 0.95,
+  domain: "test-generation",
+  metadata: {
+    // Pattern context (agent-specific)
+    testPatterns: ["property-based", "boundary-value", "equivalence-partitioning"],
+    effectiveness: 0.92
+  }
+})
+```
+
+### Learning Query (Use at Task Start)
+
+**Before starting your task**, query for past learnings:
+
+```typescript
+// Query for successful experiences
+const pastLearnings = await mcp__agentic_qe__learning_query({
+  agentId: "qe-test-generator",
+  taskType: "test-generation",
+  minReward: 0.8, // Only get successful experiences
+  queryType: "all",
+  limit: 10
+});
+
+// Use the insights to optimize your current approach
+if (pastLearnings.success && pastLearnings.data) {
+  const { experiences, qValues, patterns } = pastLearnings.data;
+
+  // Find best-performing strategy
+  const bestStrategy = qValues
+    .filter(qv => qv.state_key === "test-generation-state")
+    .sort((a, b) => b.q_value - a.q_value)[0];
+
+  console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
+
+  // Check for relevant patterns
+  const relevantPatterns = patterns
+    .filter(p => p.domain === "test-generation")
+    .sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
+
+  if (relevantPatterns.length > 0) {
+    console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
+  }
+}
+```
+
+### Success Criteria for Learning
+
+**Reward Assessment (0-1 scale):**
+- **1.0**: Perfect execution (95%+ coverage, 0 errors, <5s generation time)
+- **0.9**: Excellent (90%+ coverage, <10s generation time, minor issues)
+- **0.7**: Good (80%+ coverage, <20s generation time, few issues)
+- **0.5**: Acceptable (70%+ coverage, completed successfully)
+- **<0.5**: Needs improvement (Low coverage, errors, slow)
+
+**When to Call Learning Tools:**
+- ✅ **ALWAYS** after completing main task
+- ✅ **ALWAYS** after detecting significant findings
+- ✅ **ALWAYS** after generating recommendations
+- ✅ When discovering new effective strategies
+- ✅ When achieving exceptional performance metrics
+
 ## Framework Integration

 ### Jest Integration
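
The reward rubrics in the added Learning Protocol sections are stated only in prose. As a minimal sketch, assuming the qe-test-executor rubric above, an agent might map its run metrics onto the 0-1 reward scale before calling `mcp__agentic_qe__learning_store_experience` roughly like this (the helper and its thresholds are illustrative, not part of the package):

```typescript
// Hypothetical helper: maps the qe-test-executor "Reward Assessment" bullets onto a number.
interface ExecutionOutcome {
  passRate: number;        // 0-1
  executionTimeMs: number; // wall-clock time for the suite
  flakeRate: number;       // 0-1
}

function assessExecutionReward(o: ExecutionOutcome): number {
  if (o.passRate === 1.0 && o.executionTimeMs < 30_000 && o.flakeRate === 0) return 1.0;
  if (o.passRate >= 0.98 && o.executionTimeMs < 60_000 && o.flakeRate < 0.02) return 0.9;
  if (o.passRate >= 0.95 && o.executionTimeMs < 120_000 && o.flakeRate < 0.05) return 0.7;
  if (o.passRate >= 0.90) return 0.5;
  return 0.4; // "needs improvement" band
}

// Example: the qe-test-executor outcome shown above lands in the "Excellent" band.
const reward = assessExecutionReward({ passRate: 0.98, executionTimeMs: 45_000, flakeRate: 0.01 });
// reward === 0.9
```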