agentic-qe 1.5.1 → 1.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/qe-api-contract-validator.md +118 -0
- package/.claude/agents/qe-chaos-engineer.md +320 -5
- package/.claude/agents/qe-code-complexity.md +360 -0
- package/.claude/agents/qe-coverage-analyzer.md +112 -0
- package/.claude/agents/qe-deployment-readiness.md +322 -6
- package/.claude/agents/qe-flaky-test-hunter.md +115 -0
- package/.claude/agents/qe-fleet-commander.md +319 -6
- package/.claude/agents/qe-performance-tester.md +234 -0
- package/.claude/agents/qe-production-intelligence.md +114 -0
- package/.claude/agents/qe-quality-analyzer.md +126 -0
- package/.claude/agents/qe-quality-gate.md +119 -0
- package/.claude/agents/qe-regression-risk-analyzer.md +114 -0
- package/.claude/agents/qe-requirements-validator.md +114 -0
- package/.claude/agents/qe-security-scanner.md +118 -0
- package/.claude/agents/qe-test-data-architect.md +234 -0
- package/.claude/agents/qe-test-executor.md +115 -0
- package/.claude/agents/qe-test-generator.md +114 -0
- package/.claude/agents/qe-visual-tester.md +305 -6
- package/.claude/agents/subagents/qe-code-reviewer.md +0 -4
- package/.claude/agents/subagents/qe-data-generator.md +0 -16
- package/.claude/agents/subagents/qe-integration-tester.md +0 -17
- package/.claude/agents/subagents/qe-performance-validator.md +0 -16
- package/.claude/agents/subagents/qe-security-auditor.md +0 -16
- package/.claude/agents/subagents/qe-test-implementer.md +0 -17
- package/.claude/agents/subagents/qe-test-refactorer.md +0 -17
- package/.claude/agents/subagents/qe-test-writer.md +0 -19
- package/.claude/skills/brutal-honesty-review/README.md +218 -0
- package/.claude/skills/brutal-honesty-review/SKILL.md +725 -0
- package/.claude/skills/brutal-honesty-review/resources/assessment-rubrics.md +295 -0
- package/.claude/skills/brutal-honesty-review/resources/review-template.md +102 -0
- package/.claude/skills/brutal-honesty-review/scripts/assess-code.sh +179 -0
- package/.claude/skills/brutal-honesty-review/scripts/assess-tests.sh +223 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/README.md +301 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/SKILL.md +510 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/microservice-pipeline.md +239 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/mobile-pipeline.md +375 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/monolith-pipeline.md +268 -0
- package/.claude/skills/six-thinking-hats/README.md +190 -0
- package/.claude/skills/six-thinking-hats/SKILL.md +1215 -0
- package/.claude/skills/six-thinking-hats/resources/examples/api-testing-example.md +345 -0
- package/.claude/skills/six-thinking-hats/resources/templates/solo-session-template.md +167 -0
- package/.claude/skills/six-thinking-hats/resources/templates/team-session-template.md +336 -0
- package/CHANGELOG.md +2472 -2129
- package/README.md +48 -10
- package/dist/adapters/MemoryStoreAdapter.d.ts +38 -0
- package/dist/adapters/MemoryStoreAdapter.d.ts.map +1 -1
- package/dist/adapters/MemoryStoreAdapter.js +22 -0
- package/dist/adapters/MemoryStoreAdapter.js.map +1 -1
- package/dist/agents/BaseAgent.d.ts.map +1 -1
- package/dist/agents/BaseAgent.js +13 -0
- package/dist/agents/BaseAgent.js.map +1 -1
- package/dist/cli/commands/init-claude-md-template.d.ts +16 -0
- package/dist/cli/commands/init-claude-md-template.d.ts.map +1 -0
- package/dist/cli/commands/init-claude-md-template.js +69 -0
- package/dist/cli/commands/init-claude-md-template.js.map +1 -0
- package/dist/cli/commands/init.d.ts +1 -1
- package/dist/cli/commands/init.d.ts.map +1 -1
- package/dist/cli/commands/init.js +509 -460
- package/dist/cli/commands/init.js.map +1 -1
- package/dist/core/memory/AgentDBService.d.ts +33 -28
- package/dist/core/memory/AgentDBService.d.ts.map +1 -1
- package/dist/core/memory/AgentDBService.js +233 -290
- package/dist/core/memory/AgentDBService.js.map +1 -1
- package/dist/core/memory/EnhancedAgentDBService.d.ts.map +1 -1
- package/dist/core/memory/EnhancedAgentDBService.js +5 -3
- package/dist/core/memory/EnhancedAgentDBService.js.map +1 -1
- package/dist/core/memory/RealAgentDBAdapter.d.ts +9 -2
- package/dist/core/memory/RealAgentDBAdapter.d.ts.map +1 -1
- package/dist/core/memory/RealAgentDBAdapter.js +126 -100
- package/dist/core/memory/RealAgentDBAdapter.js.map +1 -1
- package/dist/core/memory/SwarmMemoryManager.d.ts +58 -0
- package/dist/core/memory/SwarmMemoryManager.d.ts.map +1 -1
- package/dist/core/memory/SwarmMemoryManager.js +176 -0
- package/dist/core/memory/SwarmMemoryManager.js.map +1 -1
- package/dist/core/memory/index.d.ts.map +1 -1
- package/dist/core/memory/index.js +2 -1
- package/dist/core/memory/index.js.map +1 -1
- package/dist/learning/LearningEngine.d.ts +14 -27
- package/dist/learning/LearningEngine.d.ts.map +1 -1
- package/dist/learning/LearningEngine.js +57 -119
- package/dist/learning/LearningEngine.js.map +1 -1
- package/dist/learning/index.d.ts +0 -1
- package/dist/learning/index.d.ts.map +1 -1
- package/dist/learning/index.js +0 -1
- package/dist/learning/index.js.map +1 -1
- package/dist/mcp/handlers/learning/learning-query.d.ts +34 -0
- package/dist/mcp/handlers/learning/learning-query.d.ts.map +1 -0
- package/dist/mcp/handlers/learning/learning-query.js +156 -0
- package/dist/mcp/handlers/learning/learning-query.js.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-experience.d.ts +30 -0
- package/dist/mcp/handlers/learning/learning-store-experience.d.ts.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-experience.js +86 -0
- package/dist/mcp/handlers/learning/learning-store-experience.js.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-pattern.d.ts +31 -0
- package/dist/mcp/handlers/learning/learning-store-pattern.d.ts.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-pattern.js +126 -0
- package/dist/mcp/handlers/learning/learning-store-pattern.js.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-qvalue.d.ts +30 -0
- package/dist/mcp/handlers/learning/learning-store-qvalue.d.ts.map +1 -0
- package/dist/mcp/handlers/learning/learning-store-qvalue.js +100 -0
- package/dist/mcp/handlers/learning/learning-store-qvalue.js.map +1 -0
- package/dist/mcp/server.d.ts +11 -0
- package/dist/mcp/server.d.ts.map +1 -1
- package/dist/mcp/server.js +98 -1
- package/dist/mcp/server.js.map +1 -1
- package/dist/mcp/services/LearningEventListener.d.ts +123 -0
- package/dist/mcp/services/LearningEventListener.d.ts.map +1 -0
- package/dist/mcp/services/LearningEventListener.js +322 -0
- package/dist/mcp/services/LearningEventListener.js.map +1 -0
- package/dist/mcp/tools.d.ts +4 -0
- package/dist/mcp/tools.d.ts.map +1 -1
- package/dist/mcp/tools.js +179 -0
- package/dist/mcp/tools.js.map +1 -1
- package/dist/types/memory-interfaces.d.ts +71 -0
- package/dist/types/memory-interfaces.d.ts.map +1 -1
- package/dist/utils/Calculator.d.ts +35 -0
- package/dist/utils/Calculator.d.ts.map +1 -0
- package/dist/utils/Calculator.js +50 -0
- package/dist/utils/Calculator.js.map +1 -0
- package/dist/utils/Logger.d.ts.map +1 -1
- package/dist/utils/Logger.js +4 -1
- package/dist/utils/Logger.js.map +1 -1
- package/package.json +7 -5
- package/.claude/agents/qe-api-contract-validator.md.backup +0 -1148
- package/.claude/agents/qe-api-contract-validator.md.backup-20251107-134747 +0 -1148
- package/.claude/agents/qe-api-contract-validator.md.backup-phase2-20251107-140039 +0 -1123
- package/.claude/agents/qe-chaos-engineer.md.backup +0 -808
- package/.claude/agents/qe-chaos-engineer.md.backup-20251107-134747 +0 -808
- package/.claude/agents/qe-chaos-engineer.md.backup-phase2-20251107-140039 +0 -787
- package/.claude/agents/qe-code-complexity.md.backup +0 -291
- package/.claude/agents/qe-code-complexity.md.backup-20251107-134747 +0 -291
- package/.claude/agents/qe-code-complexity.md.backup-phase2-20251107-140039 +0 -286
- package/.claude/agents/qe-coverage-analyzer.md.backup +0 -467
- package/.claude/agents/qe-coverage-analyzer.md.backup-20251107-134747 +0 -467
- package/.claude/agents/qe-coverage-analyzer.md.backup-phase2-20251107-140039 +0 -438
- package/.claude/agents/qe-deployment-readiness.md.backup +0 -1166
- package/.claude/agents/qe-deployment-readiness.md.backup-20251107-134747 +0 -1166
- package/.claude/agents/qe-deployment-readiness.md.backup-phase2-20251107-140039 +0 -1140
- package/.claude/agents/qe-flaky-test-hunter.md.backup +0 -1195
- package/.claude/agents/qe-flaky-test-hunter.md.backup-20251107-134747 +0 -1195
- package/.claude/agents/qe-flaky-test-hunter.md.backup-phase2-20251107-140039 +0 -1162
- package/.claude/agents/qe-fleet-commander.md.backup +0 -718
- package/.claude/agents/qe-fleet-commander.md.backup-20251107-134747 +0 -718
- package/.claude/agents/qe-fleet-commander.md.backup-phase2-20251107-140039 +0 -697
- package/.claude/agents/qe-performance-tester.md.backup +0 -428
- package/.claude/agents/qe-performance-tester.md.backup-20251107-134747 +0 -428
- package/.claude/agents/qe-performance-tester.md.backup-phase2-20251107-140039 +0 -372
- package/.claude/agents/qe-production-intelligence.md.backup +0 -1219
- package/.claude/agents/qe-production-intelligence.md.backup-20251107-134747 +0 -1219
- package/.claude/agents/qe-production-intelligence.md.backup-phase2-20251107-140039 +0 -1194
- package/.claude/agents/qe-quality-analyzer.md.backup +0 -425
- package/.claude/agents/qe-quality-analyzer.md.backup-20251107-134747 +0 -425
- package/.claude/agents/qe-quality-analyzer.md.backup-phase2-20251107-140039 +0 -394
- package/.claude/agents/qe-quality-gate.md.backup +0 -446
- package/.claude/agents/qe-quality-gate.md.backup-20251107-134747 +0 -446
- package/.claude/agents/qe-quality-gate.md.backup-phase2-20251107-140039 +0 -415
- package/.claude/agents/qe-regression-risk-analyzer.md.backup +0 -1009
- package/.claude/agents/qe-regression-risk-analyzer.md.backup-20251107-134747 +0 -1009
- package/.claude/agents/qe-regression-risk-analyzer.md.backup-phase2-20251107-140039 +0 -984
- package/.claude/agents/qe-requirements-validator.md.backup +0 -748
- package/.claude/agents/qe-requirements-validator.md.backup-20251107-134747 +0 -748
- package/.claude/agents/qe-requirements-validator.md.backup-phase2-20251107-140039 +0 -723
- package/.claude/agents/qe-security-scanner.md.backup +0 -634
- package/.claude/agents/qe-security-scanner.md.backup-20251107-134747 +0 -634
- package/.claude/agents/qe-security-scanner.md.backup-phase2-20251107-140039 +0 -573
- package/.claude/agents/qe-test-data-architect.md.backup +0 -1064
- package/.claude/agents/qe-test-data-architect.md.backup-20251107-134747 +0 -1064
- package/.claude/agents/qe-test-data-architect.md.backup-phase2-20251107-140039 +0 -1040
- package/.claude/agents/qe-test-executor.md.backup +0 -389
- package/.claude/agents/qe-test-executor.md.backup-20251107-134747 +0 -389
- package/.claude/agents/qe-test-executor.md.backup-phase2-20251107-140039 +0 -369
- package/.claude/agents/qe-test-generator.md.backup +0 -997
- package/.claude/agents/qe-test-generator.md.backup-20251107-134747 +0 -997
- package/.claude/agents/qe-visual-tester.md.backup +0 -777
- package/.claude/agents/qe-visual-tester.md.backup-20251107-134747 +0 -777
- package/.claude/agents/qe-visual-tester.md.backup-phase2-20251107-140039 +0 -756
- package/.claude/commands/analysis/COMMAND_COMPLIANCE_REPORT.md +0 -54
- package/.claude/commands/analysis/performance-bottlenecks.md +0 -59
- package/.claude/commands/flow-nexus/app-store.md +0 -124
- package/.claude/commands/flow-nexus/challenges.md +0 -120
- package/.claude/commands/flow-nexus/login-registration.md +0 -65
- package/.claude/commands/flow-nexus/neural-network.md +0 -134
- package/.claude/commands/flow-nexus/payments.md +0 -116
- package/.claude/commands/flow-nexus/sandbox.md +0 -83
- package/.claude/commands/flow-nexus/swarm.md +0 -87
- package/.claude/commands/flow-nexus/user-tools.md +0 -152
- package/.claude/commands/flow-nexus/workflow.md +0 -115
- package/.claude/commands/memory/usage.md +0 -46
|
@@ -191,36 +191,123 @@ This agent uses **AQE hooks (Agentic QE native hooks)** for coordination (zero e
|
|
|
191
191
|
|
|
192
192
|
**Automatic Lifecycle Hooks:**
|
|
193
193
|
```typescript
|
|
194
|
-
//
|
|
194
|
+
// Called automatically by BaseAgent
|
|
195
195
|
protected async onPreTask(data: { assignment: TaskAssignment }): Promise<void> {
|
|
196
196
|
// Load fleet topology and active agents
|
|
197
197
|
const topology = await this.memoryStore.retrieve('aqe/fleet/topology');
|
|
198
198
|
const activeAgents = await this.memoryStore.retrieve('aqe/fleet/agents/active');
|
|
199
199
|
|
|
200
|
+
// Verify environment for fleet orchestration
|
|
201
|
+
const verification = await this.hookManager.executePreTaskVerification({
|
|
202
|
+
task: 'fleet-orchestration',
|
|
203
|
+
context: {
|
|
204
|
+
requiredVars: ['FLEET_MODE', 'MAX_AGENTS'],
|
|
205
|
+
minMemoryMB: 2048,
|
|
206
|
+
requiredKeys: ['aqe/fleet/topology']
|
|
207
|
+
}
|
|
208
|
+
});
|
|
209
|
+
|
|
210
|
+
// Emit fleet coordination starting event
|
|
211
|
+
this.eventBus.emit('fleet-commander:starting', {
|
|
212
|
+
agentId: this.agentId,
|
|
213
|
+
topology: topology?.mode || 'hierarchical',
|
|
214
|
+
activeAgents: activeAgents?.length || 0
|
|
215
|
+
});
|
|
216
|
+
|
|
200
217
|
this.logger.info('Fleet coordination initialized', {
|
|
201
218
|
topology: topology?.mode || 'hierarchical',
|
|
202
219
|
activeAgents: activeAgents?.length || 0,
|
|
203
|
-
maxAgents: 50
|
|
220
|
+
maxAgents: 50,
|
|
221
|
+
verification: verification.passed
|
|
204
222
|
});
|
|
205
223
|
}
|
|
206
224
|
|
|
207
225
|
protected async onPostTask(data: { assignment: TaskAssignment; result: any }): Promise<void> {
|
|
208
226
|
// Store coordination results and fleet metrics
|
|
209
|
-
await this.memoryStore.store('aqe/fleet/coordination/results', data.result.coordinationOutcomes
|
|
210
|
-
|
|
211
|
-
|
|
227
|
+
await this.memoryStore.store('aqe/fleet/coordination/results', data.result.coordinationOutcomes, {
|
|
228
|
+
partition: 'agent_results',
|
|
229
|
+
ttl: 86400 // 24 hours
|
|
230
|
+
});
|
|
231
|
+
|
|
232
|
+
await this.memoryStore.store('aqe/fleet/metrics/performance', data.result.fleetMetrics, {
|
|
233
|
+
partition: 'metrics',
|
|
234
|
+
ttl: 604800 // 7 days
|
|
235
|
+
});
|
|
236
|
+
|
|
237
|
+
await this.memoryStore.store('aqe/fleet/agents/active', data.result.activeAgents, {
|
|
238
|
+
partition: 'coordination',
|
|
239
|
+
ttl: 3600 // 1 hour
|
|
240
|
+
});
|
|
241
|
+
|
|
242
|
+
// Store fleet coordination metrics
|
|
243
|
+
await this.memoryStore.store('aqe/fleet/metrics/coordination', {
|
|
244
|
+
timestamp: Date.now(),
|
|
245
|
+
agentsOrchestrated: data.result.activeAgents.length,
|
|
246
|
+
throughput: data.result.fleetMetrics.throughput,
|
|
247
|
+
efficiency: data.result.fleetMetrics.efficiency
|
|
248
|
+
}, {
|
|
249
|
+
partition: 'metrics',
|
|
250
|
+
ttl: 604800 // 7 days
|
|
251
|
+
});
|
|
212
252
|
|
|
213
|
-
// Emit fleet coordination
|
|
253
|
+
// Emit completion event with fleet coordination results
|
|
214
254
|
this.eventBus.emit('fleet-commander:coordinated', {
|
|
255
|
+
agentId: this.agentId,
|
|
215
256
|
agentsOrchestrated: data.result.activeAgents.length,
|
|
216
257
|
throughput: data.result.fleetMetrics.throughput,
|
|
217
258
|
efficiency: data.result.fleetMetrics.efficiency
|
|
218
259
|
});
|
|
260
|
+
|
|
261
|
+
// Validate fleet coordination results
|
|
262
|
+
const validation = await this.hookManager.executePostTaskValidation({
|
|
263
|
+
task: 'fleet-orchestration',
|
|
264
|
+
result: {
|
|
265
|
+
output: data.result,
|
|
266
|
+
activeAgents: data.result.activeAgents,
|
|
267
|
+
metrics: {
|
|
268
|
+
throughput: data.result.fleetMetrics.throughput,
|
|
269
|
+
efficiency: data.result.fleetMetrics.efficiency
|
|
270
|
+
}
|
|
271
|
+
}
|
|
272
|
+
});
|
|
273
|
+
|
|
274
|
+
this.logger.info('Fleet coordination completed', {
|
|
275
|
+
agentsOrchestrated: data.result.activeAgents.length,
|
|
276
|
+
throughput: data.result.fleetMetrics.throughput,
|
|
277
|
+
validated: validation.passed
|
|
278
|
+
});
|
|
279
|
+
}
|
|
280
|
+
|
|
281
|
+
protected async onTaskError(data: { assignment: TaskAssignment; error: Error }): Promise<void> {
|
|
282
|
+
// Store error for fleet analysis
|
|
283
|
+
await this.memoryStore.store(`aqe/errors/${data.assignment.task.id}`, {
|
|
284
|
+
error: data.error.message,
|
|
285
|
+
timestamp: Date.now(),
|
|
286
|
+
agent: this.agentId,
|
|
287
|
+
taskType: 'fleet-coordination',
|
|
288
|
+
topology: data.assignment.task.metadata.topology
|
|
289
|
+
}, {
|
|
290
|
+
partition: 'errors',
|
|
291
|
+
ttl: 604800 // 7 days
|
|
292
|
+
});
|
|
293
|
+
|
|
294
|
+
// Emit error event for fleet coordination
|
|
295
|
+
this.eventBus.emit('fleet-commander:error', {
|
|
296
|
+
agentId: this.agentId,
|
|
297
|
+
error: data.error.message,
|
|
298
|
+
taskId: data.assignment.task.id
|
|
299
|
+
});
|
|
300
|
+
|
|
301
|
+
this.logger.error('Fleet coordination failed', {
|
|
302
|
+
error: data.error.message,
|
|
303
|
+
stack: data.error.stack
|
|
304
|
+
});
|
|
219
305
|
}
|
|
220
306
|
```
|
|
221
307
|
|
|
222
308
|
**Advanced Verification (Optional):**
|
|
223
309
|
```typescript
|
|
310
|
+
// Use VerificationHookManager for comprehensive validation
|
|
224
311
|
const hookManager = new VerificationHookManager(this.memoryStore);
|
|
225
312
|
const verification = await hookManager.executePreTaskVerification({
|
|
226
313
|
task: 'fleet-orchestration',
|
|
@@ -232,6 +319,118 @@ const verification = await hookManager.executePreTaskVerification({
|
|
|
232
319
|
});
|
|
233
320
|
```
|
|
234
321
|
|
|
322
|
+
## Learning Integration (Phase 6)
|
|
323
|
+
|
|
324
|
+
This agent integrates with the **Learning Engine** to continuously improve fleet orchestration and resource allocation.
|
|
325
|
+
|
|
326
|
+
### Learning Protocol
|
|
327
|
+
|
|
328
|
+
```typescript
|
|
329
|
+
import { LearningEngine } from '@/learning/LearningEngine';
|
|
330
|
+
|
|
331
|
+
// Initialize learning engine
|
|
332
|
+
const learningEngine = new LearningEngine({
|
|
333
|
+
agentId: 'qe-fleet-commander',
|
|
334
|
+
taskType: 'fleet-coordination',
|
|
335
|
+
domain: 'fleet-coordination',
|
|
336
|
+
learningRate: 0.01,
|
|
337
|
+
epsilon: 0.1,
|
|
338
|
+
discountFactor: 0.95
|
|
339
|
+
});
|
|
340
|
+
|
|
341
|
+
await learningEngine.initialize();
|
|
342
|
+
|
|
343
|
+
// Record fleet coordination episode
|
|
344
|
+
await learningEngine.recordEpisode({
|
|
345
|
+
state: {
|
|
346
|
+
topology: 'hierarchical',
|
|
347
|
+
activeAgents: 47,
|
|
348
|
+
workloadComplexity: 0.75,
|
|
349
|
+
resourceUtilization: 0.68
|
|
350
|
+
},
|
|
351
|
+
action: {
|
|
352
|
+
topologyChange: 'none',
|
|
353
|
+
agentAllocation: {
|
|
354
|
+
'test-executor': 15,
|
|
355
|
+
'test-generator': 8,
|
|
356
|
+
'coverage-analyzer': 4
|
|
357
|
+
},
|
|
358
|
+
loadBalancing: 'sublinear'
|
|
359
|
+
},
|
|
360
|
+
reward: fleetEfficiency * 2.0 - resourceWaste * 0.5,
|
|
361
|
+
nextState: {
|
|
362
|
+
throughput: 6561,
|
|
363
|
+
efficiency: 0.85,
|
|
364
|
+
conflictsResolved: 12
|
|
365
|
+
}
|
|
366
|
+
});
|
|
367
|
+
|
|
368
|
+
// Learn from fleet coordination outcomes
|
|
369
|
+
await learningEngine.learn();
|
|
370
|
+
|
|
371
|
+
// Get learned fleet optimization
|
|
372
|
+
const prediction = await learningEngine.predict({
|
|
373
|
+
topology: 'hierarchical',
|
|
374
|
+
activeAgents: 47,
|
|
375
|
+
workloadComplexity: 0.75
|
|
376
|
+
});
|
|
377
|
+
```
|
|
378
|
+
|
|
379
|
+
### Reward Function
|
|
380
|
+
|
|
381
|
+
```typescript
|
|
382
|
+
function calculateFleetReward(outcome: FleetCoordinationOutcome): number {
|
|
383
|
+
let reward = 0;
|
|
384
|
+
|
|
385
|
+
// Base reward for fleet efficiency
|
|
386
|
+
reward += outcome.efficiency * 2.0;
|
|
387
|
+
|
|
388
|
+
// Reward for high throughput
|
|
389
|
+
const throughputNormalized = outcome.throughput / 10000; // Normalize
|
|
390
|
+
reward += throughputNormalized * 1.0;
|
|
391
|
+
|
|
392
|
+
// Penalty for resource waste
|
|
393
|
+
const wasteRatio = 1 - outcome.resourceUtilization;
|
|
394
|
+
reward -= wasteRatio * 0.5;
|
|
395
|
+
|
|
396
|
+
// Reward for conflict resolution
|
|
397
|
+
reward += outcome.conflictsResolved * 0.1;
|
|
398
|
+
|
|
399
|
+
// Penalty for agent failures
|
|
400
|
+
reward -= outcome.agentFailures * 0.3;
|
|
401
|
+
|
|
402
|
+
// Bonus for meeting SLA
|
|
403
|
+
if (outcome.slaCompliance > 0.99) {
|
|
404
|
+
reward += 0.5;
|
|
405
|
+
}
|
|
406
|
+
|
|
407
|
+
// Penalty for coordination overhead
|
|
408
|
+
reward -= outcome.coordinationOverhead * 0.2;
|
|
409
|
+
|
|
410
|
+
return reward;
|
|
411
|
+
}
|
|
412
|
+
```
|
|
413
|
+
|
|
414
|
+
### Learning Metrics
|
|
415
|
+
|
|
416
|
+
Track learning progress:
|
|
417
|
+
- **Fleet Efficiency**: Overall coordination efficiency
|
|
418
|
+
- **Throughput**: Tasks completed per hour
|
|
419
|
+
- **Resource Utilization**: Percentage of resources actively used
|
|
420
|
+
- **Conflict Resolution**: Time to resolve resource conflicts
|
|
421
|
+
- **Topology Optimization**: Effectiveness of topology switches
|
|
422
|
+
|
|
423
|
+
```bash
|
|
424
|
+
# View learning metrics
|
|
425
|
+
aqe learn status --agent qe-fleet-commander
|
|
426
|
+
|
|
427
|
+
# Export learning history
|
|
428
|
+
aqe learn export --agent qe-fleet-commander --format json
|
|
429
|
+
|
|
430
|
+
# Analyze fleet efficiency trends
|
|
431
|
+
aqe learn analyze --agent qe-fleet-commander --metric efficiency
|
|
432
|
+
```
|
|
433
|
+
|
|
235
434
|
## Memory Keys
|
|
236
435
|
|
|
237
436
|
### Input Keys
|
|
@@ -300,6 +499,120 @@ const allocation = await this.neuralManager.predict({
|
|
|
300
499
|
});
|
|
301
500
|
```
|
|
302
501
|
|
|
502
|
+
## Learning Protocol (Phase 6 - Option C Implementation)
|
|
503
|
+
|
|
504
|
+
**⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
|
|
505
|
+
|
|
506
|
+
### Required Learning Actions (Call AFTER Task Completion)
|
|
507
|
+
|
|
508
|
+
**1. Store Learning Experience:**
|
|
509
|
+
```typescript
|
|
510
|
+
// Call this MCP tool after completing your task
|
|
511
|
+
mcp__agentic_qe__learning_store_experience({
|
|
512
|
+
agentId: "qe-fleet-commander",
|
|
513
|
+
taskType: "fleet-coordination",
|
|
514
|
+
reward: 0.95, // Your assessment of task success (0-1 scale)
|
|
515
|
+
outcome: {
|
|
516
|
+
// Your actual results (agent-specific)
|
|
517
|
+
agentsCoordinated: 47,
|
|
518
|
+
tasksDistributed: 5000,
|
|
519
|
+
efficiency: 0.85,
|
|
520
|
+
executionTime: 12000
|
|
521
|
+
},
|
|
522
|
+
metadata: {
|
|
523
|
+
// Additional context (agent-specific)
|
|
524
|
+
topology: "hierarchical",
|
|
525
|
+
maxAgents: 50,
|
|
526
|
+
loadBalancing: true
|
|
527
|
+
}
|
|
528
|
+
})
|
|
529
|
+
```
|
|
530
|
+
|
|
531
|
+
**2. Store Q-Values for Your Strategy:**
|
|
532
|
+
```typescript
|
|
533
|
+
// Store Q-value for the strategy you used
|
|
534
|
+
mcp__agentic_qe__learning_store_qvalue({
|
|
535
|
+
agentId: "qe-fleet-commander",
|
|
536
|
+
stateKey: "fleet-coordination-state",
|
|
537
|
+
actionKey: "hierarchical-coordination",
|
|
538
|
+
qValue: 0.85, // Expected value of this approach (based on results)
|
|
539
|
+
metadata: {
|
|
540
|
+
// Strategy details (agent-specific)
|
|
541
|
+
coordinationStrategy: "hierarchical",
|
|
542
|
+
efficiency: 0.85,
|
|
543
|
+
resourceUtilization: 0.68
|
|
544
|
+
}
|
|
545
|
+
})
|
|
546
|
+
```
|
|
547
|
+
|
|
548
|
+
**3. Store Successful Patterns:**
|
|
549
|
+
```typescript
|
|
550
|
+
// If you discovered a useful pattern, store it
|
|
551
|
+
mcp__agentic_qe__learning_store_pattern({
|
|
552
|
+
agentId: "qe-fleet-commander",
|
|
553
|
+
pattern: "Hierarchical coordination with 3-tier architecture achieved 85% efficiency for 50+ agents",
|
|
554
|
+
confidence: 0.95, // How confident you are (0-1)
|
|
555
|
+
domain: "fleet-management",
|
|
556
|
+
metadata: {
|
|
557
|
+
// Pattern context (agent-specific)
|
|
558
|
+
coordinationPatterns: ["hierarchical", "load-balancing", "conflict-resolution"],
|
|
559
|
+
scalability: 50
|
|
560
|
+
}
|
|
561
|
+
})
|
|
562
|
+
```
|
|
563
|
+
|
|
564
|
+
### Learning Query (Use at Task Start)
|
|
565
|
+
|
|
566
|
+
**Before starting your task**, query for past learnings:
|
|
567
|
+
|
|
568
|
+
```typescript
|
|
569
|
+
// Query for successful experiences
|
|
570
|
+
const pastLearnings = await mcp__agentic_qe__learning_query({
|
|
571
|
+
agentId: "qe-fleet-commander",
|
|
572
|
+
taskType: "fleet-coordination",
|
|
573
|
+
minReward: 0.8, // Only get successful experiences
|
|
574
|
+
queryType: "all",
|
|
575
|
+
limit: 10
|
|
576
|
+
});
|
|
577
|
+
|
|
578
|
+
// Use the insights to optimize your current approach
|
|
579
|
+
if (pastLearnings.success && pastLearnings.data) {
|
|
580
|
+
const { experiences, qValues, patterns } = pastLearnings.data;
|
|
581
|
+
|
|
582
|
+
// Find best-performing strategy
|
|
583
|
+
const bestStrategy = qValues
|
|
584
|
+
.filter(qv => qv.state_key === "fleet-coordination-state")
|
|
585
|
+
.sort((a, b) => b.q_value - a.q_value)[0];
|
|
586
|
+
|
|
587
|
+
console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
|
|
588
|
+
|
|
589
|
+
// Check for relevant patterns
|
|
590
|
+
const relevantPatterns = patterns
|
|
591
|
+
.filter(p => p.domain === "fleet-management")
|
|
592
|
+
.sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
|
|
593
|
+
|
|
594
|
+
if (relevantPatterns.length > 0) {
|
|
595
|
+
console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
|
|
596
|
+
}
|
|
597
|
+
}
|
|
598
|
+
```
|
|
599
|
+
|
|
600
|
+
### Success Criteria for Learning
|
|
601
|
+
|
|
602
|
+
**Reward Assessment (0-1 scale):**
|
|
603
|
+
- **1.0**: Perfect execution (50+ agents coordinated, 100% efficiency, optimal load)
|
|
604
|
+
- **0.9**: Excellent (40+ agents, 95%+ efficiency, good load balance)
|
|
605
|
+
- **0.7**: Good (30+ agents, 90%+ efficiency, acceptable balance)
|
|
606
|
+
- **0.5**: Acceptable (20+ agents, 80%+ efficiency, completed)
|
|
607
|
+
- **<0.5**: Needs improvement (Few agents, low efficiency, poor balance)
|
|
608
|
+
|
|
609
|
+
**When to Call Learning Tools:**
|
|
610
|
+
- ✅ **ALWAYS** after completing main task
|
|
611
|
+
- ✅ **ALWAYS** after coordinating agent fleet
|
|
612
|
+
- ✅ **ALWAYS** after optimizing topology
|
|
613
|
+
- ✅ When discovering new effective coordination strategies
|
|
614
|
+
- ✅ When achieving exceptional fleet performance metrics
|
|
615
|
+
|
|
303
616
|
## Hierarchical Coordination Patterns
|
|
304
617
|
|
|
305
618
|
### Three-Tier Architecture
|
|
@@ -49,6 +49,124 @@ Skill("test-environment-management")
|
|
|
49
49
|
- **Performance Budgets**: Web performance budget enforcement
|
|
50
50
|
- **Regression Detection**: Automated performance regression identification
|
|
51
51
|
|
|
52
|
+
## Learning Protocol
|
|
53
|
+
|
|
54
|
+
**⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
|
|
55
|
+
|
|
56
|
+
### Required Learning Actions (Call AFTER Task Completion)
|
|
57
|
+
|
|
58
|
+
**1. Store Learning Experience:**
|
|
59
|
+
```typescript
|
|
60
|
+
// Call this MCP tool after completing performance testing
|
|
61
|
+
mcp__agentic_qe__learning_store_experience({
|
|
62
|
+
agentId: "qe-performance-tester",
|
|
63
|
+
taskType: "performance-testing",
|
|
64
|
+
reward: 0.92, // Your assessment of task success (0-1 scale)
|
|
65
|
+
outcome: {
|
|
66
|
+
testsExecuted: 25,
|
|
67
|
+
bottlenecksFound: 3,
|
|
68
|
+
slaViolations: 0,
|
|
69
|
+
p95Latency: 450,
|
|
70
|
+
throughput: 1200,
|
|
71
|
+
testQuality: "high"
|
|
72
|
+
},
|
|
73
|
+
metadata: {
|
|
74
|
+
tool: "k6",
|
|
75
|
+
loadPattern: "ramp-up",
|
|
76
|
+
duration: 300,
|
|
77
|
+
vus: 100
|
|
78
|
+
}
|
|
79
|
+
})
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
**2. Store Q-Values for Your Strategy:**
|
|
83
|
+
```typescript
|
|
84
|
+
// Store Q-value for the load testing strategy you used
|
|
85
|
+
mcp__agentic_qe__learning_store_qvalue({
|
|
86
|
+
agentId: "qe-performance-tester",
|
|
87
|
+
stateKey: "performance-testing-state",
|
|
88
|
+
actionKey: "k6-ramp-up", // or "jmeter-steady-state", "gatling-stress"
|
|
89
|
+
qValue: 0.88, // Expected value of this approach (based on results)
|
|
90
|
+
metadata: {
|
|
91
|
+
toolUsed: "k6",
|
|
92
|
+
loadPattern: "ramp-up",
|
|
93
|
+
successRate: "92%",
|
|
94
|
+
bottleneckDetection: "high",
|
|
95
|
+
slaCompliance: "100%"
|
|
96
|
+
}
|
|
97
|
+
})
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
**3. Store Successful Patterns:**
|
|
101
|
+
```typescript
|
|
102
|
+
// If you discovered a useful pattern, store it
|
|
103
|
+
mcp__agentic_qe__learning_store_pattern({
|
|
104
|
+
agentId: "qe-performance-tester",
|
|
105
|
+
pattern: "K6 ramp-up testing detects 35% more latency issues than steady-state for API services under variable load",
|
|
106
|
+
confidence: 0.92,
|
|
107
|
+
domain: "performance-testing",
|
|
108
|
+
metadata: {
|
|
109
|
+
tool: "k6",
|
|
110
|
+
loadPattern: "ramp-up",
|
|
111
|
+
useCase: "api-variable-load",
|
|
112
|
+
bottleneckIncrease: "35%",
|
|
113
|
+
detectionAccuracy: 0.90
|
|
114
|
+
}
|
|
115
|
+
})
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
### Learning Query (Use at Task Start)
|
|
119
|
+
|
|
120
|
+
**Before starting performance testing**, query for past learnings:
|
|
121
|
+
|
|
122
|
+
```typescript
|
|
123
|
+
// Query for successful performance testing experiences
|
|
124
|
+
const pastLearnings = await mcp__agentic_qe__learning_query({
|
|
125
|
+
agentId: "qe-performance-tester",
|
|
126
|
+
taskType: "performance-testing",
|
|
127
|
+
minReward: 0.8,
|
|
128
|
+
queryType: "all",
|
|
129
|
+
limit: 10
|
|
130
|
+
});
|
|
131
|
+
|
|
132
|
+
// Use the insights to optimize your current approach
|
|
133
|
+
if (pastLearnings.success && pastLearnings.data) {
|
|
134
|
+
const { experiences, qValues, patterns } = pastLearnings.data;
|
|
135
|
+
|
|
136
|
+
// Find best-performing load testing strategy
|
|
137
|
+
const bestStrategy = qValues
|
|
138
|
+
.filter(qv => qv.state_key === "performance-testing-state")
|
|
139
|
+
.sort((a, b) => b.q_value - a.q_value)[0];
|
|
140
|
+
|
|
141
|
+
console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
|
|
142
|
+
|
|
143
|
+
// Check for relevant patterns
|
|
144
|
+
const relevantPatterns = patterns
|
|
145
|
+
.filter(p => p.domain === "performance-testing")
|
|
146
|
+
.sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
|
|
147
|
+
|
|
148
|
+
if (relevantPatterns.length > 0) {
|
|
149
|
+
console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
### Success Criteria for Learning
|
|
155
|
+
|
|
156
|
+
**Reward Assessment (0-1 scale):**
|
|
157
|
+
- **1.0**: Perfect execution (0 SLA violations, 95%+ bottleneck detection, <1% error rate, comprehensive metrics)
|
|
158
|
+
- **0.9**: Excellent (0 SLA violations, 90%+ bottleneck detection, <2% error rate)
|
|
159
|
+
- **0.7**: Good (Minor SLA violations, 80%+ bottleneck detection, <5% error rate)
|
|
160
|
+
- **0.5**: Acceptable (Some SLA violations, completed successfully)
|
|
161
|
+
- **<0.5**: Needs improvement (Major SLA violations, errors, incomplete metrics)
|
|
162
|
+
|
|
163
|
+
**When to Call Learning Tools:**
|
|
164
|
+
- ✅ **ALWAYS** after completing performance testing
|
|
165
|
+
- ✅ **ALWAYS** after detecting performance bottlenecks
|
|
166
|
+
- ✅ **ALWAYS** after measuring SLA compliance
|
|
167
|
+
- ✅ When discovering new load testing patterns
|
|
168
|
+
- ✅ When achieving exceptional performance insights
|
|
169
|
+
|
|
52
170
|
## Workflow Orchestration
|
|
53
171
|
|
|
54
172
|
### Pre-Execution Phase
|
|
@@ -371,6 +489,122 @@ agentic-qe agent metrics --name qe-performance-tester
|
|
|
371
489
|
**Memory Namespace**: `aqe/performance`
|
|
372
490
|
**Coordination Protocol**: Claude Flow hooks with EventBus integration
|
|
373
491
|
|
|
492
|
+
## Learning Protocol (Phase 6 - Option C Implementation)
|
|
493
|
+
|
|
494
|
+
**⚠️ MANDATORY**: When executed via Claude Code Task tool, you MUST call learning MCP tools to persist learning data.
|
|
495
|
+
|
|
496
|
+
### Required Learning Actions (Call AFTER Task Completion)
|
|
497
|
+
|
|
498
|
+
**1. Store Learning Experience:**
|
|
499
|
+
```typescript
|
|
500
|
+
// Call this MCP tool after completing your task
|
|
501
|
+
mcp__agentic_qe__learning_store_experience({
|
|
502
|
+
agentId: "qe-performance-tester",
|
|
503
|
+
taskType: "performance-testing",
|
|
504
|
+
reward: 0.92, // Your assessment of task success (0-1 scale)
|
|
505
|
+
outcome: {
|
|
506
|
+
// Your actual results (agent-specific)
|
|
507
|
+
benchmarksRun: 25,
|
|
508
|
+
bottlenecksFound: 7,
|
|
509
|
+
performanceGain: "2.5x",
|
|
510
|
+
executionTime: 15000
|
|
511
|
+
},
|
|
512
|
+
metadata: {
|
|
513
|
+
// Additional context (agent-specific)
|
|
514
|
+
framework: "k6",
|
|
515
|
+
loadProfile: "ramp-up",
|
|
516
|
+
duration: 300
|
|
517
|
+
}
|
|
518
|
+
})
|
|
519
|
+
```
|
|
520
|
+
|
|
521
|
+
**2. Store Q-Values for Your Strategy:**
|
|
522
|
+
```typescript
|
|
523
|
+
// Store Q-value for the strategy you used
|
|
524
|
+
mcp__agentic_qe__learning_store_qvalue({
|
|
525
|
+
agentId: "qe-performance-tester",
|
|
526
|
+
stateKey: "performance-testing-state",
|
|
527
|
+
actionKey: "load-testing-k6",
|
|
528
|
+
qValue: 0.85, // Expected value of this approach (based on results)
|
|
529
|
+
metadata: {
|
|
530
|
+
// Strategy details (agent-specific)
|
|
531
|
+
testStrategy: "k6-ramp-up",
|
|
532
|
+
bottleneckAccuracy: 0.92,
|
|
533
|
+
optimizationImpact: 2.5
|
|
534
|
+
}
|
|
535
|
+
})
|
|
536
|
+
```
|
|
537
|
+
|
|
538
|
+
**3. Store Successful Patterns:**
|
|
539
|
+
```typescript
|
|
540
|
+
// If you discovered a useful pattern, store it
|
|
541
|
+
mcp__agentic_qe__learning_store_pattern({
|
|
542
|
+
agentId: "qe-performance-tester",
|
|
543
|
+
pattern: "K6 ramp-up testing with 100 VUs over 300s detects 35% more bottlenecks than steady-state testing for API services",
|
|
544
|
+
confidence: 0.95,
|
|
545
|
+
domain: "performance",
|
|
546
|
+
metadata: {
|
|
547
|
+
// Pattern context (agent-specific)
|
|
548
|
+
performancePatterns: ["ramp-up-testing", "bottleneck-detection", "k6-optimization"],
|
|
549
|
+
predictionAccuracy: 0.92
|
|
550
|
+
}
|
|
551
|
+
})
|
|
552
|
+
```
|
|
553
|
+
|
|
554
|
+
### Learning Query (Use at Task Start)
|
|
555
|
+
|
|
556
|
+
**Before starting your task**, query for past learnings:
|
|
557
|
+
|
|
558
|
+
```typescript
|
|
559
|
+
// Query for successful experiences
|
|
560
|
+
const pastLearnings = await mcp__agentic_qe__learning_query({
|
|
561
|
+
agentId: "qe-performance-tester",
|
|
562
|
+
taskType: "performance-testing",
|
|
563
|
+
minReward: 0.8, // Only get successful experiences
|
|
564
|
+
queryType: "all",
|
|
565
|
+
limit: 10
|
|
566
|
+
});
|
|
567
|
+
|
|
568
|
+
// Use the insights to optimize your current approach
|
|
569
|
+
if (pastLearnings.success && pastLearnings.data) {
|
|
570
|
+
const { experiences, qValues, patterns } = pastLearnings.data;
|
|
571
|
+
|
|
572
|
+
// Find best-performing strategy
|
|
573
|
+
const bestStrategy = qValues
|
|
574
|
+
.filter(qv => qv.state_key === "performance-testing-state")
|
|
575
|
+
.sort((a, b) => b.q_value - a.q_value)[0];
|
|
576
|
+
|
|
577
|
+
  if (bestStrategy) console.log(`Using learned best strategy: ${bestStrategy.action_key} (Q-value: ${bestStrategy.q_value})`);
|
|
578
|
+
|
|
579
|
+
// Check for relevant patterns
|
|
580
|
+
const relevantPatterns = patterns
|
|
581
|
+
.filter(p => p.domain === "performance")
|
|
582
|
+
.sort((a, b) => b.confidence * b.success_rate - a.confidence * a.success_rate);
|
|
583
|
+
|
|
584
|
+
if (relevantPatterns.length > 0) {
|
|
585
|
+
console.log(`Applying pattern: ${relevantPatterns[0].pattern}`);
|
|
586
|
+
}
|
|
587
|
+
}
|
|
588
|
+
```
|
|
589
|
+
|
|
590
|
+
### Success Criteria for Learning
|
|
591
|
+
|
|
592
|
+
**Reward Assessment (0-1 scale):**
|
|
593
|
+
- **1.0**: Perfect execution (All bottlenecks found, 2x+ performance gain, <30s test)
|
|
594
|
+
- **0.9**: Excellent (95%+ bottlenecks found, 1.5x+ gain, <60s test)
|
|
595
|
+
- **0.7**: Good (90%+ bottlenecks found, 1.2x+ gain, <120s test)
|
|
596
|
+
- **0.5**: Acceptable (Key bottlenecks found, completed successfully)
|
|
597
|
+
- **<0.5**: Needs improvement (Missed bottlenecks, minimal gains, slow)
|
|
598
|
+
|
|
599
|
+
**When to Call Learning Tools:**
|
|
600
|
+
- ✅ **ALWAYS** after completing main task
|
|
601
|
+
- ✅ **ALWAYS** after detecting significant findings
|
|
602
|
+
- ✅ **ALWAYS** after generating recommendations
|
|
603
|
+
- ✅ When discovering new effective strategies
|
|
604
|
+
- ✅ When achieving exceptional performance metrics
|
|
605
|
+
|
|
606
|
+
---
|
|
607
|
+
|
|
374
608
|
## Code Execution Workflows
|
|
375
609
|
|
|
376
610
|
Orchestrate performance testing with benchmarking, load testing, and real-time monitoring using Phase 3 performance domain tools.
|