agentic-qe 1.6.0 → 1.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/skills/brutal-honesty-review/README.md +218 -0
- package/.claude/skills/brutal-honesty-review/SKILL.md +725 -0
- package/.claude/skills/brutal-honesty-review/resources/assessment-rubrics.md +295 -0
- package/.claude/skills/brutal-honesty-review/resources/review-template.md +102 -0
- package/.claude/skills/brutal-honesty-review/scripts/assess-code.sh +179 -0
- package/.claude/skills/brutal-honesty-review/scripts/assess-tests.sh +223 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/README.md +301 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/SKILL.md +510 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/microservice-pipeline.md +239 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/mobile-pipeline.md +375 -0
- package/.claude/skills/cicd-pipeline-qe-orchestrator/resources/workflows/monolith-pipeline.md +268 -0
- package/.claude/skills/six-thinking-hats/README.md +190 -0
- package/.claude/skills/six-thinking-hats/SKILL.md +1215 -0
- package/.claude/skills/six-thinking-hats/resources/examples/api-testing-example.md +345 -0
- package/.claude/skills/six-thinking-hats/resources/templates/solo-session-template.md +167 -0
- package/.claude/skills/six-thinking-hats/resources/templates/team-session-template.md +336 -0
- package/CHANGELOG.md +2239 -2157
- package/README.md +12 -6
- package/dist/cli/commands/init-claude-md-template.d.ts +16 -0
- package/dist/cli/commands/init-claude-md-template.d.ts.map +1 -0
- package/dist/cli/commands/init-claude-md-template.js +69 -0
- package/dist/cli/commands/init-claude-md-template.js.map +1 -0
- package/dist/cli/commands/init.d.ts +1 -1
- package/dist/cli/commands/init.d.ts.map +1 -1
- package/dist/cli/commands/init.js +479 -461
- package/dist/cli/commands/init.js.map +1 -1
- package/package.json +2 -2
- package/.claude/agents/.claude-flow/metrics/agent-metrics.json +0 -1
- package/.claude/agents/.claude-flow/metrics/performance.json +0 -87
- package/.claude/agents/.claude-flow/metrics/task-metrics.json +0 -10
- package/.claude/commands/analysis/COMMAND_COMPLIANCE_REPORT.md +0 -54
- package/.claude/commands/analysis/performance-bottlenecks.md +0 -59
- package/.claude/commands/flow-nexus/app-store.md +0 -124
- package/.claude/commands/flow-nexus/challenges.md +0 -120
- package/.claude/commands/flow-nexus/login-registration.md +0 -65
- package/.claude/commands/flow-nexus/neural-network.md +0 -134
- package/.claude/commands/flow-nexus/payments.md +0 -116
- package/.claude/commands/flow-nexus/sandbox.md +0 -83
- package/.claude/commands/flow-nexus/swarm.md +0 -87
- package/.claude/commands/flow-nexus/user-tools.md +0 -152
- package/.claude/commands/flow-nexus/workflow.md +0 -115
- package/.claude/commands/memory/usage.md +0 -46
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
#!/bin/bash
# Brutal Honesty Test Assessment Script (Ramsay Mode)
#
# Runs a battery of heuristic checks (coverage, edge cases, clarity, speed,
# stability, isolation) against a test directory and prints a blunt verdict.
#
# Usage: assess-tests.sh <test-directory>

set -e

# ANSI color codes used by all assessment output.
RED='\033[0;31m'
YELLOW='\033[1;33m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color

echo "👨🍳 BRUTAL HONESTY TEST ASSESSMENT (Ramsay Mode)"
echo "=================================================="
echo ""

# Require the test directory argument. Usage errors belong on stderr so the
# report on stdout stays clean for piping. ${1:-} keeps this safe even if
# 'set -u' is ever enabled.
if [ -z "${1:-}" ]; then
  echo "Usage: $0 <test-directory>" >&2
  exit 1
fi

TEST_DIR="$1"

# Bail out early when the directory doesn't exist — nothing below can run.
if [ ! -d "$TEST_DIR" ]; then
  echo -e "${RED}🔴 FAILING: Test directory '$TEST_DIR' doesn't exist${NC}"
  echo "  → Where are the tests? Did you even write any?"
  exit 1
fi
# Function to assess coverage
# Runs the project's 'test:coverage' script (if package.json declares one)
# a single time and grades the first reported percentage:
#   <50% RAW, <80% ACCEPTABLE, otherwise MICHELIN STAR.
assess_coverage() {
  echo "📊 COVERAGE CHECK"
  echo "----------------"

  # Only attempt coverage when package.json declares a test:coverage script.
  if [ -f "package.json" ] && grep -q "test:coverage" package.json; then
    echo "Running coverage analysis..."

    # Run the suite ONCE and parse the combined output (the original ran the
    # whole suite twice: once discarded, once for extraction). Use a portable
    # ERE instead of GNU-only 'grep -oP' lookahead, and default to 0 when no
    # percentage is found so bc never sees an empty operand.
    local output coverage
    output=$(npm run test:coverage 2>&1 || true)
    coverage=$(printf '%s\n' "$output" | grep -oE '[0-9]+\.[0-9]+%' | head -1 | tr -d '%')
    coverage=${coverage:-0}

    if (( $(echo "$coverage < 50" | bc -l) )); then
      echo -e "${RED}🔴 RAW: ${coverage}% coverage${NC}"
      echo "  → This is embarrassing. You're barely testing anything."
    elif (( $(echo "$coverage < 80" | bc -l) )); then
      echo -e "${YELLOW}🟡 ACCEPTABLE: ${coverage}% coverage${NC}"
      echo "  → Minimum is 80%. You're not there yet."
    else
      echo -e "${GREEN}🟢 MICHELIN STAR: ${coverage}% coverage${NC}"
    fi
  else
    echo -e "${YELLOW}⚠️ No coverage command found${NC}"
    echo "  → Add 'test:coverage' script to package.json"
  fi
}
# Function to assess edge cases
# Greps the test directory for common edge-case keywords and grades how many
# distinct patterns appear: 0 RAW, 1-2 ACCEPTABLE, 3+ MICHELIN STAR.
assess_edge_cases() {
  echo ""
  echo "🎯 EDGE CASE CHECK"
  echo "-----------------"

  # Keywords that usually indicate edge-case tests.
  local edge_case_patterns=(
    "null"
    "undefined"
    "empty"
    "zero"
    "negative"
    "max"
    "min"
    "overflow"
    "boundary"
  )

  local found_count=0 pattern
  for pattern in "${edge_case_patterns[@]}"; do
    if grep -ri "$pattern" "$TEST_DIR" > /dev/null 2>&1; then
      # BUG FIX: '((found_count++))' returns status 1 when the old value is
      # 0, which aborts the whole script under 'set -e' on the very first
      # match. Plain arithmetic assignment always succeeds.
      found_count=$((found_count + 1))
    fi
  done

  if [ "$found_count" -eq 0 ]; then
    echo -e "${RED}🔴 RAW: No edge cases tested${NC}"
    echo "  → You're only testing the happy path. That's not testing."
  elif [ "$found_count" -lt 3 ]; then
    echo -e "${YELLOW}🟡 ACCEPTABLE: Found $found_count edge case patterns${NC}"
    echo "  → Test more: null, empty, boundaries, overflow"
  else
    echo -e "${GREEN}🟢 MICHELIN STAR: Found $found_count edge case patterns${NC}"
  fi
}
# Function to assess test clarity
# Flags lazily named tests (test('test1') etc.) and confirms the directory
# contains recognizable test-framework structure (describe/it/test).
assess_clarity() {
  echo ""
  echo "📖 CLARITY CHECK"
  echo "---------------"

  # Count tests named like test('test1') — names that say nothing about
  # behavior. Match both quote styles; the original pattern "test('test"
  # only caught single quotes.
  local unclear_tests
  unclear_tests=$(grep -rE "test\(['\"]test" "$TEST_DIR" 2>/dev/null | wc -l)
  if [ "$unclear_tests" -gt 0 ]; then
    echo -e "${RED}🔴 RAW: Found $unclear_tests unclear test names${NC}"
    echo "  → 'test1', 'test2' - What are you testing? Use descriptive names."
  fi

  # Look for framework structure; match whole words so e.g. 'item' doesn't
  # satisfy 'it' (the original's unanchored pattern matched any substring,
  # making this check pass on almost any file).
  if grep -rEw "describe|it|test" "$TEST_DIR" > /dev/null 2>&1; then
    echo -e "${GREEN}✓ Tests have structure${NC}"
  else
    echo -e "${YELLOW}⚠️ No test framework patterns detected${NC}"
  fi
}
# Function to assess test speed
# Times one full 'npm test' run and grades the wall-clock duration:
# >60s RAW, >10s ACCEPTABLE, otherwise MICHELIN STAR. A failing suite is
# called out instead of being timed.
assess_speed() {
  echo ""
  echo "⚡ SPEED CHECK"
  echo "-------------"

  echo "Running tests..."
  local started finished elapsed
  started=$(date +%s)

  # Run the suite silently; only grade timing when it actually passes.
  if npm test > /dev/null 2>&1; then
    finished=$(date +%s)
    elapsed=$((finished - started))

    if [ "$elapsed" -gt 60 ]; then
      echo -e "${RED}🔴 RAW: Tests took ${elapsed}s${NC}"
      echo "  → Unit tests should run in seconds, not minutes."
      echo "  → Are you calling real databases/networks?"
    elif [ "$elapsed" -gt 10 ]; then
      echo -e "${YELLOW}🟡 ACCEPTABLE: Tests took ${elapsed}s${NC}"
      echo "  → Aim for <10s. Use mocks and in-memory operations."
    else
      echo -e "${GREEN}🟢 MICHELIN STAR: Tests took ${elapsed}s${NC}"
    fi
  else
    echo -e "${RED}🔴 FAILING: Tests don't even pass${NC}"
    echo "  → Fix your broken tests before worrying about speed."
  fi
}
# Function to assess stability
# Flags timing-based test patterns, then runs the suite three times and
# reports any run-to-run failures (flakiness).
assess_stability() {
  echo ""
  echo "🎲 STABILITY CHECK"
  echo "-----------------"

  # Timing-based waits are the classic source of flaky tests.
  if grep -ri "setTimeout\|sleep\|wait" "$TEST_DIR" > /dev/null 2>&1; then
    echo -e "${RED}🔴 RAW: Timing-based tests detected${NC}"
    echo "  → You're creating flaky tests. Use proper async/await."
  fi

  echo "Running tests 3x to detect flakes..."
  local failures=0 run
  for run in 1 2 3; do
    if ! npm test > /dev/null 2>&1; then
      # BUG FIX: '((failures++))' returns status 1 on the first increment
      # (old value 0) and kills the script under 'set -e'; plain arithmetic
      # assignment is always safe.
      failures=$((failures + 1))
    fi
  done

  if [ "$failures" -gt 0 ]; then
    echo -e "${RED}🔴 RAW: Tests failed $failures/3 times${NC}"
    echo "  → FLAKY TESTS. These are worse than no tests."
    echo "  → Fix the non-determinism before merging."
  else
    echo -e "${GREEN}🟢 MICHELIN STAR: Tests are stable${NC}"
  fi
}
# Function to assess isolation
# Heuristically checks for shared state between tests and for committed
# .only/.skip markers; true isolation still needs a human review.
assess_isolation() {
  echo ""
  echo "🏝️ ISOLATION CHECK"
  echo "------------------"

  # Gather both signals first, then report.
  local shared_state=no narrowed=no
  if grep -ri "global\|beforeAll\|shared" "$TEST_DIR" > /dev/null 2>&1; then
    shared_state=yes
  fi
  if grep -r "\.only\|\.skip" "$TEST_DIR" > /dev/null 2>&1; then
    narrowed=yes
  fi

  # Shared fixtures / globals suggest order-dependent tests.
  if [ "$shared_state" = yes ]; then
    echo -e "${YELLOW}🟡 WARNING: Shared state patterns detected${NC}"
    echo "  → Are your tests independent? Can they run in any order?"
  fi

  # Committed .only/.skip silently narrows what actually runs in CI.
  if [ "$narrowed" = yes ]; then
    echo -e "${YELLOW}🟡 WARNING: .only or .skip found${NC}"
    echo "  → Don't commit tests with .only or .skip"
  fi

  echo -e "${GREEN}✓ Review test isolation manually${NC}"
}
# Run all assessments in order.
assess_coverage
assess_edge_cases
assess_clarity
assess_speed
assess_stability
assess_isolation

# Final verdict: restate the bar these tests are expected to clear.
echo ""
echo "=================================================="
echo "🎯 FINAL VERDICT"
echo "=================================================="
cat <<'VERDICT'

Look at the results above. If you see multiple 🔴 RAW marks,
these tests are NOT production-ready.

Expected standards:
  - 80%+ branch coverage
  - Edge cases tested (null, empty, boundaries)
  - Clear test names
  - <10s to run
  - 0% flaky
  - Independent tests

You know what good tests look like. Why aren't you writing them?
VERDICT
@@ -0,0 +1,301 @@
|
|
|
1
|
+
# CI/CD Pipeline QE Orchestrator
|
|
2
|
+
|
|
3
|
+
**Comprehensive quality engineering orchestration across all CI/CD pipeline phases.**
|
|
4
|
+
|
|
5
|
+
## What is This?
|
|
6
|
+
|
|
7
|
+
The CI/CD Pipeline QE Orchestrator is an advanced Claude Code skill that provides intelligent, phase-based quality engineering across your entire software delivery pipeline. It intelligently selects from 37 QE skills and coordinates 18 specialized QE agents to ensure holistic quality coverage from commit to production.
|
|
8
|
+
|
|
9
|
+
## Quick Start
|
|
10
|
+
|
|
11
|
+
### 1. Invoke the Skill
|
|
12
|
+
|
|
13
|
+
```javascript
|
|
14
|
+
// In Claude Code
|
|
15
|
+
Skill("cicd-pipeline-qe-orchestrator")
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
### 2. Analyze Your Pipeline
|
|
19
|
+
|
|
20
|
+
```javascript
|
|
21
|
+
Task("Pipeline Analysis",
|
|
22
|
+
"Analyze our CI/CD pipeline and recommend quality strategy for all phases",
|
|
23
|
+
"qe-fleet-commander")
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
### 3. Get Phase-Specific Recommendations
|
|
27
|
+
|
|
28
|
+
The orchestrator will provide:
|
|
29
|
+
- Skill selections per pipeline phase
|
|
30
|
+
- Agent coordination patterns
|
|
31
|
+
- Quality gate configurations
|
|
32
|
+
- Timing budgets
|
|
33
|
+
- Rollback strategies
|
|
34
|
+
|
|
35
|
+
## What Makes This Different?
|
|
36
|
+
|
|
37
|
+
### Traditional QE Approach
|
|
38
|
+
```
|
|
39
|
+
┌─────────────┐
|
|
40
|
+
│ Run Tests │ Manual selection
|
|
41
|
+
│ Manual QA │ Ad-hoc coverage
|
|
42
|
+
│ Deploy │ Hope for the best
|
|
43
|
+
└─────────────┘
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
### Orchestrated QE Approach
|
|
47
|
+
```
|
|
48
|
+
┌──────────────────────────────────────────────┐
|
|
49
|
+
│ Commit Phase (Shift-Left) │
|
|
50
|
+
│ ├─ Skills: shift-left-testing, tdd │
|
|
51
|
+
│ ├─ Agents: qe-test-generator (parallel) │
|
|
52
|
+
│ └─ Gates: 80% coverage, 0 critical issues │
|
|
53
|
+
├──────────────────────────────────────────────┤
|
|
54
|
+
│ Build Phase │
|
|
55
|
+
│ ├─ Skills: test-automation, mutation │
|
|
56
|
+
│ ├─ Agents: qe-test-executor (batched) │
|
|
57
|
+
│ └─ Gates: 90% coverage, mutation > 70% │
|
|
58
|
+
├──────────────────────────────────────────────┤
|
|
59
|
+
│ Integration Phase │
|
|
60
|
+
│ ├─ Skills: api-testing, performance │
|
|
61
|
+
│ ├─ Agents: qe-performance-tester (parallel) │
|
|
62
|
+
│ └─ Gates: p95 < 200ms, 0 security critical │
|
|
63
|
+
├──────────────────────────────────────────────┤
|
|
64
|
+
│ Staging Phase │
|
|
65
|
+
│ ├─ Skills: chaos-engineering, visual │
|
|
66
|
+
│ ├─ Agents: qe-chaos-engineer (sequential) │
|
|
67
|
+
│ └─ Gates: resilience validated, 0 visual │
|
|
68
|
+
├──────────────────────────────────────────────┤
|
|
69
|
+
│ Production Phase (Shift-Right) │
|
|
70
|
+
│ ├─ Skills: shift-right-testing │
|
|
71
|
+
│ ├─ Agents: qe-production-intelligence │
|
|
72
|
+
│ └─ Gates: error rate < 0.1%, monitors pass │
|
|
73
|
+
└──────────────────────────────────────────────┘
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
## Key Features
|
|
77
|
+
|
|
78
|
+
### 🎯 Intelligent Skill Selection
|
|
79
|
+
|
|
80
|
+
Automatically selects optimal skills based on:
|
|
81
|
+
- Pipeline phase (commit, build, test, staging, production)
|
|
82
|
+
- Application type (API, web, mobile, backend)
|
|
83
|
+
- Risk level (critical, high, medium, low)
|
|
84
|
+
- Deployment frequency (continuous, daily, weekly, monthly)
|
|
85
|
+
|
|
86
|
+
### 🤖 Agent Coordination
|
|
87
|
+
|
|
88
|
+
Orchestrates 18 specialized QE agents:
|
|
89
|
+
- Parallel execution for speed
|
|
90
|
+
- Sequential execution for dependencies
|
|
91
|
+
- Memory-based coordination via `aqe/*` namespace
|
|
92
|
+
- Smart batching to avoid OOM issues
|
|
93
|
+
|
|
94
|
+
### 📊 Quality Gates
|
|
95
|
+
|
|
96
|
+
Configurable gates per phase:
|
|
97
|
+
- Coverage thresholds
|
|
98
|
+
- Performance SLAs
|
|
99
|
+
- Security vulnerability limits
|
|
100
|
+
- Deployment readiness scores
|
|
101
|
+
|
|
102
|
+
### 🔄 Adaptive Strategies
|
|
103
|
+
|
|
104
|
+
Adapts testing approach based on:
|
|
105
|
+
- Code change risk analysis
|
|
106
|
+
- Historical failure patterns
|
|
107
|
+
- Resource constraints
|
|
108
|
+
- Time budgets
|
|
109
|
+
|
|
110
|
+
## Use Cases
|
|
111
|
+
|
|
112
|
+
### Microservices Pipeline
|
|
113
|
+
```javascript
|
|
114
|
+
// See resources/workflows/microservice-pipeline.md
|
|
115
|
+
// Fast feedback, contract testing, chaos engineering
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
### Monolith Pipeline
|
|
119
|
+
```javascript
|
|
120
|
+
// See resources/workflows/monolith-pipeline.md
|
|
121
|
+
// Smart test selection, comprehensive regression, DB migrations
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
### Mobile App Pipeline
|
|
125
|
+
```javascript
|
|
126
|
+
// See resources/workflows/mobile-pipeline.md
|
|
127
|
+
// Device testing, accessibility, localization, staged rollout
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
## Integration with AQE Fleet
|
|
131
|
+
|
|
132
|
+
### All 37 Skills Available
|
|
133
|
+
|
|
134
|
+
The orchestrator can invoke any of the 37 QE skills:
|
|
135
|
+
|
|
136
|
+
**Phase 1 Skills (18)**:
|
|
137
|
+
- Core Testing: agentic-quality-engineering, context-driven-testing, holistic-testing-pact
|
|
138
|
+
- Methodologies: tdd-london-chicago, xp-practices, risk-based-testing, test-automation-strategy
|
|
139
|
+
- Techniques: api-testing-patterns, exploratory-testing-advanced, performance-testing, security-testing
|
|
140
|
+
- Code Quality: code-review-quality, refactoring-patterns, quality-metrics
|
|
141
|
+
- Communication: bug-reporting-excellence, technical-writing, consultancy-practices
|
|
142
|
+
|
|
143
|
+
**Phase 2 Skills (16)**:
|
|
144
|
+
- Methodologies: regression-testing, shift-left-testing, shift-right-testing, test-design-techniques, mutation-testing, test-data-management
|
|
145
|
+
- Specialized: accessibility-testing, mobile-testing, database-testing, contract-testing, chaos-engineering-resilience, compatibility-testing, localization-testing, compliance-testing, visual-testing-advanced
|
|
146
|
+
- Infrastructure: test-environment-management, test-reporting-analytics
|
|
147
|
+
|
|
148
|
+
**Phase 3 Skills (3)**:
|
|
149
|
+
- Strategic: six-thinking-hats, brutal-honesty-review, cicd-pipeline-qe-orchestrator
|
|
150
|
+
|
|
151
|
+
### All 18 Agents Available
|
|
152
|
+
|
|
153
|
+
The orchestrator coordinates all 18 QE agents:
|
|
154
|
+
|
|
155
|
+
**Core Testing (5)**: test-generator, test-executor, coverage-analyzer, quality-gate, quality-analyzer
|
|
156
|
+
**Performance & Security (2)**: performance-tester, security-scanner
|
|
157
|
+
**Strategic Planning (3)**: requirements-validator, production-intelligence, fleet-commander
|
|
158
|
+
**Deployment (1)**: deployment-readiness
|
|
159
|
+
**Advanced Testing (4)**: regression-risk-analyzer, test-data-architect, api-contract-validator, flaky-test-hunter
|
|
160
|
+
**Specialized (2)**: visual-tester, chaos-engineer
|
|
161
|
+
|
|
162
|
+
## Example Workflows
|
|
163
|
+
|
|
164
|
+
### Example 1: New Feature Development
|
|
165
|
+
|
|
166
|
+
```javascript
|
|
167
|
+
// Commit phase
|
|
168
|
+
Task("TDD Tests", "Generate tests for new UserService.createUser()", "qe-test-generator")
|
|
169
|
+
Skill("tdd-london-chicago")
|
|
170
|
+
|
|
171
|
+
// Build phase
|
|
172
|
+
Task("Run Tests", "Execute full suite with coverage", "qe-test-executor")
|
|
173
|
+
Task("Coverage Check", "Analyze and report gaps", "qe-coverage-analyzer")
|
|
174
|
+
|
|
175
|
+
// Integration phase
|
|
176
|
+
Task("API Tests", "Validate new API contracts", "qe-api-contract-validator")
|
|
177
|
+
Task("Performance", "Load test new endpoint", "qe-performance-tester")
|
|
178
|
+
|
|
179
|
+
// Production
|
|
180
|
+
Task("Monitor", "Track new feature adoption", "qe-production-intelligence")
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
### Example 2: Hotfix Deployment
|
|
184
|
+
|
|
185
|
+
```javascript
|
|
186
|
+
// Commit phase (minimal)
|
|
187
|
+
Task("Risk Analysis", "Identify affected code", "qe-regression-risk-analyzer")
|
|
188
|
+
Task("Targeted Tests", "Run minimal test suite", "qe-test-executor")
|
|
189
|
+
|
|
190
|
+
// Build phase (fast)
|
|
191
|
+
Task("Smoke Tests", "Critical path validation", "qe-test-executor")
|
|
192
|
+
|
|
193
|
+
// Production (monitored)
|
|
194
|
+
Task("Canary Deploy", "Monitor hotfix rollout", "qe-production-intelligence")
|
|
195
|
+
```
|
|
196
|
+
|
|
197
|
+
### Example 3: Comprehensive Release
|
|
198
|
+
|
|
199
|
+
```javascript
|
|
200
|
+
// Full pipeline orchestration
|
|
201
|
+
Skill("cicd-pipeline-qe-orchestrator")
|
|
202
|
+
|
|
203
|
+
// The skill will guide you through:
|
|
204
|
+
// 1. Commit phase: shift-left testing, TDD
|
|
205
|
+
// 2. Build phase: full regression, mutation testing
|
|
206
|
+
// 3. Integration: API, performance, security testing
|
|
207
|
+
// 4. Staging: chaos testing, visual regression
|
|
208
|
+
// 5. Production: staged rollout, monitoring
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
## Configuration
|
|
212
|
+
|
|
213
|
+
### Pipeline Phase Mapping
|
|
214
|
+
|
|
215
|
+
Customize phase names to match your pipeline:
|
|
216
|
+
|
|
217
|
+
```javascript
|
|
218
|
+
const pipelinePhases = {
|
|
219
|
+
commit: "pre-merge",
|
|
220
|
+
build: "ci-build",
|
|
221
|
+
integration: "test-env",
|
|
222
|
+
staging: "uat",
|
|
223
|
+
production: "prod"
|
|
224
|
+
};
|
|
225
|
+
```
|
|
226
|
+
|
|
227
|
+
### Quality Gate Thresholds
|
|
228
|
+
|
|
229
|
+
Adjust thresholds per phase:
|
|
230
|
+
|
|
231
|
+
```json
|
|
232
|
+
{
|
|
233
|
+
"commit": { "coverage": 80, "max_violations": 0 },
|
|
234
|
+
"build": { "coverage": 90, "mutation_score": 70 },
|
|
235
|
+
"integration": { "p95_ms": 200, "security_critical": 0 },
|
|
236
|
+
"staging": { "readiness_score": 85 },
|
|
237
|
+
"production": { "error_rate": 0.001 }
|
|
238
|
+
}
|
|
239
|
+
```
|
|
240
|
+
|
|
241
|
+
## Best Practices
|
|
242
|
+
|
|
243
|
+
### 1. Start with Quick Start
|
|
244
|
+
```javascript
|
|
245
|
+
// Get recommendations first
|
|
246
|
+
Skill("cicd-pipeline-qe-orchestrator")
|
|
247
|
+
```
|
|
248
|
+
|
|
249
|
+
### 2. Adapt to Your Context
|
|
250
|
+
```javascript
|
|
251
|
+
// Use context-driven testing skill
|
|
252
|
+
Skill("context-driven-testing")
|
|
253
|
+
```
|
|
254
|
+
|
|
255
|
+
### 3. Measure and Improve
|
|
256
|
+
```javascript
|
|
257
|
+
// Track quality metrics
|
|
258
|
+
Task("Quality Trends", "Analyze quality improvements", "qe-quality-analyzer")
|
|
259
|
+
```
|
|
260
|
+
|
|
261
|
+
### 4. Learn from Production
|
|
262
|
+
```javascript
|
|
263
|
+
// Convert incidents to tests
|
|
264
|
+
Task("Production Intelligence", "Turn incidents into test scenarios", "qe-production-intelligence")
|
|
265
|
+
```
|
|
266
|
+
|
|
267
|
+
## Troubleshooting
|
|
268
|
+
|
|
269
|
+
See main SKILL.md [Troubleshooting section](SKILL.md#troubleshooting) for:
|
|
270
|
+
- Too many tests running (OOM)
|
|
271
|
+
- Pipeline takes too long
|
|
272
|
+
- Quality gates failing
|
|
273
|
+
|
|
274
|
+
## Resources
|
|
275
|
+
|
|
276
|
+
- [Main Skill Documentation](SKILL.md)
|
|
277
|
+
- [Microservice Pipeline Workflow](resources/workflows/microservice-pipeline.md)
|
|
278
|
+
- [Monolith Pipeline Workflow](resources/workflows/monolith-pipeline.md)
|
|
279
|
+
- [Mobile Pipeline Workflow](resources/workflows/mobile-pipeline.md)
|
|
280
|
+
- [All 37 QE Skills Reference](https://github.com/ruvnet/agentic-qe-cf/blob/main/docs/reference/skills.md)
|
|
281
|
+
- [All 18 QE Agents Reference](https://github.com/ruvnet/agentic-qe-cf/blob/main/docs/reference/agents.md)
|
|
282
|
+
|
|
283
|
+
## Contributing
|
|
284
|
+
|
|
285
|
+
To add new pipeline workflows:
|
|
286
|
+
1. Create workflow in `resources/workflows/[name]-pipeline.md`
|
|
287
|
+
2. Follow existing template structure
|
|
288
|
+
3. Include phase-by-phase breakdown
|
|
289
|
+
4. Add skill and agent selections
|
|
290
|
+
5. Define quality gates
|
|
291
|
+
6. Provide complete code examples
|
|
292
|
+
|
|
293
|
+
## License
|
|
294
|
+
|
|
295
|
+
Part of the Agentic QE Fleet - MIT License
|
|
296
|
+
|
|
297
|
+
---
|
|
298
|
+
|
|
299
|
+
**Created**: 2025-11-13
|
|
300
|
+
**Version**: 1.0.0
|
|
301
|
+
**Integrations**: 37 QE Skills, 18 QE Agents, All CI/CD platforms
|