codingbuddy-rules 1.0.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.ai-rules/adapters/claude-code.md +30 -0
- package/.ai-rules/adapters/codex.md +25 -0
- package/.ai-rules/adapters/cursor.md +23 -0
- package/.ai-rules/skills/README.md +112 -0
- package/.ai-rules/skills/brainstorming/SKILL.md +54 -0
- package/.ai-rules/skills/dispatching-parallel-agents/SKILL.md +180 -0
- package/.ai-rules/skills/executing-plans/SKILL.md +76 -0
- package/.ai-rules/skills/frontend-design/SKILL.md +42 -0
- package/.ai-rules/skills/subagent-driven-development/SKILL.md +240 -0
- package/.ai-rules/skills/systematic-debugging/SKILL.md +296 -0
- package/.ai-rules/skills/test-driven-development/SKILL.md +371 -0
- package/.ai-rules/skills/writing-plans/SKILL.md +116 -0
- package/package.json +1 -1
@@ -0,0 +1,371 @@
---
name: test-driven-development
description: Use when implementing any feature or bugfix, before writing implementation code
---

# Test-Driven Development (TDD)

## Overview

Write the test first. Watch it fail. Write minimal code to pass.

**Core principle:** If you didn't watch the test fail, you don't know if it tests the right thing.

**Violating the letter of the rules is violating the spirit of the rules.**

## When to Use

**Always:**
- New features
- Bug fixes
- Refactoring
- Behavior changes

**Exceptions (ask your human partner):**
- Throwaway prototypes
- Generated code
- Configuration files

Thinking "skip TDD just this once"? Stop. That's rationalization.

## The Iron Law

```
NO PRODUCTION CODE WITHOUT A FAILING TEST FIRST
```

Write code before the test? Delete it. Start over.

**No exceptions:**
- Don't keep it as "reference"
- Don't "adapt" it while writing tests
- Don't look at it
- Delete means delete

Implement fresh from tests. Period.

## Red-Green-Refactor

```dot
digraph tdd_cycle {
  rankdir=LR;
  red [label="RED\nWrite failing test", shape=box, style=filled, fillcolor="#ffcccc"];
  verify_red [label="Verify fails\ncorrectly", shape=diamond];
  green [label="GREEN\nMinimal code", shape=box, style=filled, fillcolor="#ccffcc"];
  verify_green [label="Verify passes\nAll green", shape=diamond];
  refactor [label="REFACTOR\nClean up", shape=box, style=filled, fillcolor="#ccccff"];
  next [label="Next", shape=ellipse];

  red -> verify_red;
  verify_red -> green [label="yes"];
  verify_red -> red [label="wrong\nfailure"];
  green -> verify_green;
  verify_green -> refactor [label="yes"];
  verify_green -> green [label="no"];
  refactor -> verify_green [label="stay\ngreen"];
  verify_green -> next;
  next -> red;
}
```

### RED - Write Failing Test

Write one minimal test showing what should happen.

<Good>
```typescript
test('retries failed operations 3 times', async () => {
  let attempts = 0;
  const operation = () => {
    attempts++;
    if (attempts < 3) throw new Error('fail');
    return 'success';
  };

  const result = await retryOperation(operation);

  expect(result).toBe('success');
  expect(attempts).toBe(3);
});
```
Clear name, tests real behavior, one thing
</Good>

<Bad>
```typescript
test('retry works', async () => {
  const mock = jest.fn()
    .mockRejectedValueOnce(new Error())
    .mockRejectedValueOnce(new Error())
    .mockResolvedValueOnce('success');
  await retryOperation(mock);
  expect(mock).toHaveBeenCalledTimes(3);
});
```
Vague name; tests the mock, not the code
</Bad>

**Requirements:**
- One behavior
- Clear name
- Real code (no mocks unless unavoidable)

### Verify RED - Watch It Fail

**MANDATORY. Never skip.**

```bash
npm test path/to/test.test.ts
```

Confirm:
- Test fails (not errors)
- Failure message is expected
- Fails because the feature is missing (not a typo)

**Test passes?** You're testing existing behavior. Fix the test.

**Test errors?** Fix the error, re-run until it fails correctly.

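For the retry example above, a correct RED failure names the missing behavior, while an error means the harness itself is broken. A stylized sketch of the difference (the exact wording varies by test runner; the function name comes from the example above):

```bash
$ npm test retry.test.ts
# Wrong kind of failure - the test errored before reaching the assertion:
#   ReferenceError: retryOperation is not defined
# Right kind of failure - the feature is missing:
#   FAIL retries failed operations 3 times: expected 'success', received undefined
```
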
### GREEN - Minimal Code

Write the simplest code that passes the test.

<Good>
```typescript
async function retryOperation<T>(fn: () => Promise<T>): Promise<T> {
  for (let i = 0; i < 3; i++) {
    try {
      return await fn();
    } catch (e) {
      if (i === 2) throw e;
    }
  }
  throw new Error('unreachable');
}
```
Just enough to pass
</Good>

<Bad>
```typescript
async function retryOperation<T>(
  fn: () => Promise<T>,
  options?: {
    maxRetries?: number;
    backoff?: 'linear' | 'exponential';
    onRetry?: (attempt: number) => void;
  }
): Promise<T> {
  // YAGNI
}
```
Over-engineered
</Bad>

Don't add features, refactor other code, or "improve" beyond the test.

### Verify GREEN - Watch It Pass

**MANDATORY.**

```bash
npm test path/to/test.test.ts
```

Confirm:
- Test passes
- Other tests still pass
- Output pristine (no errors, no warnings)

**Test fails?** Fix the code, not the test.

**Other tests fail?** Fix now.

### REFACTOR - Clean Up

After green only:
- Remove duplication
- Improve names
- Extract helpers

Keep tests green. Don't add behavior.

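For instance, once the retry test above is green, a refactor step might name the magic number and flatten the control flow. A sketch under the same test, not new behavior:

```typescript
const MAX_ATTEMPTS = 3; // was a bare literal inside the loop

async function retryOperation<T>(fn: () => Promise<T>): Promise<T> {
  let lastError: unknown;
  for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
    try {
      return await fn();
    } catch (e) {
      lastError = e; // remember the failure; rethrow only after the final attempt
    }
  }
  throw lastError;
}
```

Re-run the suite after each such change; if anything goes red, the "refactor" changed behavior.
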
### Repeat

Next failing test for next feature.

## Good Tests

| Quality | Good | Bad |
|---------|------|-----|
| **Minimal** | One thing. "and" in name? Split it. | `test('validates email and domain and whitespace')` |
| **Clear** | Name describes behavior | `test('test1')` |
| **Shows intent** | Demonstrates desired API | Obscures what code should do |

## Why Order Matters

**"I'll write tests after to verify it works"**

Tests written after code pass immediately. Passing immediately proves nothing:
- Might test the wrong thing
- Might test implementation, not behavior
- Might miss edge cases you forgot
- You never saw it catch the bug

Test-first forces you to see the test fail, proving it actually tests something.

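A hypothetical illustration (names invented): a test written after the fact that copies the implementation into its own assertion, so it can never fail:

```typescript
// Implementation came first:
function fullName(user: { first: string; last: string }): string {
  return `${user.first} ${user.last}`;
}

// Test written afterwards mirrors the implementation. It passes immediately,
// and would keep passing even if "last, first" was the required format.
test('fullName formats name', () => {
  const user = { first: 'Ada', last: 'Lovelace' };
  expect(fullName(user)).toBe(`${user.first} ${user.last}`);
});
```
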
**"I already manually tested all the edge cases"**
|
|
219
|
+
|
|
220
|
+
Manual testing is ad-hoc. You think you tested everything but:
|
|
221
|
+
- No record of what you tested
|
|
222
|
+
- Can't re-run when code changes
|
|
223
|
+
- Easy to forget cases under pressure
|
|
224
|
+
- "It worked when I tried it" ≠ comprehensive
|
|
225
|
+
|
|
226
|
+
Automated tests are systematic. They run the same way every time.
|
|
227
|
+
|
|
228
|
+
**"Deleting X hours of work is wasteful"**
|
|
229
|
+
|
|
230
|
+
Sunk cost fallacy. The time is already gone. Your choice now:
|
|
231
|
+
- Delete and rewrite with TDD (X more hours, high confidence)
|
|
232
|
+
- Keep it and add tests after (30 min, low confidence, likely bugs)
|
|
233
|
+
|
|
234
|
+
The "waste" is keeping code you can't trust. Working code without real tests is technical debt.
|
|
235
|
+
|
|
236
|
+
**"TDD is dogmatic, being pragmatic means adapting"**
|
|
237
|
+
|
|
238
|
+
TDD IS pragmatic:
|
|
239
|
+
- Finds bugs before commit (faster than debugging after)
|
|
240
|
+
- Prevents regressions (tests catch breaks immediately)
|
|
241
|
+
- Documents behavior (tests show how to use code)
|
|
242
|
+
- Enables refactoring (change freely, tests catch breaks)
|
|
243
|
+
|
|
244
|
+
"Pragmatic" shortcuts = debugging in production = slower.
|
|
245
|
+
|
|
246
|
+
**"Tests after achieve the same goals - it's spirit not ritual"**
|
|
247
|
+
|
|
248
|
+
No. Tests-after answer "What does this do?" Tests-first answer "What should this do?"
|
|
249
|
+
|
|
250
|
+
Tests-after are biased by your implementation. You test what you built, not what's required. You verify remembered edge cases, not discovered ones.
|
|
251
|
+
|
|
252
|
+
Tests-first force edge case discovery before implementing. Tests-after verify you remembered everything (you didn't).
|
|
253
|
+
|
|
254
|
+
30 minutes of tests after ≠ TDD. You get coverage, lose proof tests work.
|
|
255
|
+
|
|
256
|
+
## Common Rationalizations
|
|
257
|
+
|
|
258
|
+
| Excuse | Reality |
|
|
259
|
+
|--------|---------|
|
|
260
|
+
| "Too simple to test" | Simple code breaks. Test takes 30 seconds. |
|
|
261
|
+
| "I'll test after" | Tests passing immediately prove nothing. |
|
|
262
|
+
| "Tests after achieve same goals" | Tests-after = "what does this do?" Tests-first = "what should this do?" |
|
|
263
|
+
| "Already manually tested" | Ad-hoc ≠ systematic. No record, can't re-run. |
|
|
264
|
+
| "Deleting X hours is wasteful" | Sunk cost fallacy. Keeping unverified code is technical debt. |
|
|
265
|
+
| "Keep as reference, write tests first" | You'll adapt it. That's testing after. Delete means delete. |
|
|
266
|
+
| "Need to explore first" | Fine. Throw away exploration, start with TDD. |
|
|
267
|
+
| "Test hard = design unclear" | Listen to test. Hard to test = hard to use. |
|
|
268
|
+
| "TDD will slow me down" | TDD faster than debugging. Pragmatic = test-first. |
|
|
269
|
+
| "Manual test faster" | Manual doesn't prove edge cases. You'll re-test every change. |
|
|
270
|
+
| "Existing code has no tests" | You're improving it. Add tests for existing code. |
|
|
271
|
+
|
|
272
|
+
## Red Flags - STOP and Start Over
|
|
273
|
+
|
|
274
|
+
- Code before test
|
|
275
|
+
- Test after implementation
|
|
276
|
+
- Test passes immediately
|
|
277
|
+
- Can't explain why test failed
|
|
278
|
+
- Tests added "later"
|
|
279
|
+
- Rationalizing "just this once"
|
|
280
|
+
- "I already manually tested it"
|
|
281
|
+
- "Tests after achieve the same purpose"
|
|
282
|
+
- "It's about spirit not ritual"
|
|
283
|
+
- "Keep as reference" or "adapt existing code"
|
|
284
|
+
- "Already spent X hours, deleting is wasteful"
|
|
285
|
+
- "TDD is dogmatic, I'm being pragmatic"
|
|
286
|
+
- "This is different because..."
|
|
287
|
+
|
|
288
|
+
**All of these mean: Delete code. Start over with TDD.**
|
|
289
|
+
|
|
290
|
+
## Example: Bug Fix
|
|
291
|
+
|
|
292
|
+
**Bug:** Empty email accepted
|
|
293
|
+
|
|
294
|
+
**RED**
|
|
295
|
+
```typescript
|
|
296
|
+
test('rejects empty email', async () => {
|
|
297
|
+
const result = await submitForm({ email: '' });
|
|
298
|
+
expect(result.error).toBe('Email required');
|
|
299
|
+
});
|
|
300
|
+
```
|
|
301
|
+
|
|
302
|
+
**Verify RED**
|
|
303
|
+
```bash
|
|
304
|
+
$ npm test
|
|
305
|
+
FAIL: expected 'Email required', got undefined
|
|
306
|
+
```
|
|
307
|
+
|
|
308
|
+
**GREEN**
|
|
309
|
+
```typescript
|
|
310
|
+
function submitForm(data: FormData) {
|
|
311
|
+
if (!data.email?.trim()) {
|
|
312
|
+
return { error: 'Email required' };
|
|
313
|
+
}
|
|
314
|
+
// ...
|
|
315
|
+
}
|
|
316
|
+
```
|
|
317
|
+
|
|
318
|
+
**Verify GREEN**
|
|
319
|
+
```bash
|
|
320
|
+
$ npm test
|
|
321
|
+
PASS
|
|
322
|
+
```
|
|
323
|
+
|
|
324
|
+
**REFACTOR**
|
|
325
|
+
Extract validation for multiple fields if needed.
|
|
326
|
+
|
|
327
|
+
## Verification Checklist
|
|
328
|
+
|
|
329
|
+
Before marking work complete:
|
|
330
|
+
|
|
331
|
+
- [ ] Every new function/method has a test
|
|
332
|
+
- [ ] Watched each test fail before implementing
|
|
333
|
+
- [ ] Each test failed for expected reason (feature missing, not typo)
|
|
334
|
+
- [ ] Wrote minimal code to pass each test
|
|
335
|
+
- [ ] All tests pass
|
|
336
|
+
- [ ] Output pristine (no errors, warnings)
|
|
337
|
+
- [ ] Tests use real code (mocks only if unavoidable)
|
|
338
|
+
- [ ] Edge cases and errors covered
|
|
339
|
+
|
|
340
|
+
Can't check all boxes? You skipped TDD. Start over.
|
|
341
|
+
|
|
342
|
+
## When Stuck
|
|
343
|
+
|
|
344
|
+
| Problem | Solution |
|
|
345
|
+
|---------|----------|
|
|
346
|
+
| Don't know how to test | Write wished-for API. Write assertion first. Ask your human partner. |
|
|
347
|
+
| Test too complicated | Design too complicated. Simplify interface. |
|
|
348
|
+
| Must mock everything | Code too coupled. Use dependency injection. |
|
|
349
|
+
| Test setup huge | Extract helpers. Still complex? Simplify design. |
|
|
350
|
+
|
|
351
|
+
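A minimal dependency-injection sketch (all names invented): pass the collaborator in as a parameter, and the test can supply a tiny real implementation instead of a mocking framework:

```typescript
type Fetcher = (url: string) => Promise<string>;

// The dependency is an argument, not a hard-wired import.
async function loadGreeting(fetcher: Fetcher): Promise<string> {
  const body = await fetcher('https://example.com/greeting');
  return body.trim().toUpperCase();
}

// The test injects a one-line real function - nothing to mock.
test('normalizes the fetched greeting', async () => {
  const result = await loadGreeting(async () => '  hello  ');
  expect(result).toBe('HELLO');
});
```
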
## Debugging Integration

Bug found? Write a failing test reproducing it. Follow the TDD cycle. The test proves the fix and prevents regression.

Never fix bugs without a test.

## Testing Anti-Patterns

When adding mocks or test utilities, read @testing-anti-patterns.md to avoid common pitfalls:
- Testing mock behavior instead of real behavior
- Adding test-only methods to production classes (sketch below)
- Mocking without understanding dependencies

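A hypothetical sketch of the second pitfall (names invented): production code growing a method whose only caller is a test:

```typescript
class Cache {
  private store = new Map<string, string>();

  set(key: string, value: string): void {
    this.store.set(key, value);
  }

  get(key: string): string | undefined {
    return this.store.get(key);
  }

  // Anti-pattern: ships in production, exists only so tests can peek inside.
  dumpStoreForTests(): Map<string, string> {
    return this.store;
  }
}
```

Assert through the public API (`set`, then `get`) instead; if that isn't possible, the class is probably doing too much.
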
## Final Rule

```
Production code → test exists and failed first
Otherwise → not TDD
```

No exceptions without your human partner's permission.

@@ -0,0 +1,116 @@
---
name: writing-plans
description: Use when you have a spec or requirements for a multi-step task, before touching code
---

# Writing Plans

## Overview

Write comprehensive implementation plans assuming the engineer has zero context for our codebase and questionable taste. Document everything they need to know: which files to touch for each task, the code to write, how to test it, and any docs they might need to check. Give them the whole plan as bite-sized tasks. DRY. YAGNI. TDD. Frequent commits.

Assume they are a skilled developer, but one who knows almost nothing about our toolset or problem domain. Assume they don't know good test design very well.

**Announce at start:** "I'm using the writing-plans skill to create the implementation plan."

**Context:** This should be run in a dedicated worktree (created by the brainstorming skill).

**Save plans to:** `docs/plans/YYYY-MM-DD-<feature-name>.md`

## Bite-Sized Task Granularity

**Each step is one action (2-5 minutes):**
- "Write the failing test" - step
- "Run it to make sure it fails" - step
- "Implement the minimal code to make the test pass" - step
- "Run the tests and make sure they pass" - step
- "Commit" - step

## Plan Document Header

**Every plan MUST start with this header:**

```markdown
# [Feature Name] Implementation Plan

> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.

**Goal:** [One sentence describing what this builds]

**Architecture:** [2-3 sentences about approach]

**Tech Stack:** [Key technologies/libraries]

---
```

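A filled-in example of the header (feature, goal, and stack are all hypothetical):

```markdown
# Retry Helper Implementation Plan

> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.

**Goal:** Add a retryOperation helper that retries failed async operations up to 3 times.

**Architecture:** One utility module exporting a single function; callers pass the operation as a closure. No shared state.

**Tech Stack:** TypeScript, Jest.

---
```
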
## Task Structure

```markdown
### Task N: [Component Name]

**Files:**
- Create: `exact/path/to/file.py`
- Modify: `exact/path/to/existing.py:123-145`
- Test: `tests/exact/path/to/test.py`

**Step 1: Write the failing test**

```python
def test_specific_behavior():
    result = function(input)
    assert result == expected
```

**Step 2: Run test to verify it fails**

Run: `pytest tests/path/test.py::test_name -v`
Expected: FAIL with "function not defined"

**Step 3: Write minimal implementation**

```python
def function(input):
    return expected
```

**Step 4: Run test to verify it passes**

Run: `pytest tests/path/test.py::test_name -v`
Expected: PASS

**Step 5: Commit**

```bash
git add tests/path/test.py src/path/file.py
git commit -m "feat: add specific feature"
```
```

## Remember
- Exact file paths always
- Complete code in plan (not "add validation")
- Exact commands with expected output
- Reference relevant skills with @ syntax (example below)
- DRY, YAGNI, TDD, frequent commits

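For example, a step might carry a skill reference inline (skill name hypothetical):

```markdown
**Step 1: Write the failing test** (follow @test-driven-development)
```
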
## Execution Handoff

After saving the plan, offer the execution choice:

**"Plan complete and saved to `docs/plans/<filename>.md`. Two execution options:**

**1. Subagent-Driven (this session)** - I dispatch a fresh subagent per task and review between tasks; fast iteration

**2. Parallel Session (separate)** - Open a new session with executing-plans; batch execution with checkpoints

**Which approach?"**

**If Subagent-Driven chosen:**
- **REQUIRED SUB-SKILL:** Use superpowers:subagent-driven-development
- Stay in this session
- Fresh subagent per task + code review

**If Parallel Session chosen:**
- Guide them to open a new session in the worktree
- **REQUIRED SUB-SKILL:** New session uses superpowers:executing-plans
|