cc-discipline 2.0.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/init.sh CHANGED
@@ -355,9 +355,13 @@ echo -e "${GREEN}Installing skills...${NC}"
355
355
  cp -r "$SCRIPT_DIR/templates/.claude/skills/commit" .claude/skills/
356
356
  cp -r "$SCRIPT_DIR/templates/.claude/skills/self-check" .claude/skills/
357
357
  cp -r "$SCRIPT_DIR/templates/.claude/skills/evaluate" .claude/skills/
358
+ cp -r "$SCRIPT_DIR/templates/.claude/skills/think" .claude/skills/
359
+ cp -r "$SCRIPT_DIR/templates/.claude/skills/retro" .claude/skills/
358
360
  echo " ✓ /commit — smart commit (test → update memory → commit)"
359
361
  echo " ✓ /self-check — periodic discipline check (use with /loop 10m /self-check)"
360
362
  echo " ✓ /evaluate — evaluate external review/advice against codebase context"
363
+ echo " ✓ /think — stop and think before coding (ask → propose → wait)"
364
+ echo " ✓ /retro — post-task retrospective (project + framework feedback)"
361
365
 
362
366
  # ─── Handle CLAUDE.md ───
363
367
  if [ ! -f "CLAUDE.md" ]; then
@@ -437,6 +441,8 @@ if [ "$INSTALL_MODE" = "fresh" ]; then
437
441
  echo -e " ${GREEN}.claude/skills/commit/${NC} ← /commit smart commit"
438
442
  echo -e " ${GREEN}.claude/skills/self-check/${NC} ← /self-check periodic discipline check"
439
443
  echo -e " ${GREEN}.claude/skills/evaluate/${NC} ← /evaluate assess external review advice"
444
+ echo -e " ${GREEN}.claude/skills/think/${NC} ← /think stop and think before coding"
445
+ echo -e " ${GREEN}.claude/skills/retro/${NC} ← /retro post-task retrospective"
440
446
  echo -e " ${GREEN}.claude/settings.json${NC} ← Hooks configuration"
441
447
  echo -e " ${GREEN}docs/progress.md${NC} ← Progress log (maintained by Claude)"
442
448
  echo -e " ${GREEN}docs/debug-log.md${NC} ← Debug log (maintained by Claude)"
@@ -455,6 +461,8 @@ else
455
461
  echo -e " ${GREEN}.claude/skills/commit/${NC} ← /commit skill installed/updated"
456
462
  echo -e " ${GREEN}.claude/skills/self-check/${NC} ← /self-check discipline check installed"
457
463
  echo -e " ${GREEN}.claude/skills/evaluate/${NC} ← /evaluate external review assessment"
464
+ echo -e " ${GREEN}.claude/skills/think/${NC} ← /think stop and think before coding"
465
+ echo -e " ${GREEN}.claude/skills/retro/${NC} ← /retro post-task retrospective"
458
466
  if [ ! -f "$BACKUP_DIR/settings.json" ] || [ -f ".claude/.cc-discipline-settings-template.json" ]; then
459
467
  echo -e " ${YELLOW}.claude/settings.json${NC} ← See notes above"
460
468
  else
package/lib/doctor.sh CHANGED
@@ -95,7 +95,7 @@ done
95
95
  # 6. Skills
96
96
  echo ""
97
97
  echo "Skills:"
98
- for skill in commit self-check evaluate; do
98
+ for skill in commit self-check evaluate think retro; do
99
99
  if [ -d ".claude/skills/${skill}" ]; then
100
100
  ok "/${skill}"
101
101
  else
package/lib/status.sh CHANGED
@@ -68,8 +68,10 @@ SKILLS=""
68
68
  [ -d ".claude/skills/commit" ] && SKILLS="${SKILLS}/commit "
69
69
  [ -d ".claude/skills/self-check" ] && SKILLS="${SKILLS}/self-check "
70
70
  [ -d ".claude/skills/evaluate" ] && SKILLS="${SKILLS}/evaluate "
71
+ [ -d ".claude/skills/think" ] && SKILLS="${SKILLS}/think "
72
+ [ -d ".claude/skills/retro" ] && SKILLS="${SKILLS}/retro "
71
73
  SKILL_COUNT=$(echo "$SKILLS" | wc -w | tr -d ' ')
72
- echo -e "${GREEN}${SKILL_COUNT}/3${NC} (${SKILLS% })"
74
+ echo -e "${GREEN}${SKILL_COUNT}/5${NC} (${SKILLS% })"
73
75
 
74
76
  # Settings
75
77
  echo -n "Settings: "
package/package.json CHANGED
@@ -1,9 +1,9 @@
1
1
  {
2
2
  "name": "cc-discipline",
3
- "version": "2.0.0",
3
+ "version": "2.1.0",
4
4
  "description": "Discipline framework for Claude Code — rules, hooks, and agents that keep AI on track",
5
5
  "bin": {
6
- "cc-discipline": "./bin/cli.sh"
6
+ "cc-discipline": "bin/cli.sh"
7
7
  },
8
8
  "files": [
9
9
  "bin/",
@@ -28,7 +28,7 @@
28
28
  "license": "MIT",
29
29
  "repository": {
30
30
  "type": "git",
31
- "url": "https://github.com/techhu/cc-discipline"
31
+ "url": "git+https://github.com/techhu/cc-discipline.git"
32
32
  },
33
33
  "homepage": "https://github.com/techhu/cc-discipline#readme"
34
34
  }
package/(file header missing from this diff — content below is a shell hook script, not package.json; likely the PreToolUse self-check hook) CHANGED
@@ -21,7 +21,7 @@ echo "$COUNT" > "$COUNT_FILE"
21
21
 
22
22
  if [ $((COUNT % THRESHOLD)) -eq 0 ]; then
23
23
  cat <<JSONEOF
24
- {"hookSpecificOutput":{"hookEventName":"PreToolUse","additionalContext":"AUTO SELF-CHECK (#${COUNT} actions): Pause and verify: (1) Am I still serving the user's original request, or have I drifted? (2) Am I fixing the same thing repeatedly (mole-whacking)? (3) Have I claimed anything as 'verified' without actually running it? (4) Am I making changes the user didn't ask for? (5) Have I updated docs/progress.md with current status and completed milestones? If progress.md is stale, update it NOW before continuing. If ANY answer is concerning, STOP and report to the user before continuing."}}
24
+ {"hookSpecificOutput":{"hookEventName":"PreToolUse","additionalContext":"AUTO SELF-CHECK (#${COUNT} actions): Pause and verify: (1) Am I still serving the user's original request, or have I drifted? (2) Am I fixing the same thing repeatedly (mole-whacking)? (3) Have I claimed anything as 'verified' without actually running it? (4) Am I making changes the user didn't ask for? (5) Have I updated docs/progress.md with current status and completed milestones? If progress.md is stale, update it NOW before continuing. (6) Plan fidelity: if executing a plan, compare what the current step asked for vs what I actually delivered — am I cutting corners or simplifying the intent? Check the acceptance criteria. (7) Friction check: did any hook/rule get in the way or miss something since last check? If so, note it in one line for /retro later. If ANY answer is concerning, STOP and report to the user before continuing."}}
25
25
  JSONEOF
26
26
  fi
27
27
 
@@ -0,0 +1,40 @@
1
+ ---
2
+ name: retro
3
+ description: Find friction, remove friction. Quick post-task review that makes this project's workflow smoother and feeds improvements back to cc-discipline.
4
+ ---
5
+
6
+ Find friction. Remove friction. That's it.
7
+
8
+ ## What to do
9
+
10
+ Quickly scan what just happened — `git log --oneline -10` and any hook triggers you remember. Then output **only friction and insights**, in this format:
11
+
12
+ ```
13
+ RETRO — [date]
14
+
15
+ Friction:
16
+ - [what got in the way] → fix: [specific actionable change]
17
+
18
+ Insights:
19
+ - [something learned that should survive this session]
20
+
21
+ Framework:
22
+ - [cc-discipline improvement, if any — skip if none]
23
+ ```
24
+
25
+ Rules:
26
+ - **Only friction** — Don't list what went well. Smooth things don't need attention.
27
+ - **Only actionable** — Every friction item must have a "→ fix:" with a concrete change (adjust a threshold, add to CLAUDE.md, update memory, exempt a path).
28
+ - **Only new** — Don't repeat friction that's already been addressed or recorded in memory.
29
+ - **Be specific** — "streak-breaker was annoying" is not useful. "streak-breaker triggered 3x on config.yaml during template fill → fix: add config.yaml to docs/ exempt path, or raise config threshold to 10" is useful.
30
+ - **Framework items are rare** — Most friction is project-specific. Only flag framework issues if the same problem would hit other projects too.
31
+ - **Keep it short** — 3-5 items max. If you can't find friction, say "no friction found" and move on. An empty retro is a good sign.
32
+
33
+ ## After output
34
+
35
+ Present the items. User decides:
36
+ - "fix it" → apply the changes
37
+ - "remember it" → write to memory via /commit
38
+ - "skip" → move on
39
+
40
+ Do not auto-apply. Do not pad. Do not turn this into a report.
@@ -0,0 +1,104 @@
1
+ ---
2
+ name: think
3
+ description: Stop and think before coding. Read context, ask questions, propose approaches, self-review, wait for user choice. Use when starting any non-trivial task.
4
+ ---
5
+
6
+ You are about to start a task. **Do NOT write any code yet.** Your job right now is to think clearly and align with the user before acting.
7
+
8
+ ## Step 0: Read first
9
+
10
+ Before asking anything, read the relevant code and docs. Spend 1-2 minutes understanding what already exists:
11
+ - Read files that will be affected
12
+ - Check existing patterns, conventions, and architecture
13
+ - Look at recent commits for context on current direction
14
+
15
+ This turns your questions from generic ("what framework?") into informed ("I see you're using X pattern in module Y — should the new feature follow the same pattern, or is there a reason to diverge?").
16
+
17
+ **Skip this step only if the task is purely conceptual with no existing code.**
18
+
19
+ ## Step 1: Gauge complexity
20
+
21
+ Before diving deep, assess the task size:
22
+
23
+ - **Small** (rename, fix typo, add a config field): Skip to Step 4 — just state what you'll do and do it. Don't over-process trivial work.
24
+ - **Medium** (new feature, refactor a module, fix a non-obvious bug): Run Steps 2-5 normally.
25
+ - **Large** (crosses multiple subsystems, changes architecture, affects many files): Flag it. Propose decomposition into smaller tasks before designing a monolith solution.
26
+
27
+ State your assessment: "This looks [small/medium/large] because [reason]."
28
+
29
+ ## Step 2: Understand
30
+
31
+ Ask 2-3 clarifying questions about the task. Focus on:
32
+ - What exactly should change? (scope)
33
+ - What should NOT change? (boundaries)
34
+ - How will we know it's done? (success criteria)
35
+
36
+ Rules:
37
+ - One message, all questions at once (don't drip-feed)
38
+ - Prefer multiple-choice over open-ended when possible
39
+ - Reference what you learned in Step 0 — ask about gaps in your understanding, not things you can read yourself
40
+ - If the task is already crystal clear from Step 0, say so and skip to Step 3
41
+
42
+ ## Step 3: Propose
43
+
44
+ After the user answers (or if you skipped Step 2), propose **2-3 approaches**:
45
+
46
+ ```
47
+ **Approach A: [name]**
48
+ - How: [1-2 sentences]
49
+ - Pros: ...
50
+ - Cons: ...
51
+ - Steps:
52
+ 1. [step] — done when: [acceptance criteria]
53
+ 2. [step] — done when: [acceptance criteria]
54
+
55
+ **Approach B: [name]**
56
+ - How: [1-2 sentences]
57
+ - Pros: ...
58
+ - Cons: ...
59
+ - Steps:
60
+ 1. [step] — done when: [acceptance criteria]
61
+ 2. [step] — done when: [acceptance criteria]
62
+
63
+ **Recommendation: [A/B/C] because [reason]**
64
+ ```
65
+
66
+ Rules:
67
+ - Approaches must be genuinely different, not trivial variations
68
+ - If there's truly only one reasonable approach, say so — don't invent fake alternatives
69
+ - Include effort estimate for each (small / medium / large)
70
+ - Follow existing codebase patterns unless there's a strong reason not to
71
+ - Flag risks or unknowns you've spotted
72
+ - **Every step must have acceptance criteria** — "done when" must be observable and verifiable, not vague. Bad: "done when refactored". Good: "done when 3 methods extracted, each ≤20 lines, all tests pass"
73
+
74
+ ## Step 4: Self-review
75
+
76
+ Before presenting to the user, challenge your own proposals:
77
+
78
+ - Am I overcomplicating this? Is there a simpler way I haven't considered?
79
+ - Am I solving the right problem, or a problem I invented?
80
+ - Does this match existing patterns in the codebase, or am I introducing unnecessary novelty?
81
+ - Did I miss an obvious approach that someone more familiar with this codebase would see?
82
+ - Would I be embarrassed by this proposal after reading more code?
83
+
84
+ If self-review reveals issues, revise your proposals before presenting. Don't show the user your first draft if you can already see it's flawed.
85
+
86
+ ## Step 5: Wait
87
+
88
+ **Stop here. Do not proceed until the user picks an approach or gives a green light.**
89
+
90
+ Do not:
91
+ - Start coding "while waiting"
92
+ - Create files or scaffolding
93
+ - Run exploratory commands beyond what's needed for Step 0-3
94
+ - Say "I'll go ahead and start with Approach A" — that's the user's call
95
+
96
+ ## When this skill ends
97
+
98
+ When the user confirms an approach, transition to implementation. You now have:
99
+ - Clear scope and boundaries from Step 0-2
100
+ - An agreed approach from Step 3
101
+ - Self-reviewed quality from Step 4
102
+ - Known risks flagged upfront
103
+
104
+ Carry these forward. If scope changes during implementation, pause and re-align rather than silently expanding.