@sandrinio/vbounce 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +107 -0
- package/bin/vbounce.mjs +165 -0
- package/brains/AGENTS.md +129 -0
- package/brains/CLAUDE.md +146 -0
- package/brains/GEMINI.md +134 -0
- package/brains/SETUP.md +180 -0
- package/brains/claude-agents/architect.md +140 -0
- package/brains/claude-agents/developer.md +69 -0
- package/brains/claude-agents/devops.md +219 -0
- package/brains/claude-agents/qa.md +112 -0
- package/brains/claude-agents/scribe.md +141 -0
- package/brains/cursor-rules/vbounce-docs.mdc +41 -0
- package/brains/cursor-rules/vbounce-process.mdc +45 -0
- package/brains/cursor-rules/vbounce-rules.mdc +26 -0
- package/package.json +40 -0
- package/skills/agent-team/SKILL.md +425 -0
- package/skills/doc-manager/SKILL.md +278 -0
- package/skills/lesson/SKILL.md +90 -0
- package/skills/react-best-practices/SKILL.md +3014 -0
- package/skills/react-best-practices/rules/_sections.md +46 -0
- package/skills/react-best-practices/rules/_template.md +28 -0
- package/skills/react-best-practices/rules/advanced-event-handler-refs.md +55 -0
- package/skills/react-best-practices/rules/advanced-init-once.md +42 -0
- package/skills/react-best-practices/rules/advanced-use-latest.md +39 -0
- package/skills/react-best-practices/rules/async-api-routes.md +38 -0
- package/skills/react-best-practices/rules/async-defer-await.md +80 -0
- package/skills/react-best-practices/rules/async-dependencies.md +51 -0
- package/skills/react-best-practices/rules/async-parallel.md +28 -0
- package/skills/react-best-practices/rules/async-suspense-boundaries.md +99 -0
- package/skills/react-best-practices/rules/bundle-barrel-imports.md +59 -0
- package/skills/react-best-practices/rules/bundle-conditional.md +31 -0
- package/skills/react-best-practices/rules/bundle-defer-third-party.md +49 -0
- package/skills/react-best-practices/rules/bundle-dynamic-imports.md +35 -0
- package/skills/react-best-practices/rules/bundle-preload.md +50 -0
- package/skills/react-best-practices/rules/client-event-listeners.md +74 -0
- package/skills/react-best-practices/rules/client-localstorage-schema.md +71 -0
- package/skills/react-best-practices/rules/client-passive-event-listeners.md +48 -0
- package/skills/react-best-practices/rules/client-swr-dedup.md +56 -0
- package/skills/react-best-practices/rules/js-batch-dom-css.md +107 -0
- package/skills/react-best-practices/rules/js-cache-function-results.md +80 -0
- package/skills/react-best-practices/rules/js-cache-property-access.md +28 -0
- package/skills/react-best-practices/rules/js-cache-storage.md +70 -0
- package/skills/react-best-practices/rules/js-combine-iterations.md +32 -0
- package/skills/react-best-practices/rules/js-early-exit.md +50 -0
- package/skills/react-best-practices/rules/js-hoist-regexp.md +45 -0
- package/skills/react-best-practices/rules/js-index-maps.md +37 -0
- package/skills/react-best-practices/rules/js-length-check-first.md +49 -0
- package/skills/react-best-practices/rules/js-min-max-loop.md +82 -0
- package/skills/react-best-practices/rules/js-set-map-lookups.md +24 -0
- package/skills/react-best-practices/rules/js-tosorted-immutable.md +57 -0
- package/skills/react-best-practices/rules/rendering-activity.md +26 -0
- package/skills/react-best-practices/rules/rendering-animate-svg-wrapper.md +47 -0
- package/skills/react-best-practices/rules/rendering-conditional-render.md +40 -0
- package/skills/react-best-practices/rules/rendering-content-visibility.md +38 -0
- package/skills/react-best-practices/rules/rendering-hoist-jsx.md +46 -0
- package/skills/react-best-practices/rules/rendering-hydration-no-flicker.md +82 -0
- package/skills/react-best-practices/rules/rendering-hydration-suppress-warning.md +30 -0
- package/skills/react-best-practices/rules/rendering-svg-precision.md +28 -0
- package/skills/react-best-practices/rules/rendering-usetransition-loading.md +75 -0
- package/skills/react-best-practices/rules/rerender-defer-reads.md +39 -0
- package/skills/react-best-practices/rules/rerender-dependencies.md +45 -0
- package/skills/react-best-practices/rules/rerender-derived-state-no-effect.md +40 -0
- package/skills/react-best-practices/rules/rerender-derived-state.md +29 -0
- package/skills/react-best-practices/rules/rerender-functional-setstate.md +74 -0
- package/skills/react-best-practices/rules/rerender-lazy-state-init.md +58 -0
- package/skills/react-best-practices/rules/rerender-memo-with-default-value.md +38 -0
- package/skills/react-best-practices/rules/rerender-memo.md +44 -0
- package/skills/react-best-practices/rules/rerender-move-effect-to-event.md +45 -0
- package/skills/react-best-practices/rules/rerender-simple-expression-in-memo.md +35 -0
- package/skills/react-best-practices/rules/rerender-transitions.md +40 -0
- package/skills/react-best-practices/rules/rerender-use-ref-transient-values.md +73 -0
- package/skills/react-best-practices/rules/server-after-nonblocking.md +73 -0
- package/skills/react-best-practices/rules/server-auth-actions.md +96 -0
- package/skills/react-best-practices/rules/server-cache-lru.md +41 -0
- package/skills/react-best-practices/rules/server-cache-react.md +76 -0
- package/skills/react-best-practices/rules/server-dedup-props.md +65 -0
- package/skills/react-best-practices/rules/server-parallel-fetching.md +83 -0
- package/skills/react-best-practices/rules/server-serialization.md +38 -0
- package/skills/vibe-code-review/SKILL.md +70 -0
- package/skills/vibe-code-review/references/deep-audit.md +259 -0
- package/skills/vibe-code-review/references/pr-review.md +234 -0
- package/skills/vibe-code-review/references/quick-scan.md +178 -0
- package/skills/vibe-code-review/references/report-template.md +189 -0
- package/skills/vibe-code-review/references/trend-check.md +224 -0
- package/skills/vibe-code-review/scripts/generate-snapshot.sh +89 -0
- package/skills/vibe-code-review/scripts/pr-analyze.sh +180 -0
- package/skills/write-skill/SKILL.md +133 -0
- package/templates/charter.md +144 -0
- package/templates/delivery_plan.md +188 -0
- package/templates/epic.md +200 -0
- package/templates/hotfix.md +57 -0
- package/templates/risk_registry.md +89 -0
- package/templates/roadmap.md +176 -0
- package/templates/sprint_report.md +151 -0
- package/templates/story.md +150 -0
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
# Report Template
|
|
2
|
+
|
|
3
|
+
Use this template structure for all code review outputs. Adapt sections based on the review mode.
|
|
4
|
+
|
|
5
|
+
## Universal Report Header
|
|
6
|
+
|
|
7
|
+
```markdown
|
|
8
|
+
# 🔍 Vibe Code Review Report
|
|
9
|
+
|
|
10
|
+
**Project:** [project name]
|
|
11
|
+
**Date:** [date]
|
|
12
|
+
**Mode:** [Quick Scan | PR Review | Deep Audit | Trend Check]
|
|
13
|
+
**Stack:** [detected tech stack]
|
|
14
|
+
|
|
15
|
+
---
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
## Severity Definitions
|
|
19
|
+
|
|
20
|
+
Use consistently across all reports:
|
|
21
|
+
|
|
22
|
+
- 🔴 **Critical** — Must fix before shipping. Active risk to reliability, security, or sustainability.
|
|
23
|
+
- 🟡 **Warning** — Technical debt accumulating. Safe for now, but will cause problems if ignored.
|
|
24
|
+
- 🟢 **Healthy** — No action needed. Meets or exceeds standards.
|
|
25
|
+
- ℹ️ **Info** — Not a problem, but worth knowing about.
|
|
26
|
+
|
|
27
|
+
## Quick Scan Report Structure
|
|
28
|
+
|
|
29
|
+
```markdown
|
|
30
|
+
## Summary
|
|
31
|
+
|
|
32
|
+
[2-3 sentence plain-language verdict. Use a building inspection analogy.]
|
|
33
|
+
|
|
34
|
+
## Findings
|
|
35
|
+
|
|
36
|
+
### 🔴 Critical Issues
|
|
37
|
+
[List each with a one-line description and a plain-language "what this means" explanation]
|
|
38
|
+
|
|
39
|
+
### 🟡 Warnings
|
|
40
|
+
[Same format]
|
|
41
|
+
|
|
42
|
+
### 🟢 Healthy Areas
|
|
43
|
+
[Brief acknowledgment of what's working well]
|
|
44
|
+
|
|
45
|
+
## Metrics Snapshot
|
|
46
|
+
|
|
47
|
+
| Metric | Value | Status |
|
|
48
|
+
|--------|-------|--------|
|
|
49
|
+
| Source files | X | ℹ️ |
|
|
50
|
+
| Total LOC | X | ℹ️ |
|
|
51
|
+
| Files over 400 lines | X | 🟢/🟡/🔴 |
|
|
52
|
+
| Dependencies | X | 🟢/🟡/🔴 |
|
|
53
|
+
| Test files | X | 🟢/🟡/🔴 |
|
|
54
|
+
| Empty catch blocks | X | 🟢/🟡/🔴 |
|
|
55
|
+
| Architectural patterns | X competing | 🟢/🟡/🔴 |
|
|
56
|
+
|
|
57
|
+
## Recommended Actions
|
|
58
|
+
|
|
59
|
+
1. [Highest priority action — what and why]
|
|
60
|
+
2. [Second priority]
|
|
61
|
+
3. [Third priority]
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
## PR Review Report Structure
|
|
65
|
+
|
|
66
|
+
```markdown
|
|
67
|
+
## Verdict: [✅ Ship It | ⚠️ Ship With Notes | 🛑 Hold]
|
|
68
|
+
|
|
69
|
+
[One sentence explaining the verdict]
|
|
70
|
+
|
|
71
|
+
## Change Summary
|
|
72
|
+
|
|
73
|
+
- **Files changed:** X
|
|
74
|
+
- **Lines added/removed:** +X / -X
|
|
75
|
+
- **Directories touched:** X
|
|
76
|
+
- **New dependencies:** X
|
|
77
|
+
|
|
78
|
+
## Findings
|
|
79
|
+
|
|
80
|
+
### [Each finding with file path and line reference]
|
|
81
|
+
|
|
82
|
+
**File:** `path/to/file.ts`
|
|
83
|
+
**Severity:** 🔴/🟡
|
|
84
|
+
**Issue:** [description]
|
|
85
|
+
**What this means:** [plain-language explanation]
|
|
86
|
+
**Suggested fix:** [concrete action]
|
|
87
|
+
|
|
88
|
+
## Checklist
|
|
89
|
+
|
|
90
|
+
- [ ] No new empty catch blocks
|
|
91
|
+
- [ ] New code has corresponding tests
|
|
92
|
+
- [ ] No new dependencies without justification
|
|
93
|
+
- [ ] Duplication check passed
|
|
94
|
+
- [ ] Cross-module impact is acceptable
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
## Deep Audit Report Structure
|
|
98
|
+
|
|
99
|
+
```markdown
|
|
100
|
+
## Executive Summary
|
|
101
|
+
|
|
102
|
+
[3-5 sentence overview. A non-technical stakeholder should understand the state of the project from this alone.]
|
|
103
|
+
|
|
104
|
+
## Architecture
|
|
105
|
+
|
|
106
|
+
### Pattern Map
|
|
107
|
+
[Table or list of all detected patterns and their usage counts]
|
|
108
|
+
|
|
109
|
+
### Consistency Score: [X/10]
|
|
110
|
+
[Explanation of competing patterns found]
|
|
111
|
+
|
|
112
|
+
### Coupling Analysis
|
|
113
|
+
[Most-imported modules, circular dependencies, god modules]
|
|
114
|
+
|
|
115
|
+
## Code Health
|
|
116
|
+
|
|
117
|
+
### Duplication: [X%]
|
|
118
|
+
[Top duplicated blocks with file references]
|
|
119
|
+
|
|
120
|
+
### Dead Code
|
|
121
|
+
[Orphaned files and unused exports]
|
|
122
|
+
|
|
123
|
+
### File Size Distribution
|
|
124
|
+
[How many files fall into each size bucket]
|
|
125
|
+
|
|
126
|
+
## Reliability
|
|
127
|
+
|
|
128
|
+
### Error Handling Score: [X/10]
|
|
129
|
+
[Empty catches, console-only handling, missing validation]
|
|
130
|
+
|
|
131
|
+
### Test Quality Score: [X/10]
|
|
132
|
+
[Test ratio, assertion quality, weak assertions, snapshot tests]
|
|
133
|
+
|
|
134
|
+
## Sustainability
|
|
135
|
+
|
|
136
|
+
### Dependency Health
|
|
137
|
+
[Count, known vulnerabilities, unnecessary packages]
|
|
138
|
+
|
|
139
|
+
### Complexity Hotspots
|
|
140
|
+
[Top 5 most complex files/functions]
|
|
141
|
+
|
|
142
|
+
## Recommendations (Prioritized)
|
|
143
|
+
|
|
144
|
+
| Priority | Action | Effort | Impact |
|
|
145
|
+
|----------|--------|--------|--------|
|
|
146
|
+
| 1 | [action] | [hours/days] | [what improves] |
|
|
147
|
+
| 2 | [action] | [hours/days] | [what improves] |
|
|
148
|
+
| 3 | [action] | [hours/days] | [what improves] |
|
|
149
|
+
|
|
150
|
+
## Plain-Language Summary
|
|
151
|
+
|
|
152
|
+
If this codebase were a building:
|
|
153
|
+
- **Foundation:** [solid/cracking/unstable]
|
|
154
|
+
- **Plumbing (data flow):** [clean/leaky/clogged]
|
|
155
|
+
- **Electrical (error handling):** [up to code/some dead outlets/fire hazard]
|
|
156
|
+
- **Layout (architecture):** [coherent/quirky but functional/maze]
|
|
157
|
+
- **Maintenance access:** [easy/tight/sealed shut]
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
## Trend Check Report Structure
|
|
161
|
+
|
|
162
|
+
```markdown
|
|
163
|
+
## Trajectory: [📈 Improving | ➡️ Stable | 📉 Degrading]
|
|
164
|
+
|
|
165
|
+
[One sentence summary of the overall trend]
|
|
166
|
+
|
|
167
|
+
## Metrics Over Time
|
|
168
|
+
|
|
169
|
+
| Date | LOC | Deps | Large Files | Empty Catches | Dup % | Test Ratio |
|
|
170
|
+
|------|-----|------|-------------|---------------|-------|------------|
|
|
171
|
+
| [date] | X | X | X | X | X% | X |
|
|
172
|
+
|
|
173
|
+
## Trend Signals
|
|
174
|
+
|
|
175
|
+
[For each metric that changed significantly, explain what the trend means]
|
|
176
|
+
|
|
177
|
+
## Recommended Actions
|
|
178
|
+
|
|
179
|
+
1. [Based on trends, not just current state]
|
|
180
|
+
```
|
|
181
|
+
|
|
182
|
+
## Writing Guidelines
|
|
183
|
+
|
|
184
|
+
- **Lead with the verdict** — don't bury it
|
|
185
|
+
- **Use analogies** — the user may not read code
|
|
186
|
+
- **Be specific** — file paths, line numbers, concrete examples
|
|
187
|
+
- **Prioritize recommendations** — the user needs to know what to fix first
|
|
188
|
+
- **Don't overwhelm** — for Quick Scan, keep to top 5 findings max. Save exhaustive lists for Deep Audit.
|
|
189
|
+
- **Celebrate wins** — if something is well-structured, say so. Positive reinforcement matters in vibe coding.
|
|
@@ -0,0 +1,224 @@
|
|
|
1
|
+
# Trend Check Mode
|
|
2
|
+
|
|
3
|
+
Compare metrics over time to catch gradual degradation. Individual snapshots tell you the current state — trends tell you the trajectory.
|
|
4
|
+
|
|
5
|
+
## When to Use
|
|
6
|
+
|
|
7
|
+
- "Is my codebase getting better or worse?"
|
|
8
|
+
- Weekly/monthly quality check-ins
|
|
9
|
+
- After a sprint or feature push, compare before/after
|
|
10
|
+
- Tracking the "Effort Paradox" — are new features getting harder to add?
|
|
11
|
+
|
|
12
|
+
## Concept
|
|
13
|
+
|
|
14
|
+
This mode generates a metrics snapshot and compares it against previous snapshots stored in a `.quality/` directory in the project root. Each snapshot is a JSON file timestamped with the scan date.
|
|
15
|
+
|
|
16
|
+
## Steps
|
|
17
|
+
|
|
18
|
+
### 1. Generate Current Snapshot
|
|
19
|
+
|
|
20
|
+
Run this script to produce a metrics JSON:
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
#!/bin/bash
# generate-snapshot.sh — produces a quality metrics snapshot

TIMESTAMP=$(date +%Y-%m-%d)
OUTPUT_DIR=".quality"
OUTPUT_FILE="$OUTPUT_DIR/snapshot-$TIMESTAMP.json"

mkdir -p "$OUTPUT_DIR"

# Count source files (test/spec files excluded so the test ratio below is meaningful)
SRC_FILES=$(find . -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" -o -name "*.py" \
  | grep -v node_modules | grep -v __pycache__ | grep -v '\.next' | grep -v dist | grep -v test | grep -v spec | wc -l | tr -d ' ')

# Count test files
TEST_FILES=$(find . -name "*.test.*" -o -name "*.spec.*" -o -name "test_*" -o -name "*_test.*" \
  | grep -v node_modules | grep -v __pycache__ | wc -l | tr -d ' ')

# Count total lines of code (last "total" line of wc output)
TOTAL_LOC=$(find . -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" -o -name "*.py" \
  | grep -v node_modules | grep -v __pycache__ | grep -v '\.next' | grep -v dist \
  | xargs wc -l 2>/dev/null | tail -1 | awk '{print $1}')
TOTAL_LOC=${TOTAL_LOC:-0}   # guard: wc emits nothing when no files matched

# Count dependencies
if [ -f package.json ]; then
  DEPS=$(python3 -c "import json; d=json.load(open('package.json')); print(len(d.get('dependencies',{})))" 2>/dev/null || echo "0")
  DEV_DEPS=$(python3 -c "import json; d=json.load(open('package.json')); print(len(d.get('devDependencies',{})))" 2>/dev/null || echo "0")
else
  DEPS=0
  DEV_DEPS=0
fi

# Count files over 400 lines
LARGE_FILES=$(find . -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" -o -name "*.py" \
  | grep -v node_modules | grep -v __pycache__ | grep -v '\.next' \
  | while IFS= read -r f; do
      lines=$(wc -l < "$f" 2>/dev/null)
      if [ "$lines" -gt 400 ]; then echo "$f"; fi
    done | wc -l | tr -d ' ')

# Count empty catch blocks (heuristic: "catch ... {" immediately followed by "}").
# NOTE: grep -c already prints 0 when nothing matches but still exits non-zero,
# so the guard must be "|| true" — "|| echo 0" would append a SECOND 0 ("0\n0")
# and corrupt the JSON written below.
EMPTY_CATCHES=$(grep -rn "catch" --include="*.ts" --include="*.tsx" --include="*.js" --include="*.jsx" -A 2 . \
  | grep -v node_modules | grep -E "catch.*\{$" -A 1 | grep -c '^[[:space:]]*}' 2>/dev/null || true)
EMPTY_CATCHES=${EMPTY_CATCHES:-0}

# Count TODO/FIXME markers (-E: portable alternation; BRE "\|" is GNU-only)
TODOS=$(grep -rnE "TODO|FIXME|HACK|XXX" --include="*.ts" --include="*.tsx" --include="*.js" --include="*.jsx" --include="*.py" . \
  | grep -v node_modules | grep -v __pycache__ | wc -l | tr -d ' ')

# Count directories (proxy for module count)
MODULES=$(find . -maxdepth 2 -type d \
  -not -path "*/node_modules/*" -not -path "*/.git/*" -not -path "*/.next/*" -not -path "*/dist/*" \
  | wc -l | tr -d ' ')

# Duplication percentage (JSON null when jscpd is unavailable or fails)
DUP_PCT=$(npx jscpd . --min-lines 5 --min-tokens 50 --reporters json --output /tmp/jscpd-trend \
  --ignore "node_modules,dist,build,.next,__pycache__,coverage" 2>/dev/null \
  && python3 -c "import json; d=json.load(open('/tmp/jscpd-trend/jscpd-report.json')); print(d['statistics']['total']['percentage'])" 2>/dev/null || echo "null")

# Write snapshot
cat > "$OUTPUT_FILE" << EOF
{
  "date": "$TIMESTAMP",
  "source_files": $SRC_FILES,
  "test_files": $TEST_FILES,
  "test_ratio": $(python3 -c "print(round($TEST_FILES / max($SRC_FILES, 1), 2))"),
  "total_loc": $TOTAL_LOC,
  "dependencies": $DEPS,
  "dev_dependencies": $DEV_DEPS,
  "large_files_over_400": $LARGE_FILES,
  "empty_catch_blocks": $EMPTY_CATCHES,
  "todo_fixme_count": $TODOS,
  "module_count": $MODULES,
  "duplication_pct": $DUP_PCT
}
EOF

echo "Snapshot saved to $OUTPUT_FILE"
python3 -m json.tool "$OUTPUT_FILE"
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
### 2. Compare Against Previous Snapshots
|
|
103
|
+
|
|
104
|
+
```bash
|
|
105
|
+
#!/bin/bash
|
|
106
|
+
# compare-snapshots.sh — compare latest snapshot against previous ones
|
|
107
|
+
|
|
108
|
+
QUALITY_DIR=".quality"
|
|
109
|
+
SNAPSHOTS=$(ls "$QUALITY_DIR"/snapshot-*.json 2>/dev/null | sort)
|
|
110
|
+
COUNT=$(echo "$SNAPSHOTS" | wc -l)
|
|
111
|
+
|
|
112
|
+
if [ "$COUNT" -lt 2 ]; then
|
|
113
|
+
echo "Need at least 2 snapshots to compare. Run a scan first."
|
|
114
|
+
exit 0
|
|
115
|
+
fi
|
|
116
|
+
|
|
117
|
+
LATEST=$(echo "$SNAPSHOTS" | tail -1)
|
|
118
|
+
PREVIOUS=$(echo "$SNAPSHOTS" | tail -2 | head -1)
|
|
119
|
+
|
|
120
|
+
python3 << 'PYEOF'
|
|
121
|
+
import json, sys
|
|
122
|
+
|
|
123
|
+
with open("LATEST_FILE") as f:
|
|
124
|
+
latest = json.load(f)
|
|
125
|
+
with open("PREVIOUS_FILE") as f:
|
|
126
|
+
prev = json.load(f)
|
|
127
|
+
|
|
128
|
+
print(f"Comparing: {prev['date']} → {latest['date']}")
|
|
129
|
+
print("=" * 60)
|
|
130
|
+
|
|
131
|
+
metrics = [
|
|
132
|
+
("source_files", "Source files", "neutral"),
|
|
133
|
+
("test_files", "Test files", "higher_better"),
|
|
134
|
+
("test_ratio", "Test ratio", "higher_better"),
|
|
135
|
+
("total_loc", "Total LOC", "neutral"),
|
|
136
|
+
("dependencies", "Dependencies", "lower_better"),
|
|
137
|
+
("large_files_over_400", "Large files (>400 lines)", "lower_better"),
|
|
138
|
+
("empty_catch_blocks", "Empty catch blocks", "lower_better"),
|
|
139
|
+
("todo_fixme_count", "TODO/FIXME markers", "lower_better"),
|
|
140
|
+
("module_count", "Module count", "neutral"),
|
|
141
|
+
("duplication_pct", "Duplication %", "lower_better"),
|
|
142
|
+
]
|
|
143
|
+
|
|
144
|
+
for key, label, direction in metrics:
|
|
145
|
+
old = prev.get(key)
|
|
146
|
+
new = latest.get(key)
|
|
147
|
+
if old is None or new is None or old == "null" or new == "null":
|
|
148
|
+
print(f" {label}: no data")
|
|
149
|
+
continue
|
|
150
|
+
|
|
151
|
+
old, new = float(old), float(new)
|
|
152
|
+
delta = new - old
|
|
153
|
+
pct = (delta / old * 100) if old != 0 else 0
|
|
154
|
+
|
|
155
|
+
if direction == "lower_better":
|
|
156
|
+
icon = "🟢" if delta <= 0 else "🔴"
|
|
157
|
+
elif direction == "higher_better":
|
|
158
|
+
icon = "🟢" if delta >= 0 else "🔴"
|
|
159
|
+
else:
|
|
160
|
+
icon = "➡️"
|
|
161
|
+
|
|
162
|
+
sign = "+" if delta > 0 else ""
|
|
163
|
+
print(f" {icon} {label}: {old} → {new} ({sign}{delta:.1f}, {sign}{pct:.1f}%)")
|
|
164
|
+
|
|
165
|
+
PYEOF
|
|
166
|
+
```
|
|
167
|
+
|
|
168
|
+
Replace `LATEST_FILE` and `PREVIOUS_FILE` with actual paths when running.
|
|
169
|
+
|
|
170
|
+
### 3. Trend Visualization
|
|
171
|
+
|
|
172
|
+
If the user wants a visual trend, generate a simple markdown table or ASCII chart:
|
|
173
|
+
|
|
174
|
+
```python
|
|
175
|
+
import json, os, glob

# Load every stored snapshot, oldest first (filenames embed the date, so a
# lexicographic sort is chronological).
snapshot_paths = sorted(glob.glob(".quality/snapshot-*.json"))
if not snapshot_paths:
    print("No snapshots found")
    exit()

records = []
for path in snapshot_paths:
    with open(path) as fh:
        records.append(json.load(fh))

# Render the markdown trend table, one row per snapshot.
print("| Date | LOC | Deps | Large Files | Empty Catches | Duplication | Test Ratio |")
print("|------|-----|------|-------------|---------------|-------------|------------|")
for row in records:
    raw_dup = row.get('duplication_pct')
    # duplication may be absent or recorded as null when jscpd was unavailable
    dup = f"{row.get('duplication_pct', 'N/A')}%" if raw_dup not in (None, 'null') else "N/A"
    print(f"| {row['date']} | {row['total_loc']} | {row['dependencies']} | {row['large_files_over_400']} | {row['empty_catch_blocks']} | {dup} | {row['test_ratio']} |")
|
|
193
|
+
```
|
|
194
|
+
|
|
195
|
+
### Key Trend Signals
|
|
196
|
+
|
|
197
|
+
| Signal | What It Means | Action |
|
|
198
|
+
|--------|---------------|--------|
|
|
199
|
+
| LOC rising, test ratio falling | Building without testing | Pause features, write tests |
|
|
200
|
+
| Dependencies climbing steadily | AI agents adding packages every session | Audit and remove unused deps |
|
|
201
|
+
| Large files increasing | Monoliths forming | Decompose before they calcify |
|
|
202
|
+
| Empty catches increasing | Reliability degrading | Add error handling sprint |
|
|
203
|
+
| Duplication climbing | AI reinventing existing solutions | Document shared utilities, update AI context |
|
|
204
|
+
| Feature time increasing | Architecture is fighting you | Deep audit needed |
|
|
205
|
+
|
|
206
|
+
### 4. Generate Trend Report
|
|
207
|
+
|
|
208
|
+
Output a markdown report with:
|
|
209
|
+
1. The comparison table
|
|
210
|
+
2. Traffic-light summary (🟢🟡🔴) for each metric's trajectory
|
|
211
|
+
3. Top 3 recommended actions based on the trends
|
|
212
|
+
4. A plain-language summary: "Your codebase is [improving/stable/degrading] in these areas..."
|
|
213
|
+
|
|
214
|
+
## Automation
|
|
215
|
+
|
|
216
|
+
Recommend the user add the snapshot script to their CI pipeline or run it weekly:
|
|
217
|
+
|
|
218
|
+
```bash
|
|
219
|
+
# Add to .github/workflows/quality-snapshot.yml or run manually
|
|
220
|
+
# The .quality/ directory should be committed to the repo
|
|
221
|
+
# so trends persist across machines and sessions
|
|
222
|
+
```
|
|
223
|
+
|
|
224
|
+
This way, every time they come back to review, historical data is already there.
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
#!/bin/bash
# generate-snapshot.sh — Produces a quality metrics snapshot for trend tracking
# Usage: bash scripts/generate-snapshot.sh [project-path]
#
# Saves a JSON snapshot to .quality/snapshot-YYYY-MM-DD.json in the project root.
# Commit the .quality/ directory to your repo so trends persist.

set -e

PROJECT_DIR="${1:-.}"
cd "$PROJECT_DIR"

TIMESTAMP=$(date +%Y-%m-%d)
OUTPUT_DIR=".quality"
OUTPUT_FILE="$OUTPUT_DIR/snapshot-$TIMESTAMP.json"

mkdir -p "$OUTPUT_DIR"

echo "📊 Generating quality snapshot for $(pwd)..."

# Count source files (test/spec files excluded so the test ratio is meaningful)
SRC_FILES=$(find . -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" -o -name "*.py" -o -name "*.go" -o -name "*.rs" \
  | grep -v node_modules | grep -v __pycache__ | grep -v '\.next' | grep -v dist | grep -v build \
  | grep -v test | grep -v spec | wc -l | tr -d ' ')

# Count test files
TEST_FILES=$(find . -name "*.test.*" -o -name "*.spec.*" -o -name "test_*" -o -name "*_test.*" \
  | grep -v node_modules | grep -v __pycache__ | wc -l | tr -d ' ')

# Total lines of code (last "total" line of wc output)
TOTAL_LOC=$(find . \( -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" -o -name "*.py" -o -name "*.go" -o -name "*.rs" \) \
  -not -path "*/node_modules/*" -not -path "*/__pycache__/*" -not -path "*/.next/*" -not -path "*/dist/*" \
  | xargs wc -l 2>/dev/null | tail -1 | awk '{print $1}')
TOTAL_LOC=${TOTAL_LOC:-0}

# Count dependencies (node, python, or go — first manifest found wins)
if [ -f package.json ]; then
  DEPS=$(python3 -c "import json; d=json.load(open('package.json')); print(len(d.get('dependencies',{})))" 2>/dev/null || echo "0")
  DEV_DEPS=$(python3 -c "import json; d=json.load(open('package.json')); print(len(d.get('devDependencies',{})))" 2>/dev/null || echo "0")
elif [ -f requirements.txt ]; then
  DEPS=$(grep -v "^#" requirements.txt | grep -v "^$" | wc -l | tr -d ' ')
  DEV_DEPS=0
elif [ -f go.mod ]; then
  DEPS=$(grep "^	" go.mod | grep -v "indirect" | wc -l | tr -d ' ')
  DEV_DEPS=0
else
  DEPS=0
  DEV_DEPS=0
fi

# Count files over 400 lines
LARGE_FILES=$(find . \( -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" -o -name "*.py" -o -name "*.go" -o -name "*.rs" \) \
  -not -path "*/node_modules/*" -not -path "*/__pycache__/*" -not -path "*/.next/*" \
  | while IFS= read -r f; do
      lines=$(wc -l < "$f" 2>/dev/null || echo 0)
      if [ "$lines" -gt 400 ]; then echo "$f"; fi
    done | wc -l | tr -d ' ')

# Count empty catch blocks (heuristic: "catch ... {" followed by a bare "}").
# NOTE: grep -c already prints 0 when nothing matches but still exits non-zero,
# so the guard must be "|| true" — "|| echo 0" would append a SECOND 0
# ("0\n0"), producing invalid JSON below.
EMPTY_CATCHES=$(grep -rn "catch" --include="*.ts" --include="*.tsx" --include="*.js" --include="*.jsx" --include="*.py" -A 2 . \
  2>/dev/null | grep -v node_modules | grep -E "catch.*\{$" -A 1 | grep -c '^[[:space:]]*}' 2>/dev/null || true)
EMPTY_CATCHES=${EMPTY_CATCHES:-0}

# Count TODO/FIXME markers (-E: portable alternation; BRE "\|" is GNU-only)
TODOS=$(grep -rnE "TODO|FIXME|HACK|XXX" \
  --include="*.ts" --include="*.tsx" --include="*.js" --include="*.jsx" --include="*.py" --include="*.go" --include="*.rs" . \
  2>/dev/null | grep -v node_modules | grep -v __pycache__ | wc -l | tr -d ' ')

# Test ratio (test files per source file, two decimal places)
TEST_RATIO=$(python3 -c "print(round($TEST_FILES / max($SRC_FILES, 1), 2))")

# Write snapshot
cat > "$OUTPUT_FILE" << EOF
{
  "date": "$TIMESTAMP",
  "source_files": $SRC_FILES,
  "test_files": $TEST_FILES,
  "test_ratio": $TEST_RATIO,
  "total_loc": $TOTAL_LOC,
  "dependencies": $DEPS,
  "dev_dependencies": $DEV_DEPS,
  "large_files_over_400": $LARGE_FILES,
  "empty_catch_blocks": $EMPTY_CATCHES,
  "todo_fixme_count": $TODOS
}
EOF

echo "✅ Snapshot saved to $OUTPUT_FILE"
echo ""
python3 -m json.tool "$OUTPUT_FILE"
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
#!/bin/bash
# pr-analyze.sh — Analyze a git diff for code quality issues
# Usage: bash scripts/pr-analyze.sh [base-branch]
#
# Defaults to comparing against 'main'. Pass a different branch name if needed.
# Outputs a markdown report to stdout.

set -e

BASE="${1:-main}"
REPORT=""

# Append one markdown line to the report. A real newline is stored (ANSI-C
# $'\n') so the final output can use printf '%s' instead of `echo -e`, which
# would also mangle literal backslash sequences appearing in diff content.
add() { REPORT="$REPORT$1"$'\n'; }

add "# 🔍 PR Review Report"
add ""
add "**Date:** $(date +%Y-%m-%d)"
add "**Comparing:** \`$BASE...HEAD\`"
add ""

# Get changed files (fall back to the last commit if the base ref is missing)
CHANGED=$(git diff --name-only "$BASE"...HEAD 2>/dev/null || git diff --name-only HEAD~1)
SOURCE_CHANGED=$(echo "$CHANGED" | grep -E '\.(ts|tsx|js|jsx|py|go|rs)$' | grep -v node_modules || true)
FILE_COUNT=$(echo "$CHANGED" | grep -v "^$" | wc -l | tr -d ' ')
DIR_COUNT=$(echo "$CHANGED" | grep -v "^$" | xargs -I{} dirname {} 2>/dev/null | sort -u | wc -l | tr -d ' ')

add "## Change Summary"
add ""
add "- **Files changed:** $FILE_COUNT"
add "- **Directories touched:** $DIR_COUNT"
add "- **Source files in diff:** $(echo "$SOURCE_CHANGED" | grep -v "^$" | wc -l | tr -d ' ')"
add ""

# Stats
STATS=$(git diff --stat "$BASE"...HEAD 2>/dev/null || git diff --stat HEAD~1)
add "\`\`\`"
add "$STATS"
add "\`\`\`"
add ""

# Cross-module impact
if [ "$DIR_COUNT" -gt 5 ]; then
  add "### 🔴 High Cross-Module Impact"
  add ""
  add "This PR touches $DIR_COUNT directories. Consider breaking into smaller, focused PRs."
  add ""
elif [ "$DIR_COUNT" -gt 3 ]; then
  add "### 🟡 Moderate Cross-Module Impact"
  add ""
  add "This PR touches $DIR_COUNT directories."
  add ""
fi

add "## Findings"
add ""

# Check for new dependencies
if echo "$CHANGED" | grep -q "package.json"; then
  NEW_DEPS=$(git diff "$BASE"...HEAD -- package.json 2>/dev/null | grep "^+" | grep -v "^+++" | grep -E '"[^"]+":' || true)
  if [ -n "$NEW_DEPS" ]; then
    add "### 🟡 New Dependencies Detected"
    add ""
    add "\`\`\`"
    add "$NEW_DEPS"
    add "\`\`\`"
    add ""
    add "**What this means:** Every new dependency is a future maintenance burden and potential security risk. Verify each is necessary."
    add ""
  fi
fi

if echo "$CHANGED" | grep -q "requirements.txt"; then
  NEW_PY_DEPS=$(git diff "$BASE"...HEAD -- requirements.txt 2>/dev/null | grep "^+" | grep -v "^+++" || true)
  if [ -n "$NEW_PY_DEPS" ]; then
    add "### 🟡 New Python Dependencies"
    add ""
    add "\`\`\`"
    add "$NEW_PY_DEPS"
    add "\`\`\`"
    add ""
  fi
fi

# Error handling in the diff. Skip the git call when no source files changed:
# an empty pathspec list would make `git diff` return the entire diff.
if [ -n "$SOURCE_CHANGED" ]; then
  # shellcheck disable=SC2086 — word-splitting of $SOURCE_CHANGED is intended
  DIFF_CONTENT=$(git diff "$BASE"...HEAD -- $SOURCE_CHANGED 2>/dev/null || true)
else
  DIFF_CONTENT=""
fi
# NOTE: `grep -c` prints 0 AND exits non-zero when nothing matches, so the
# guard must be `|| true` — `|| echo "0"` would append a second 0 ("0\n0"),
# which makes every [ ... -gt ... ] test below error out (always false).
EMPTY_CATCHES=$(echo "$DIFF_CONTENT" | grep "^+" | grep -E "catch.*\{" -A 2 | grep -c '^+[[:space:]]*}' 2>/dev/null || true)
EMPTY_CATCHES=${EMPTY_CATCHES:-0}
CONSOLE_CATCHES=$(echo "$DIFF_CONTENT" | grep "^+" | grep -B1 -A3 "catch" | grep -cE "console\.(log|error)" 2>/dev/null || true)
CONSOLE_CATCHES=${CONSOLE_CATCHES:-0}
NEW_TODOS=$(echo "$DIFF_CONTENT" | grep "^+" | grep -ciE "TODO|FIXME|HACK|XXX" 2>/dev/null || true)
NEW_TODOS=${NEW_TODOS:-0}

if [ "$EMPTY_CATCHES" -gt 0 ]; then
  add "### 🔴 Empty Catch Blocks in New Code"
  add ""
  add "Found **$EMPTY_CATCHES** empty catch blocks. Errors will be swallowed silently."
  add ""
  add "**What this means:** Your smoke detectors have dead batteries — failures happen silently."
  add ""
fi

if [ "$CONSOLE_CATCHES" -gt 0 ]; then
  add "### 🟡 Console-Only Error Handling"
  add ""
  add "Found **$CONSOLE_CATCHES** catch blocks that only console.log/error."
  add ""
  add "**What this means:** Errors are acknowledged but not actually handled. In production, nobody reads the console."
  add ""
fi

if [ "$NEW_TODOS" -gt 0 ]; then
  add "### 🟡 TODO/FIXME Markers"
  add ""
  add "Found **$NEW_TODOS** new TODO/FIXME/HACK markers. This is unfinished work entering the codebase."
  add ""
fi

# Test coverage for changed files
add "## Test Coverage for Changed Files"
add ""
UNTESTED=0
TESTED=0
UPDATED=0

for file in $(echo "$SOURCE_CHANGED" | grep -v test | grep -v spec | grep -v "^$"); do
  # Strip directory and the (already-filtered) source extension using
  # parameter expansion — the original sed relied on GNU-only \| alternation.
  BASENAME=$(basename "$file")
  BASENAME=${BASENAME%.*}
  TEST_EXISTS=$(find . -name "${BASENAME}.test.*" -o -name "${BASENAME}.spec.*" -o -name "test_${BASENAME}.*" 2>/dev/null | grep -v node_modules | head -1)

  if [ -n "$TEST_EXISTS" ]; then
    # -F: match the filename literally; its dots must not act as regex wildcards
    if echo "$CHANGED" | grep -qF "$(basename "$TEST_EXISTS")"; then
      add "- 🟢 \`$file\` — test exists and was updated"
      UPDATED=$((UPDATED + 1))
    else
      add "- 🟡 \`$file\` — test exists but NOT updated"
      TESTED=$((TESTED + 1))
    fi
  else
    add "- 🔴 \`$file\` — no test file found"
    UNTESTED=$((UNTESTED + 1))
  fi
done

add ""
add "**Summary:** $UPDATED tested & updated, $TESTED tested but stale, $UNTESTED untested"
add ""

# Large files in diff
add "## File Size Check"
add ""
for file in $(echo "$SOURCE_CHANGED" | grep -v "^$"); do
  if [ -f "$file" ]; then
    LINES=$(wc -l < "$file")
    if [ "$LINES" -gt 400 ]; then
      add "- 🔴 \`$file\` — $LINES lines (over 400 threshold)"
    elif [ "$LINES" -gt 200 ]; then
      add "- 🟡 \`$file\` — $LINES lines (approaching threshold)"
    fi
  fi
done
add ""

# Verdict: any critical signal blocks the merge
CRITICAL=0
[ "$EMPTY_CATCHES" -gt 0 ] && CRITICAL=$((CRITICAL + 1))
[ "$UNTESTED" -gt 3 ] && CRITICAL=$((CRITICAL + 1))
[ "$DIR_COUNT" -gt 7 ] && CRITICAL=$((CRITICAL + 1))

if [ "$CRITICAL" -gt 0 ]; then
  add "## Verdict: 🛑 Hold"
  add ""
  add "**$CRITICAL critical issues** should be addressed before merging."
elif [ "$NEW_TODOS" -gt 0 ] || [ "$CONSOLE_CATCHES" -gt 0 ] || [ "$TESTED" -gt 2 ]; then
  add "## Verdict: ⚠️ Ship With Notes"
  add ""
  add "No blocking issues, but address the warnings above soon."
else
  add "## Verdict: ✅ Ship It"
  add ""
  add "No significant issues found in this diff."
fi

# Output (REPORT already contains real newlines; %s does no escape processing)
printf '%s' "$REPORT"
|