prjct-cli 0.19.0 → 0.20.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +42 -0
- package/package.json +1 -1
- package/templates/agentic/agents/uxui.md +210 -0
- package/templates/commands/bug.md +219 -41
- package/templates/commands/feature.md +368 -80
- package/templates/commands/ship.md +139 -12
- package/templates/commands/sync.md +62 -3
- package/templates/commands/test.md +17 -184
- package/templates/mcp-config.json +0 -36
### package/templates/commands/ship.md

````diff
@@ -1,31 +1,52 @@
 ---
-allowed-tools: [Read, Write, Bash]
+allowed-tools: [Read, Write, Bash, Glob, Grep]
 description: 'Ship feature with automated workflow'
 ---

 # /p:ship - Ship Feature

-Ship completed work with
+Ship completed work with pre-flight checks, code review, and quality gates.

 ## Usage
 ```
-/p:ship [feature] [--blocking]
+/p:ship [feature] [--blocking] [--skip-review]
 ```
 - `feature`: Name of the feature being shipped (required)
-- `--blocking`: Abort if
+- `--blocking`: Abort if any check fails
+- `--skip-review`: Skip code review (for trivial changes)

 ## Flow

-### Step 1: Validate
+### Step 1: Validate Project
 READ: `.prjct/prjct.config.json` → extract `projectId`
 IF missing: "No prjct project. Run /p:init first." → STOP

 SET: `{globalPath}` = `~/.prjct-cli/projects/{projectId}`

+### Step 2: Pre-flight Checks
+
+#### 2.1 Check for changes
 BASH: `git status --porcelain`
 IF empty: "No changes to ship." → STOP

-
+#### 2.2 Analyze change scope
+BASH: `git diff --stat HEAD`
+EXTRACT: {filesChanged}, {insertions}, {deletions}
+SET: {totalLines} = {insertions} + {deletions}
+
+#### 2.3 Detect trivial changes
+IF {totalLines} < 10 AND {filesChanged} <= 2:
+SET: {changeType} = "trivial"
+OUTPUT: "Trivial changes detected ({totalLines} lines in {filesChanged} files)"
+ELSE IF {totalLines} < 50:
+SET: {changeType} = "small"
+ELSE IF {totalLines} < 200:
+SET: {changeType} = "medium"
+ELSE:
+SET: {changeType} = "large"
+OUTPUT: "Large change detected ({totalLines} lines). Full review recommended."
+
+### Step 3: Quality Checks
 BASH: `npm run lint 2>&1 || echo "LINT_SKIP"`
 BASH: `npm test 2>&1 || echo "TEST_SKIP"`

````
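The new pre-flight step sizes up the working tree before deciding how much review to run. Below is a minimal bash sketch of the classification described in Steps 2.1–2.3, assuming the counts are read from the summary line of `git diff --stat HEAD`; the thresholds are the ones in the template, everything else (variable names, script shape) is illustrative rather than the package's actual implementation.

```bash
#!/usr/bin/env bash
# Sketch: classify pending changes the way Step 2 describes.
set -euo pipefail

# Step 2.1 -- abort early when there is nothing to ship.
if [ -z "$(git status --porcelain)" ]; then
  echo "No changes to ship." && exit 0
fi

# Step 2.2 -- parse the summary line of `git diff --stat HEAD`, e.g.
# " 3 files changed, 12 insertions(+), 4 deletions(-)".
stat_line=$(git diff --stat HEAD | tail -n 1)
files_changed=$(echo "$stat_line" | grep -oE '[0-9]+ files? changed' | grep -oE '[0-9]+' || echo 0)
insertions=$(echo "$stat_line" | grep -oE '[0-9]+ insertions?' | grep -oE '[0-9]+' || echo 0)
deletions=$(echo "$stat_line" | grep -oE '[0-9]+ deletions?' | grep -oE '[0-9]+' || echo 0)
total_lines=$((insertions + deletions))

# Step 2.3 -- thresholds taken from the template.
if [ "$total_lines" -lt 10 ] && [ "$files_changed" -le 2 ]; then
  change_type="trivial"
elif [ "$total_lines" -lt 50 ]; then
  change_type="small"
elif [ "$total_lines" -lt 200 ]; then
  change_type="medium"
else
  change_type="large"
fi

echo "Change scope: $change_type ($total_lines lines in $files_changed files)"
```

Keeping the thresholds in one place like this makes it easy to tune what counts as "trivial" without touching the rest of the ship flow.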
````diff
@@ -33,7 +54,61 @@ IF `--blocking` AND (lint failed OR tests failed):
 OUTPUT: "Quality checks failed. Ship blocked."
 STOP

-### Step
+### Step 4: Code Review with Confidence Scoring
+
+IF `--skip-review` OR {changeType} == "trivial":
+OUTPUT: "Skipping code review."
+→ Go to Step 5
+ELSE:
+OUTPUT: "Running code review..."
+
+#### 4.1 Get changed files
+BASH: `git diff --name-only HEAD`
+SET: {changedFiles} = result
+
+#### 4.2 Review each file for issues
+FOR each file in {changedFiles}:
+BASH: `git diff HEAD -- {file}`
+
+Analyze diff for:
+- Missing error handling
+- Security issues (hardcoded secrets, SQL injection, XSS)
+- Logic errors
+- Missing null checks
+- Resource leaks
+
+FOR each issue found:
+ASSIGN confidence score (0-100):
+- 90-100: Definite bug/security issue
+- 70-89: Likely problem, should fix
+- 50-69: Maybe a problem
+- 0-49: Nitpick/style preference
+
+#### 4.3 Filter and report
+SET: {issues} = issues with confidence >= 70
+
+IF {issues}.length > 0:
+OUTPUT: """
+## Code Review Results
+
+Found {issues.length} issues (confidence >= 70%):
+
+{FOR each issue:}
+- [{confidence}%] {description}
+File: {file}:{line}
+{END FOR}
+"""
+
+IF `--blocking`:
+OUTPUT: "Fix issues before shipping."
+STOP
+ELSE:
+PROMPT: "Continue with ship? (y/n)"
+IF no: STOP
+ELSE:
+OUTPUT: "Code review passed. No high-confidence issues found."
+
+### Step 5: Version Bump
 READ: `package.json` (or Cargo.toml, pyproject.toml)
 EXTRACT: current version

````
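Step 4 leaves the actual scoring to the reviewing agent, but the mechanical part (collecting per-file diffs and filtering findings by confidence) can be sketched in bash. In the sketch below, `issues.json` and its fields are hypothetical stand-ins for whatever the reviewer produces; only the git commands come from the template.

```bash
#!/usr/bin/env bash
# Sketch: gather per-file diffs for review (4.1/4.2), then keep only
# high-confidence findings (4.3).
set -euo pipefail

review_dir=$(mktemp -d)

# 4.1 -- list the files touched since HEAD.
git diff --name-only HEAD | while read -r file; do
  # 4.2 -- capture each file's diff so it can be inspected in isolation.
  git diff HEAD -- "$file" > "$review_dir/$(echo "$file" | tr '/' '_').diff"
done

# 4.3 -- assuming the reviewer wrote findings like
# [{"file":"src/auth/oauth.ts","line":67,"confidence":85,"description":"..."}]
# to issues.json, keep only findings with confidence >= 70.
if [ -f issues.json ]; then
  jq '[.[] | select(.confidence >= 70)]' issues.json
fi
```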
````diff
@@ -45,11 +120,11 @@ Determine bump type:

 UPDATE version file with new version

-### Step
+### Step 6: Update CHANGELOG
 BASH: `git log --oneline -20 --pretty=format:"- %s"`
 INSERT new entry in CHANGELOG.md

-### Step
+### Step 7: Git Commit & Push
 BASH: `git add .`
 BASH: `git commit -m "feat: Ship {feature} v{newVersion}

@@ -58,7 +133,7 @@ Designed for [Claude](https://www.anthropic.com/claude)
 "`
 BASH: `git push`

-### Step
+### Step 8: Update Storage
 GET timestamp: `bun -e "console.log(new Date().toISOString())" 2>/dev/null || node -e "console.log(new Date().toISOString())"`
 GET uuid: `bun -e "console.log(crypto.randomUUID())" 2>/dev/null || node -e "console.log(require('crypto').randomUUID())"`

@@ -85,12 +160,14 @@ APPEND to `{globalPath}/sync/pending.json`
 Log to memory:
 APPEND to `{globalPath}/memory/events.jsonl`

-### Step
+### Step 9: Output
 ```
 🚀 Shipped: {feature}

 Version: {oldVersion} → {newVersion}
-
+Changes: {changeType} ({totalLines} lines in {filesChanged} files)
+Quality: Lint {lintStatus} | Tests {testStatus}
+Review: {reviewStatus}

 Next: /p:feature | /p:recap
 ```
````
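Step 8 relies on a bun-first, node-fallback pattern for timestamps and UUIDs, and the result ends up appended to the project's memory log. A sketch of that pattern follows; the event fields and the helper name are illustrative, not the exact schema `/p:ship` writes.

```bash
#!/usr/bin/env bash
# Sketch: bun-first/node-fallback stamping, then append one JSONL event.
set -euo pipefail

feature="${1:?usage: ship-log.sh <feature>}"                        # hypothetical helper
global_path="$HOME/.prjct-cli/projects/${PROJECT_ID:?set PROJECT_ID first}"

# Prefer bun, fall back to node (same commands as Step 8).
ts=$(bun -e "console.log(new Date().toISOString())" 2>/dev/null \
  || node -e "console.log(new Date().toISOString())")
id=$(bun -e "console.log(crypto.randomUUID())" 2>/dev/null \
  || node -e "console.log(require('crypto').randomUUID())")

# Append one line to the memory log (illustrative event shape).
printf '{"id":"%s","ts":"%s","type":"ship","feature":"%s"}\n' \
  "$id" "$ts" "$feature" >> "$global_path/memory/events.jsonl"
```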
````diff
@@ -103,8 +180,58 @@ Next: /p:feature | /p:recap
 | No changes | "No changes to ship" | STOP |
 | Lint/test fails (blocking) | "Quality checks failed" | STOP |
 | Lint/test fails (non-blocking) | Show warning | CONTINUE |
+| Code review finds issues (blocking) | "Fix issues before shipping" | STOP |
+| Code review finds issues (non-blocking) | Prompt to continue | ASK |
 | Push fails | "Push failed. Try: git pull --rebase" | CONTINUE |

+## Examples
+
+### Example 1: Trivial Change (auto-skip review)
+```
+/p:ship "fix typo"
+
+Pre-flight: Trivial changes (3 lines in 1 file)
+Quality: Lint ✅ | Tests ✅
+Review: Skipped (trivial)
+
+🚀 Shipped: fix typo
+Version: 1.2.0 → 1.2.1
+```
+
+### Example 2: Medium Change (with review)
+```
+/p:ship "add auth"
+
+Pre-flight: Medium changes (87 lines in 5 files)
+Quality: Lint ✅ | Tests ✅
+Review: Running...
+
+## Code Review Results
+Found 2 issues (confidence >= 70%):
+- [85%] Missing error handling in OAuth callback
+File: src/auth/oauth.ts:67
+- [72%] Token not validated before use
+File: src/auth/validate.ts:23
+
+Continue with ship? (y/n)
+```
+
+### Example 3: Blocking Mode
+```
+/p:ship "deploy script" --blocking
+
+Pre-flight: Small changes (28 lines in 2 files)
+Quality: Lint ✅ | Tests ✅
+Review: Running...
+
+## Code Review Results
+Found 1 issue (confidence >= 70%):
+- [95%] Hardcoded credentials detected
+File: scripts/deploy.sh:12
+
+Fix issues before shipping.
+```
+
 ## References
 - Architecture details: `~/.prjct-cli/docs/architecture.md`
 - Validation patterns: `~/.prjct-cli/docs/validation.md`
````
### package/templates/commands/sync.md

````diff
@@ -167,6 +167,42 @@ GLOB for config files and analyze:

 EXTRACT: `{languages}`, `{frameworks}`, `{techStack}`

+### Detect Frontend/UI Stack (for UX/UI Agent)
+
+**CRITICAL**: If ANY frontend technology is detected, generate the UX/UI agent.
+
+#### Web Frontend Detection
+```bash
+# Check package.json for web frameworks
+grep -E '"(react|react-dom|next|vue|nuxt|svelte|@sveltejs/kit|@angular/core)"' package.json 2>/dev/null
+```
+
+SET: `{hasWebFrontend}` = true if any match
+
+#### Mobile Frontend Detection
+```bash
+# React Native / Expo
+grep -E '"(react-native|expo)"' package.json 2>/dev/null
+
+# Flutter
+test -f pubspec.yaml && echo "flutter"
+
+# SwiftUI (iOS)
+find . -name "*.swift" -exec grep -l "import SwiftUI" {} \; 2>/dev/null | head -1
+
+# Jetpack Compose (Android)
+find . -name "*.kt" -exec grep -l "androidx.compose" {} \; 2>/dev/null | head -1
+```
+
+SET: `{hasMobileFrontend}` = true if any match
+
+#### Combined Frontend Flag
+```
+{hasFrontendUI} = {hasWebFrontend} OR {hasMobileFrontend}
+```
+
+EXTRACT: `{frontendType}` = "web" | "mobile" | "both" | null
+
 ---

 ## Step 4: Regenerate ALL Context Files
````
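Taken together, the checks above reduce to two booleans and a type. Here is a bash sketch that folds them into `{hasFrontendUI}` and `{frontendType}` using the same grep/test/find probes as the template; the shell variable names are illustrative.

```bash
#!/usr/bin/env bash
# Sketch: combine web + mobile detection into one frontend flag.

has_web=false
if grep -qE '"(react|react-dom|next|vue|nuxt|svelte|@sveltejs/kit|@angular/core)"' package.json 2>/dev/null; then
  has_web=true
fi

has_mobile=false
grep -qE '"(react-native|expo)"' package.json 2>/dev/null && has_mobile=true
[ -f pubspec.yaml ] && has_mobile=true
find . -name '*.swift' -exec grep -l 'import SwiftUI' {} \; 2>/dev/null | head -1 | grep -q . && has_mobile=true
find . -name '*.kt' -exec grep -l 'androidx.compose' {} \; 2>/dev/null | head -1 | grep -q . && has_mobile=true

# Mirrors {hasFrontendUI} / {frontendType} from the template.
if $has_web && $has_mobile; then frontend_type="both"
elif $has_web;  then frontend_type="web"
elif $has_mobile; then frontend_type="mobile"
else frontend_type=""
fi

[ -n "$frontend_type" ] && echo "Frontend UI detected: $frontend_type"
```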
````diff
@@ -421,13 +457,32 @@ Analyze `{techStack}` from Step 3 and generate ONLY relevant domain agents:
 | PostgreSQL, MySQL, MongoDB, Prisma | `database.md` | `templates/subagents/domain/database.md` |
 | Docker, Kubernetes, GitHub Actions | `devops.md` | `templates/subagents/domain/devops.md` |
 | Jest, Pytest, Vitest, testing | `testing.md` | `templates/subagents/domain/testing.md` |
+| **{hasFrontendUI} = true** | `uxui.md` | `templates/agentic/agents/uxui.md` |

 For EACH detected stack:
 1. READ template from `templates/subagents/domain/{name}.md`
 2. ADAPT description with detected frameworks (e.g., "React specialist" not just "frontend")
 3. WRITE to `{globalPath}/agents/{name}.md`

-### 7.5
+### 7.5 Generate UX/UI Agent (CRITICAL for Frontend Projects)
+
+**Priority: UX > UI** - User experience is more important than visuals.
+
+IF `{hasFrontendUI}` == true:
+
+1. READ template: `templates/agentic/agents/uxui.md`
+2. WRITE to: `{globalPath}/agents/uxui.md`
+3. ADD to `{domainAgents}`: "uxui"
+
+OUTPUT: "🎨 Generated UX/UI agent for {frontendType} ({frameworks detected})"
+
+The UX/UI agent ensures:
+- **UX First**: Clarity, feedback, reduced friction, error handling, accessibility
+- **Modern UI**: Distinctive typography, bold colors, purposeful animation
+- **Anti-patterns avoided**: No "AI slop" (Inter font, purple gradients, generic layouts)
+- **Checklists**: UX and UI quality gates before shipping
+
+### 7.6 Report Generated Agents

 Track which agents were generated for output:
 - `{workflowAgents}`: Always 3 (prjct-workflow, prjct-planner, prjct-shipper)
````
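Section 7.5 is effectively a guarded template copy. A minimal sketch under stated assumptions: `$TEMPLATES_DIR`, `$globalPath` and the flag variables are shell stand-ins for the template's placeholders, not names the package defines.

```bash
#!/usr/bin/env bash
# Sketch: generate the UX/UI agent only when a frontend stack was detected.

if [ "${hasFrontendUI:-false}" = "true" ]; then
  mkdir -p "$globalPath/agents"
  cp "$TEMPLATES_DIR/agentic/agents/uxui.md" "$globalPath/agents/uxui.md"
  # Track it alongside the other generated domain agents.
  domainAgents="${domainAgents:+$domainAgents, }uxui"
  echo "🎨 Generated UX/UI agent for ${frontendType:-web}"
fi
```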
````diff
@@ -516,7 +571,10 @@ IF cloudSync AND no syncError:

 🤖 Claude Code Sub-Agents ({workflowAgents.length + domainAgents.length})
 ├── Workflow: prjct-workflow, prjct-planner, prjct-shipper
-
+├── Domain: {domainAgents.join(', ') || 'none'}
+{IF hasFrontendUI}
+└── 🎨 UX/UI: uxui.md (Priority: UX > UI)
+{ENDIF}

 {IF cloudSync}
 ☁️ Cloud Sync
@@ -581,5 +639,6 @@ Next: /p:now to start a new task
 ├── backend.md # (if Node/Go/Python API detected)
 ├── database.md # (if DB detected)
 ├── devops.md # (if Docker/K8s detected)
-
+├── testing.md # (if test framework detected)
+└── uxui.md # (if ANY frontend UI detected - web or mobile)
 ```
````
### package/templates/commands/test.md

````diff
@@ -1,6 +1,6 @@
 ---
 allowed-tools: [Bash, Read, Write, Edit]
-description: 'Run tests with auto-fix
+description: 'Run tests with auto-fix'
 timestamp-rule: 'GetTimestamp() for ALL timestamps'
 ---

@@ -9,10 +9,8 @@ timestamp-rule: 'GetTimestamp() for ALL timestamps'
 ## Usage

 ```
-/p:test [all|unit|e2e|failed|fix
+/p:test [all|unit|e2e|failed|fix]
 [--blocking] # Exit with error if tests fail
-[--testsprite] # Force TestSprite even if native runner exists
-[--scope=codebase|diff] # TestSprite: test all or only changed files
 ```

 ## Context Variables
@@ -35,90 +33,12 @@ IF file not found:
 SET defaults:
 - {testMode} = "all"
 - {blocking} = false
-- {useTestSprite} = false
-- {testScope} = "diff"

 PARSE args:
-- IF arg is "ai": {useTestSprite} = true
 - IF arg is "all|unit|e2e|failed|fix": {testMode} = arg
 - IF arg contains "--blocking": {blocking} = true
-- IF arg contains "--testsprite": {useTestSprite} = true
-- IF arg contains "--scope=codebase": {testScope} = "codebase"

-## Step 3:
-
-IF {useTestSprite}:
-READ: `{configPath}`
-
-### Check if user dismissed TestSprite
-IF config.testspriteSkip == "never":
-OUTPUT: "ℹ️ TestSprite disabled. Using native tests."
-{useTestSprite} = false
-→ Go to Step 4
-
-IF config.testspriteSkip == "later" AND config.testspriteSkipUntil > now:
-OUTPUT: "ℹ️ TestSprite reminder snoozed. Using native tests."
-{useTestSprite} = false
-→ Go to Step 4
-
-### Check for existing API key
-IF config.testspriteApiKey exists AND is not empty:
-SET: {apiKey} = config.testspriteApiKey
-→ Continue to Step 4
-
-### No API key - show options (NON-BLOCKING)
-OUTPUT: "🤖 TestSprite AI Testing (Optional)"
-OUTPUT: ""
-OUTPUT: "TestSprite can generate and run AI-powered tests."
-OUTPUT: "Free API key at: https://testsprite.com/dashboard/api-keys"
-OUTPUT: ""
-
-ASK with options:
-1. "Enter API key" → Prompt for key
-2. "Skip for now" → Use native tests this time
-3. "Remind me in a week" → Snooze reminder
-4. "Never ask again" → Disable permanently
-
-HANDLE response:
-
-IF option 1 (Enter API key):
-ASK: "Paste your TestSprite API key:"
-SET: {apiKey} = user input
-
-UPDATE config.json:
-- testspriteApiKey: {apiKey}
-
-OUTPUT: "✅ API key saved"
-→ Continue to Step 4
-
-IF option 2 (Skip for now):
-OUTPUT: "⏭️ Skipping TestSprite. Using native tests."
-{useTestSprite} = false
-→ Go to Step 4
-
-IF option 3 (Remind me in a week):
-SET: {skipUntil} = now + 7 days
-
-UPDATE config.json:
-- testspriteSkip: "later"
-- testspriteSkipUntil: {skipUntil}
-
-OUTPUT: "⏰ Will ask again in a week. Using native tests."
-{useTestSprite} = false
-→ Go to Step 4
-
-IF option 4 (Never ask again):
-UPDATE config.json:
-- testspriteSkip: "never"
-
-OUTPUT: "🔕 Won't ask again. Use --testsprite flag to enable manually."
-{useTestSprite} = false
-→ Go to Step 4
-
-## Step 4: Detect Testing Strategy
-
-IF {useTestSprite}:
-→ Go to Step 6 (TestSprite AI Testing)
+## Step 3: Detect Testing Strategy

 ### Check for Native Test Runner

````
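With the TestSprite flags removed, argument parsing collapses to a mode word plus `--blocking`. A sketch of the simplified parse, assuming a plain shell argument list; unknown arguments are simply ignored, which is one reasonable reading of the template:

```bash
#!/usr/bin/env bash
# Sketch: parse /p:test arguments after the 0.20.0 simplification.

test_mode="all"
blocking=false

for arg in "$@"; do
  case "$arg" in
    all|unit|e2e|failed|fix) test_mode="$arg" ;;   # {testMode}
    --blocking)              blocking=true ;;       # {blocking}
    *) ;;                                           # ignore anything else
  esac
done

echo "mode=$test_mode blocking=$blocking"
```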
````diff
@@ -126,27 +46,28 @@ READ: `package.json`
 IF has "scripts.test":
 {runner} = "npm"
 {runnerCmd} = "npm test"
-→ Go to Step
+→ Go to Step 4 (Native Testing)

 IF file exists: `pytest.ini` OR `pyproject.toml` with pytest:
 {runner} = "pytest"
 {runnerCmd} = "pytest"
-→ Go to Step
+→ Go to Step 4

 IF file exists: `Cargo.toml`:
 {runner} = "cargo"
 {runnerCmd} = "cargo test"
-→ Go to Step
+→ Go to Step 4

 IF no runner found:
 OUTPUT: "No test runner detected."
 OUTPUT: ""
 OUTPUT: "Options:"
-OUTPUT: "• /p:test ai - Generate tests with AI (TestSprite)"
 OUTPUT: "• Add 'test' script to package.json"
+OUTPUT: "• Add pytest.ini for Python projects"
+OUTPUT: "• Add Cargo.toml for Rust projects"
 STOP

-## Step
+## Step 4: Native Test Runner

 OUTPUT: "🧪 Running tests with {runner}..."

````
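Step 3's runner detection is a straightforward precedence chain. A bash sketch of the same order (npm script, then pytest, then cargo) follows; the `jq` check for `scripts.test` and the `[tool.pytest` grep are assumptions about how the checks could be made mechanical, not what the template itself runs.

```bash
#!/usr/bin/env bash
# Sketch: detect a native test runner in the template's order.

runner="" runner_cmd=""

if [ -f package.json ] && jq -e '.scripts.test' package.json >/dev/null 2>&1; then
  runner="npm";    runner_cmd="npm test"
elif [ -f pytest.ini ] || grep -q '\[tool\.pytest' pyproject.toml 2>/dev/null; then
  runner="pytest"; runner_cmd="pytest"
elif [ -f Cargo.toml ]; then
  runner="cargo";  runner_cmd="cargo test"
fi

if [ -z "$runner" ]; then
  echo "No test runner detected."
  echo "Options:"
  echo "• Add 'test' script to package.json"
  echo "• Add pytest.ini for Python projects"
  echo "• Add Cargo.toml for Rust projects"
  exit 1
fi

echo "🧪 Running tests with $runner..."
$runner_cmd
```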
````diff
@@ -177,62 +98,9 @@ IF {testMode} == "fix" AND {testStatus} == "failed":
 BASH: `{runnerCmd} 2>&1`
 CAPTURE and re-parse results

-→ Go to Step
-
-## Step 6: TestSprite AI Testing
-
-OUTPUT: "🤖 Running AI-powered tests with TestSprite..."
-
-### 6.1 Bootstrap Tests
-
-CALL MCP TOOL: `testsprite_bootstrap_tests`
-PARAMETERS:
-- projectPath: current working directory (absolute path)
-- type: auto-detect based on project (frontend/backend)
-- testScope: {testScope}
-
-CAPTURE: bootstrap result
-
-### 6.2 Generate and Execute Tests
-
-CALL MCP TOOL: `testsprite_generate_code_and_execute`
-PARAMETERS:
-- projectName: from package.json "name" or directory name
-- projectPath: current working directory (absolute path)
-- testIds: [] (run all)
-
-CAPTURE: execution result
+→ Go to Step 5 (Results)

-
-
-READ: `testsprite_tests/test_results.json`
-
-EXTRACT:
-- {passed}: count of passed tests
-- {failed}: count of failed tests
-- {coverage}: coverage percentage if available
-
-IF {failed} == 0:
-{testStatus} = "passed"
-ELSE:
-{testStatus} = "failed"
-
-### Handle Fix Mode with TestSprite
-IF {testMode} == "fix" AND {testStatus} == "failed":
-OUTPUT: "🔧 Running auto-healing..."
-
-CALL MCP TOOL: `testsprite_rerun_tests`
-PARAMETERS:
-- projectPath: current working directory
-
-READ: `testsprite_tests/test_results.json` (updated)
-Re-parse results
-
-→ Go to Step 8 (Results)
-
-## Step 7: (Reserved for future expansion)
-
-## Step 8: Results & Response
+## Step 5: Results & Response

 ### Log to Memory

@@ -240,7 +108,7 @@ SET: {now} = GetTimestamp()

 APPEND to `{memoryPath}`:
 ```json
-{"ts":"{now}","type":"test_run","tool":"{runner
+{"ts":"{now}","type":"test_run","tool":"{runner}","passed":{passed},"failed":{failed},"mode":"{testMode}"}
 ```

 ### Check Blocking Mode
@@ -268,10 +136,7 @@ IF {testStatus} == "passed":
 IF {testStatus} == "failed":
 OUTPUT: "❌ {failed} tests failing"
 OUTPUT: ""
-
-OUTPUT: "📋 Report: testsprite_tests/TestSprite_MCP_Test_Report.html"
-ELSE:
-OUTPUT: "{testOutput}" # Show relevant failure output
+OUTPUT: "{testOutput}" # Show relevant failure output
 OUTPUT: ""
 OUTPUT: "💡 Auto-fix: /p:test fix"
 OUTPUT: ""
@@ -282,11 +147,8 @@ IF {testStatus} == "failed":
 | Error | Response | Action |
 |-------|----------|--------|
 | No project | "No prjct project" | STOP |
-| No test runner | "No test runner detected" |
-| TestSprite API key missing | Show options | Fallback to native (non-blocking) |
-| TestSprite dismissed | Skip silently | Use native tests |
+| No test runner | "No test runner detected" | Show setup options |
 | Tests timeout | "Tests timed out" | Suggest increasing timeout |
-| MCP tool not available | "TestSprite not configured" | Fallback to native tests |

 ## Examples

@@ -303,13 +165,13 @@ IF {testStatus} == "failed":
 🎯 Next: /p:ship
 ```

-### Example 2:
+### Example 2: Tests with Failures
 ```
-
+🧪 Running tests with npm...

 ❌ 3 tests failing

-
+[test output here]

 💡 Auto-fix: /p:test fix

@@ -324,32 +186,3 @@ IF {testStatus} == "failed":

 Fix failing tests or run without --blocking flag.
 ```
-
-### Example 4: First Time TestSprite (No API Key)
-```
-🤖 TestSprite AI Testing (Optional)
-
-TestSprite can generate and run AI-powered tests.
-Free API key at: https://testsprite.com/dashboard/api-keys
-
-Options:
-1. Enter API key
-2. Skip for now
-3. Remind me in a week
-4. Never ask again
-
-> 2
-
-⏭️ Skipping TestSprite. Using native tests.
-
-🧪 Running tests with npm...
-...
-```
-
-### Example 5: TestSprite Snoozed
-```
-ℹ️ TestSprite reminder snoozed. Using native tests.
-
-🧪 Running tests with npm...
-...
-```
````
### package/templates/mcp-config.json

````diff
@@ -4,14 +4,6 @@
 "command": "npx",
 "args": ["-y", "@upstash/context7-mcp@latest"],
 "description": "Library documentation lookup - use for framework/library docs"
-},
-"testsprite": {
-"command": "npx",
-"args": ["-y", "@testsprite/testsprite-mcp@latest"],
-"env": {
-"API_KEY": "${TESTSPRITE_API_KEY}"
-},
-"description": "AI-powered test generation and execution"
 }
 },
 "usage": {
@@ -30,34 +22,6 @@
 "resolve-library-id('nextjs') → get-library-docs('/vercel/next.js', 'app router')",
 "resolve-library-id('tailwindcss') → get-library-docs('/tailwindlabs/tailwindcss', 'configuration')"
 ]
-},
-"testsprite": {
-"when": [
-"User runs /p:test ai or /p:test --testsprite",
-"Project needs AI-generated tests for frontend or backend",
-"Auto-fixing flaky or broken tests",
-"Generating comprehensive test coverage"
-],
-"tools": [
-"testsprite_bootstrap_tests: Initialize testing environment",
-"testsprite_generate_code_summary: Analyze project architecture",
-"testsprite_generate_standardized_prd: Generate structured requirements",
-"testsprite_generate_frontend_test_plan: Create frontend test plan",
-"testsprite_generate_backend_test_plan: Create backend test plan",
-"testsprite_generate_code_and_execute: Generate and run tests",
-"testsprite_rerun_tests: Re-run tests with auto-healing"
-],
-"examples": [
-"testsprite_bootstrap_tests(projectPath, 'frontend', 'diff')",
-"testsprite_generate_code_and_execute(projectName, projectPath)"
-],
-"apiKeySetup": "Get free API key at https://testsprite.com/dashboard/api-keys",
-"configFields": {
-"testspriteApiKey": "API key (optional)",
-"testspriteSkip": "'never' | 'later' | null",
-"testspriteSkipUntil": "ISO timestamp for snooze"
-},
-"nonBlocking": true
 }
 }
 }
````