create-dss-project 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/create-dss-project.js +4 -0
- package/lib/index.js +80 -0
- package/lib/project-types.js +74 -0
- package/lib/prompts.js +42 -0
- package/lib/scaffold.js +169 -0
- package/package.json +30 -0
- package/template/.github/workflows/dashboard-build.yml +27 -0
- package/template/.github/workflows/template-lint.yml +71 -0
- package/template/CHANGELOG.md +43 -0
- package/template/CLAUDE.md +145 -0
- package/template/LICENSE +21 -0
- package/template/README.md +201 -0
- package/template/STATUS.md +34 -0
- package/template/context/competitor-snapshot.md +27 -0
- package/template/context/market-snapshot.md +32 -0
- package/template/context/pipeline-state.md +36 -0
- package/template/context/project-state.md +45 -0
- package/template/dashboard/CLAUDE.md +36 -0
- package/template/dashboard/DEPLOY.md +60 -0
- package/template/dashboard/build-data.js +395 -0
- package/template/dashboard/competitors.html +143 -0
- package/template/dashboard/css/styles.css +143 -0
- package/template/dashboard/data/.gitkeep +0 -0
- package/template/dashboard/decisions.html +132 -0
- package/template/dashboard/index.html +152 -0
- package/template/dashboard/js/app.js +59 -0
- package/template/dashboard/js/overview.js +50 -0
- package/template/dashboard/js/sidebar.js +62 -0
- package/template/dashboard/js/tailwind-config.js +52 -0
- package/template/dashboard/package-lock.json +351 -0
- package/template/dashboard/package.json +17 -0
- package/template/dashboard/pipeline.html +149 -0
- package/template/dashboard/research.html +215 -0
- package/template/dashboard/robots.txt +2 -0
- package/template/dashboard/scoring.html +187 -0
- package/template/dashboard/timeline.html +165 -0
- package/template/dashboard/vercel.json +5 -0
- package/template/dashboard/watch.js +57 -0
- package/template/data/.gitkeep +0 -0
- package/template/discovery/calls/.gitkeep +0 -0
- package/template/discovery/outreach/.gitkeep +0 -0
- package/template/discovery/prep/.gitkeep +0 -0
- package/template/docs/decks/.gitkeep +0 -0
- package/template/docs/executive-summary.md +104 -0
- package/template/docs/getting-started.md +274 -0
- package/template/docs/memos/evidence-grading.md +27 -0
- package/template/docs/memos/housekeeping-reference.md +101 -0
- package/template/docs/memos/reference-context.md +30 -0
- package/template/docs/output/project-activity.md +8 -0
- package/template/docs/output/status-blurb.md +4 -0
- package/template/docs/output/work-log.md +8 -0
- package/template/docs/skill-authoring-guide.md +212 -0
- package/template/memory/MEMORY.md +84 -0
- package/template/memory/decisions.md +13 -0
- package/template/memory/discovery.md +48 -0
- package/template/memory/research.md +33 -0
- package/template/memory/scoring.md +34 -0
- package/template/project.config.example.json +31 -0
- package/template/research/competitors/.gitkeep +0 -0
- package/template/research/market/.gitkeep +0 -0
- package/template/research/technical/.gitkeep +0 -0
- package/template/scripts/.gitkeep +0 -0
- package/template/scripts/build-cli-template.sh +32 -0
- package/template/scripts/health-check.sh +152 -0
- package/template/scripts/reset-to-template.sh +115 -0
- package/template/scripts/validate-placeholders.sh +47 -0
- package/template/skills/compare-options/SKILL.md +97 -0
- package/template/skills/critical-reasoning/SKILL.md +107 -0
- package/template/skills/decision/SKILL.md +75 -0
- package/template/skills/enrich-entity/SKILL.md +107 -0
- package/template/skills/health-check/SKILL.md +144 -0
- package/template/skills/onboard/SKILL.md +434 -0
- package/template/skills/outreach-sequence/SKILL.md +79 -0
- package/template/skills/pipeline-update/SKILL.md +90 -0
- package/template/skills/process-call/SKILL.md +96 -0
- package/template/skills/rebuild-snapshots/SKILL.md +88 -0
- package/template/skills/session-end/SKILL.md +120 -0
- package/template/skills/session-start/SKILL.md +93 -0
- package/template/skills/synthesise/SKILL.md +108 -0
- package/template/skills/weekly-report/SKILL.md +79 -0
- package/template/templates/call-notes.md +67 -0
- package/template/templates/call-prep.md +65 -0
- package/template/templates/entity-teardown.md +58 -0
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# reset-to-template.sh
# Strips project-specific data and restores the template to a clean state.
# WARNING: This is destructive — all research, discovery, and dashboard data will be deleted.
# Uses Australian English throughout.

set -euo pipefail

# Resolve the script's own directory so the reset works regardless of CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

echo "============================================="
echo " TEMPLATE RESET — DESTRUCTIVE OPERATION"
echo "============================================="
echo ""
echo "This will permanently delete:"
echo " - All files in research/competitors/, research/market/, research/technical/"
echo " - All files in discovery/calls/, discovery/prep/, discovery/outreach/"
echo " - data/entities.csv"
echo " - All files in dashboard/data/ (generated JSON)"
echo " - project.config.json"
echo " - docs/output/weekly-report-*.md"
echo ""
echo "This action CANNOT be undone (unless you have a git history to revert to)."
echo ""

# -r stops read from mangling backslashes in the typed response.
read -r -p "Are you sure you want to proceed? Type 'yes' to confirm: " CONFIRM

if [ "$CONFIRM" != "yes" ]; then
  echo ""
  echo "Aborted. No files were deleted."
  exit 0
fi

echo ""
echo "Resetting project..."
|
|
38
|
+
|
|
39
|
+
# Helper: remove all files in a directory except .gitkeep
# Globals:   PROJECT_ROOT (read) — used to print paths relative to the repo root
# Arguments: $1 - directory to clean
# Outputs:   one " Deleted: <path>" line per removed file, to stdout
clean_directory() {
  local dir="$1"
  local f found=0
  if [ -d "$dir" ]; then
    # NUL-delimited iteration is safe for filenames containing spaces,
    # leading/trailing whitespace, or newlines; process substitution keeps
    # the 'found' flag visible in the current shell (a pipe would not).
    while IFS= read -r -d '' f; do
      found=1
      # Quote the pattern so glob characters in PROJECT_ROOT are literal.
      echo " Deleted: ${f#"$PROJECT_ROOT"/}"
      rm -f -- "$f"
    done < <(find "$dir" -type f -not -name ".gitkeep" -print0 2>/dev/null)
    if [ "$found" -eq 1 ]; then
      # Ensure .gitkeep exists so the directory is preserved in git
      touch "$dir/.gitkeep"
    fi
  fi
}
|
|
55
|
+
|
|
56
|
+
# Research directories
echo ""
echo "--- Cleaning research/ ---"
clean_directory "$PROJECT_ROOT/research/competitors"
clean_directory "$PROJECT_ROOT/research/market"
clean_directory "$PROJECT_ROOT/research/technical"

# Discovery directories
echo ""
echo "--- Cleaning discovery/ ---"
clean_directory "$PROJECT_ROOT/discovery/calls"
clean_directory "$PROJECT_ROOT/discovery/prep"
clean_directory "$PROJECT_ROOT/discovery/outreach"

# Data directory
echo ""
echo "--- Cleaning data/ ---"
if [ -f "$PROJECT_ROOT/data/entities.csv" ]; then
  echo " Deleted: data/entities.csv"
  rm -f "$PROJECT_ROOT/data/entities.csv"
fi

# Dashboard data
echo ""
echo "--- Cleaning dashboard/data/ ---"
clean_directory "$PROJECT_ROOT/dashboard/data"

# Project config
echo ""
echo "--- Removing project.config.json ---"
if [ -f "$PROJECT_ROOT/project.config.json" ]; then
  echo " Deleted: project.config.json"
  rm -f "$PROJECT_ROOT/project.config.json"
else
  echo " (not present, skipping)"
fi

# Weekly reports
echo ""
echo "--- Removing weekly reports ---"
FOUND_REPORTS=0
# NUL-delimited loop handles report filenames with unusual characters;
# process substitution keeps FOUND_REPORTS visible in the current shell.
while IFS= read -r -d '' f; do
  FOUND_REPORTS=1
  echo " Deleted: ${f#"$PROJECT_ROOT"/}"
  rm -f -- "$f"
done < <(find "$PROJECT_ROOT/docs/output" -name "weekly-report-*.md" -print0 2>/dev/null || true)
if [ "$FOUND_REPORTS" -eq 0 ]; then
  echo " (none found, skipping)"
fi

echo ""
echo "============================================="
echo " Reset Complete"
echo "============================================="
echo ""
echo "NOTE: Template placeholder values (e.g. {{PROJECT_NAME}}) have NOT been"
echo "restored in markdown files. For a fully clean template, re-clone the"
echo "repository from the original source."
echo ""
echo "Next step: Run /onboard to set up a new project."
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# validate-placeholders.sh
# Scans the project for unreplaced {{PLACEHOLDER}} tokens.
# Exits 0 if none found, 1 if any remain.
# Uses Australian English throughout.

set -euo pipefail

# Determine the project root (one level up from scripts/)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# If a directory argument is provided, search only that path
SEARCH_PATH="${1:-$PROJECT_ROOT}"

echo "=== Placeholder Validation ==="
echo "Searching: $SEARCH_PATH"
echo ""

# Find all matching files. The pattern requires at least one character
# between the braces so a literal "{{}}" is not flagged. Path exclusions
# use --exclude-dir/--exclude instead of post-filtering with `grep -v`,
# which would also drop genuine matches whose *line text* happens to
# mention "node_modules" or the example config filename.
MATCHES=$(grep -rn '{{[A-Z_][A-Z_]*}}' \
  --include="*.md" \
  --include="*.js" \
  --include="*.html" \
  --include="*.json" \
  --exclude-dir=node_modules \
  --exclude-dir=.git \
  --exclude=project.config.example.json \
  "$SEARCH_PATH" 2>/dev/null \
  || true)

if [ -z "$MATCHES" ]; then
  echo "No unreplaced placeholders found. All clear."
  exit 0
fi

# Count the total number of placeholder occurrences
TOTAL=$(echo "$MATCHES" | wc -l | tr -d ' ')

echo "Found $TOTAL unreplaced placeholder(s):"
echo ""
echo "$MATCHES"
echo ""
echo "FAIL: $TOTAL placeholder(s) still need replacing."
echo "Run /onboard or manually replace each {{PLACEHOLDER}} with the correct value."

exit 1
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: compare-options
|
|
3
|
+
description: Score and compare 2-5 strategic options using the project's scoring framework. Produces a side-by-side comparison with evidence-backed scores. Use when choosing between strategic directions, entry angles, or product approaches.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Compare Options
|
|
7
|
+
|
|
8
|
+
Structured comparison of strategic options using the scoring framework defined in `memory/scoring.md`.
|
|
9
|
+
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## Step 0 — Define Options
|
|
13
|
+
|
|
14
|
+
Ask the user to describe 2–5 options. For each option, capture:
|
|
15
|
+
- **Code** (short identifier, e.g., "A", "SaaS-first", "Partner-led")
|
|
16
|
+
- **Description** (1–2 sentences)
|
|
17
|
+
|
|
18
|
+
If options are already defined in `memory/scoring.md` → use those.
|
|
19
|
+
|
|
20
|
+
## Step 1 — Load Scoring Framework
|
|
21
|
+
|
|
22
|
+
Read `memory/scoring.md` to get:
|
|
23
|
+
- The scoring dimensions (e.g., White Space, Urgency, Feasibility, Defensibility, Revenue Potential)
|
|
24
|
+
- Any existing scores
|
|
25
|
+
- The current recommended strategy (if any)
|
|
26
|
+
|
|
27
|
+
## Step 2 — Score Each Option
|
|
28
|
+
|
|
29
|
+
For each option, score against each dimension (1–5):
|
|
30
|
+
|
|
31
|
+
| Score | Meaning |
|
|
32
|
+
|-------|---------|
|
|
33
|
+
| 5 | Exceptional — clear, strong evidence |
|
|
34
|
+
| 4 | Good — solid evidence, minor gaps |
|
|
35
|
+
| 3 | Moderate — mixed evidence |
|
|
36
|
+
| 2 | Weak — limited evidence, significant concerns |
|
|
37
|
+
| 1 | Poor — evidence against, or no evidence |
|
|
38
|
+
|
|
39
|
+
**Every score must cite evidence.** Reference specific findings from:
|
|
40
|
+
- `research/competitors/*.md`
|
|
41
|
+
- `research/market/*.md`
|
|
42
|
+
- `discovery/calls/*.md`
|
|
43
|
+
- `memory/research.md`
|
|
44
|
+
- `memory/discovery.md`
|
|
45
|
+
|
|
46
|
+
Format:
|
|
47
|
+
```
|
|
48
|
+
### Option {code}: {name}
|
|
49
|
+
|
|
50
|
+
| Dimension | Score | Evidence |
|
|
51
|
+
|-----------|-------|----------|
|
|
52
|
+
| {dim 1} | {1-5} | {specific evidence with source and grade} |
|
|
53
|
+
| {dim 2} | {1-5} | {specific evidence} |
|
|
54
|
+
...
|
|
55
|
+
|
|
56
|
+
**Total**: {sum} / {max possible}
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
## Step 3 — Side-by-Side Comparison
|
|
60
|
+
|
|
61
|
+
Produce a comparison matrix:
|
|
62
|
+
|
|
63
|
+
```
|
|
64
|
+
| Dimension | {Option A} | {Option B} | {Option C} | ... |
|
|
65
|
+
|-----------|-----------|-----------|-----------|-----|
|
|
66
|
+
| {dim 1} | {score} | {score} | {score} | |
|
|
67
|
+
| {dim 2} | {score} | {score} | {score} | |
|
|
68
|
+
...
|
|
69
|
+
| **Total** | **{sum}** | **{sum}** | **{sum}** | |
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
## Step 4 — Pressure-Test the Leader
|
|
73
|
+
|
|
74
|
+
The highest-scoring option gets a `/critical-reasoning` pass. Ask the user:
|
|
75
|
+
"Option {X} scores highest. Want to pressure-test it before recommending?"
|
|
76
|
+
|
|
77
|
+
If yes → run `/critical-reasoning` on that option specifically.
|
|
78
|
+
|
|
79
|
+
## Step 5 — Update Scoring Memory
|
|
80
|
+
|
|
81
|
+
Write results to `memory/scoring.md`:
|
|
82
|
+
- Update the Options Reference with any new options
|
|
83
|
+
- Update the Scoring Matrix with all scores and evidence
|
|
84
|
+
- Update the Recommended Strategy section:
|
|
85
|
+
- **Lead option**: {code} — {name}
|
|
86
|
+
- **Rationale**: {why it scored highest, 2-3 sentences}
|
|
87
|
+
- **Key risks**: {from the scoring and critical reasoning}
|
|
88
|
+
- **Fallback**: {second-highest option and when to switch}
|
|
89
|
+
|
|
90
|
+
## Step 6 — Report
|
|
91
|
+
|
|
92
|
+
> **Options compared — {count} options scored across {count} dimensions**
|
|
93
|
+
> - **Winner**: {option code} — {name} ({score}/{max})
|
|
94
|
+
> - **Runner-up**: {option code} — {name} ({score}/{max})
|
|
95
|
+
> - **Biggest differentiator**: {dimension where the gap is largest}
|
|
96
|
+
> - **Biggest risk for winner**: {from critical reasoning or lowest-scoring dimension}
|
|
97
|
+
> - Output: `memory/scoring.md` updated
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: critical-reasoning
|
|
3
|
+
description: Pressure-test any idea, strategy, decision, or plan. Use when you want to challenge assumptions, find blind spots, or stress-test thinking before committing. Works for any domain — not just strategy.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Critical Reasoning
|
|
7
|
+
|
|
8
|
+
Pressure-test an idea, strategy, decision, or plan before committing to it. This skill helps you find what you might be missing.
|
|
9
|
+
|
|
10
|
+
**When to use**: Before making a decision, after forming a hypothesis, when something feels too good to be true, or when you want a second opinion on your thinking.
|
|
11
|
+
|
|
12
|
+
---
|
|
13
|
+
|
|
14
|
+
## How to Run
|
|
15
|
+
|
|
16
|
+
1. Identify what's being tested — a strategy, hypothesis, decision, plan, or assumption
|
|
17
|
+
2. Run through all 4 lenses in order
|
|
18
|
+
3. Write findings directly into the relevant project file (executive summary, decisions.md, or a new memo)
|
|
19
|
+
|
|
20
|
+
---
|
|
21
|
+
|
|
22
|
+
## Lens 1 — Is It True?
|
|
23
|
+
|
|
24
|
+
Challenge the evidence behind the idea.
|
|
25
|
+
|
|
26
|
+
Ask:
|
|
27
|
+
- **What's being claimed?** State it in one sentence.
|
|
28
|
+
- **What evidence supports it?** List each piece with its source and evidence grade.
|
|
29
|
+
- **What would prove it wrong?** If nothing could prove it wrong, it's not a testable claim.
|
|
30
|
+
- **Are we confusing correlation with causation?** Just because two things happened together doesn't mean one caused the other.
|
|
31
|
+
- **What's the base rate?** How often does this type of thing actually succeed? (e.g., "Most market entries fail" is a useful base rate.)
|
|
32
|
+
- **What are we assuming without evidence?** List every hidden assumption. Which ones, if wrong, would collapse the entire argument?
|
|
33
|
+
|
|
34
|
+
For each assumption found, rate it:
|
|
35
|
+
- **Load-bearing**: If this is wrong, the whole idea falls apart
|
|
36
|
+
- **Important**: Changes the approach significantly
|
|
37
|
+
- **Minor**: Can be adjusted without major impact
|
|
38
|
+
|
|
39
|
+
---
|
|
40
|
+
|
|
41
|
+
## Lens 2 — What Happens Next?
|
|
42
|
+
|
|
43
|
+
Think through the consequences — intended and unintended.
|
|
44
|
+
|
|
45
|
+
Ask:
|
|
46
|
+
- **First-order effects**: What happens immediately if we do this?
|
|
47
|
+
- **Second-order effects**: How will others react? Competitors, customers, partners, team members?
|
|
48
|
+
- **Third-order effects**: What feedback loops could this create? Does success make the next step easier or harder?
|
|
49
|
+
- **Who benefits and who loses?** Map the stakeholders. Those who lose will push back.
|
|
50
|
+
- **What changes in the environment?** Will the market, technology, or regulatory landscape shift in ways that help or hurt this plan?
|
|
51
|
+
- **Does this create lock-in?** If we go down this path, what options does it close off?
|
|
52
|
+
|
|
53
|
+
---
|
|
54
|
+
|
|
55
|
+
## Lens 3 — What Could Go Wrong?
|
|
56
|
+
|
|
57
|
+
Identify risks, objections, and failure modes.
|
|
58
|
+
|
|
59
|
+
Ask:
|
|
60
|
+
- **What's the worst realistic outcome?** Not the absolute worst — the worst that could plausibly happen.
|
|
61
|
+
- **Why hasn't someone already done this?** If the opportunity is obvious, there's usually a reason it hasn't been seized.
|
|
62
|
+
- **Why would a smart, informed person disagree?** Steel-man the opposing view.
|
|
63
|
+
- **What external shocks could derail this?** Economic changes, competitor moves, regulatory shifts, technology disruption.
|
|
64
|
+
- **What are we depending on that we don't control?** Partners, market timing, customer behaviour, team capacity.
|
|
65
|
+
- **How fast could things go wrong?** Some risks unfold slowly (giving time to react). Others hit suddenly with no warning.
|
|
66
|
+
|
|
67
|
+
Rate each risk:
|
|
68
|
+
- **Probability**: Low / Medium / High
|
|
69
|
+
- **Impact**: Minor / Significant / Catastrophic
|
|
70
|
+
- **Speed**: Gradual / Sudden
|
|
71
|
+
- **Our control**: Full / Partial / None
|
|
72
|
+
|
|
73
|
+
---
|
|
74
|
+
|
|
75
|
+
## Lens 4 — Can It Actually Be Done?
|
|
76
|
+
|
|
77
|
+
Test practical feasibility and execution risk.
|
|
78
|
+
|
|
79
|
+
Ask:
|
|
80
|
+
- **Do we have the skills, people, and resources?** Be honest — not what we wish we had, but what we actually have today.
|
|
81
|
+
- **What's the hardest part?** Every plan has a crux. What is it, and how confident are we in solving it?
|
|
82
|
+
- **What has to happen first?** Map the sequence. What's the critical path? What's blocking?
|
|
83
|
+
- **Is the timeline realistic?** Plans almost always take longer than expected. Where are the bottlenecks?
|
|
84
|
+
- **Can we start small and test?** Is there a way to validate the core assumption with minimal investment before going all-in?
|
|
85
|
+
- **What would make us stop?** Define the exit criteria up front. What evidence would tell us to pivot or abandon?
|
|
86
|
+
|
|
87
|
+
---
|
|
88
|
+
|
|
89
|
+
## Synthesis
|
|
90
|
+
|
|
91
|
+
After running all 4 lenses, produce a summary:
|
|
92
|
+
|
|
93
|
+
1. **Verdict**: Strong / Promising but risky / Needs more evidence / Weak — with a one-sentence explanation
|
|
94
|
+
2. **Load-bearing assumptions**: The 2-3 assumptions that matter most (from Lens 1)
|
|
95
|
+
3. **Top risks**: The 2-3 biggest risks (from Lens 3), rated by probability × impact
|
|
96
|
+
4. **Strongest objection**: The single best argument against this idea (from Lens 3)
|
|
97
|
+
5. **Recommended actions**: What to do next — validate assumptions, mitigate risks, test small, or proceed with confidence
|
|
98
|
+
6. **Confidence level**: How confident are we? What would increase or decrease confidence?
|
|
99
|
+
|
|
100
|
+
---
|
|
101
|
+
|
|
102
|
+
## Tips
|
|
103
|
+
|
|
104
|
+
- **Don't skip lenses because they feel redundant.** Each one catches different things.
|
|
105
|
+
- **Write down findings as you go.** Don't just think through them — the act of writing reveals gaps.
|
|
106
|
+
- **Be specific.** "It might not work" is useless. "Conversion rates below 3% would make unit economics negative" is useful.
|
|
107
|
+
- **It's OK to conclude the idea is strong.** Critical reasoning isn't about finding fault — it's about finding truth.
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: decision
|
|
3
|
+
description: Record a strategic decision with full rationale, alternatives considered, evidence, and reversibility assessment. Updates memory files and executive summary. Use whenever a meaningful strategic choice is made.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Decision
|
|
7
|
+
|
|
8
|
+
Record a strategic decision with full context so future sessions understand what was decided and why.
|
|
9
|
+
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## Step 0 — Capture the Decision
|
|
13
|
+
|
|
14
|
+
Ask the user:
|
|
15
|
+
|
|
16
|
+
1. **What was decided?** — one clear sentence
|
|
17
|
+
2. **What alternatives were considered?** — list the options that were rejected
|
|
18
|
+
3. **What evidence supports this choice?** — specific findings, data, or call insights
|
|
19
|
+
4. **Is this reversible?** — Easy (change anytime), Moderate (some rework), Hard (significant sunk cost), Irreversible
|
|
20
|
+
5. **What would cause us to revisit this?** — specific trigger or evidence that would reopen the decision
|
|
21
|
+
|
|
22
|
+
## Step 1 — Optional: Pressure-Test
|
|
23
|
+
|
|
24
|
+
If the decision is rated Moderate, Hard, or Irreversible → ask the user:
|
|
25
|
+
"This is a {difficulty}-to-reverse decision. Want to run `/critical-reasoning` on it before recording?"
|
|
26
|
+
|
|
27
|
+
If yes → run `/critical-reasoning` first. If no → proceed.
|
|
28
|
+
|
|
29
|
+
## Step 2 — Write to Decision Log
|
|
30
|
+
|
|
31
|
+
Append to `memory/decisions.md`:
|
|
32
|
+
|
|
33
|
+
```
|
|
34
|
+
### Decision {N}: {title}
|
|
35
|
+
|
|
36
|
+
**Date**: {date}
|
|
37
|
+
**Decided**: {what was decided}
|
|
38
|
+
**Alternatives considered**:
|
|
39
|
+
- {option A} — rejected because {reason}
|
|
40
|
+
- {option B} — rejected because {reason}
|
|
41
|
+
|
|
42
|
+
**Evidence**:
|
|
43
|
+
- {evidence 1} [{grade}]
|
|
44
|
+
- {evidence 2} [{grade}]
|
|
45
|
+
|
|
46
|
+
**Reversibility**: {Easy/Moderate/Hard/Irreversible}
|
|
47
|
+
**Revisit trigger**: {what would cause us to reopen this}
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## Step 3 — Update MEMORY.md
|
|
51
|
+
|
|
52
|
+
Add a one-liner to `memory/MEMORY.md` Key Decisions section:
|
|
53
|
+
|
|
54
|
+
```
|
|
55
|
+
{N}. {decision summary} ({reversibility}) — {date}
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
## Step 4 — Update Executive Summary
|
|
59
|
+
|
|
60
|
+
If the decision affects the project strategy (product, GTM, positioning, kill conditions):
|
|
61
|
+
- Update the relevant section of `docs/executive-summary.md`
|
|
62
|
+
- Note the decision and its rationale
|
|
63
|
+
|
|
64
|
+
If the decision is operational (tooling, process, scheduling) → skip this step.
|
|
65
|
+
|
|
66
|
+
## Step 5 — Rebuild Snapshots
|
|
67
|
+
|
|
68
|
+
If the decision affects strategy → run `/rebuild-snapshots` for `context/project-state.md` at minimum.
|
|
69
|
+
|
|
70
|
+
## Step 6 — Confirm
|
|
71
|
+
|
|
72
|
+
> **Decision recorded — #{N}: {title}**
|
|
73
|
+
> - Reversibility: {rating}
|
|
74
|
+
> - Revisit trigger: {trigger}
|
|
75
|
+
> - Files updated: memory/decisions.md, memory/MEMORY.md{, docs/executive-summary.md if applicable}
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: enrich-entity
|
|
3
|
+
description: Produce complete enrichment for a {{ENTITY_TYPE}} so it appears fully populated in the dashboard and research files. Use when adding a new entity to the project or when an entity has incomplete research.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Enrich Entity
|
|
7
|
+
|
|
8
|
+
Produce complete enrichment for a {{ENTITY_TYPE}} so it appears fully populated in the dashboard grid. Empty columns in the capability map mean either the research files are missing or the map hasn't been updated.
|
|
9
|
+
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## How the Dashboard Gets Its Data
|
|
13
|
+
|
|
14
|
+
```
|
|
15
|
+
research/competitors/{slug}-*.md ──▶ build-data.js ──▶ competitors.json
|
|
16
|
+
memory/research.md (capability map table)
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
An entity needs entries in **both** the capability map (memory/research.md) and individual research files to be fully enriched.
|
|
20
|
+
|
|
21
|
+
---
|
|
22
|
+
|
|
23
|
+
## Step 1 — Check What Already Exists
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
# Which research files exist?
|
|
27
|
+
ls research/competitors/ | grep SLUG
|
|
28
|
+
|
|
29
|
+
# Is it in the capability map table?
|
|
30
|
+
grep -i "NAME" memory/research.md
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
If all fields are populated → no action needed. If files exist but columns are empty → the capability map needs updating (Step 4). If no files → full research required (Step 2–5).
|
|
34
|
+
|
|
35
|
+
---
|
|
36
|
+
|
|
37
|
+
## Step 2 — Research (if files are missing)
|
|
38
|
+
|
|
39
|
+
Minimum sources before writing anything:
|
|
40
|
+
|
|
41
|
+
| Source | What to get |
|
|
42
|
+
|--------|-------------|
|
|
43
|
+
| Company website | Features, pricing page, target customers, platform support |
|
|
44
|
+
| LinkedIn (company + founders) | Team size, employee count, prior employers |
|
|
45
|
+
| Review platforms (G2, Capterra, Trustpilot) | Customer sentiment, praise/complaint themes, ratings |
|
|
46
|
+
| Industry press | Funding, acquisitions, product launches |
|
|
47
|
+
| Funding databases (Crunchbase, PitchBook) | Funding rounds, investors, revenue estimates |
|
|
48
|
+
|
|
49
|
+
---
|
|
50
|
+
|
|
51
|
+
## Step 3 — Create Research Files
|
|
52
|
+
|
|
53
|
+
Files go in `research/competitors/`. Slug = lowercase name, spaces replaced with hyphens.
|
|
54
|
+
|
|
55
|
+
| File | Key sections |
|
|
56
|
+
|------|-------------|
|
|
57
|
+
| `{slug}-overview.md` | Founders, founding problem, entry wedge, metrics, competitive position |
|
|
58
|
+
| `{slug}-capabilities.md` | Feature comparison, pricing model, cost ranges, GTM motion, lock-in analysis |
|
|
59
|
+
| `{slug}-trajectory.md` | Investment signals (funding/hiring/product), roadmap, convergence risk, 12-month outlook |
|
|
60
|
+
| `{slug}-relevance.md` | How this entity relates to {{PROJECT_NAME}} — complement, competitor, or partner |
|
|
61
|
+
|
|
62
|
+
Each file must start with:
|
|
63
|
+
```markdown
|
|
64
|
+
# {Name} — {Topic}
|
|
65
|
+
|
|
66
|
+
**Research Date**: YYYY-MM-DD
|
|
67
|
+
**Analyst**: {{PROJECT_NAME}} Pod
|
|
68
|
+
|
|
69
|
+
## TL;DR
|
|
70
|
+
|
|
71
|
+
<!-- 2–3 sentence summary -->
|
|
72
|
+
|
|
73
|
+
---
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
Every factual claim must be tagged with an evidence grade: [CONFIRMED], [SECONDARY], [INFERENCE], or [ASSUMPTION]. See `docs/memos/evidence-grading.md`.
|
|
77
|
+
|
|
78
|
+
---
|
|
79
|
+
|
|
80
|
+
## Step 4 — Update Capability Map
|
|
81
|
+
|
|
82
|
+
Add or update the entity's row in `memory/research.md` Entity Capability Map table. Fill all columns.
|
|
83
|
+
|
|
84
|
+
---
|
|
85
|
+
|
|
86
|
+
## Step 5 — Build and Verify
|
|
87
|
+
|
|
88
|
+
```bash
|
|
89
|
+
cd dashboard && node build-data.js
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
**Verification checklist**:
|
|
93
|
+
- [ ] Entity appears in `dashboard/data/competitors.json`
|
|
94
|
+
- [ ] All capability map columns populated
|
|
95
|
+
- [ ] All 4 research files exist in `research/competitors/`
|
|
96
|
+
- [ ] Each file has a TL;DR section
|
|
97
|
+
- [ ] Evidence grades applied to factual claims
|
|
98
|
+
- [ ] No [ASSUMPTION] tags where research could provide [SECONDARY] or better
|
|
99
|
+
|
|
100
|
+
---
|
|
101
|
+
|
|
102
|
+
## Common Mistakes
|
|
103
|
+
|
|
104
|
+
- Creating research files but forgetting to update the capability map in `memory/research.md`
|
|
105
|
+
- Forgetting to run `build-data.js` after updating files
|
|
106
|
+
- Using the entity name inconsistently (capitalisation, abbreviations) across files
|
|
107
|
+
- Missing the TL;DR section that `build-data.js` extracts
|
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: health-check
|
|
3
|
+
description: Run a project integrity audit. Checks for unreplaced placeholders, memory consistency, orphaned files, stale dashboard data, and structural issues. Reports a health score and actionable fixes.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Health Check
|
|
7
|
+
|
|
8
|
+
Run a comprehensive integrity audit of the project. Use this after onboarding, periodically during a project, or when something feels off.
|
|
9
|
+
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## Check 1 — Unreplaced Placeholders
|
|
13
|
+
|
|
14
|
+
```bash
|
|
15
|
+
grep -rn "{{" --include="*.md" --include="*.js" --include="*.html" --include="*.json" . | grep -v node_modules | grep -v '\.git/'
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
- If any `{{PLACEHOLDER}}` tokens remain → **FAIL**
|
|
19
|
+
- List each unreplaced placeholder with file path and line number
|
|
20
|
+
- Severity: **Critical** (project not fully onboarded)
|
|
21
|
+
|
|
22
|
+
## Check 2 — Project Configuration
|
|
23
|
+
|
|
24
|
+
Check if `project.config.json` exists at the repo root.
|
|
25
|
+
- If missing → **WARN** (older template version or onboarding incomplete)
|
|
26
|
+
- If present → validate all required fields are non-empty: projectName, projectSlug, entityType, modules
|
|
27
|
+
|
|
28
|
+
## Check 3 — Memory File Consistency
|
|
29
|
+
|
|
30
|
+
### 3a. Kill Conditions Alignment
|
|
31
|
+
Compare kill conditions across all locations where they appear:
|
|
32
|
+
- `memory/MEMORY.md` Kill Conditions table
|
|
33
|
+
- `docs/executive-summary.md` §6
|
|
34
|
+
- `memory/discovery.md` Kill Condition Tracker (if discovery module active)
|
|
35
|
+
- `context/project-state.md`
|
|
36
|
+
|
|
37
|
+
All four should have the same kill conditions with consistent statuses.
|
|
38
|
+
- Mismatches → **FAIL** with details of which files disagree
|
|
39
|
+
|
|
40
|
+
### 3b. Entity Capability Map vs Research Files
|
|
41
|
+
Read `memory/research.md` Entity Capability Map. For each entity listed:
|
|
42
|
+
- Check that research files exist in `research/competitors/{slug}-*.md`
|
|
43
|
+
- Entities in the map with no research files → **WARN**
|
|
44
|
+
- Research files that exist but aren't in the capability map → **WARN**
|
|
45
|
+
|
|
46
|
+
### 3c. MEMORY.md Size
|
|
47
|
+
Count lines in `memory/MEMORY.md`.
|
|
48
|
+
- Over 200 lines → **WARN** (context loading will be slow, needs pruning)
|
|
49
|
+
- Over 300 lines → **FAIL** (must prune)
|
|
50
|
+
|
|
51
|
+
## Check 4 — Dashboard Data Freshness
|
|
52
|
+
|
|
53
|
+
Compare timestamps:
|
|
54
|
+
```bash
|
|
55
|
+
# Source file last modified (BSD/macOS `stat` syntax; on GNU/Linux use `stat -c %Y`)
stat -f %m memory/research.md
stat -f %m docs/output/status-blurb.md
stat -f %m docs/executive-summary.md

# Dashboard JSON last modified
stat -f %m dashboard/data/overview.json
stat -f %m dashboard/data/competitors.json
stat -f %m dashboard/data/entities.json
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
- If any source file is newer than its corresponding JSON → **WARN** (dashboard is stale, run `cd dashboard && node build-data.js`)
|
|
67
|
+
|
|
68
|
+
## Check 5 — File Structure
|
|
69
|
+
|
|
70
|
+
### 5a. Orphaned Files
|
|
71
|
+
Check for files in the repo root that should be in a subdirectory:
|
|
72
|
+
- `.md` files that aren't README.md, CLAUDE.md, STATUS.md, CHANGELOG.md, CONTRIBUTING.md, LICENSE → **WARN**
|
|
73
|
+
- `.csv` or `.json` files in root → **WARN**
|
|
74
|
+
|
|
75
|
+
### 5b. Versioned Files
|
|
76
|
+
```bash
|
|
77
|
+
find . -name "*_v[0-9]*" -o -name "*_old*" -o -name "*_backup*" -o -name "*_new*" | grep -v node_modules | grep -v .git
|
|
78
|
+
```
|
|
79
|
+
- Any versioned files found → **WARN** (keep only latest, rename clean, delete outdated)
|
|
80
|
+
|
|
81
|
+
### 5c. Empty Directories
|
|
82
|
+
Check that key directories have content (not just `.gitkeep`):
|
|
83
|
+
- After onboarding: `research/competitors/`, `data/` should have files within a few sessions
|
|
84
|
+
- Flag directories that are still empty after the project has been active for multiple sessions
|
|
85
|
+
|
|
86
|
+
## Check 6 — Template Usage
|
|
87
|
+
|
|
88
|
+
For each call notes file in `discovery/calls/`:
|
|
89
|
+
- Check it follows the structure in `templates/call-notes.md` (has Key Takeaways, Pain Points, WTP Signals sections)
|
|
90
|
+
- Missing sections → **WARN**
|
|
91
|
+
|
|
92
|
+
For each competitor file in `research/competitors/`:
|
|
93
|
+
- Check it has a TL;DR section and evidence grades
|
|
94
|
+
- Missing → **WARN**
|
|
95
|
+
|
|
96
|
+
## Check 7 — Evidence Grading Coverage
|
|
97
|
+
|
|
98
|
+
Scan all files in `research/` and `discovery/calls/` for factual claims without evidence grades:
|
|
99
|
+
- Count claims tagged [CONFIRMED], [SECONDARY], [INFERENCE], [ASSUMPTION]
|
|
100
|
+
- Count untagged factual statements (heuristic: sentences with numbers, dates, or company names)
|
|
101
|
+
- Report the ratio: `{tagged} / {tagged + untagged}` as evidence coverage percentage
|
|
102
|
+
- Below 70% → **WARN**
|
|
103
|
+
|
|
104
|
+
## Check 8 — Module Consistency
|
|
105
|
+
|
|
106
|
+
If `project.config.json` exists, check module configuration:
|
|
107
|
+
- If `discovery: false` → verify `discovery/` directory doesn't contain working files (only `.gitkeep`)
|
|
108
|
+
- If `dashboard: false` → verify `dashboard/` was removed
|
|
109
|
+
- If `pipeline: false` → verify pipeline-related skills are removed
|
|
110
|
+
|
|
111
|
+
---
|
|
112
|
+
|
|
113
|
+
## Health Score
|
|
114
|
+
|
|
115
|
+
Calculate and report:
|
|
116
|
+
|
|
117
|
+
| Rating | Criteria |
|
|
118
|
+
|--------|----------|
|
|
119
|
+
| **Healthy** (100%) | Zero FAILs, zero WARNs |
|
|
120
|
+
| **Good** (80–99%) | Zero FAILs, some WARNs |
|
|
121
|
+
| **Needs Attention** (50–79%) | 1–2 FAILs or many WARNs |
|
|
122
|
+
| **Unhealthy** (<50%) | 3+ FAILs |
|
|
123
|
+
|
|
124
|
+
## Output Format
|
|
125
|
+
|
|
126
|
+
```
|
|
127
|
+
## Project Health Report — {date}
|
|
128
|
+
|
|
129
|
+
**Score**: {Healthy/Good/Needs Attention/Unhealthy} ({percentage}%)
|
|
130
|
+
|
|
131
|
+
### Critical Issues (must fix)
|
|
132
|
+
- {issue description} — {file path}
|
|
133
|
+
|
|
134
|
+
### Warnings (should fix)
|
|
135
|
+
- {issue description} — {file path}
|
|
136
|
+
|
|
137
|
+
### Passed
|
|
138
|
+
- {check name} ✓
|
|
139
|
+
|
|
140
|
+
### Recommended Actions
|
|
141
|
+
1. {most important fix}
|
|
142
|
+
2. {second most important}
|
|
143
|
+
3. ...
|
|
144
|
+
```
|