@shipfast-ai/shipfast 0.6.1 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +262 -124
- package/agents/architect.md +130 -54
- package/agents/builder.md +132 -125
- package/agents/critic.md +85 -88
- package/agents/scout.md +83 -59
- package/agents/scribe.md +62 -76
- package/bin/install.js +168 -12
- package/brain/index.cjs +2 -1
- package/brain/indexer.cjs +12 -1
- package/brain/schema.sql +27 -0
- package/commands/sf/check-plan.md +76 -0
- package/commands/sf/do.md +53 -19
- package/commands/sf/help.md +30 -22
- package/commands/sf/map.md +84 -0
- package/commands/sf/plan.md +106 -0
- package/commands/sf/project.md +16 -0
- package/commands/sf/verify.md +140 -0
- package/commands/sf/workstream.md +51 -0
- package/core/architecture.cjs +272 -0
- package/core/verify.cjs +130 -1
- package/hooks/sf-first-run.js +1 -1
- package/hooks/sf-prompt-guard.js +59 -0
- package/mcp/server.cjs +233 -5
- package/package.json +2 -2
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: sf:plan
|
|
3
|
+
description: "Research and plan a phase. Scout gathers findings, Architect creates task list. Stores tasks in brain.db."
|
|
4
|
+
argument-hint: "<describe what to build>"
|
|
5
|
+
allowed-tools:
|
|
6
|
+
- Read
|
|
7
|
+
- Bash
|
|
8
|
+
- Glob
|
|
9
|
+
- Grep
|
|
10
|
+
- Agent
|
|
11
|
+
- AskUserQuestion
|
|
12
|
+
---
|
|
13
|
+
|
|
14
|
+
<objective>
|
|
15
|
+
Dedicated planning command. Produces a precise task list stored in brain.db.
|
|
16
|
+
Does NOT execute — that's /sf-do's job.
|
|
17
|
+
|
|
18
|
+
Separation matters: planning uses different context than execution.
|
|
19
|
+
Fresh context for each phase = no degradation.
|
|
20
|
+
</objective>
|
|
21
|
+
|
|
22
|
+
<process>
|
|
23
|
+
|
|
24
|
+
## Step 1: Analyze
|
|
25
|
+
|
|
26
|
+
Classify intent and complexity (same as /sf-do Step 1):
|
|
27
|
+
- fix/feature/refactor/test/ship/perf/security/style/data/remove
|
|
28
|
+
- trivial/medium/complex
|
|
29
|
+
|
|
30
|
+
If trivial: skip planning. Tell user to run `/sf-do` directly.
|
|
31
|
+
|
|
32
|
+
## Step 2: Scout (fresh agent)
|
|
33
|
+
|
|
34
|
+
Launch sf-scout agent to research the task:
|
|
35
|
+
- Provide: task description + brain.db context (decisions, learnings, hot files)
|
|
36
|
+
- Scout returns: files, functions, consumers, conventions, risks, recommendation
|
|
37
|
+
- Scout tags findings with confidence: [VERIFIED], [CITED], [ASSUMED]
|
|
38
|
+
|
|
39
|
+
**Scout runs in its own agent = fresh context, no pollution.**
|
|
40
|
+
|
|
41
|
+
Wait for Scout to complete before proceeding.
|
|
42
|
+
|
|
43
|
+
## Step 3: Discuss (if complex or ambiguous)
|
|
44
|
+
|
|
45
|
+
Check for ambiguity (rule-based, zero tokens):
|
|
46
|
+
- WHERE: no file paths mentioned
|
|
47
|
+
- WHAT: no behavior described
|
|
48
|
+
- HOW: multiple approaches possible
|
|
49
|
+
- RISK: touches auth/payment/data
|
|
50
|
+
- SCOPE: >30 words with conjunctions
|
|
51
|
+
|
|
52
|
+
If ambiguous: ask 2-5 targeted questions. Store answers as locked decisions in brain.db:
|
|
53
|
+
```bash
|
|
54
|
+
sqlite3 .shipfast/brain.db "INSERT INTO decisions (question, decision, reasoning, phase) VALUES ('[Q]', '[A]', '[why]', '[phase]');"
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
## Step 4: Architect (fresh agent)
|
|
58
|
+
|
|
59
|
+
Launch sf-architect agent to create task list:
|
|
60
|
+
- Provide: task description + Scout findings + locked decisions from brain.db
|
|
61
|
+
- Architect returns: must-haves (truths/artifacts/links) + ordered task list
|
|
62
|
+
|
|
63
|
+
**Architect runs in its own agent = fresh context, no pollution.**
|
|
64
|
+
|
|
65
|
+
Architect's output must include for EACH task:
|
|
66
|
+
- Exact file paths
|
|
67
|
+
- Consumer list (who uses what's being changed)
|
|
68
|
+
- Specific action instructions
|
|
69
|
+
- Verify command
|
|
70
|
+
- Measurable done criteria
|
|
71
|
+
|
|
72
|
+
## Step 5: Store tasks in brain.db
|
|
73
|
+
|
|
74
|
+
For each task from Architect, store in brain.db:
|
|
75
|
+
```bash
|
|
76
|
+
sqlite3 .shipfast/brain.db "INSERT INTO tasks (id, phase, description, plan_text, status) VALUES ('[id]', '[phase]', '[description]', '[full task details]', 'pending');"
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
Also store must-haves:
|
|
80
|
+
```bash
|
|
81
|
+
sqlite3 .shipfast/brain.db "INSERT OR REPLACE INTO context (id, scope, key, value, version, updated_at) VALUES ('phase:[name]:must_haves', 'phase', 'must_haves:[name]', '[JSON must-haves]', 1, strftime('%s', 'now'));"
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
## Step 6: Report
|
|
85
|
+
|
|
86
|
+
```
|
|
87
|
+
Plan ready: [N] tasks stored in brain.db
|
|
88
|
+
|
|
89
|
+
Must-haves:
|
|
90
|
+
Truths: [list]
|
|
91
|
+
Artifacts: [list]
|
|
92
|
+
Key links: [list]
|
|
93
|
+
|
|
94
|
+
Tasks:
|
|
95
|
+
1. [description] — [files] — [size]
|
|
96
|
+
2. [description] — [files] — [size]
|
|
97
|
+
...
|
|
98
|
+
|
|
99
|
+
Run /sf-do to execute. Tasks will run with fresh context per task.
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
</process>
|
|
103
|
+
|
|
104
|
+
<context>
|
|
105
|
+
$ARGUMENTS
|
|
106
|
+
</context>
|
package/commands/sf/project.md
CHANGED
|
@@ -30,6 +30,22 @@ Read the user's project description. If brain.db exists, load:
|
|
|
30
30
|
|
|
31
31
|
If the project description is ambiguous, run the ambiguity detection from /sf-discuss first.
|
|
32
32
|
|
|
33
|
+
## Step 1.5: Parallel Domain Research (for new/complex projects)
|
|
34
|
+
|
|
35
|
+
If the project involves unfamiliar technology or external integrations, launch **up to 4 Scout agents in parallel** to research:
|
|
36
|
+
|
|
37
|
+
1. **Stack Scout** — What's the standard stack for this domain? Libraries, versions, frameworks.
|
|
38
|
+
2. **Architecture Scout** — How are similar systems typically structured? Patterns, tiers, boundaries.
|
|
39
|
+
3. **Pitfalls Scout** — What do projects like this commonly get wrong? Gotchas, anti-patterns.
|
|
40
|
+
4. **Integration Scout** — What external services/APIs are needed? Auth, webhooks, SDKs.
|
|
41
|
+
|
|
42
|
+
Each Scout runs in its own context. Findings are stored in brain.db:
|
|
43
|
+
```bash
|
|
44
|
+
sqlite3 .shipfast/brain.db "INSERT OR REPLACE INTO context (id, scope, key, value, version, updated_at) VALUES ('project:research:[topic]', 'project', 'research:[topic]', '[findings JSON]', 1, strftime('%s', 'now'));"
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
Skip this step for simple projects or projects where brain.db already has relevant decisions.
|
|
48
|
+
|
|
33
49
|
### Multi-Repo Detection
|
|
34
50
|
Check if the workspace contains multiple git repositories (submodules, monorepo packages):
|
|
35
51
|
```bash
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: sf:verify
|
|
3
|
+
description: "Verify completed work against must-haves. Checks artifacts, data flow, stubs, build, consumers."
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- Read
|
|
6
|
+
- Bash
|
|
7
|
+
- Glob
|
|
8
|
+
- Grep
|
|
9
|
+
- AskUserQuestion
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
<objective>
|
|
13
|
+
Dedicated verification command. Runs AFTER /sf-do completes.
|
|
14
|
+
Checks the codebase delivers what was planned — not just "tests pass".
|
|
15
|
+
|
|
16
|
+
Separation matters: verification needs fresh context to see the code objectively,
|
|
17
|
+
without the biases accumulated during execution.
|
|
18
|
+
</objective>
|
|
19
|
+
|
|
20
|
+
<process>
|
|
21
|
+
|
|
22
|
+
## Step 1: Load must-haves from brain.db
|
|
23
|
+
|
|
24
|
+
```bash
|
|
25
|
+
sqlite3 -json .shipfast/brain.db "SELECT value FROM context WHERE key LIKE 'must_haves:%' ORDER BY updated_at DESC LIMIT 1;" 2>/dev/null
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
If no must-haves stored, extract from the task descriptions:
|
|
29
|
+
```bash
|
|
30
|
+
sqlite3 -json .shipfast/brain.db "SELECT description FROM tasks WHERE status = 'passed' ORDER BY created_at;" 2>/dev/null
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## Step 2: Check observable truths
|
|
34
|
+
|
|
35
|
+
For each truth in must-haves, verify:
|
|
36
|
+
- Does the code actually implement this?
|
|
37
|
+
- Grep for the function/component/route that delivers it
|
|
38
|
+
- Is it wired (imported and used), not just existing?
|
|
39
|
+
|
|
40
|
+
Score: VERIFIED / FAILED / NEEDS_HUMAN
|
|
41
|
+
|
|
42
|
+
## Step 3: 3-Level artifact validation
|
|
43
|
+
|
|
44
|
+
For each artifact in must-haves:
|
|
45
|
+
|
|
46
|
+
**Level 1 — Exists**: `[ -f path ] && echo OK || echo MISSING`
|
|
47
|
+
**Level 2 — Substantive**: File has >3 non-comment lines (not empty/stub)
|
|
48
|
+
**Level 3 — Wired**: `grep -r "basename" --include="*.ts" .` shows imports from other files
|
|
49
|
+
|
|
50
|
+
Score per artifact: L1/L2/L3 or MISSING
|
|
51
|
+
|
|
52
|
+
## Step 4: Data flow check
|
|
53
|
+
|
|
54
|
+
For new components/APIs, check they receive real data:
|
|
55
|
+
- Not hardcoded empty arrays: `grep "data: \[\]" [file]`
|
|
56
|
+
- Not returning null: `grep "return null" [file]`
|
|
57
|
+
- Not empty handlers: `grep "() => {}" [file]`
|
|
58
|
+
- Fetch calls have response handling
|
|
59
|
+
|
|
60
|
+
## Step 5: Stub detection (deep)
|
|
61
|
+
|
|
62
|
+
Scan all files changed in this session:
|
|
63
|
+
```bash
|
|
64
|
+
git diff --name-only HEAD~N   # N = number of commits made in this session
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
Check each for:
|
|
68
|
+
- TODO, FIXME, HACK, "not implemented", "placeholder"
|
|
69
|
+
- Empty click/submit handlers
|
|
70
|
+
- console.log debug statements
|
|
71
|
+
- debugger statements
|
|
72
|
+
- Commented-out code blocks
|
|
73
|
+
|
|
74
|
+
## Step 6: Build verification
|
|
75
|
+
|
|
76
|
+
```bash
|
|
77
|
+
npm run build 2>&1 | tail -5
|
|
78
|
+
# or: tsc --noEmit
|
|
79
|
+
# or: cargo check
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
## Step 7: Consumer integrity
|
|
83
|
+
|
|
84
|
+
For every function/type/export that was modified or removed:
|
|
85
|
+
```bash
|
|
86
|
+
grep -r "removed_function_name" --include="*.ts" --include="*.tsx" .
|
|
87
|
+
```
|
|
88
|
+
Any remaining consumers = CRITICAL failure.
|
|
89
|
+
|
|
90
|
+
## Step 8: Score and report
|
|
91
|
+
|
|
92
|
+
```
|
|
93
|
+
Verification Results
|
|
94
|
+
====================
|
|
95
|
+
|
|
96
|
+
Truths: [N]/[M] verified
|
|
97
|
+
Artifacts: [N]/[M] at Level 3 (wired)
|
|
98
|
+
Data flow: [PASS/ISSUES]
|
|
99
|
+
Stubs: [N] found
|
|
100
|
+
Build: [PASS/FAIL]
|
|
101
|
+
Consumers: [CLEAN/BROKEN]
|
|
102
|
+
|
|
103
|
+
Verdict: PASS | PASS_WITH_WARNINGS | FAIL
|
|
104
|
+
|
|
105
|
+
[If FAIL:]
|
|
106
|
+
Failed items:
|
|
107
|
+
- [truth/artifact]: [what's wrong]
|
|
108
|
+
- [truth/artifact]: [what's wrong]
|
|
109
|
+
|
|
110
|
+
Fix with: /sf-do [fix description]
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
## Step 9: Store results
|
|
114
|
+
|
|
115
|
+
```bash
|
|
116
|
+
sqlite3 .shipfast/brain.db "INSERT OR REPLACE INTO context (id, scope, key, value, version, updated_at) VALUES ('verify:latest', 'session', 'verification', '[JSON results]', 1, strftime('%s', 'now'));"
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
## Step 10: Interactive UAT (if complex)
|
|
120
|
+
|
|
121
|
+
For complex features, offer manual testing:
|
|
122
|
+
```
|
|
123
|
+
Manual checks (answer pass/issue/skip):
|
|
124
|
+
|
|
125
|
+
Test 1: [what to test]
|
|
126
|
+
Expected: [behavior]
|
|
127
|
+
Result?
|
|
128
|
+
|
|
129
|
+
Test 2: [what to test]
|
|
130
|
+
Expected: [behavior]
|
|
131
|
+
Result?
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
For each issue reported, generate a fix task and store in brain.db.
|
|
135
|
+
|
|
136
|
+
</process>
|
|
137
|
+
|
|
138
|
+
<context>
|
|
139
|
+
$ARGUMENTS
|
|
140
|
+
</context>
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: sf:workstream
|
|
3
|
+
description: "Manage parallel workstreams — create, list, switch, complete."
|
|
4
|
+
argument-hint: "list | create <name> | switch <name> | complete <name>"
|
|
5
|
+
allowed-tools:
|
|
6
|
+
- Bash
|
|
7
|
+
- AskUserQuestion
|
|
8
|
+
---
|
|
9
|
+
|
|
10
|
+
<objective>
|
|
11
|
+
Workstreams let you work on multiple features in parallel, each with its own branch and task tracking.
|
|
12
|
+
Each workstream gets a namespaced set of tasks in brain.db.
|
|
13
|
+
</objective>
|
|
14
|
+
|
|
15
|
+
<process>
|
|
16
|
+
|
|
17
|
+
## Parse subcommand from $ARGUMENTS
|
|
18
|
+
|
|
19
|
+
### list
|
|
20
|
+
```bash
|
|
21
|
+
sqlite3 -json .shipfast/brain.db "SELECT key, value FROM context WHERE scope = 'workstream' ORDER BY updated_at DESC;" 2>/dev/null
|
|
22
|
+
git branch --list "sf/*" 2>/dev/null
|
|
23
|
+
```
|
|
24
|
+
Show all workstreams with status (active/complete) and branch name.
|
|
25
|
+
|
|
26
|
+
### create `<name>`
|
|
27
|
+
1. Create git branch: `git checkout -b sf/[name]`
|
|
28
|
+
2. Store in brain.db:
|
|
29
|
+
```bash
|
|
30
|
+
sqlite3 .shipfast/brain.db "INSERT OR REPLACE INTO context (id, scope, key, value, version, updated_at) VALUES ('workstream:[name]', 'workstream', '[name]', '{\"status\":\"active\",\"branch\":\"sf/[name]\",\"created\":\"[timestamp]\"}', 1, strftime('%s', 'now'));"
|
|
31
|
+
```
|
|
32
|
+
3. Report: `Workstream [name] created on branch sf/[name]`
|
|
33
|
+
|
|
34
|
+
### switch `<name>`
|
|
35
|
+
1. `git checkout sf/[name]`
|
|
36
|
+
2. Report: `Switched to workstream [name]`
|
|
37
|
+
|
|
38
|
+
### complete `<name>`
|
|
39
|
+
1. Ask: "Merge sf/[name] into current branch? [y/n]"
|
|
40
|
+
2. If yes: `git merge sf/[name]` then `git branch -d sf/[name]`
|
|
41
|
+
3. Update brain.db:
|
|
42
|
+
```bash
|
|
43
|
+
sqlite3 .shipfast/brain.db "UPDATE context SET value = replace(value, 'active', 'complete') WHERE id = 'workstream:[name]';"
|
|
44
|
+
```
|
|
45
|
+
4. Report: `Workstream [name] completed and merged.`
|
|
46
|
+
|
|
47
|
+
</process>
|
|
48
|
+
|
|
49
|
+
<context>
|
|
50
|
+
$ARGUMENTS
|
|
51
|
+
</context>
|
|
@@ -0,0 +1,272 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* ShipFast Architecture Layer Computation
|
|
3
|
+
*
|
|
4
|
+
* Derives architecture layers purely from the import graph + directory structure.
|
|
5
|
+
* ZERO hardcoded patterns — works with any project, any language, any structure.
|
|
6
|
+
*
|
|
7
|
+
* How it works:
|
|
8
|
+
* 1. Build import graph from brain.db edges
|
|
9
|
+
* 2. Compute layer from graph depth (L0 = nothing imports it, LN = imports nothing)
|
|
10
|
+
* 3. Index directory tree as folders with aggregated stats
|
|
11
|
+
* 4. Detect folder roles from content (most exports = util, most imports = entry)
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
'use strict';
|
|
15
|
+
|
|
16
|
+
const { execFileSync: safeRun } = require('child_process');
|
|
17
|
+
const path = require('path');
|
|
18
|
+
const brain = require('../brain/index.cjs');
|
|
19
|
+
|
|
20
|
+
// ============================================================
|
|
21
|
+
// Ensure architecture table exists
|
|
22
|
+
// ============================================================
|
|
23
|
+
|
|
24
|
+
/**
 * (Re)create the `architecture` and `folders` tables plus their indexes.
 * Any previous versions are dropped first, so every computation run starts
 * from a clean schema. Executed by piping DDL into the sqlite3 CLI.
 * @param {string} cwd - project root used to locate brain.db
 */
function ensureTable(cwd) {
  const dbPath = brain.getBrainPath(cwd);
  const ddl = [
    "DROP TABLE IF EXISTS architecture;",
    "DROP TABLE IF EXISTS folders;",
    "CREATE TABLE IF NOT EXISTS architecture (file_path TEXT PRIMARY KEY, layer INTEGER NOT NULL, folder TEXT, imports_count INTEGER DEFAULT 0, imported_by_count INTEGER DEFAULT 0, updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')));",
    "CREATE TABLE IF NOT EXISTS folders (folder_path TEXT PRIMARY KEY, file_count INTEGER DEFAULT 0, total_imports INTEGER DEFAULT 0, total_imported_by INTEGER DEFAULT 0, avg_layer REAL, role TEXT, updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')));",
    "CREATE INDEX IF NOT EXISTS idx_arch_layer ON architecture(layer);",
    "CREATE INDEX IF NOT EXISTS idx_arch_folder ON architecture(folder);",
    "CREATE INDEX IF NOT EXISTS idx_folders_role ON folders(role);",
  ].join('\n');
  safeRun('sqlite3', [dbPath], { input: ddl, stdio: ['pipe', 'pipe', 'pipe'] });
}
|
|
39
|
+
|
|
40
|
+
// ============================================================
|
|
41
|
+
// Compute layers from import graph (zero hardcoding)
|
|
42
|
+
// ============================================================
|
|
43
|
+
|
|
44
|
+
/**
 * Compute architecture layers for every file node in brain.db.
 *
 * Layers are derived purely from the import graph: files that nothing
 * imports are entry points (layer 0), and depth increases along import
 * edges (BFS). Unreachable files get a degree-based fallback layer, and
 * layers are then normalized to consecutive 0-based values. Folder-level
 * stats and roles are aggregated from the per-file results, and both
 * tables are rewritten in a single sqlite3 transaction.
 *
 * @param {string} cwd - project root used to locate brain.db
 * @returns {{computed: number, folders?: number}} counts of files/folders processed
 */
function computeArchitecture(cwd) {
  if (!brain.brainExists(cwd)) return { computed: 0 };

  ensureTable(cwd);

  // Get all file nodes
  const files = brain.query(cwd, "SELECT file_path FROM nodes WHERE kind = 'file'");
  if (!files.length) return { computed: 0 };

  // Get all import edges
  const edges = brain.query(cwd, "SELECT source, target FROM edges WHERE kind = 'imports'");

  // Build adjacency maps
  const outbound = {}; // file → what it imports
  const inbound = {};  // file → who imports it
  const allFiles = new Set();

  for (const { file_path } of files) {
    allFiles.add(file_path);
    outbound[file_path] = [];
    inbound[file_path] = [];
  }

  // Build basename→filepath lookup for fuzzy matching unresolved imports
  const basenameMap = {};
  for (const f of allFiles) {
    const base = f.split('/').pop().replace(/\.(ts|tsx|js|jsx|mjs|cjs|rs|py)$/, '');
    if (!basenameMap[base]) basenameMap[base] = [];
    basenameMap[base].push(f);
  }

  /**
   * Resolve an import edge target to a known file path: exact match first,
   * then common extension / index-file suffixes, then (last resort) a
   * unique basename match within the importer's top-level project dir.
   * Returns null when no unambiguous resolution exists.
   */
  function resolveTarget(src, tgt) {
    // Direct match
    if (allFiles.has(tgt)) return tgt;
    // Try with common extensions
    for (const ext of ['.ts', '.tsx', '.js', '.jsx', '/index.ts', '/index.tsx', '/index.js']) {
      if (allFiles.has(tgt + ext)) return tgt + ext;
    }
    // Fuzzy: match basename in same project
    const base = tgt.split('/').pop().replace(/\.(ts|tsx|js|jsx)$/, '');
    const srcProject = src.split('/')[0]; // e.g., 'desktop-app'
    const candidates = (basenameMap[base] || []).filter(f => f.startsWith(srcProject));
    if (candidates.length === 1) return candidates[0];
    return null;
  }

  for (const edge of edges) {
    const src = edge.source.replace('file:', '');
    const tgt = edge.target.replace('file:', '');
    const resolved = resolveTarget(src, tgt);
    if (allFiles.has(src) && resolved) {
      outbound[src].push(resolved);
      inbound[resolved].push(src);
    }
  }

  // Compute layer: BFS from entry points (files with zero importers)
  const layers = {};
  const entryPoints = [...allFiles].filter(f => inbound[f].length === 0);

  // BFS with a head index instead of Array.shift(): shift() is O(n) per
  // dequeue, which made the original traversal O(n²) on large graphs.
  const queue = entryPoints.map(f => ({ file: f, depth: 0 }));
  const visited = new Set();
  let head = 0;

  while (head < queue.length) {
    const { file, depth } = queue[head++];
    if (visited.has(file)) continue;
    visited.add(file);
    layers[file] = depth;

    for (const dep of (outbound[file] || [])) {
      if (!visited.has(dep)) {
        queue.push({ file: dep, depth: depth + 1 });
      }
    }
  }

  // Files not reachable from entry points get layer based on their own import count
  for (const f of allFiles) {
    if (!(f in layers)) {
      // Isolated file — assign layer based on outbound/inbound ratio
      const out = (outbound[f] || []).length;
      const inc = (inbound[f] || []).length;
      if (inc === 0 && out === 0) layers[f] = 0; // standalone
      else if (inc === 0) layers[f] = 0; // entry-like
      else if (out === 0) layers[f] = 99; // leaf (type/constant)
      else layers[f] = Math.round(out / (inc + 1)); // ratio-based
    }
  }

  // Normalize layers to 0-based consecutive
  const uniqueLayers = [...new Set(Object.values(layers))].sort((a, b) => a - b);
  const layerMap = {};
  uniqueLayers.forEach((l, i) => layerMap[l] = i);
  for (const f of allFiles) {
    // ?? (not ||): a mapped index of 0 is a legitimate value, so only a
    // genuinely missing mapping should fall back to 0.
    layers[f] = layerMap[layers[f]] ?? 0;
  }

  // Extract folder from file path ('.' for files at the repo root)
  function getFolder(filePath) {
    const parts = filePath.split('/');
    return parts.length > 1 ? parts.slice(0, -1).join('/') : '.';
  }

  // Aggregate folder stats
  const folderStats = {};
  for (const f of allFiles) {
    const folder = getFolder(f);
    if (!folderStats[folder]) {
      folderStats[folder] = { count: 0, totalImports: 0, totalImportedBy: 0, layerSum: 0 };
    }
    folderStats[folder].count++;
    folderStats[folder].totalImports += (outbound[f] || []).length;
    folderStats[folder].totalImportedBy += (inbound[f] || []).length;
    folderStats[folder].layerSum += layers[f];
  }

  // Detect folder role from stats (auto-derived, not hardcoded)
  function detectRole(stats) {
    const avgLayer = stats.layerSum / stats.count;
    const importRatio = stats.totalImports / Math.max(stats.totalImportedBy, 1);

    if (stats.totalImportedBy === 0 && stats.totalImports > 0) return 'entry';
    if (stats.totalImports === 0 && stats.totalImportedBy > 0) return 'leaf';
    if (importRatio < 0.3) return 'shared'; // heavily imported by others = shared/util
    if (importRatio > 3) return 'consumer'; // imports many, few import it = consumer/page
    if (avgLayer < 1) return 'top';
    if (avgLayer > 4) return 'foundation';
    return 'middle';
  }

  // Build SQL statements. esc doubles single quotes for SQL string
  // literals; hoisted here — the original re-created the closure on
  // every iteration of both insert loops.
  const esc = s => s.replace(/'/g, "''");
  const dbPath = brain.getBrainPath(cwd);
  const statements = ['BEGIN TRANSACTION;', 'DELETE FROM architecture;', 'DELETE FROM folders;'];

  for (const f of allFiles) {
    const folder = getFolder(f);
    const layer = layers[f];
    const importsCount = (outbound[f] || []).length;
    const importedByCount = (inbound[f] || []).length;

    statements.push(
      `INSERT OR REPLACE INTO architecture (file_path, layer, folder, imports_count, imported_by_count, updated_at) ` +
      `VALUES ('${esc(f)}', ${layer}, '${esc(folder)}', ${importsCount}, ${importedByCount}, strftime('%s', 'now'));`
    );
  }

  for (const [folder, stats] of Object.entries(folderStats)) {
    const role = detectRole(stats);
    const avgLayer = (stats.layerSum / stats.count).toFixed(1);

    statements.push(
      `INSERT OR REPLACE INTO folders (folder_path, file_count, total_imports, total_imported_by, avg_layer, role, updated_at) ` +
      `VALUES ('${esc(folder)}', ${stats.count}, ${stats.totalImports}, ${stats.totalImportedBy}, ${avgLayer}, '${role}', strftime('%s', 'now'));`
    );
  }

  statements.push('COMMIT;');
  safeRun('sqlite3', [dbPath], { input: statements.join('\n'), stdio: ['pipe', 'pipe', 'pipe'] });

  return { computed: files.length, folders: Object.keys(folderStats).length };
}
|
|
208
|
+
|
|
209
|
+
// ============================================================
|
|
210
|
+
// Query helpers
|
|
211
|
+
// ============================================================
|
|
212
|
+
|
|
213
|
+
/**
 * Per-layer rollup: file count plus summed import/consumer edge counts,
 * one row per layer in ascending layer order.
 * @param {string} cwd - project root used to locate brain.db
 * @returns {Array<object>} query result rows
 */
function getLayerSummary(cwd) {
  const sql =
    "SELECT layer, COUNT(*) as files, SUM(imports_count) as total_imports, SUM(imported_by_count) as total_consumers " +
    "FROM architecture GROUP BY layer ORDER BY layer";
  return brain.query(cwd, sql);
}
|
|
219
|
+
|
|
220
|
+
/**
 * List folders with their aggregated stats and detected role,
 * ordered by average layer (top of the architecture first), capped at 40.
 * @param {string} cwd - project root used to locate brain.db
 * @returns {Array<object>} query result rows
 */
function getFolderRoles(cwd) {
  const sql =
    "SELECT folder_path, file_count, total_imports, total_imported_by, avg_layer, role " +
    "FROM folders ORDER BY avg_layer, folder_path LIMIT 40";
  return brain.query(cwd, sql);
}
|
|
226
|
+
|
|
227
|
+
/**
 * Look up architecture rows whose path contains the given fragment,
 * joined with the owning folder's role (substring LIKE match, max 5 rows).
 * @param {string} cwd - project root used to locate brain.db
 * @param {string} filePath - path fragment to match (escaped via brain.esc)
 * @returns {Array<object>} matching rows with a folder_role column
 */
function getFileLayer(cwd, filePath) {
  const sql =
    `SELECT a.*, f.role as folder_role FROM architecture a LEFT JOIN folders f ON a.folder = f.folder_path ` +
    `WHERE a.file_path LIKE '%${brain.esc(filePath)}%' LIMIT 5`;
  return brain.query(cwd, sql);
}
|
|
233
|
+
|
|
234
|
+
/**
 * Data-flow view for one file: the file's own architecture row plus its
 * upstream importers and downstream imports (max 10 each), ordered by layer.
 * @param {string} cwd - project root used to locate brain.db
 * @param {string} filePath - path fragment identifying the file
 * @returns {{file: object, upstream: Array, downstream: Array}|{error: string}}
 */
function getDataFlow(cwd, filePath) {
  const matches = brain.query(cwd,
    `SELECT * FROM architecture WHERE file_path LIKE '%${brain.esc(filePath)}%' LIMIT 1`
  );
  if (matches.length === 0) return { error: 'File not found' };

  // Who imports this file (edges whose target matches), shallowest first.
  const upstream = brain.query(cwd,
    `SELECT a.file_path, a.layer, a.folder FROM architecture a ` +
    `JOIN edges e ON ('file:' || a.file_path) = e.source ` +
    `WHERE e.target LIKE '%${brain.esc(filePath)}%' AND e.kind = 'imports' ` +
    `ORDER BY a.layer ASC LIMIT 10`
  );

  // What this file imports (edges whose source matches), deepest first.
  const downstream = brain.query(cwd,
    `SELECT a.file_path, a.layer, a.folder FROM architecture a ` +
    `JOIN edges e ON ('file:' || a.file_path) = e.target ` +
    `WHERE e.source LIKE '%${brain.esc(filePath)}%' AND e.kind = 'imports' ` +
    `ORDER BY a.layer DESC LIMIT 10`
  );

  return { file: matches[0], upstream, downstream };
}
|
|
256
|
+
|
|
257
|
+
/**
 * Files with the highest total edge count (imports + importers), i.e. the
 * most architecturally connected hot spots.
 * @param {string} cwd - project root used to locate brain.db
 * @param {number|string} [limit] - max rows; invalid/missing/0 falls back to 15
 * @returns {Array<object>} query result rows, most connected first
 */
function getMostConnected(cwd, limit) {
  // Explicit radix 10 (bare parseInt can misparse strings like '0x10');
  // `|| 15` keeps the original fallback for NaN and 0 alike.
  const rowLimit = Number.parseInt(limit, 10) || 15;
  return brain.query(cwd,
    `SELECT file_path, layer, folder, imports_count, imported_by_count, ` +
    `(imports_count + imported_by_count) as total FROM architecture ` +
    `ORDER BY total DESC LIMIT ${rowLimit}`
  );
}
|
|
264
|
+
|
|
265
|
+
// Public API: computeArchitecture (re)builds the derived tables; the
// remaining functions are read-only query helpers over those tables.
module.exports = {
  computeArchitecture,
  getLayerSummary,
  getFolderRoles,
  getFileLayer,
  getDataFlow,
  getMostConnected
};
|