soloship 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +252 -0
- package/bin/soloship.js +2 -0
- package/dist/artifacts.d.ts +83 -0
- package/dist/artifacts.js +241 -0
- package/dist/ci.d.ts +2 -0
- package/dist/ci.js +184 -0
- package/dist/cli.d.ts +1 -0
- package/dist/cli.js +63 -0
- package/dist/detect.d.ts +30 -0
- package/dist/detect.js +127 -0
- package/dist/doctor.d.ts +10 -0
- package/dist/doctor.js +205 -0
- package/dist/hooks.d.ts +2 -0
- package/dist/hooks.js +477 -0
- package/dist/init.d.ts +5 -0
- package/dist/init.js +94 -0
- package/dist/manifest.d.ts +63 -0
- package/dist/manifest.js +90 -0
- package/dist/pkg.d.ts +1 -0
- package/dist/pkg.js +9 -0
- package/dist/rollback.d.ts +12 -0
- package/dist/rollback.js +129 -0
- package/dist/rules.d.ts +1 -0
- package/dist/rules.js +119 -0
- package/dist/scaffold.d.ts +7 -0
- package/dist/scaffold.js +138 -0
- package/dist/templates.d.ts +5 -0
- package/dist/templates.js +175 -0
- package/dist/upgrade.d.ts +12 -0
- package/dist/upgrade.js +62 -0
- package/package.json +38 -0
package/dist/manifest.js
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
/**
 * Soloship dependency manifest.
 *
 * Declares the companion plugins, MCP servers, user-scope skills, and global
 * hooks that Soloship either REQUIRES (its skills will break without them) or
 * RECOMMENDS (non-coder workflow is materially improved by having them).
 *
 * The `doctor` command audits this list against the user's actual
 * `~/.claude/` environment and reports what's missing with install commands.
 *
 * Edit this file when Soloship adds or drops companion dependencies. The
 * manifest is the single source of truth for dependency information — do not
 * hard-code checks elsewhere.
 */
export const SOLOSHIP_MANIFEST = {
    // Claude Code plugins. For "required" entries, the Soloship skills listed
    // in `usedBy` depend on the plugin; `install` is the human-readable
    // remediation string that `doctor` prints when the plugin is missing.
    plugins: [
        {
            id: "superpowers",
            source: "superpowers-marketplace",
            severity: "required",
            purpose: "Brainstorming, plan-writing, debugging, and parallel-agent primitives.",
            usedBy: [
                "soloship-brainstorm",
                "soloship-plan",
                "soloship-debug",
                "soloship-implement",
            ],
            install: "Install 'superpowers' from the Claude Code plugin marketplace. Soloship routers will silently fail without it.",
        },
        {
            id: "compound-engineering",
            source: "every-marketplace",
            severity: "required",
            purpose: "Solution doc authoring, agent-native workflows, review orchestration.",
            usedBy: ["soloship-learn", "soloship-review"],
            install: "Install 'compound-engineering' from the Every marketplace in Claude Code.",
        },
    ],
    // MCP servers. Here `install` is a runnable `claude mcp add` command;
    // `notes` (when present) documents placeholders the user must substitute.
    mcpServers: [
        {
            name: "obsidian",
            severity: "recommended",
            purpose: "Cross-project read/search access to an Obsidian vault.",
            install: 'claude mcp add -s user obsidian -- npx -y obsidian-mcp "<VAULT_PATH>"',
            notes: "Replace <VAULT_PATH> with the absolute path to your vault (e.g., '/Users/you/Documents/vault').",
        },
        {
            name: "context7",
            severity: "recommended",
            purpose: "Up-to-date documentation for 9,000+ libraries injected into prompts.",
            install: "claude mcp add -s user context7 -- npx -y @upstash/context7-mcp",
        },
        {
            name: "chrome-devtools",
            severity: "recommended",
            purpose: "Headless browser automation for QA and testing flows.",
            install: "claude mcp add -s user chrome-devtools -- npx -y chrome-devtools-mcp@latest",
        },
    ],
    // User-scope skills expected under ~/.claude/skills. `install` here is
    // prose guidance (these skills have varied install paths), not a command.
    skills: [
        {
            name: "gstack",
            severity: "recommended",
            purpose: "Command bundle that provides office-hours, plan reviews (eng/CEO/design), QA, CSO, design-review, retro, and others. Soloship skills delegate to gstack for review and QA workflows.",
            install: "gstack has its own install path — check its README. It typically deploys as a directory under ~/.claude/skills/gstack plus individual symlinked skills (office-hours, plan-eng-review, qa, cso, etc.).",
        },
        {
            name: "log",
            severity: "recommended",
            purpose: "Session capture with decisions, rationale, and alternatives.",
            install: "The Soloship log skill is shipped as part of Soloship. If missing, re-run `soloship init` from a project directory.",
        },
        {
            name: "obsidian-second-brain",
            severity: "recommended",
            purpose: "Vault-aware thinking commands (/challenge, /emerge, /connect) plus ~20 content-ingestion and synthesis commands.",
            install: 'git clone https://github.com/eugeniughelbur/obsidian-second-brain ~/.claude/skills/obsidian-second-brain && bash ~/.claude/skills/obsidian-second-brain/scripts/setup.sh "<VAULT_PATH>"',
        },
    ],
    // Global hooks expected in ~/.claude/settings.json. A hook is identified
    // by its event, its matcher, and a substring of its command
    // (`commandContains`) — presumably how `doctor` matches installed hooks.
    hooks: [
        {
            event: "SessionStart",
            matcher: "compact",
            commandContains: "reinject-claude-md-after-compact",
            severity: "recommended",
            purpose: "Re-injects the nearest CLAUDE.md after auto-compaction so the agent doesn't forget project rules mid-session.",
            install: "Add a SessionStart hook with matcher 'compact' in ~/.claude/settings.json pointing at a script that reads stdin, walks up from `cwd` to find CLAUDE.md, and outputs it as `additionalContext` JSON. See the Soloship docs for a reference script.",
        },
    ],
};
|
package/dist/pkg.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
/** Returns Soloship's own version string, read from the package.json that sits one directory above the compiled dist/ output. */
export declare function getVersion(): string;
|
package/dist/pkg.js
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
import { readFileSync } from "node:fs";
|
|
2
|
+
import { dirname, join } from "node:path";
|
|
3
|
+
import { fileURLToPath } from "node:url";
|
|
4
|
+
/**
 * Read Soloship's own version out of its package.json.
 *
 * The compiled module lives in dist/, so the package manifest is one
 * directory above this file; locate it relative to import.meta.url rather
 * than the process cwd so the result is stable regardless of where the CLI
 * was invoked from.
 */
export function getVersion() {
    const moduleDir = dirname(fileURLToPath(import.meta.url));
    const manifestPath = join(moduleDir, "..", "package.json");
    const manifest = JSON.parse(readFileSync(manifestPath, "utf-8"));
    return manifest.version;
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
/**
 * Rollback to the last Soloship safety snapshot.
 *
 * Checkpoints are created automatically at session start by the checkpoint hook.
 * The checkpoint reference (a commit SHA) is stored in .ai/.last-checkpoint.
 *
 * Rollback strategy:
 * 1. If .ai/.last-checkpoint exists, hard-reset to that commit
 * 2. Commits and working-tree changes made after the checkpoint are DISCARDED
 *    (the implementation uses `git reset --hard` / `git checkout -- .` plus
 *    `git clean -fd`, not a soft reset)
 * 3. If a pre-session stash snapshot was recorded in
 *    .ai/.last-checkpoint-stash, it is re-applied so the user's own
 *    uncommitted work survives the rollback
 *
 * Exits the process with code 1 when not inside a git repository or when no
 * valid checkpoint can be found.
 */
export declare function runRollback(): Promise<void>;
|
package/dist/rollback.js
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
import { existsSync, readFileSync } from "node:fs";
|
|
2
|
+
import { join } from "node:path";
|
|
3
|
+
import { execSync } from "node:child_process";
|
|
4
|
+
import chalk from "chalk";
|
|
5
|
+
/**
 * Rollback to the last Soloship safety snapshot.
 *
 * Checkpoints are created automatically at session start by the checkpoint hook.
 * The checkpoint reference (a commit SHA) is stored in .ai/.last-checkpoint.
 *
 * Rollback strategy:
 * 1. If .ai/.last-checkpoint exists, hard-reset to that commit — commits and
 *    working-tree changes made after the checkpoint are discarded
 * 2. If a pre-session stash snapshot was recorded, re-apply it so the user's
 *    own uncommitted work survives the rollback
 *
 * Exits the process with code 1 on any precondition failure: not a git repo,
 * missing/empty/malformed checkpoint file, or unknown checkpoint commit.
 */
export async function runRollback() {
    const root = process.cwd();
    // Verify git repo
    try {
        execSync("git rev-parse --is-inside-work-tree", {
            cwd: root,
            stdio: "pipe",
        });
    }
    catch {
        console.error(chalk.red("Not a git repository. Rollback requires git."));
        process.exit(1);
    }
    const checkpointFile = join(root, ".ai", ".last-checkpoint");
    if (!existsSync(checkpointFile)) {
        console.error(chalk.red("No checkpoint found.") +
            " Checkpoints are created automatically when a Claude Code session starts.");
        console.error(chalk.dim("If this is a fresh project, run a Claude Code session first to create a checkpoint."));
        process.exit(1);
    }
    const checkpointSha = readFileSync(checkpointFile, "utf-8").trim();
    if (!checkpointSha) {
        console.error(chalk.red("Checkpoint file is empty."));
        process.exit(1);
    }
    // SECURITY: the SHA is interpolated into shell command strings below.
    // Refuse anything that is not a plain hex object id so a tampered
    // .ai/.last-checkpoint file cannot inject arbitrary shell.
    if (!/^[0-9a-f]{4,40}$/i.test(checkpointSha)) {
        console.error(chalk.red("Checkpoint file does not contain a valid commit SHA."));
        process.exit(1);
    }
    // Verify the checkpoint commit exists
    try {
        execSync(`git cat-file -e ${checkpointSha}`, {
            cwd: root,
            stdio: "pipe",
        });
    }
    catch {
        console.error(chalk.red(`Checkpoint commit ${checkpointSha} not found.`) +
            " It may have been garbage-collected or the history was rewritten.");
        process.exit(1);
    }
    // Check for pre-session stash snapshot (optional; may legitimately be absent)
    const stashFile = join(root, ".ai", ".last-checkpoint-stash");
    const stashSha = existsSync(stashFile)
        ? readFileSync(stashFile, "utf-8").trim()
        : null;
    // Show what will be rolled back
    const shortSha = checkpointSha.substring(0, 7);
    const currentSha = execSync("git rev-parse --short HEAD", {
        cwd: root,
        encoding: "utf-8",
    }).trim();
    console.log("");
    console.log(chalk.bold("Soloship Rollback"));
    console.log("");
    console.log(` Restore point: ${chalk.cyan(shortSha)} (saved when AI session started)`);
    console.log(` You are now at: ${chalk.cyan(currentSha)}`);
    if (stashSha) {
        console.log(` Your pre-session work: ${chalk.cyan("preserved")}`);
    }
    console.log("");
    // Count commits between checkpoint and HEAD to pick a rollback path:
    // zero commits means only the working tree diverged; otherwise the
    // session also created commits that must be reset away.
    try {
        const commitCount = execSync(`git rev-list --count ${checkpointSha}..HEAD`, { cwd: root, encoding: "utf-8" }).trim();
        if (commitCount === "0") {
            // Check for uncommitted changes
            const hasChanges = execSync("git status --porcelain", {
                cwd: root,
                encoding: "utf-8",
            }).trim().length > 0;
            if (!hasChanges) {
                console.log(chalk.green("Already at checkpoint.") + " Nothing to roll back.");
                return;
            }
            console.log(` Removing changes made during this session...`);
            // Discard all current changes (tracked edits, then untracked files)
            execSync("git checkout -- .", { cwd: root, stdio: "pipe" });
            execSync("git clean -fd", { cwd: root, stdio: "pipe" });
            // Restore pre-session changes if they were captured
            if (stashSha) {
                restorePreSessionChanges(root, stashSha);
            }
        }
        else {
            console.log(` Undoing ${chalk.yellow(commitCount)} commit(s) from this session...`);
            // Hard reset to checkpoint — discards agent's commits and working tree changes
            execSync(`git reset --hard ${checkpointSha}`, {
                cwd: root,
                stdio: "pipe",
            });
            // Restore pre-session changes if they were captured
            if (stashSha) {
                restorePreSessionChanges(root, stashSha);
            }
        }
    }
    catch (err) {
        console.error(chalk.red("Rollback failed:"), err);
        process.exit(1);
    }
    console.log("");
    console.log(chalk.green.bold("Rolled back to checkpoint."));
    if (stashSha) {
        console.log(chalk.dim("Your pre-session changes have been restored."));
    }
    console.log("");
}
|
|
119
|
+
/**
 * Best-effort re-application of the stash commit captured before the session.
 *
 * Verifies the stash object is still reachable before applying it; if either
 * step fails (e.g. `git gc` pruned the object), a warning is printed and the
 * tree is left clean at the checkpoint commit instead of throwing.
 *
 * NOTE(review): `stashSha` is interpolated into a shell string here —
 * presumably always written by our own hook, but worth validating upstream.
 */
function restorePreSessionChanges(root, stashSha) {
    const git = (cmd) => execSync(cmd, { cwd: root, stdio: "pipe" });
    try {
        // Confirm the object exists before applying (git gc could have collected it).
        git(`git cat-file -e ${stashSha}`);
        git(`git stash apply ${stashSha}`);
    }
    catch {
        console.log(chalk.yellow("Could not restore pre-session changes (snapshot may have been cleaned up)." +
            " Working tree is clean at the checkpoint commit."));
    }
}
|
package/dist/rules.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
/**
 * Write Soloship's auto-loaded rule files into `<root>/.claude/rules`,
 * creating the directory if needed and never overwriting existing files.
 * Resolves with one status entry per rule file (created vs. skipped).
 */
export declare function installRules(root: string): Promise<string[]>;
|
package/dist/rules.js
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
import { existsSync, mkdirSync, writeFileSync } from "node:fs";
|
|
2
|
+
import { join } from "node:path";
|
|
3
|
+
/**
 * Install Soloship's auto-loaded rule files into `<root>/.claude/rules`.
 *
 * Existing files are never overwritten — they are reported as skipped so a
 * user's local edits survive repeated runs.
 *
 * @param {string} root - Project root directory.
 * @returns {Promise<string[]>} One status entry per rule file: the bare
 *   filename when created, or "<filename> (exists, skipped)" when left alone.
 */
export async function installRules(root) {
    const results = [];
    const rulesDir = join(root, ".claude", "rules");
    if (!existsSync(rulesDir)) {
        mkdirSync(rulesDir, { recursive: true });
    }
    // Filename -> file contents for every rule Soloship ships.
    const rules = {
        "solution-search.md": RULE_SOLUTION_SEARCH,
        "plan-materialization.md": RULE_PLAN_MATERIALIZATION,
        "plan-rationale.md": RULE_PLAN_RATIONALE,
        "plan-lifecycle.md": RULE_PLAN_LIFECYCLE,
    };
    for (const [filename, content] of Object.entries(rules)) {
        const path = join(rulesDir, filename);
        if (!existsSync(path)) {
            writeFileSync(path, content);
            results.push(filename);
        }
        else {
            // BUG FIX: previously pushed the literal text
            // `$(unknown) (exists, skipped)` — `$(...)` is shell syntax, not
            // template-literal interpolation — so every skipped file was
            // reported identically. Interpolate the real filename.
            results.push(`${filename} (exists, skipped)`);
        }
    }
    return results;
}
const RULE_SOLUTION_SEARCH = `# Solution Search Before Work (Auto-Loaded)

## The Rule

Before planning, debugging, or reviewing any implementation, check if \`docs/solutions/\` exists in the project. If it does, search it for prior art related to the current task.

## When to Search

- Before starting any plan (Think or Plan phase)
- At the start of any debugging session
- When reviewing an implementation against a plan
- When encountering an error message

## How to Search

1. Grep \`docs/solutions/\` for keywords: component names, error messages, file paths, symptoms
2. Search the entire directory — never limit to a single category
3. Read frontmatter of matches to assess relevance
4. Read full doc if relevant, and apply its prevention strategies

## What to Do With Results

- Reference relevant solutions in plans and reviews
- Apply prevention strategies from past solutions
- If the current problem matches a documented one, follow the existing solution
`;
const RULE_PLAN_MATERIALIZATION = `# Plan Materialization (Auto-Loaded)

## The Rule

**Planning mode is for thinking. The plan file is the deliverable.**

After exiting planning mode, the FIRST action — before offering to clear context or implement — is writing the plan to \`docs/plans/YYYY-MM-DD-<slug>.md\`.

## Sequence

1. Enter planning mode (think, design, iterate with user)
2. Exit planning mode
3. IMMEDIATELY write plan to docs/plans/YYYY-MM-DD-<slug>.md
4. THEN offer to clear context and implement

Never skip step 3. Never say "I'll write the plan after we clear." The plan file must exist before the session boundary.

## Why This Exists

Planning mode disables file writes. This creates a gap where good planning work stays in conversation context but never reaches the filesystem. Context clears destroy it. This rule closes that gap.
`;
const RULE_PLAN_RATIONALE = `# Plan Rationale Requirements (Auto-Loaded)

Every implementation plan must carry enough reasoning for a fresh agent with zero context to understand why decisions were made.

## Inline Rationale

Each phase or major step must include a **Why** line explaining the motivation. Not just "delete these files" but "delete these files because they are dead code — no imports reference them."

## Key Decisions Section

Every plan must end with a **Key Decisions** section listing non-obvious choices and their reasoning. A decision qualifies as "key" if:
- Choosing between two or more reasonable approaches
- Deleting code or removing functionality
- Changing defaults or stored state schema
- Imposing architectural constraints
- Anything a reviewer might question
`;
const RULE_PLAN_LIFECYCLE = `# Plan Lifecycle (Auto-Loaded)

## Location

All plans go in \`docs/plans/\`. Naming: \`YYYY-MM-DD-<slug>.md\`

## Cleanup After Completion

### Small Plans (delete after commit)

ALL of these must be true:
- Single phase / fewer than 3 tasks
- Touches fewer than 5 files
- No architectural decisions worth preserving

**Action:** \`git rm\` the plan file after the final commit.

### Large Plans (archive)

ANY of these is true:
- Multiple phases or 3+ tasks
- Touches 5+ files
- Contains architectural decisions
- Spans multiple sessions

**Action:** \`git mv\` to \`docs/plans/archive/\`

When in doubt, archive. Deleting knowledge is worse than keeping a small file.
`;
|
package/dist/scaffold.js
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
import { existsSync, mkdirSync, writeFileSync } from "node:fs";
|
|
2
|
+
import { join } from "node:path";
|
|
3
|
+
import { generateClaudeMd, generateAgentsMd, generateSolutionGuide, generateChangelog, } from "./templates.js";
|
|
4
|
+
/**
 * Scaffold Soloship's documentation layout into a project.
 *
 * Creates the docs/ directory tree plus CLAUDE.md, AGENTS.md, CHANGELOG.md,
 * docs/SOLUTION_GUIDE.md, and a starter .semgrep.yml. Nothing that already
 * exists is overwritten; every path is reported with a "created" or "exists"
 * action so the caller can show a summary.
 *
 * @param {string} root - Project root directory.
 * @param {object} project - Detection result; only `project.existingDocs`
 *   flags are read here (hasClaudeMd, hasAgentsMd, hasChangelog).
 * @returns {Promise<Array<{path: string, action: string}>>}
 */
export async function scaffoldDocs(root, project) {
    const results = [];
    // Write `makeContent()` to `relPath` unless the target already exists.
    // `knownExists` lets callers pass a pre-detected flag (root docs use
    // detection results); otherwise the filesystem is checked directly.
    // Content is generated lazily so templates only render when needed.
    const ensureFile = (relPath, makeContent, knownExists) => {
        const exists = typeof knownExists === "boolean"
            ? knownExists
            : existsSync(join(root, relPath));
        if (!exists) {
            writeFileSync(join(root, relPath), makeContent());
            results.push({ path: relPath, action: "created" });
        }
        else {
            results.push({ path: relPath, action: "exists" });
        }
    };
    // Create directory structure
    const dirs = [
        "docs/plans",
        "docs/plans/archive",
        "docs/solutions",
        "docs/architecture",
        "docs/architecture/decisions",
        "docs/audit",
    ];
    for (const dir of dirs) {
        const fullPath = join(root, dir);
        if (!existsSync(fullPath)) {
            mkdirSync(fullPath, { recursive: true });
            results.push({ path: dir + "/", action: "created" });
        }
        else {
            results.push({ path: dir + "/", action: "exists" });
        }
    }
    // Root-level docs — creation is keyed off the detection flags, not the
    // filesystem, so behavior matches what `soloship init` reported upstream.
    ensureFile("CLAUDE.md", () => generateClaudeMd(project), project.existingDocs.hasClaudeMd);
    ensureFile("AGENTS.md", () => generateAgentsMd(project), project.existingDocs.hasAgentsMd);
    ensureFile("CHANGELOG.md", () => generateChangelog(project), project.existingDocs.hasChangelog);
    // Solution Guide — reference doc, keyed off filesystem presence.
    ensureFile("docs/SOLUTION_GUIDE.md", () => generateSolutionGuide());
    // Semgrep config for automated security scanning.
    ensureFile(".semgrep.yml", () => generateSemgrepConfig());
    return results;
}
|
|
72
|
+
/**
 * Render the default .semgrep.yml written into scaffolded projects.
 *
 * The starter ruleset covers a small set of injection, XSS, and auth checks;
 * the header comment points users at the p/owasp-top-ten registry pack for
 * fuller coverage.
 */
function generateSemgrepConfig() {
    const config = `# Semgrep configuration — Soloship automated security scanning
# Runs automatically on every commit via Claude Code hook.
# Critical findings block the commit. Medium findings warn.
#
# Install semgrep: pip install semgrep (or pipx install semgrep)
# Manual scan: semgrep --config .semgrep.yml src/

rules:
  # --- Injection ---
  - id: hardcoded-secret
    pattern-either:
      - pattern: $KEY = "..."
      - pattern: $KEY = '...'
    metavariable-regex:
      metavariable: $KEY
      regex: (?i)(api_key|secret|password|token|credential|private_key)
    message: "Possible hardcoded secret in $KEY. Use environment variables instead."
    severity: ERROR
    languages: [javascript, typescript, python, ruby]

  - id: sql-string-concat
    pattern-either:
      - pattern: |
          $QUERY = "..." + $INPUT + "..."
      - pattern: |
          $QUERY = \`...\${$INPUT}...\`
    message: "SQL query built with string concatenation. Use parameterized queries."
    severity: ERROR
    languages: [javascript, typescript]

  - id: eval-usage
    pattern-either:
      - pattern: eval(...)
      - pattern: new Function(...)
    message: "eval() or new Function() detected. This enables code injection."
    severity: ERROR
    languages: [javascript, typescript]

  # --- XSS ---
  - id: innerhtml-usage
    pattern: $EL.innerHTML = $VALUE
    message: "innerHTML assignment detected. Use textContent or sanitize input."
    severity: WARNING
    languages: [javascript, typescript]

  - id: dangerously-set-html
    pattern: dangerouslySetInnerHTML={...}
    message: "dangerouslySetInnerHTML usage. Ensure input is sanitized."
    severity: WARNING
    languages: [javascript, typescript]

  # --- Auth ---
  - id: jwt-none-algorithm
    pattern-either:
      - pattern: |
          jwt.sign($PAYLOAD, ..., {algorithm: "none"})
      - pattern: |
          jwt.verify($TOKEN, ..., {algorithms: ["none"]})
    message: "JWT with 'none' algorithm is insecure."
    severity: ERROR
    languages: [javascript, typescript]

# Extend with p/owasp-top-ten for comprehensive coverage:
# semgrep --config p/owasp-top-ten --config .semgrep.yml src/
`;
    return config;
}
|
|
@@ -0,0 +1,5 @@
|
|
|
1
|
+
import type { ProjectInfo } from "./detect.js";
/** Render the initial CLAUDE.md contents for a scaffolded project. */
export declare function generateClaudeMd(project: ProjectInfo): string;
/** Render the root-level AGENTS.md companion file. */
export declare function generateAgentsMd(project: ProjectInfo): string;
/** Render the starter CHANGELOG.md contents. */
export declare function generateChangelog(project: ProjectInfo): string;
/** Render docs/SOLUTION_GUIDE.md; takes no project-specific input. */
export declare function generateSolutionGuide(): string;
|