claude-code-scanner 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/DOCUMENTATION.md +1210 -0
- package/LICENSE +21 -0
- package/README.md +306 -0
- package/bin/cli.js +305 -0
- package/package.json +43 -0
- package/template/.claude/agents/api-builder.md +64 -0
- package/template/.claude/agents/architect.md +92 -0
- package/template/.claude/agents/debugger.md +69 -0
- package/template/.claude/agents/explorer.md +71 -0
- package/template/.claude/agents/frontend.md +61 -0
- package/template/.claude/agents/infra.md +66 -0
- package/template/.claude/agents/product-owner.md +73 -0
- package/template/.claude/agents/qa-lead.md +102 -0
- package/template/.claude/agents/reviewer.md +77 -0
- package/template/.claude/agents/security.md +81 -0
- package/template/.claude/agents/team-lead.md +128 -0
- package/template/.claude/agents/tester.md +72 -0
- package/template/.claude/docs/agent-error-protocol.md +89 -0
- package/template/.claude/docs/best-practices.md +93 -0
- package/template/.claude/docs/commands-template.md +73 -0
- package/template/.claude/docs/conflict-resolution-protocol.md +82 -0
- package/template/.claude/docs/context-budget.md +54 -0
- package/template/.claude/docs/execution-metrics-protocol.md +105 -0
- package/template/.claude/docs/flow-engine.md +475 -0
- package/template/.claude/docs/smithery-setup.md +51 -0
- package/template/.claude/docs/task-record-schema.md +196 -0
- package/template/.claude/hooks/drift-detector.js +143 -0
- package/template/.claude/hooks/execution-report.js +114 -0
- package/template/.claude/hooks/notify-approval.js +30 -0
- package/template/.claude/hooks/post-compact-recovery.js +68 -0
- package/template/.claude/hooks/post-edit-format.js +43 -0
- package/template/.claude/hooks/pre-compact-save.js +94 -0
- package/template/.claude/hooks/protect-files.js +39 -0
- package/template/.claude/hooks/session-start.js +76 -0
- package/template/.claude/hooks/stop-failure-handler.js +77 -0
- package/template/.claude/hooks/tool-failure-tracker.js +54 -0
- package/template/.claude/hooks/track-file-changes.js +34 -0
- package/template/.claude/hooks/validate-bash.js +34 -0
- package/template/.claude/manifest.json +22 -0
- package/template/.claude/profiles/backend.md +34 -0
- package/template/.claude/profiles/devops.md +36 -0
- package/template/.claude/profiles/frontend.md +34 -0
- package/template/.claude/rules/context-budget.md +34 -0
- package/template/.claude/scripts/verify-setup.js +210 -0
- package/template/.claude/settings.json +154 -0
- package/template/.claude/skills/context-check/SKILL.md +112 -0
- package/template/.claude/skills/execution-report/SKILL.md +229 -0
- package/template/.claude/skills/generate-environment/SKILL.md +128 -0
- package/template/.claude/skills/generate-environment/additional-skills.md +276 -0
- package/template/.claude/skills/generate-environment/artifact-templates.md +386 -0
- package/template/.claude/skills/generate-environment/domain-agents.md +202 -0
- package/template/.claude/skills/impact-analysis/SKILL.md +17 -0
- package/template/.claude/skills/metrics/SKILL.md +19 -0
- package/template/.claude/skills/progress-report/SKILL.md +27 -0
- package/template/.claude/skills/rollback/SKILL.md +75 -0
- package/template/.claude/skills/scan-codebase/SKILL.md +59 -0
- package/template/.claude/skills/scan-codebase/deep-scan-instructions.md +101 -0
- package/template/.claude/skills/scan-codebase/tech-markers.md +87 -0
- package/template/.claude/skills/setup-smithery/SKILL.md +38 -0
- package/template/.claude/skills/sync/SKILL.md +239 -0
- package/template/.claude/skills/task-tracker/SKILL.md +40 -0
- package/template/.claude/skills/validate-setup/SKILL.md +30 -0
- package/template/.claude/skills/workflow/SKILL.md +333 -0
- package/template/.claude/templates/README.md +42 -0
- package/template/CLAUDE.md +67 -0
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
#!/usr/bin/env node
// Pre-tool hook: block edits to protected files.
// Reads the hook payload from stdin, inspects tool_input.file_path, and
// exits 2 (block) for protected paths, 0 (allow) otherwise.
const path = require('path');

// Timeout: exit if stdin hangs. NOTE: a pending timer keeps the Node event
// loop alive, so every code path in the 'end' handler must call process.exit.
setTimeout(() => { process.stderr.write('BLOCKED: Hook timeout.\n'); process.exit(2); }, 10000);

let input = '';
process.stdin.setEncoding('utf-8');
process.stdin.on('data', chunk => { input += chunk; });
process.stdin.on('end', () => {
  try {
    const data = JSON.parse(input);
    const file = (data.tool_input && data.tool_input.file_path) || '';
    if (!file) process.exit(0);

    // Normalize path separators for cross-platform matching
    const normalized = path.resolve(file).split(path.sep).join('/');

    // Block anything under a protected directory (substring match on the
    // normalized absolute path).
    const PROTECTED_DIRS = ['.github/workflows/'];
    for (const p of PROTECTED_DIRS) {
      if (normalized.includes(p)) {
        process.stderr.write(`BLOCKED: ${file} is protected.\n`);
        process.exit(2);
      }
    }

    // Block exact protected filenames regardless of directory.
    const basename = path.basename(normalized);
    const PROTECTED_EXACT = ['.env', '.env.local', 'package-lock.json', 'yarn.lock', 'pnpm-lock.yaml'];
    for (const p of PROTECTED_EXACT) {
      if (basename === p) {
        process.stderr.write(`BLOCKED: ${file} is protected.\n`);
        process.exit(2);
      }
    }

    // BUG FIX: explicitly allow the edit. The original fell through here
    // without exiting, so the 10-second timeout above fired and blocked
    // every edit to a NON-protected file with exit code 2.
    process.exit(0);
  } catch {
    process.stderr.write('BLOCKED: Failed to parse hook input.\n');
    process.exit(2);
  }
});
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
#!/usr/bin/env node
// Re-inject critical context on session start, resume, and after compaction.
// Scans .claude/tasks/*.md and prints: the first active task, all ON_HOLD
// tasks (with staleness warnings), and — if nothing is active — BLOCKED tasks.
const fs = require('fs');
const path = require('path');

const tasksDir = path.join(process.cwd(), '.claude', 'tasks');
if (!fs.existsSync(tasksDir)) process.exit(0);

const files = fs.readdirSync(tasksDir).filter(f => f.endsWith('.md'));

// PERF FIX: read each task file once up front. The original re-read every
// file from disk in each of the three scan passes below.
const taskContents = files.map(file => fs.readFileSync(path.join(tasksDir, file), 'utf-8'));

let activeFound = false;

// Pass 1: surface the first task in an active workflow status.
for (const content of taskContents) {
  if (/status:\s*(DEVELOPING|DEV_TESTING|REVIEWING|CI_PENDING|QA_TESTING|QA_SIGNOFF|BIZ_SIGNOFF|TECH_SIGNOFF|DEPLOYING|MONITORING)/.test(content)) {
    const titleMatch = content.match(/^title:\s*(.+)$/m);
    const statusMatch = content.match(/^status:\s*(.+)$/m);
    if (titleMatch && statusMatch) {
      console.log(`ACTIVE TASK: ${titleMatch[1].trim()} | STATUS: ${statusMatch[1].trim()}`);
      console.log('Check .claude/tasks/ for full details.');
      activeFound = true;
    }
    // Only the first active task is reported, even if its header was malformed.
    break;
  }
}

// Pass 2: warn about every ON_HOLD task, escalating with age.
for (const content of taskContents) {
  if (/status:\s*ON_HOLD/.test(content)) {
    const idMatch = content.match(/^id:\s*(.+)$/m);
    const titleMatch = content.match(/^title:\s*(.+)$/m);
    const updatedMatch = content.match(/^updated:\s*(.+)$/m);

    if (idMatch && titleMatch) {
      const id = idMatch[1].trim();
      const title = titleMatch[1].trim();

      // Check how long it's been on hold (0 if no `updated:` field; an
      // unparseable date yields NaN, which falls into the plain ON_HOLD line).
      let daysOnHold = 0;
      if (updatedMatch) {
        const updated = new Date(updatedMatch[1].trim());
        daysOnHold = Math.floor((Date.now() - updated.getTime()) / (1000 * 60 * 60 * 24));
      }

      if (daysOnHold > 30) {
        console.log(`WARNING: ${id} "${title}" has been ON_HOLD for ${daysOnHold} days. Consider cancelling: /workflow cancel ${id}`);
      } else if (daysOnHold > 7) {
        console.log(`REMINDER: ${id} "${title}" is ON_HOLD (${daysOnHold} days). Resume: /workflow resume ${id}`);
      } else {
        console.log(`ON_HOLD: ${id} "${title}". Resume: /workflow resume ${id}`);
      }
    }
  }
}

// Pass 3: if no active task, surface blocked tasks instead.
if (!activeFound) {
  for (const content of taskContents) {
    if (/status:\s*BLOCKED/.test(content)) {
      const idMatch = content.match(/^id:\s*(.+)$/m);
      const titleMatch = content.match(/^title:\s*(.+)$/m);
      if (idMatch && titleMatch) {
        console.log(`BLOCKED: ${idMatch[1].trim()} "${titleMatch[1].trim()}". Check blockers in task file.`);
      }
    }
  }
}

process.exit(0);
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
#!/usr/bin/env node
// StopFailure hook: handle rate limits, auth failures, max tokens, and other session-ending errors
// Preserves task state and provides recovery instructions

const fs = require('fs');
const path = require('path');

// Timeout: exit if stdin hangs
setTimeout(() => process.exit(0), 10000);

// Recovery hints keyed by error type; anything unrecognized gets the generic fallback.
const RECOVERY_ACTIONS = {
  'rate_limit': 'Wait 60 seconds, then resume: claude --continue',
  'authentication_failed': 'Re-authenticate: check API key or run claude auth login',
  'billing_error': 'Check billing at console.anthropic.com, then resume',
  'invalid_request': 'Review last action — may need to reduce context. Resume with /compact first',
  'server_error': 'Transient error — retry: claude --continue',
  'max_output_tokens': 'Output was truncated. Resume to continue generation: claude --continue'
};

// Map an error type to its human-readable recovery instruction.
function getRecoveryAction(errorType) {
  return RECOVERY_ACTIONS[errorType] || 'Resume session: claude --continue';
}

// Create a directory (and parents) if it does not already exist.
function ensureDir(dir) {
  if (!fs.existsSync(dir)) {
    fs.mkdirSync(dir, { recursive: true });
  }
}

const chunks = [];
process.stdin.setEncoding('utf-8');
process.stdin.on('data', chunk => { chunks.push(chunk); });
process.stdin.on('end', () => {
  try {
    const payload = JSON.parse(chunks.join(''));
    const errorType = payload.stop_failure_error_type || payload.error_type || 'unknown';

    // Log the failure (payload truncated to keep the log line bounded).
    const reportsDir = path.join(process.cwd(), '.claude', 'reports');
    ensureDir(reportsDir);

    const timestamp = new Date().toISOString();
    const logPath = path.join(reportsDir, 'session-failures.log');
    fs.appendFileSync(logPath, `| ${timestamp} | ${errorType} | ${JSON.stringify(payload).substring(0, 500)} |\n`);

    // Save a recovery snapshot for the first task that is mid-execution.
    const tasksDir = path.join(process.cwd(), '.claude', 'tasks');
    if (fs.existsSync(tasksDir)) {
      const activeRe = /status:\s*(DEVELOPING|DEV_TESTING|REVIEWING|CI_PENDING|QA_TESTING)/;
      for (const taskFile of fs.readdirSync(tasksDir).filter(f => f.endsWith('.md'))) {
        const body = fs.readFileSync(path.join(tasksDir, taskFile), 'utf-8');
        if (!activeRe.test(body)) continue;

        // Mark task as interrupted
        const id = (body.match(/^id:\s*(.+)$/m) || [])[1] || 'UNKNOWN';
        const snapshotDir = path.join(reportsDir, 'executions');
        ensureDir(snapshotDir);
        const snapshot = {
          timestamp,
          task_id: id.trim(),
          error_type: errorType,
          recovery_action: getRecoveryAction(errorType),
          state_preserved: true
        };
        fs.writeFileSync(
          path.join(snapshotDir, `${id.trim()}_interrupted_${Date.now()}.json`),
          JSON.stringify(snapshot, null, 2)
        );
        break;
      }
    }

    // Output recovery instructions
    console.log(`\nSESSION FAILURE: ${errorType}`);
    console.log(`Recovery: ${getRecoveryAction(errorType)}`);
    console.log('Task state has been preserved. Resume with: claude --continue');
  } catch (e) {
    console.log('Session ended unexpectedly. Resume with: claude --continue');
  }
  process.exit(0);
});
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
#!/usr/bin/env node
// PostToolUseFailure hook: track tool failures for debugging and execution reports
// Logs every tool failure with context so patterns can be identified

const fs = require('fs');
const path = require('path');

// Timeout: exit if stdin hangs
setTimeout(() => process.exit(0), 10000);

const chunks = [];
process.stdin.setEncoding('utf-8');
process.stdin.on('data', chunk => { chunks.push(chunk); });
process.stdin.on('end', () => {
  try {
    const payload = JSON.parse(chunks.join(''));
    const toolName = payload.tool_name || 'unknown';
    const error = payload.tool_error || payload.error || 'unknown error';
    const toolInput = payload.tool_input || {};

    // Append to the global failure log, creating the reports dir if needed.
    const reportsDir = path.join(process.cwd(), '.claude', 'reports');
    if (!fs.existsSync(reportsDir)) {
      fs.mkdirSync(reportsDir, { recursive: true });
    }

    // Second-precision timestamp; error and input truncated to keep rows bounded.
    const timestamp = new Date().toISOString().replace(/\.\d{3}Z$/, 'Z');
    const flatError = String(error).substring(0, 200).replace(/\n/g, ' ');
    fs.appendFileSync(
      path.join(reportsDir, 'tool-failures.log'),
      `| ${timestamp} | ${toolName} | ${flatError} | ${JSON.stringify(toolInput).substring(0, 200)} |\n`
    );

    // Also log to active task's changes log if one exists
    const tasksDir = path.join(process.cwd(), '.claude', 'tasks');
    if (fs.existsSync(tasksDir)) {
      const activeRe = /status:\s*(DEVELOPING|DEV_TESTING|REVIEWING|CI_PENDING|QA_TESTING)/;
      for (const taskFile of fs.readdirSync(tasksDir).filter(f => f.endsWith('.md'))) {
        const taskPath = path.join(tasksDir, taskFile);
        if (!activeRe.test(fs.readFileSync(taskPath, 'utf-8'))) continue;
        fs.appendFileSync(
          taskPath.replace(/\.md$/, '_changes.log'),
          `| ${timestamp} | TOOL_FAILURE | ${toolName}: ${String(error).substring(0, 100)} |\n`
        );
        break;
      }
    }

    // Output warning to stderr (visible to user)
    process.stderr.write(`Tool failure tracked: ${toolName} — ${String(error).substring(0, 100)}\n`);
  } catch (e) {
    // Don't block on tracking failure
  }
  process.exit(0);
});
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
#!/usr/bin/env node
// Post-tool hook: track file changes for active task
const fs = require('fs');
const path = require('path');

// Timeout: exit if stdin hangs
setTimeout(() => process.exit(0), 10000);

// True when the task file body is in an active workflow status.
const isActive = content =>
  /status:\s*(DEVELOPING|DEV_TESTING|REVIEWING|CI_PENDING|QA_TESTING)/.test(content);

const chunks = [];
process.stdin.setEncoding('utf-8');
process.stdin.on('data', chunk => { chunks.push(chunk); });
process.stdin.on('end', () => {
  try {
    const payload = JSON.parse(chunks.join(''));
    const file = (payload.tool_input && payload.tool_input.file_path) || '';
    if (!file) process.exit(0);

    const tasksDir = path.join(process.cwd(), '.claude', 'tasks');
    if (!fs.existsSync(tasksDir)) process.exit(0);

    // Find the first active task and append the change to its sibling log.
    for (const taskFile of fs.readdirSync(tasksDir).filter(f => f.endsWith('.md'))) {
      const taskPath = path.join(tasksDir, taskFile);
      if (!isActive(fs.readFileSync(taskPath, 'utf-8'))) continue;
      const timestamp = new Date().toISOString().replace(/\.\d{3}Z$/, 'Z');
      fs.appendFileSync(
        taskPath.replace(/\.md$/, '_changes.log'),
        `| ${timestamp} | file_changed | ${file} |\n`
      );
      break;
    }
  } catch {}
  process.exit(0);
});
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
#!/usr/bin/env node
// Pre-tool hook: block dangerous bash commands.
// Reads the hook payload from stdin, inspects tool_input.command, and exits
// 2 (block) for known-dangerous commands, 0 (allow) otherwise.

// Timeout: exit if stdin hangs. NOTE: a pending timer keeps the Node event
// loop alive, so every code path in the 'end' handler must call process.exit.
setTimeout(() => { process.stderr.write('BLOCKED: Hook timeout.\n'); process.exit(2); }, 10000);

let input = '';
process.stdin.setEncoding('utf-8');
process.stdin.on('data', chunk => { input += chunk; });
process.stdin.on('end', () => {
  try {
    const data = JSON.parse(input);
    const cmd = (data.tool_input && data.tool_input.command) || '';
    if (!cmd) process.exit(0);

    // Literal substrings: destructive deletes, fork bomb, raw-device writes.
    const DANGEROUS_STRINGS = ['rm -rf /', ':(){ :|:& };:', '> /dev/sda', 'mkfs', 'dd if='];
    // Patterns: piping a remote download straight into a shell.
    const DANGEROUS_PATTERNS = [/curl\s.*\|\s*bash/, /wget\s.*\|\s*bash/];
    for (const p of DANGEROUS_STRINGS) {
      if (cmd.includes(p)) {
        process.stderr.write('BLOCKED: Dangerous command.\n');
        process.exit(2);
      }
    }
    for (const rx of DANGEROUS_PATTERNS) {
      if (rx.test(cmd)) {
        process.stderr.write('BLOCKED: Dangerous command.\n');
        process.exit(2);
      }
    }

    // BUG FIX: explicitly allow the command. The original fell through here
    // without exiting, so the 10-second timeout above fired and blocked
    // every SAFE command with exit code 2.
    process.exit(0);
  } catch {
    process.stderr.write('BLOCKED: Failed to parse hook input.\n');
    process.exit(2);
  }
});
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
{
|
|
2
|
+
"last_sync": null,
|
|
3
|
+
"last_scan": null,
|
|
4
|
+
"environment_version": "1.0.0",
|
|
5
|
+
"scanner_version": "1.0.0",
|
|
6
|
+
"agents": {},
|
|
7
|
+
"skills": {},
|
|
8
|
+
"hooks": {},
|
|
9
|
+
"rules": {},
|
|
10
|
+
"tech_stack": {},
|
|
11
|
+
"claude_md": {
|
|
12
|
+
"hash": null,
|
|
13
|
+
"line_count": null,
|
|
14
|
+
"agents_listed": null
|
|
15
|
+
},
|
|
16
|
+
"project_structure": {
|
|
17
|
+
"source_dirs": [],
|
|
18
|
+
"test_dirs": [],
|
|
19
|
+
"config_files": []
|
|
20
|
+
},
|
|
21
|
+
"_note": "This file is auto-generated by /sync. Do not edit manually."
|
|
22
|
+
}
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# Backend Developer Profile
|
|
2
|
+
|
|
3
|
+
## Role
|
|
4
|
+
Backend developer focused on API endpoints, services, data layer, and server-side logic.
|
|
5
|
+
|
|
6
|
+
## Primary Agents
|
|
7
|
+
- `@api-builder` — your main development agent
|
|
8
|
+
- `@debugger` — for investigating backend issues
|
|
9
|
+
- `@tester` — for writing integration tests
|
|
10
|
+
|
|
11
|
+
## Key Skills
|
|
12
|
+
- `/add-endpoint` — scaffold new API endpoints
|
|
13
|
+
- `/fix-bug` — systematic debugging
|
|
14
|
+
- `/migrate` — database migrations
|
|
15
|
+
|
|
16
|
+
## Typical Workflow
|
|
17
|
+
```
|
|
18
|
+
/workflow new "backend feature description"
|
|
19
|
+
# Phase 5 routes to @api-builder automatically
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
## Focus Areas
|
|
23
|
+
- API routes, handlers, middleware
|
|
24
|
+
- Database queries, migrations, seeds
|
|
25
|
+
- Background jobs, queues
|
|
26
|
+
- External service integrations
|
|
27
|
+
- Input validation, error handling
|
|
28
|
+
|
|
29
|
+
## Context Loading
|
|
30
|
+
Your session loads:
|
|
31
|
+
- CLAUDE.md (project overview)
|
|
32
|
+
- `.claude/rules/api.md` (endpoint conventions)
|
|
33
|
+
- `.claude/rules/database.md` (data layer rules)
|
|
34
|
+
- `.claude/rules/security.md` (when touching auth)
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
# DevOps Profile
|
|
2
|
+
|
|
3
|
+
## Role
|
|
4
|
+
DevOps engineer focused on infrastructure, CI/CD, deployment, monitoring, and cloud resources.
|
|
5
|
+
|
|
6
|
+
## Primary Agents
|
|
7
|
+
- `@infra` — your main agent
|
|
8
|
+
- `@security` — for infrastructure security review
|
|
9
|
+
- `@explorer` — for investigating system architecture
|
|
10
|
+
|
|
11
|
+
## Key Skills
|
|
12
|
+
- `/deploy` — deploy with checks
|
|
13
|
+
- `/rollback` — rollback failed deployments
|
|
14
|
+
- `/architecture` — system architecture diagrams
|
|
15
|
+
|
|
16
|
+
## Typical Workflow
|
|
17
|
+
```
|
|
18
|
+
/workflow new --hotfix "infrastructure issue"
|
|
19
|
+
# Or for infra tasks:
|
|
20
|
+
/workflow new "infrastructure change description"
|
|
21
|
+
# Phase 5 routes to @infra automatically
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
## Focus Areas
|
|
25
|
+
- Dockerfile, docker-compose
|
|
26
|
+
- CI/CD pipelines (GitHub Actions, GitLab CI)
|
|
27
|
+
- Cloud resources (AWS, GCP, Azure)
|
|
28
|
+
- Infrastructure as Code (Terraform, Pulumi)
|
|
29
|
+
- Monitoring, logging, alerting
|
|
30
|
+
- Environment configuration
|
|
31
|
+
|
|
32
|
+
## Context Loading
|
|
33
|
+
Your session loads:
|
|
34
|
+
- CLAUDE.md (project overview)
|
|
35
|
+
- `.claude/rules/infrastructure.md` (deployment patterns)
|
|
36
|
+
- `.claude/rules/security.md` (when touching secrets/auth)
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# Frontend Developer Profile
|
|
2
|
+
|
|
3
|
+
## Role
|
|
4
|
+
Frontend developer focused on UI components, pages, styling, state management, and accessibility.
|
|
5
|
+
|
|
6
|
+
## Primary Agents
|
|
7
|
+
- `@frontend` — your main development agent
|
|
8
|
+
- `@debugger` — for investigating UI issues
|
|
9
|
+
- `@tester` — for writing component tests
|
|
10
|
+
|
|
11
|
+
## Key Skills
|
|
12
|
+
- `/add-component` — scaffold new UI components
|
|
13
|
+
- `/add-page` — create new routes/pages
|
|
14
|
+
- `/fix-bug` — systematic debugging
|
|
15
|
+
|
|
16
|
+
## Typical Workflow
|
|
17
|
+
```
|
|
18
|
+
/workflow new "frontend feature description"
|
|
19
|
+
# Phase 5 routes to @frontend automatically
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
## Focus Areas
|
|
23
|
+
- Components, pages, layouts
|
|
24
|
+
- Styling (CSS modules, Tailwind, styled-components)
|
|
25
|
+
- State management (Redux, Zustand, Context)
|
|
26
|
+
- Client-side routing
|
|
27
|
+
- Accessibility (ARIA, keyboard, screen readers)
|
|
28
|
+
- Responsive design
|
|
29
|
+
|
|
30
|
+
## Context Loading
|
|
31
|
+
Your session loads:
|
|
32
|
+
- CLAUDE.md (project overview)
|
|
33
|
+
- `.claude/rules/frontend.md` (component patterns)
|
|
34
|
+
- `.claude/rules/testing.md` (when writing tests)
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
---
|
|
2
|
+
paths:
|
|
3
|
+
- "**/*"
|
|
4
|
+
---
|
|
5
|
+
# Context Budget Enforcement
|
|
6
|
+
|
|
7
|
+
## Limits
|
|
8
|
+
- Root CLAUDE.md: max 150 lines (hard limit 200)
|
|
9
|
+
- Module CLAUDE.md: max 80 lines each
|
|
10
|
+
- Rules: max 50 lines each, MUST have `paths:` for scoping
|
|
11
|
+
- Skills: MUST use `context: fork` for anything over 30 lines
|
|
12
|
+
- MCP servers: max 5 active, scoped to agents via `mcpServers:`
|
|
13
|
+
- Startup: under 20% context
|
|
14
|
+
- Working: under 60% context — THIS IS THE HARD BUDGET
|
|
15
|
+
|
|
16
|
+
## Runtime Enforcement
|
|
17
|
+
- Run `/context-check` between EVERY workflow phase transition
|
|
18
|
+
- If context > 60%: STOP and compact before continuing
|
|
19
|
+
- If context > 75%: compact aggressively, consider session split
|
|
20
|
+
- Compact command: `/compact "focus on [current task/phase]"`
|
|
21
|
+
- `/clear` between unrelated tasks — never reuse a bloated session
|
|
22
|
+
|
|
23
|
+
## What Costs Zero
|
|
24
|
+
- Subagent context (separate window)
|
|
25
|
+
- `context: fork` skills (isolated)
|
|
26
|
+
- Templates, profiles, docs (never auto-loaded)
|
|
27
|
+
- Agent bodies (load only in subagent)
|
|
28
|
+
|
|
29
|
+
## What Accumulates (danger)
|
|
30
|
+
- File reads in main context stay in history
|
|
31
|
+
- Agent results returned to main context
|
|
32
|
+
- Bash outputs (especially long ones)
|
|
33
|
+
- Non-forked skill invocations
|
|
34
|
+
- Inline code review diffs
|