superkit-mcp-server 1.2.2 → 1.2.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only; it reflects the changes between package versions exactly as they appear in their respective public registries.
package/SUPERKIT.md CHANGED
@@ -125,7 +125,7 @@ cat skills/session-resume/SKILL.md
125
125
 
126
126
  **BEFORE** solving a new problem:
127
127
  ```bash
128
- ./scripts/compound-search.sh "{keywords}"
128
+ Call MCP `call_tool_compound_manager` { action: "search", terms: ["{keywords}"] }
129
129
  ```
130
130
 
131
131
  If solution found → Apply it, do not reinvent the wheel!
@@ -145,7 +145,7 @@ If solution found → Apply it, do not reinvent the wheel!
145
145
 
146
146
  Run daily:
147
147
  ```bash
148
- ./scripts/compound-dashboard.sh
148
+ Call MCP `call_tool_compound_manager` { action: "dashboard" }
149
149
  ```
150
150
  **Target**: Grade B or higher
151
151
 
@@ -145,10 +145,11 @@ Is it actively exploited (EPSS >0.5)?
145
145
 
146
146
  ## Validation
147
147
 
148
- After your review, run the validation script:
148
+ After your review, validate with project-specific checks:
149
149
 
150
150
  ```bash
151
- python scripts/security_scan.py <project_path> --output summary
151
+ # Use your project's linting/security tools (e.g., npm audit, semgrep, bandit)
152
+ npm audit --production
152
153
  ```
153
154
 
154
155
  This validates that security principles were correctly applied.
package/build/index.js CHANGED
@@ -293,25 +293,34 @@ server.setRequestHandler(GetPromptRequestSchema, async (request) => {
293
293
  try {
294
294
  const content = await fs.readFile(safePath, "utf-8");
295
295
  const parsed = toml.parse(content);
296
- const promptText = parsed?.prompt || `Execute the ${promptName} command.`;
296
+ let promptText = parsed?.prompt || `Execute the ${promptName} command.`;
297
+ // Resolve @{path} includes from super-kit package root
298
+ const includePattern = /@\{([^}]+)\}/g;
299
+ let match;
300
+ while ((match = includePattern.exec(promptText)) !== null) {
301
+ const includePath = match[1];
302
+ const resolvedPath = getSafePath(superKitRoot, includePath);
303
+ if (resolvedPath) {
304
+ try {
305
+ const includeContent = await fs.readFile(resolvedPath, "utf-8");
306
+ promptText = promptText.replace(match[0], includeContent);
307
+ }
308
+ catch {
309
+ promptText = promptText.replace(match[0], `[File not found: ${includePath}]`);
310
+ }
311
+ }
312
+ }
297
313
  // Load SUPERKIT.md for systematic inclusion
298
314
  const superKitPath = path.join(superKitRoot, "SUPERKIT.md");
299
315
  const superKitContent = await fs.readFile(superKitPath, "utf-8");
300
316
  return {
301
317
  description: parsed?.description || `Loaded command: ${promptName}`,
302
318
  messages: [
303
- {
304
- role: "system", // Try standard system role if supported by the client
305
- content: {
306
- type: "text",
307
- text: superKitContent,
308
- },
309
- },
310
319
  {
311
320
  role: "user",
312
321
  content: {
313
322
  type: "text",
314
- text: promptText,
323
+ text: `[SYSTEM INSTRUCTIONS - Follow these strictly]\n\n${superKitContent}\n\n[USER TASK]\n\n${promptText}`,
315
324
  },
316
325
  },
317
326
  ],
@@ -17,7 +17,7 @@ describe('Logger Tools', () => {
17
17
  vi.setSystemTime(new Date('2026-03-04T00:00:00Z'));
18
18
  const result = await logSkill('test-skill', 'manual', 'context', tempDir);
19
19
  expect(result).toBe('Successfully logged skill usage for test-skill');
20
- const logFile = path.join(tempDir, '.agent', 'logs', 'skill_usage.log');
20
+ const logFile = path.join(tempDir, 'docs', 'agents', 'logs', 'skill_usage.log');
21
21
  const content = await fs.readFile(logFile, 'utf-8');
22
22
  expect(content).toContain('2026-03-04T00:00:00Z|test-skill|manual|context\n');
23
23
  vi.useRealTimers();
@@ -27,7 +27,7 @@ describe('Logger Tools', () => {
27
27
  vi.setSystemTime(new Date('2026-03-04T00:00:00Z'));
28
28
  const result = await logWorkflow('test-workflow', 'session-123', tempDir);
29
29
  expect(result).toBe('Successfully logged workflow usage for test-workflow');
30
- const logFile = path.join(tempDir, '.agent', 'logs', 'workflow_usage.log');
30
+ const logFile = path.join(tempDir, 'docs', 'agents', 'logs', 'workflow_usage.log');
31
31
  const content = await fs.readFile(logFile, 'utf-8');
32
32
  expect(content).toContain('2026-03-04T00:00:00Z|test-workflow|session-123\n');
33
33
  vi.useRealTimers();
@@ -36,13 +36,13 @@ describe('Logger Tools', () => {
36
36
  vi.useFakeTimers();
37
37
  vi.setSystemTime(new Date('2026-03-04T00:00:00Z')); // 1772582400 in seconds
38
38
  await logWorkflow('test-workflow', '', tempDir);
39
- const logFile = path.join(tempDir, '.agent', 'logs', 'workflow_usage.log');
39
+ const logFile = path.join(tempDir, 'docs', 'agents', 'logs', 'workflow_usage.log');
40
40
  const content = await fs.readFile(logFile, 'utf-8');
41
41
  expect(content).toContain('2026-03-04T00:00:00Z|test-workflow|1772582400\n');
42
42
  vi.useRealTimers();
43
43
  });
44
44
  it('should rotate logs older than retention days', async () => {
45
- const logDir = path.join(tempDir, '.agent', 'logs');
45
+ const logDir = path.join(tempDir, 'docs', 'agents', 'logs');
46
46
  await fs.mkdir(logDir, { recursive: true });
47
47
  // Old log line (e.g. 100 days old)
48
48
  const oldTimestamp = new Date();
@@ -61,7 +61,7 @@ describe('Logger Tools', () => {
61
61
  expect(finalContent).toContain('context2');
62
62
  });
63
63
  it('should report if no logs need rotation', async () => {
64
- const logDir = path.join(tempDir, '.agent', 'logs');
64
+ const logDir = path.join(tempDir, 'docs', 'agents', 'logs');
65
65
  await fs.mkdir(logDir, { recursive: true });
66
66
  const newTimestamp = new Date();
67
67
  newTimestamp.setDate(newTimestamp.getDate() - 10);
@@ -102,29 +102,16 @@ export async function validateArchitecture(projectPath = '.') {
102
102
  };
103
103
  const expSkills = getExpected('skills');
104
104
  const expWorkflows = getExpected('workflows');
105
- const expScripts = getExpected('scripts');
106
105
  const expPatterns = getExpected('patterns');
107
- let actSkills = 0, actWorkflows = 0, actScripts = 0, actPatterns = 0;
106
+ let actSkills = 0, actWorkflows = 0, actPatterns = 0;
108
107
  try {
109
108
  actSkills = (await fs.readdir(path.join(projectPath, 'skills'))).length;
110
109
  }
111
110
  catch { }
112
111
  try {
113
- const wfs = await fs.readdir(path.join(projectPath, '.agent/workflows'));
112
+ const wfs = await fs.readdir(path.join(projectPath, 'skills', 'workflows'));
114
113
  actWorkflows = wfs.filter(f => f.endsWith('.md') && f !== 'README.md').length;
115
114
  }
116
- catch {
117
- // Fallback to workflows/
118
- try {
119
- const wfs = await fs.readdir(path.join(projectPath, 'workflows'));
120
- actWorkflows = wfs.filter(f => f.endsWith('.md') && f !== 'README.md').length;
121
- }
122
- catch { }
123
- }
124
- try {
125
- const scripts = await fs.readdir(path.join(projectPath, 'scripts'));
126
- actScripts = scripts.filter(f => f.endsWith('.sh') || f.endsWith('.js') || f.endsWith('.ts')).length;
127
- }
128
115
  catch { }
129
116
  try {
130
117
  const patternsCont = await fs.readFile(path.join(projectPath, 'docs/solutions/patterns/critical-patterns.md'), 'utf8');
@@ -141,10 +128,6 @@ export async function validateArchitecture(projectPath = '.') {
141
128
  out += `❌ Workflows mismatch: Doc says ${expWorkflows}, Found ${actWorkflows}\n`;
142
129
  fail = true;
143
130
  }
144
- if (actScripts !== expScripts) {
145
- out += `❌ Scripts mismatch: Doc says ${expScripts}, Found ${actScripts}\n`;
146
- fail = true;
147
- }
148
131
  if (actPatterns !== expPatterns) {
149
132
  out += `❌ Patterns mismatch: Doc says ${expPatterns}, Found ${actPatterns}\n`;
150
133
  fail = true;
@@ -2,12 +2,12 @@ import * as fs from 'fs/promises';
2
2
  import { existsSync } from 'fs';
3
3
  import * as path from 'path';
4
4
  import { spawn, execSync } from 'child_process';
5
- const AGENT_DIR = path.resolve('.agent');
5
+ const AGENT_DIR = path.resolve('docs', 'agents');
6
6
  const PID_FILE = path.join(AGENT_DIR, 'preview.pid');
7
7
  const LOG_FILE = path.join(AGENT_DIR, 'preview.log');
8
8
  export async function manageAutoPreview(action, port = 3000) {
9
9
  const root = path.resolve('.');
10
- // Ensure .agent dir exists
10
+ // Ensure docs/agents dir exists
11
11
  if (!existsSync(AGENT_DIR)) {
12
12
  await fs.mkdir(AGENT_DIR, { recursive: true });
13
13
  }
@@ -4,8 +4,8 @@ export async function compoundSearch(terms, projectPath = '.') {
4
4
  const searchDirs = ['docs/solutions', 'docs/explorations', 'docs/decisions'].map(d => path.join(projectPath, d));
5
5
  // Log usage
6
6
  const logLine = `${new Date().toISOString().replace(/\.[0-9]{3}Z$/, 'Z')}|compound-search|manual|${terms.join(',')}\n`;
7
- await fs.mkdir(path.join(projectPath, '.agent', 'logs'), { recursive: true });
8
- await fs.appendFile(path.join(projectPath, '.agent', 'logs', 'compound_usage.log'), logLine);
7
+ await fs.mkdir(path.join(projectPath, 'docs', 'agents', 'logs'), { recursive: true });
8
+ await fs.appendFile(path.join(projectPath, 'docs', 'agents', 'logs', 'compound_usage.log'), logLine);
9
9
  const results = new Set();
10
10
  async function searchInDir(dir) {
11
11
  try {
@@ -54,8 +54,8 @@ export async function updateSolutionRef(files, projectPath = '.') {
54
54
  const today = new Date().toISOString().split('T')[0];
55
55
  let count = 0;
56
56
  const logLine = `${new Date().toISOString().replace(/\.[0-9]{3}Z$/, 'Z')}|update-solution-ref|manual|${files.join(',')}\n`;
57
- await fs.mkdir(path.join(projectPath, '.agent', 'logs'), { recursive: true });
58
- await fs.appendFile(path.join(projectPath, '.agent', 'logs', 'compound_usage.log'), logLine);
57
+ await fs.mkdir(path.join(projectPath, 'docs', 'agents', 'logs'), { recursive: true });
58
+ await fs.appendFile(path.join(projectPath, 'docs', 'agents', 'logs', 'compound_usage.log'), logLine);
59
59
  let output = '';
60
60
  for (const file of files) {
61
61
  const fullPath = path.resolve(projectPath, file);
@@ -66,14 +66,9 @@ export async function checkDocsFreshness(skipDocs = false, projectPath = '.') {
66
66
  let warnings = 0;
67
67
  let output = `🔍 Checking documentation freshness for: ${commitMsg}\n`;
68
68
  const changedFiles = changedFilesStr.split('\n');
69
- const newScripts = changedFiles.filter(f => f.startsWith('scripts/') && f !== 'scripts/README.md');
70
- if (newScripts.length > 0 && !changedFiles.includes('scripts/README.md')) {
71
- output += "⚠️ Scripts modified but scripts/README.md not updated.\n";
72
- warnings++;
73
- }
74
- const newWorkflows = changedFiles.filter(f => f.startsWith('.agent/workflows/') && f !== '.agent/workflows/README.md');
75
- if (newWorkflows.length > 0 && !changedFiles.includes('.agent/workflows/README.md')) {
76
- output += "⚠️ Workflows modified but .agent/workflows/README.md not updated.\n";
69
+ const newWorkflows = changedFiles.filter(f => f.startsWith('skills/workflows/') && f !== 'skills/workflows/README.md');
70
+ if (newWorkflows.length > 0 && !changedFiles.includes('skills/workflows/README.md')) {
71
+ output += "⚠️ Workflows modified but skills/workflows/README.md not updated.\n";
77
72
  warnings++;
78
73
  }
79
74
  const codeChanged = changedFiles.some(f => /^(src|components|lib|app)\//.test(f));
@@ -91,7 +86,7 @@ export async function checkDocsFreshness(skipDocs = false, projectPath = '.') {
91
86
  return output;
92
87
  }
93
88
  export async function discoverUndocumentedFolders(projectPath = '.') {
94
- const roots = ["app", "lib", "backend", "scripts", "src"];
89
+ const roots = ["app", "lib", "backend", "src"];
95
90
  const exclusions = ["node_modules", "__pycache__", ".git", "__tests__", "archive", ".vercel", ".next", "dist"];
96
91
  const undocumented = [];
97
92
  async function scanDir(dir, depth) {
@@ -132,7 +127,7 @@ export async function discoverUndocumentedFolders(projectPath = '.') {
132
127
  return "✅ All key folders have README documentation.";
133
128
  }
134
129
  export async function validateFolderDocs(strict = false, targetFolders = [], projectPath = '.') {
135
- const coreFolders = ["src", "scripts", "docs/solutions", "docs/architecture", ".agent/workflows"];
130
+ const coreFolders = ["src", "docs/solutions", "docs/architecture", "skills/workflows", "agents"];
136
131
  const foldersToCheck = targetFolders.length > 0 ? targetFolders : coreFolders;
137
132
  let exitCode = 0;
138
133
  let output = "🔍 Validating hierarchical documentation...\n";
@@ -1,7 +1,7 @@
1
1
  import * as fs from 'fs/promises';
2
2
  import * as path from 'path';
3
3
  function getLogDir(projectPath = '.') {
4
- return path.join(projectPath, '.agent', 'logs');
4
+ return path.join(projectPath, 'docs', 'agents', 'logs');
5
5
  }
6
6
  export async function logSkill(skill, trigger = 'manual', context = '', projectPath = '.') {
7
7
  // Format timestamp like 2026-03-04T00:12:42Z (ISO 8601 without milliseconds)
@@ -4,11 +4,18 @@ export async function manageSession(command, rootPath = '.') {
4
4
  const root = path.resolve(rootPath);
5
5
  const pkgPath = path.join(root, 'package.json');
6
6
  const getPackageInfo = async () => {
7
+ let name = root.split(path.sep).pop() || 'unnamed';
8
+ let version = '0.0.0';
9
+ const stack = [];
10
+ let scripts = [];
11
+ // 1. Node.js Check
7
12
  try {
8
13
  const data = await fs.readFile(pkgPath, 'utf8');
9
14
  const pkg = JSON.parse(data);
15
+ name = pkg.name || name;
16
+ version = pkg.version || version;
17
+ scripts = Object.keys(pkg.scripts || {});
10
18
  const allDeps = { ...(pkg.dependencies || {}), ...(pkg.devDependencies || {}) };
11
- const stack = [];
12
19
  if (allDeps['next'])
13
20
  stack.push("Next.js");
14
21
  else if (allDeps['react'])
@@ -27,16 +34,69 @@ export async function manageSession(command, rootPath = '.') {
27
34
  stack.push("Prisma");
28
35
  if (allDeps['typescript'])
29
36
  stack.push("TypeScript");
30
- return {
31
- name: pkg.name || 'unnamed',
32
- version: pkg.version || '0.0.0',
33
- stack,
34
- scripts: Object.keys(pkg.scripts || {})
35
- };
37
+ if (stack.length === 0)
38
+ stack.push("Node.js");
36
39
  }
37
- catch (e) {
38
- return { name: root.split(path.sep).pop() || 'unnamed', version: '0.0.0', stack: ['Generic'], scripts: [] };
40
+ catch { }
41
+ // 2. Polyglot Checks
42
+ try {
43
+ const files = await fs.readdir(root);
44
+ // Python
45
+ if (files.includes('requirements.txt') || files.includes('Pipfile') || files.includes('pyproject.toml')) {
46
+ if (!stack.includes("Python"))
47
+ stack.push("Python");
48
+ if (files.includes('manage.py'))
49
+ stack.push("Django");
50
+ if (files.includes('requirements.txt')) {
51
+ const content = await fs.readFile(path.join(root, 'requirements.txt'), 'utf8');
52
+ if (content.includes('flask'))
53
+ stack.push("Flask");
54
+ if (content.includes('fastapi'))
55
+ stack.push("FastAPI");
56
+ if (content.includes('django') && !stack.includes("Django"))
57
+ stack.push("Django");
58
+ }
59
+ }
60
+ // Go
61
+ if (files.includes('go.mod'))
62
+ stack.push("Go");
63
+ // Rust
64
+ if (files.includes('Cargo.toml'))
65
+ stack.push("Rust");
66
+ // Java / Kotlin
67
+ if (files.includes('pom.xml'))
68
+ stack.push("Java (Maven)");
69
+ if (files.includes('build.gradle') || files.includes('build.gradle.kts'))
70
+ stack.push("Java/Kotlin (Gradle)");
71
+ // PHP
72
+ if (files.includes('composer.json')) {
73
+ if (!stack.includes("PHP"))
74
+ stack.push("PHP");
75
+ try {
76
+ const composer = JSON.parse(await fs.readFile(path.join(root, 'composer.json'), 'utf8'));
77
+ const deps = { ...(composer.require || {}), ...(composer['require-dev'] || {}) };
78
+ if (deps['laravel/framework'])
79
+ stack.push("Laravel");
80
+ if (deps['symfony/framework-bundle'])
81
+ stack.push("Symfony");
82
+ }
83
+ catch { }
84
+ }
85
+ // Infrastructure
86
+ if (files.includes('Dockerfile') || files.includes('docker-compose.yml'))
87
+ stack.push("Docker");
88
+ if (files.some(f => f.endsWith('.tf')))
89
+ stack.push("Terraform");
39
90
  }
91
+ catch { }
92
+ if (stack.length === 0)
93
+ stack.push("Generic");
94
+ return {
95
+ name,
96
+ version,
97
+ stack: Array.from(new Set(stack)),
98
+ scripts
99
+ };
40
100
  };
41
101
  const countFiles = async (dir) => {
42
102
  let count = 0;
@@ -91,7 +151,16 @@ export async function manageSession(command, rootPath = '.') {
91
151
  output += `📁 Project: ${info.name}\n`;
92
152
  output += `📂 Path: ${root}\n`;
93
153
  output += `🏷️ Type: ${info.stack.join(', ')}\n`;
94
- output += `📊 Status: Active\n\n`;
154
+ // Check for active todos to determine status
155
+ let status = 'Idle';
156
+ try {
157
+ const todos = await fs.readdir(path.join(root, 'todos'));
158
+ const hasActive = todos.some(f => f.endsWith('.md') && !f.includes('template') && !f.includes('archive'));
159
+ if (hasActive)
160
+ status = 'Active';
161
+ }
162
+ catch { }
163
+ output += `📊 Status: ${status}\n\n`;
95
164
  output += `🔧 Tech Stack:\n`;
96
165
  for (const tech of info.stack)
97
166
  output += ` • ${tech}\n`;
@@ -135,12 +135,12 @@ async function generateSectionFile(sectionPrefix, rules, outputDir, getOutputOnl
135
135
  }
136
136
  export async function runConvertRules(projectPath = ".") {
137
137
  let report = `============================================================\n`;
138
- report += `CONVERSION SCRIPT: React Best Practices -> .agent Format\n`;
138
+ report += `CONVERSION SCRIPT: React Best Practices -> Skills Format\n`;
139
139
  report += `============================================================\n`;
140
140
  const baseDir = path.resolve(projectPath);
141
141
  // Mimic the python logic for paths if run from anywhere, or adapt to superkit structure
142
142
  const rulesDir = path.join(baseDir, "skills", "react-best-practices", "rules");
143
- const outputDir = path.join(baseDir, ".agent", "skills", "react-best-practices");
143
+ const outputDir = path.join(baseDir, "skills", "react-best-practices");
144
144
  report += `[*] Reading rules from: ${rulesDir}\n[*] Output to: ${outputDir}\n\n`;
145
145
  try {
146
146
  const stat = await fs.stat(rulesDir);
@@ -3,7 +3,7 @@ description = "View development rules and coding standards"
3
3
  prompt = """
4
4
  You are a Development Standards Expert. Read and explain the development rules.
5
5
 
6
- @{.agent/workflows/development-rules.md}
6
+ @{skills/workflows/development-rules.md}
7
7
 
8
8
  Based on the rules above, help the user with: {{args}}
9
9
 
@@ -3,7 +3,7 @@ description = "View documentation management guidelines"
3
3
  prompt = """
4
4
  You are a Documentation Manager. Read and explain the documentation guidelines.
5
5
 
6
- @{.agent/workflows/documentation-management.md}
6
+ @{skills/workflows/documentation-management.md}
7
7
 
8
8
  Based on the guidelines above, help the user with: {{args}}
9
9
 
@@ -3,7 +3,7 @@ description = "View agent orchestration protocols"
3
3
  prompt = """
4
4
  You are an Orchestration Expert. Read and explain the orchestration protocols.
5
5
 
6
- @{.agent/workflows/orchestration-protocol.md}
6
+ @{skills/workflows/orchestration-protocol.md}
7
7
 
8
8
  Based on the protocols above, help the user with: {{args}}
9
9
 
@@ -5,7 +5,7 @@ prompt = """
5
5
 
6
6
  Loading status...
7
7
 
8
- {{shell ./scripts/status-dashboard.sh}}
8
+ Call MCP `call_tool_session_manager` { command: "status" }
9
9
 
10
10
  ---
11
11
 
@@ -16,7 +16,7 @@ Loading status...
16
16
  | View specs details | `ls docs/specs/` |
17
17
  | View plans details | `ls plans/` |
18
18
  | View todos details | `ls todos/` |
19
- | Compound health | `./scripts/compound-dashboard.sh` |
19
+ | Compound health | `Call MCP call_tool_compound_manager { action: "dashboard" }` |
20
20
  | Housekeeping | `/housekeeping` |
21
21
 
22
22
  What do you need to do next?
@@ -3,7 +3,7 @@ description = "View primary workflow guide (/cook)"
3
3
  prompt = """
4
4
  You are a Workflow Guide. Read and explain the primary workflow.
5
5
 
6
- @{.agent/workflows/primary-workflow.md}
6
+ @{skills/workflows/primary-workflow.md}
7
7
 
8
8
  Based on the workflow above, help the user with: {{args}}
9
9
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "superkit-mcp-server",
3
- "version": "1.2.2",
3
+ "version": "1.2.4",
4
4
  "type": "module",
5
5
  "description": "An MCP server for exploring and loading Super-Kit AI agent resources.",
6
6
  "main": "build/index.js",
@@ -34,7 +34,7 @@ Call MCP `call_tool_logger_manager` { action: "logSkill", name: "debug", outcome
34
34
 
35
35
  > **"If you haven't reproduced it, you haven't fixed it."**
36
36
 
37
- Always implement a reproduction case (test or script) BEFORE attempting a fix. Highly recommended to use the template at `docs/templates/repro-script-template.sh` and store artifacts in `scripts/repro/`.
37
+ Always implement a reproduction case (test or script) BEFORE attempting a fix. Create a minimal reproduction test case that triggers the issue.
38
38
 
39
39
  > [!NOTE]
40
- > No top-level `debug/` folder exists. Use `skills/debug/` for guidance and `scripts/repro/` for artifacts.
40
+ > No top-level `debug/` folder exists. Use `skills/debug/` for guidance.
@@ -12,9 +12,9 @@ Try to strip away all unnecessary complexity.
12
12
  - Create a test case in a relevant `.test.tsx` file that mimics the user interaction.
13
13
 
14
14
  **Backend:**
15
- - Create a script in `scripts/repro/` that triggers the API call.
15
+ - Create a test case or a minimal script that triggers the API call.
16
16
 
17
- ## 3. Scripts
18
- Use these scripts to help:
19
- - `./scripts/push-env.sh` (Sync environment variables)
17
+ ## 3. Tools
18
+ Use these to help:
20
19
  - `npm run test` (Run unit tests)
20
+ - MCP `call_tool_compound_manager` { action: "search", terms: ["error message keywords"] } (Search for known solutions)
@@ -93,4 +93,4 @@ Consider running this skill when you see:
93
93
  - Todos: `todos/`
94
94
  - Plans: `plans/`
95
95
  - Solutions: `docs/solutions/`
96
- - Workflows: `.agent/workflows/`
96
+ - Workflows: `skills/workflows/`
@@ -18,14 +18,14 @@ Skills are folder-based packages. You can define these scopes based on your need
18
18
 
19
19
  | Scope | Path | Description |
20
20
  | ------------- | --------------------------------- | ------------------------------------ |
21
- | **Workspace** | `<workspace-root>/.agent/skills/` | Available only in a specific project |
21
+ | **Workspace** | `<workspace-root>/skills/` | Available only in a specific project |
22
22
 
23
23
  ### Skill Directory Structure
24
24
 
25
25
  ```
26
26
  my-skill/
27
27
  ├── SKILL.md # (Required) Metadata & instructions
28
- ├── scripts/ # (Optional) Python or Bash scripts
28
+ ├── scripts/ # (Optional) Automation scripts
29
29
  ├── references/ # (Optional) Text, documentation, templates
30
30
  └── assets/ # (Optional) Images or logos
31
31
  ```
@@ -39,7 +39,7 @@ This is an instruction-only skill; you only need to create the `SKILL.md` file.
39
39
  ### Step 1: Create the directory
40
40
 
41
41
  ```bash
42
- mkdir -p .agent/skills/code-review
42
+ mkdir -p skills/code-review
43
43
  ```
44
44
 
45
45
  ### Step 2: Create SKILL.md
@@ -119,12 +119,12 @@ This skill uses a reference file in the `resources/` (or `references/`) director
119
119
  ### Step 1: Create the directory
120
120
 
121
121
  ```bash
122
- mkdir -p .agent/skills/license-header-adder/resources
122
+ mkdir -p skills/license-header-adder/resources
123
123
  ```
124
124
 
125
125
  ### Step 2: Create the template file
126
126
 
127
- **`.agent/skills/license-header-adder/resources/HEADER.txt`**:
127
+ **`skills/license-header-adder/resources/HEADER.txt`**:
128
128
 
129
129
  ```
130
130
  /*
@@ -136,7 +136,7 @@ mkdir -p .agent/skills/license-header-adder/resources
136
136
 
137
137
  ### Step 3: Create SKILL.md
138
138
 
139
- **`.agent/skills/license-header-adder/SKILL.md`**:
139
+ **`skills/license-header-adder/SKILL.md`**:
140
140
 
141
141
  ```markdown
142
142
  ---
@@ -198,7 +198,7 @@ Call MCP `call_tool_todo_manager` { action: "create", priority: "p2", title: "Pr
198
198
 
199
199
  ```bash
200
200
  # Get next ID
201
- next_id=$(./scripts/next-todo-id.sh)
201
+ Call MCP `call_tool_todo_manager` { action: "nextId" }
202
202
 
203
203
  # Create todo from template
204
204
  cp todos/todo-template.md todos/${next_id}-pending-p2-{description}.md
@@ -37,7 +37,7 @@ Call MCP `call_tool_compound_manager` { action: "health" }
37
37
 
38
38
  ### Step 3: Maintenance
39
39
 
40
- 1. **Fix Orphans**: Run `./scripts/update-solution-ref.sh` on solutions you know you've used recently.
40
+ 1. **Fix Orphans**: Run `Call MCP call_tool_compound_manager { action: "updateRef" }` on solutions you know you've used recently.
41
41
  2. **Promote Patterns**: If new pattern candidates are identified, run `/compound`.
42
42
 
43
43
  ### Step 4: Record Status
@@ -15,8 +15,7 @@ skills/{skill-name}/
15
15
  ├── SKILL.md # Router + essential principles (always loaded)
16
16
  ├── workflows/ # Step-by-step procedures
17
17
  ├── references/ # Domain knowledge
18
- ├── templates/ # Output structures
19
- └── scripts/ # Reusable code
18
+ └── templates/ # Output structures
20
19
  ```
21
20
 
22
21
  ## Workflow
@@ -27,7 +27,7 @@ When to use: {trigger conditions}
27
27
  ### Step 2: Create File
28
28
 
29
29
  ```bash
30
- cat > .agent/workflows/{name}.md << 'EOF'
30
+ cat > skills/workflows/{name}.md << 'EOF'
31
31
  ---
32
32
  description: {description}
33
33
  ---
@@ -65,7 +65,7 @@ EOF
65
65
 
66
66
  ```bash
67
67
  # Check file exists
68
- cat .agent/workflows/{name}.md
68
+ cat skills/workflows/{name}.md
69
69
 
70
70
  # Test invocation
71
71
  /{name}
@@ -127,7 +127,7 @@ Check if any deprecated ADRs need review (haven't been reviewed in 6+ months):
127
127
 
128
128
  ```bash
129
129
  // turbo
130
- ./scripts/check-deprecated-adrs.sh
130
+ Call MCP `call_tool_compound_manager` { action: "validate" }
131
131
  ```
132
132
 
133
133
  - If warnings appear, review the deprecated ADR to see if it can be archived or needs an update.
@@ -140,7 +140,7 @@ Prevent log files from growing indefinitely:
140
140
  ```bash
141
141
  # Rotate logs older than 12 weeks
142
142
  // turbo
143
- ./scripts/rotate-logs.sh
143
+ Call MCP `call_tool_logger_manager` { action: "rotateLogs" }
144
144
  ```
145
145
 
146
146
  ### Step 3.8: Documentation Freshness Check
@@ -155,7 +155,7 @@ Call MCP `call_tool_docs_manager` { action: "freshness" }
155
155
  **The script checks:**
156
156
  - [ ] Files changed in last commit have corresponding doc updates
157
157
  - [ ] New scripts are mentioned in README files
158
- - [ ] New workflows are indexed in `.agent/workflows/README.md`
158
+ - [ ] New workflows are indexed in `skills/workflows/README.md`
159
159
 
160
160
  **If warnings appear:** Update docs before pushing.
161
161
 
@@ -53,9 +53,9 @@ Copy the **table** AND the **update command** from the script output into your p
53
53
 
54
54
  Before proceeding to Step 0.5, confirm:
55
55
 
56
- - [ ] Ran `compound-search.sh` with relevant keywords?
56
+ - [ ] Ran MCP `call_tool_compound_manager` { action: "search" } with relevant keywords?
57
57
  - [ ] Reviewed all matching solutions (or confirmed none found)?
58
- - [ ] Ran `update-solution-ref.sh` if reusing any solution?
58
+ - [ ] Ran MCP `call_tool_compound_manager` { action: "updateRef" } if reusing any solution?
59
59
 
60
60
  **If any box is unchecked:** Complete it now. Do NOT proceed.
61
61
 
@@ -33,12 +33,12 @@ Call MCP `call_tool_logger_manager` { action: "logSkill", name: "compound-docs",
33
33
 
34
34
  1. **Check suggestion volume:**
35
35
  ```bash
36
- wc -l .agent/logs/skill_suggestions.csv
36
+ wc -l docs/agents/logs/skill_suggestions.csv
37
37
  ```
38
38
 
39
39
  2. **View unique suggestions:**
40
40
  ```bash
41
- cut -d',' -f1 .agent/logs/skill_suggestions.csv | sort | uniq -c | sort -nr | head -20
41
+ cut -d',' -f1 docs/agents/logs/skill_suggestions.csv | sort | uniq -c | sort -nr | head -20
42
42
  ```
43
43
 
44
44
  3. **Identify candidates:**
@@ -78,7 +78,7 @@ If a clear gap exists:
78
78
  If it's a synonym for an existing skill:
79
79
 
80
80
  1. Add keywords to the existing `SKILL.md` description
81
- 2. Update `.agent/workflows/README.md` to reference it
81
+ 2. Update `skills/workflows/README.md` to reference it
82
82
 
83
83
  #### Option C: Archive (Low Value)
84
84
  If it's noise or one-off:
@@ -95,10 +95,10 @@ If it's noise or one-off:
95
95
 
96
96
  ```bash
97
97
  # Append to archive
98
- cat .agent/logs/skill_suggestions.csv >> .agent/logs/skill_suggestions_archive.csv
98
+ cat docs/agents/logs/skill_suggestions.csv >> docs/agents/logs/skill_suggestions_archive.csv
99
99
 
100
100
  # Clear active log
101
- printf "suggestion,context,count\n" > .agent/logs/skill_suggestions.csv
101
+ printf "suggestion,context,count\n" > docs/agents/logs/skill_suggestions.csv
102
102
  ```
103
103
 
104
104
  ---
@@ -12,7 +12,7 @@ Rapidly process pending work items with strict time-boxing to clear backlog bott
12
12
 
13
13
  - When "Pending Todos" count > 20
14
14
  - Weekly (Fridays) to clear the deck
15
- - When `compound-dashboard.sh` shows "Bottleneck" warning
15
+ - When MCP `call_tool_compound_manager` { action: "dashboard" } shows "Bottleneck" warning
16
16
 
17
17
  ---
18
18
 
@@ -233,8 +233,8 @@ Call MCP `call_tool_todo_manager` { action: "create", priority: "p2", title: "{d
233
233
  **Common docs to update:**
234
234
  | Change Type | Update Target |
235
235
  |-------------|---------------|
236
- | New script | Add to relevant README (e.g., `scripts/README.md`) |
237
- | New workflow | Add to `.agent/workflows/README.md` |
236
+ | New MCP tool | Update relevant README |
237
+ | New workflow | Add to `skills/workflows/README.md` |
238
238
  | New API endpoint | Update API documentation |
239
239
  | New component | Update component docs |
240
240
  | Config change | Update setup/installation docs |