superkit-mcp-server 1.0.1 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ARCHITECTURE.md +2 -3
- package/README.md +1 -0
- package/build/index.js +75 -0
- package/build/tools/autoPreview.js +99 -0
- package/build/tools/checklist.js +120 -0
- package/build/tools/sessionManager.js +107 -0
- package/build/tools/validators/__tests__/apiSchema.test.js +77 -0
- package/build/tools/validators/__tests__/convertRules.test.js +38 -0
- package/build/tools/validators/__tests__/frontendDesign.test.js +55 -0
- package/build/tools/validators/__tests__/geoChecker.test.js +45 -0
- package/build/tools/validators/__tests__/i18nChecker.test.js +32 -0
- package/build/tools/validators/__tests__/lintRunner.test.js +65 -0
- package/build/tools/validators/__tests__/mobileAudit.test.js +40 -0
- package/build/tools/validators/__tests__/playwrightRunner.test.js +55 -0
- package/build/tools/validators/__tests__/reactPerformanceChecker.test.js +49 -0
- package/build/tools/validators/__tests__/securityScan.test.js +42 -0
- package/build/tools/validators/__tests__/seoChecker.test.js +44 -0
- package/build/tools/validators/__tests__/testRunner.test.js +49 -0
- package/build/tools/validators/__tests__/typeCoverage.test.js +62 -0
- package/build/tools/validators/accessibilityChecker.js +124 -0
- package/build/tools/validators/apiValidator.js +140 -0
- package/build/tools/validators/convertRules.js +170 -0
- package/build/tools/validators/geoChecker.js +176 -0
- package/build/tools/validators/i18nChecker.js +205 -0
- package/build/tools/validators/lighthouseAudit.js +50 -0
- package/build/tools/validators/lintRunner.js +106 -0
- package/build/tools/validators/mobileAudit.js +190 -0
- package/build/tools/validators/playwrightRunner.js +101 -0
- package/build/tools/validators/reactPerformanceChecker.js +199 -0
- package/build/tools/validators/schemaValidator.js +105 -0
- package/build/tools/validators/securityScan.js +215 -0
- package/build/tools/validators/seoChecker.js +122 -0
- package/build/tools/validators/testRunner.js +111 -0
- package/build/tools/validators/typeCoverage.js +150 -0
- package/build/tools/validators/uxAudit.js +222 -0
- package/build/tools/verifyAll.js +159 -0
- package/package.json +5 -3
- package/skills/tech/api-patterns/SKILL.md +1 -1
- package/skills/tech/clean-code/SKILL.md +14 -14
- package/skills/tech/doc.md +3 -3
- package/skills/tech/frontend-design/SKILL.md +1 -1
- package/skills/tech/geo-fundamentals/SKILL.md +1 -1
- package/skills/tech/i18n-localization/SKILL.md +1 -1
- package/skills/tech/lint-and-validate/SKILL.md +2 -2
- package/skills/tech/mobile-design/SKILL.md +1 -1
- package/skills/tech/nextjs-react-expert/SKILL.md +1 -1
- package/skills/tech/parallel-agents/SKILL.md +3 -3
- package/skills/tech/performance-profiling/SKILL.md +1 -1
- package/skills/tech/vulnerability-scanner/SKILL.md +1 -1
- package/skills/tech/webapp-testing/SKILL.md +3 -3
- package/workflows/review-compound.md +1 -1
- package/skills/tech/api-patterns/scripts/api_validator.py +0 -211
- package/skills/tech/database-design/scripts/schema_validator.py +0 -172
- package/skills/tech/frontend-design/scripts/accessibility_checker.py +0 -183
- package/skills/tech/frontend-design/scripts/ux_audit.py +0 -722
- package/skills/tech/geo-fundamentals/scripts/geo_checker.py +0 -289
- package/skills/tech/i18n-localization/scripts/i18n_checker.py +0 -241
- package/skills/tech/lint-and-validate/scripts/lint_runner.py +0 -184
- package/skills/tech/lint-and-validate/scripts/type_coverage.py +0 -173
- package/skills/tech/mobile-design/scripts/mobile_audit.py +0 -670
- package/skills/tech/nextjs-react-expert/scripts/convert_rules.py +0 -222
- package/skills/tech/nextjs-react-expert/scripts/react_performance_checker.py +0 -252
- package/skills/tech/performance-profiling/scripts/lighthouse_audit.py +0 -76
- package/skills/tech/seo-fundamentals/scripts/seo_checker.py +0 -219
- package/skills/tech/testing-patterns/scripts/test_runner.py +0 -219
- package/skills/tech/vulnerability-scanner/scripts/security_scan.py +0 -458
- package/skills/tech/webapp-testing/scripts/playwright_runner.py +0 -173
package/ARCHITECTURE.md
CHANGED
|
@@ -22,7 +22,6 @@ super-kit/
|
|
|
22
22
|
├── SUPERKIT.md # Global rules and activation protocol
|
|
23
23
|
├── .core/ # Core engine-independent logic
|
|
24
24
|
│ ├── rules/ # Universal mandates (e.g., clean-code, security-first)
|
|
25
|
-
│ └── scripts/ # Master validation (checklist.py, auto_preview.py)
|
|
26
25
|
├── agents/ # The T-Shaped AI Team Personas
|
|
27
26
|
├── skills/ # The Knowledge Modules
|
|
28
27
|
│ ├── meta/ # Session-resume, compound-docs, file-todos (from gemini-kit)
|
|
@@ -84,7 +83,7 @@ The primary operating directive for building sustainable software:
|
|
|
84
83
|
1. **Explore**: Investigate the codebase and gather requirements.
|
|
85
84
|
2. **Plan**: Write the task boundaries and solution architecture.
|
|
86
85
|
3. **Work**: The Core Team executes code generation.
|
|
87
|
-
4. **Review**: Automated auditing via
|
|
86
|
+
4. **Review**: Automated auditing via `src/tools/checklist.ts`.
|
|
88
87
|
5. **Compound**: Output reusable solutions to `docs/solutions/`.
|
|
89
88
|
|
|
90
89
|
## ⚙️ Usage & Agnosticism
|
|
@@ -94,7 +93,7 @@ The entry point is reading `SUPERKIT.md` to establish global rules.
|
|
|
94
93
|
|
|
95
94
|
### How to Point Changing Agents to the Entrypoint:
|
|
96
95
|
|
|
97
|
-
- **
|
|
96
|
+
- **Standard Agents**: Reference `@SUPERKIT.md` in your prompt, or set its content as your persistent user rules for the project.
|
|
98
97
|
- **Cursor/Windsurf**: Reference `@SUPERKIT.md` in your Composer/Chat, or set the content of `SUPERKIT.md` as your project's `.cursorrules` / `.windsurfrules`.
|
|
99
98
|
- **Cline (VS Code)**: Set the content of `SUPERKIT.md` as your custom instructions in the Cline settings, or add an `@SUPERKIT.md` command in your prompt.
|
|
100
99
|
- **Gemini / Google AI Studio**: Supply `SUPERKIT.md` as your System Prompt instructions.
|
package/README.md
CHANGED
|
@@ -26,6 +26,7 @@ npx -y superkit-mcp-server
|
|
|
26
26
|
- **`load_superkit_agent`**: Loads the system instructions and guidelines for a specific specialist agent (e.g., `data-engineer`).
|
|
27
27
|
- **`load_superkit_skill`**: Loads the skill instructions (`SKILL.md`) for a specific category and skill (e.g., category: `tech`, name: `react-best-practices`).
|
|
28
28
|
- **`load_superkit_workflow`**: Loads the instructions for a specific slash-command workflow (e.g., `work`, `explore`).
|
|
29
|
+
- **`call_tool_checklist`**: Executes the native TypeScript validation suite (security, web accessibility, react performance, testing, API structure) on a target project location via the MCP environment instead of generic bash loops.
|
|
29
30
|
|
|
30
31
|
## Manual Installation and Configuration
|
|
31
32
|
|
package/build/index.js
CHANGED
|
@@ -5,6 +5,10 @@ import { CallToolRequestSchema, ListToolsRequestSchema, } from "@modelcontextpro
|
|
|
5
5
|
import * as path from "path";
|
|
6
6
|
import * as fs from "fs/promises";
|
|
7
7
|
import { fileURLToPath } from "url";
|
|
8
|
+
import { manageAutoPreview } from "./tools/autoPreview.js";
|
|
9
|
+
import { manageSession } from "./tools/sessionManager.js";
|
|
10
|
+
import { runChecklist } from "./tools/checklist.js";
|
|
11
|
+
import { runVerifyAll } from "./tools/verifyAll.js";
|
|
8
12
|
const __filename = fileURLToPath(import.meta.url);
|
|
9
13
|
const __dirname = path.dirname(__filename);
|
|
10
14
|
const superKitRoot = path.resolve(__dirname, "../");
|
|
@@ -37,6 +41,57 @@ function getSafePath(basePath, relativePath) {
|
|
|
37
41
|
return resolvedPath;
|
|
38
42
|
}
|
|
39
43
|
const TOOLS = [
|
|
44
|
+
{
|
|
45
|
+
name: "call_tool_auto_preview",
|
|
46
|
+
description: "Manages (start/stop/status) the local development server for previewing the application.",
|
|
47
|
+
inputSchema: {
|
|
48
|
+
type: "object",
|
|
49
|
+
properties: {
|
|
50
|
+
action: { type: "string", enum: ["start", "stop", "status"] },
|
|
51
|
+
port: { type: "number", default: 3000 }
|
|
52
|
+
},
|
|
53
|
+
required: ["action"],
|
|
54
|
+
}
|
|
55
|
+
},
|
|
56
|
+
{
|
|
57
|
+
name: "call_tool_session_manager",
|
|
58
|
+
description: "Analyzes project state, detects tech stack, tracks file statistics, and provides a summary.",
|
|
59
|
+
inputSchema: {
|
|
60
|
+
type: "object",
|
|
61
|
+
properties: {
|
|
62
|
+
command: { type: "string", enum: ["status", "info"] },
|
|
63
|
+
path: { type: "string", default: "." }
|
|
64
|
+
},
|
|
65
|
+
required: ["command"],
|
|
66
|
+
}
|
|
67
|
+
},
|
|
68
|
+
{
|
|
69
|
+
name: "call_tool_checklist",
|
|
70
|
+
description: "Orchestrates validation scripts in priority order for incremental validation during development.",
|
|
71
|
+
inputSchema: {
|
|
72
|
+
type: "object",
|
|
73
|
+
properties: {
|
|
74
|
+
projectPath: { type: "string", default: "." },
|
|
75
|
+
url: { type: "string" },
|
|
76
|
+
skipPerformance: { type: "boolean", default: false }
|
|
77
|
+
},
|
|
78
|
+
required: ["projectPath"]
|
|
79
|
+
}
|
|
80
|
+
},
|
|
81
|
+
{
|
|
82
|
+
name: "call_tool_verify_all",
|
|
83
|
+
description: "Runs COMPLETE validation including all checks + performance + E2E for deployment.",
|
|
84
|
+
inputSchema: {
|
|
85
|
+
type: "object",
|
|
86
|
+
properties: {
|
|
87
|
+
projectPath: { type: "string", default: "." },
|
|
88
|
+
url: { type: "string" },
|
|
89
|
+
skipE2E: { type: "boolean", default: false },
|
|
90
|
+
stopOnFail: { type: "boolean", default: false }
|
|
91
|
+
},
|
|
92
|
+
required: ["projectPath", "url"]
|
|
93
|
+
}
|
|
94
|
+
},
|
|
40
95
|
{
|
|
41
96
|
name: "list_superkit_assets",
|
|
42
97
|
description: "Lists all available agents, skills, and workflows in the Super-Kit repository.",
|
|
@@ -98,6 +153,26 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
|
|
|
98
153
|
});
|
|
99
154
|
server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
100
155
|
try {
|
|
156
|
+
if (request.params.name === "call_tool_auto_preview") {
|
|
157
|
+
const args = request.params.arguments;
|
|
158
|
+
const res = await manageAutoPreview(args.action, args.port);
|
|
159
|
+
return { content: [{ type: "text", text: res }] };
|
|
160
|
+
}
|
|
161
|
+
if (request.params.name === "call_tool_session_manager") {
|
|
162
|
+
const args = request.params.arguments;
|
|
163
|
+
const res = await manageSession(args.command, args.path);
|
|
164
|
+
return { content: [{ type: "text", text: res }] };
|
|
165
|
+
}
|
|
166
|
+
if (request.params.name === "call_tool_checklist") {
|
|
167
|
+
const args = request.params.arguments;
|
|
168
|
+
const res = await runChecklist(args.projectPath, args.url, args.skipPerformance);
|
|
169
|
+
return { content: [{ type: "text", text: res }] };
|
|
170
|
+
}
|
|
171
|
+
if (request.params.name === "call_tool_verify_all") {
|
|
172
|
+
const args = request.params.arguments;
|
|
173
|
+
const res = await runVerifyAll(args.projectPath, args.url, args.skipE2E, args.stopOnFail);
|
|
174
|
+
return { content: [{ type: "text", text: res }] };
|
|
175
|
+
}
|
|
101
176
|
if (request.params.name === "list_superkit_assets") {
|
|
102
177
|
const agentsPath = path.join(superKitRoot, "agents");
|
|
103
178
|
const skillsTechPath = path.join(superKitRoot, "skills", "tech");
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
import * as fs from 'fs/promises';
import { existsSync } from 'fs';
import * as path from 'path';
import { spawn, execSync } from 'child_process';

const AGENT_DIR = path.resolve('.agent');
const PID_FILE = path.join(AGENT_DIR, 'preview.pid');
const LOG_FILE = path.join(AGENT_DIR, 'preview.log');

/**
 * Manages the local development preview server (start/stop/status).
 * State is kept in `.agent/preview.pid`; server output goes to
 * `.agent/preview.log`.
 *
 * @param {'start'|'stop'|'status'} action - Lifecycle operation to perform.
 * @param {number} [port=3000] - Port passed to the dev server via $PORT.
 * @returns {Promise<string>} Human-readable status message.
 */
export async function manageAutoPreview(action, port = 3000) {
    const root = path.resolve('.');
    // Ensure .agent dir exists
    if (!existsSync(AGENT_DIR)) {
        await fs.mkdir(AGENT_DIR, { recursive: true });
    }
    // Signal 0 performs a liveness probe without actually delivering a signal.
    const isRunning = (pid) => {
        try {
            process.kill(pid, 0);
            return true;
        }
        catch (e) {
            return false;
        }
    };
    if (action === 'start') {
        if (existsSync(PID_FILE)) {
            const pid = Number.parseInt(await fs.readFile(PID_FILE, 'utf-8'), 10);
            if (isRunning(pid)) {
                return `⚠️ Preview already running (PID: ${pid})`;
            }
        }
        const pkgPath = path.join(root, 'package.json');
        if (!existsSync(pkgPath)) {
            return "❌ No package.json found";
        }
        const pkg = JSON.parse(await fs.readFile(pkgPath, 'utf-8'));
        const scripts = pkg.scripts || {};
        // npm sometimes behaves differently on Windows, using shell helps
        let cmd = '';
        let args = [];
        if (scripts['dev']) {
            cmd = 'npm';
            args = ['run', 'dev'];
        }
        else if (scripts['start']) {
            cmd = 'npm';
            args = ['start'];
        }
        else {
            return "❌ No 'dev' or 'start' script found in package.json";
        }
        // FIX: pass a real file descriptor to stdio instead of a just-created
        // WriteStream. spawn() needs the stream's underlying fd, which is not
        // available until the stream's 'open' event has fired, so the previous
        // approach raced (file streams have no fd synchronously).
        const logHandle = await fs.open(LOG_FILE, 'w');
        const child = spawn(cmd, args, {
            cwd: root,
            env: { ...process.env, PORT: port.toString() },
            shell: true,
            detached: true,
            stdio: ['ignore', logHandle.fd, logHandle.fd]
        });
        child.unref();
        // The fd is duplicated into the child, so the parent copy can close.
        await logHandle.close();
        if (child.pid === undefined) {
            return "❌ Failed to spawn preview process";
        }
        await fs.writeFile(PID_FILE, child.pid.toString());
        return `✅ Preview started! (PID: ${child.pid})\n Logs: ${LOG_FILE}\n URL: http://localhost:${port}`;
    }
    if (action === 'stop') {
        if (!existsSync(PID_FILE)) {
            return "ℹ️ No preview server found.";
        }
        const pid = Number.parseInt(await fs.readFile(PID_FILE, 'utf-8'), 10);
        if (isRunning(pid)) {
            try {
                process.kill(pid, process.platform === 'win32' ? 'SIGINT' : 'SIGTERM');
                await fs.unlink(PID_FILE).catch(() => { });
                return `🛑 Preview stopped (PID: ${pid})`;
            }
            catch (e) {
                // Windows fallback: force-kill the whole process tree.
                if (process.platform === 'win32') {
                    execSync(`taskkill /F /T /PID ${pid}`);
                    await fs.unlink(PID_FILE).catch(() => { });
                    return `🛑 Preview stopped via taskkill (PID: ${pid})`;
                }
                return `❌ Error stopping server: ${e.message}`;
            }
        }
        await fs.unlink(PID_FILE).catch(() => { });
        return "ℹ️ Process was not running, removed stale PID file.";
    }
    if (action === 'status') {
        let running = false;
        let pid = null;
        if (existsSync(PID_FILE)) {
            pid = Number.parseInt(await fs.readFile(PID_FILE, 'utf-8'), 10);
            if (isRunning(pid))
                running = true;
        }
        if (running) {
            return `✅ Status: Running\n🔢 PID: ${pid}\n🌐 URL: http://localhost:${port}\n📝 Logs: ${LOG_FILE}`;
        }
        return "⚪ Status: Stopped";
    }
    return "❌ Invalid action.";
}
|
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
import * as path from 'path';
|
|
2
|
+
import { spawn } from 'child_process';
|
|
3
|
+
import { existsSync } from 'fs';
|
|
4
|
+
import * as fs from 'fs/promises';
|
|
5
|
+
import { runSecurityScan } from './validators/securityScan.js';
|
|
6
|
+
import { runLintRunner } from './validators/lintRunner.js';
|
|
7
|
+
import { runTypeCoverage } from './validators/typeCoverage.js';
|
|
8
|
+
import { runTestRunner } from './validators/testRunner.js';
|
|
9
|
+
import { runSchemaValidator } from './validators/schemaValidator.js';
|
|
10
|
+
import { runApiValidator } from './validators/apiValidator.js';
|
|
11
|
+
import { runUxAudit } from './validators/uxAudit.js';
|
|
12
|
+
import { runAccessibilityChecker } from './validators/accessibilityChecker.js';
|
|
13
|
+
import { runGeoChecker } from './validators/geoChecker.js';
|
|
14
|
+
import { runI18nChecker } from './validators/i18nChecker.js';
|
|
15
|
+
import { runMobileAudit } from './validators/mobileAudit.js';
|
|
16
|
+
import { runReactPerformanceChecker } from './validators/reactPerformanceChecker.js';
|
|
17
|
+
import { runSeoChecker } from './validators/seoChecker.js';
|
|
18
|
+
import { runLighthouseAudit } from './validators/lighthouseAudit.js';
|
|
19
|
+
import { runPlaywrightTest } from './validators/playwrightRunner.js';
|
|
20
|
+
/**
 * Checks whether the project's package.json declares the given npm script.
 * Returns false when the manifest is missing, unreadable, or the script is
 * absent or empty.
 */
async function hasScript(rootPath, scriptName) {
    const manifestPath = path.join(rootPath, 'package.json');
    if (!existsSync(manifestPath)) {
        return false;
    }
    try {
        const { scripts = {} } = JSON.parse(await fs.readFile(manifestPath, 'utf8'));
        return Boolean(scripts[scriptName]);
    }
    catch {
        return false;
    }
}
|
|
32
|
+
/**
 * Runs a shell command in the project directory, capturing stdout/stderr.
 * Never rejects: resolves `{ passed, report }` on exit, spawn error, or a
 * 5-minute timeout.
 *
 * @param {string} name - Display name used in the report.
 * @param {string} command - Executable to run (via the shell).
 * @param {string[]} args - Command arguments.
 * @param {string} projectPath - Working directory for the command.
 * @returns {Promise<{passed: boolean, report: string}>}
 */
async function runCommand(name, command, args, projectPath) {
    return new Promise((resolve) => {
        const child = spawn(command, args, { cwd: projectPath, shell: true });
        let out = '';
        let err = '';
        child.stdout.on('data', (data) => (out += data.toString()));
        child.stderr.on('data', (data) => (err += data.toString()));
        // FIX: keep a handle on the watchdog timer and clear it on exit.
        // Previously the timer was never cleared, so it kept the Node event
        // loop alive for the full 5 minutes after every command finished
        // (and raced a second, contradictory resolve()).
        const timer = setTimeout(() => {
            child.kill();
            resolve({ passed: false, report: `❌ ${name}: TIMED OUT\n${out}\n${err}` });
        }, 5 * 60 * 1000);
        child.on('close', (code) => {
            clearTimeout(timer);
            const passed = code === 0;
            const report = passed ? `✅ ${name}: PASSED\n${out}` : `❌ ${name}: FAILED\n${err || out}`;
            resolve({ passed, report });
        });
        // Spawn failures (e.g. missing shell) would otherwise leave the
        // promise pending until the timeout.
        child.on('error', (e) => {
            clearTimeout(timer);
            resolve({ passed: false, report: `❌ ${name}: FAILED\n${e.message}` });
        });
    });
}
|
|
50
|
+
/**
 * Master checklist orchestrator: runs every native validator against the
 * project in priority order and returns one formatted text report.
 *
 * Validators are expected to resolve `{ passed, report, skipped? }`.
 * A failing check marked `required` stops all subsequent checks.
 *
 * @param {string} projectPath - Root of the project to validate.
 * @param {string|undefined} url - Running app URL; when absent, the
 *   performance/E2E phase is skipped with a note in the report.
 * @param {boolean} [skipPerformance=false] - Skip Lighthouse/E2E even when a
 *   URL is provided.
 * @returns {Promise<string>} Human-readable report ending in a summary.
 */
export async function runChecklist(projectPath, url, skipPerformance = false) {
    const root = path.resolve(projectPath);
    let masterOutput = `🚀 SUPER KIT - MASTER CHECKLIST\nProject: ${root}\nURL: ${url || 'None'}\n\n📋 CORE CHECKS\n`;
    let passedCount = 0;
    let failedCount = 0;
    let skippedCount = 0;
    // Set when a `required` check fails; later checks are short-circuited.
    let hasCriticalFailure = false;
    // Runs one validator, appends its section to the report, and updates the
    // counters. `required` promotes a failure to a checklist-stopping one.
    const runAndFormat = async (name, required, fn) => {
        if (hasCriticalFailure)
            return;
        try {
            const result = await fn();
            if (result.skipped) {
                skippedCount++;
                masterOutput += `\n--- [${name}] ---\n⏭️ Skipped: ${result.report}\n`;
                return;
            }
            masterOutput += `\n--- [${name}] ---\n${result.report}\n`;
            if (result.passed) {
                passedCount++;
            }
            else {
                failedCount++;
                if (required) {
                    hasCriticalFailure = true;
                    masterOutput += `CRITICAL: ${name} failed. Stopping checklist.\n`;
                }
            }
        }
        catch (e) {
            // A thrown error (as opposed to a failed result) is reported
            // inline and counted as a failure of this check.
            masterOutput += `\n--- [${name}] ---\n❌ Error executing check: ${e.message}\n`;
            failedCount++;
            if (required)
                hasCriticalFailure = true;
        }
    };
    // Replace CLI calls using exact TS equivalents
    // Only "Lint Check" is required in the core phase; everything else is
    // advisory and cannot stop the run.
    await runAndFormat("Security Scan", false, () => runSecurityScan(root));
    await runAndFormat("Lint Check", true, () => runLintRunner(root));
    await runAndFormat("Type Coverage", false, () => runTypeCoverage(root));
    await runAndFormat("Test Runner", false, () => runTestRunner(root, false));
    await runAndFormat("Schema Validator", false, () => runSchemaValidator(root));
    await runAndFormat("API Validator", false, () => runApiValidator(root));
    await runAndFormat("UX Audit", false, () => runUxAudit(root));
    await runAndFormat("Accessibility Checker", false, () => runAccessibilityChecker(root));
    await runAndFormat("GEO Checker", false, () => runGeoChecker(root));
    await runAndFormat("i18n Checker", false, () => runI18nChecker(root));
    await runAndFormat("Mobile Audit", false, () => runMobileAudit(root));
    await runAndFormat("React Performance", false, () => runReactPerformanceChecker(root));
    await runAndFormat("SEO Checker", false, () => runSeoChecker(root));
    // Build check (still executes package.json script)
    await runAndFormat("Build Validation", false, async () => {
        if (await hasScript(root, 'build'))
            return runCommand("Build Validation", 'npm', ['run', 'build'], root);
        return { passed: true, skipped: true, report: `Script 'build' not found in package.json` };
    });
    if (url && !skipPerformance && !hasCriticalFailure) {
        masterOutput += `\n⚡ PERFORMANCE & E2E CHECKS\n`;
        await runAndFormat("Lighthouse / Performance", true, () => runLighthouseAudit(url));
        // Playwright returns its own result shape; adapt it to {passed, report}.
        await runAndFormat("E2E Tests", false, () => runPlaywrightTest(url).then(res => ({
            passed: res.status === 'success',
            report: JSON.stringify(res, null, 2)
        })));
    }
    else if (!url && !hasCriticalFailure) {
        masterOutput += `\n⏭️ Skipping Performance & E2E checks (No URL provided)\n`;
    }
    masterOutput += `\n📊 SUMMARY\nPassed: ${passedCount}\nFailed: ${failedCount}\nSkipped: ${skippedCount}\n\n`;
    masterOutput += failedCount > 0 ? `❌ Checks FAILED - Attention required.` : `✅ All checks PASSED!`;
    return masterOutput;
}
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
import * as fs from 'fs/promises';
|
|
2
|
+
import * as path from 'path';
|
|
3
|
+
export async function manageSession(command, rootPath = '.') {
|
|
4
|
+
const root = path.resolve(rootPath);
|
|
5
|
+
const pkgPath = path.join(root, 'package.json');
|
|
6
|
+
const getPackageInfo = async () => {
|
|
7
|
+
try {
|
|
8
|
+
const data = await fs.readFile(pkgPath, 'utf8');
|
|
9
|
+
const pkg = JSON.parse(data);
|
|
10
|
+
const allDeps = { ...(pkg.dependencies || {}), ...(pkg.devDependencies || {}) };
|
|
11
|
+
const stack = [];
|
|
12
|
+
if (allDeps['next'])
|
|
13
|
+
stack.push("Next.js");
|
|
14
|
+
else if (allDeps['react'])
|
|
15
|
+
stack.push("React");
|
|
16
|
+
else if (allDeps['vue'])
|
|
17
|
+
stack.push("Vue");
|
|
18
|
+
else if (allDeps['svelte'])
|
|
19
|
+
stack.push("Svelte");
|
|
20
|
+
else if (allDeps['express'])
|
|
21
|
+
stack.push("Express");
|
|
22
|
+
else if (allDeps['nestjs'] || allDeps['@nestjs/core'])
|
|
23
|
+
stack.push("NestJS");
|
|
24
|
+
if (allDeps['tailwindcss'])
|
|
25
|
+
stack.push("Tailwind CSS");
|
|
26
|
+
if (allDeps['prisma'])
|
|
27
|
+
stack.push("Prisma");
|
|
28
|
+
if (allDeps['typescript'])
|
|
29
|
+
stack.push("TypeScript");
|
|
30
|
+
return {
|
|
31
|
+
name: pkg.name || 'unnamed',
|
|
32
|
+
version: pkg.version || '0.0.0',
|
|
33
|
+
stack,
|
|
34
|
+
scripts: Object.keys(pkg.scripts || {})
|
|
35
|
+
};
|
|
36
|
+
}
|
|
37
|
+
catch (e) {
|
|
38
|
+
return { name: root.split(path.sep).pop() || 'unnamed', version: '0.0.0', stack: ['Generic'], scripts: [] };
|
|
39
|
+
}
|
|
40
|
+
};
|
|
41
|
+
const countFiles = async (dir) => {
|
|
42
|
+
let count = 0;
|
|
43
|
+
const exclude = new Set(['.git', 'node_modules', '.next', 'dist', 'build', '.agent', '.gemini', '__pycache__']);
|
|
44
|
+
try {
|
|
45
|
+
const items = await fs.readdir(dir, { withFileTypes: true });
|
|
46
|
+
for (const item of items) {
|
|
47
|
+
if (exclude.has(item.name))
|
|
48
|
+
continue;
|
|
49
|
+
if (item.isDirectory()) {
|
|
50
|
+
count += await countFiles(path.join(dir, item.name));
|
|
51
|
+
}
|
|
52
|
+
else {
|
|
53
|
+
count++;
|
|
54
|
+
}
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
catch { }
|
|
58
|
+
return count;
|
|
59
|
+
};
|
|
60
|
+
const detectFeatures = async () => {
|
|
61
|
+
const features = [];
|
|
62
|
+
const srcPath = path.join(root, 'src');
|
|
63
|
+
const possibleDirs = ["components", "modules", "features", "app", "pages", "services"];
|
|
64
|
+
try {
|
|
65
|
+
const srcExists = await fs.stat(srcPath).then(s => s.isDirectory()).catch(() => false);
|
|
66
|
+
if (srcExists) {
|
|
67
|
+
for (const d of possibleDirs) {
|
|
68
|
+
const p = path.join(srcPath, d);
|
|
69
|
+
const pExists = await fs.stat(p).then(s => s.isDirectory()).catch(() => false);
|
|
70
|
+
if (pExists) {
|
|
71
|
+
const children = await fs.readdir(p, { withFileTypes: true });
|
|
72
|
+
for (const child of children) {
|
|
73
|
+
if (child.isDirectory())
|
|
74
|
+
features.push(child.name);
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
catch { }
|
|
81
|
+
return features.slice(0, 10);
|
|
82
|
+
};
|
|
83
|
+
const info = await getPackageInfo();
|
|
84
|
+
if (command === 'info') {
|
|
85
|
+
return JSON.stringify(info, null, 2);
|
|
86
|
+
}
|
|
87
|
+
if (command === 'status') {
|
|
88
|
+
const fileCount = await countFiles(root);
|
|
89
|
+
const features = await detectFeatures();
|
|
90
|
+
let output = `=== Project Status ===\n\n`;
|
|
91
|
+
output += `📁 Project: ${info.name}\n`;
|
|
92
|
+
output += `📂 Path: ${root}\n`;
|
|
93
|
+
output += `🏷️ Type: ${info.stack.join(', ')}\n`;
|
|
94
|
+
output += `📊 Status: Active\n\n`;
|
|
95
|
+
output += `🔧 Tech Stack:\n`;
|
|
96
|
+
for (const tech of info.stack)
|
|
97
|
+
output += ` • ${tech}\n`;
|
|
98
|
+
output += `\n✅ Detected Modules/Features (${features.length}):\n`;
|
|
99
|
+
if (features.length === 0)
|
|
100
|
+
output += ` (No distinct feature modules detected)\n`;
|
|
101
|
+
for (const feat of features)
|
|
102
|
+
output += ` • ${feat}\n`;
|
|
103
|
+
output += `\n📄 Files: ${fileCount} total files tracked\n`;
|
|
104
|
+
return output;
|
|
105
|
+
}
|
|
106
|
+
return "❌ Invalid command";
|
|
107
|
+
}
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
// Unit tests for schemaValidator and apiValidator.
// fs/promises is fully mocked, so no real files are read.
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { validatePrismaSchema } from '../schemaValidator.js';
import { checkApiCode, checkOpenApiSpec } from '../apiValidator.js';
import * as fs from 'fs/promises';
vi.mock('fs/promises');
describe('schemaValidator', () => {
    beforeEach(() => {
        vi.clearAllMocks();
    });
    describe('validatePrismaSchema', () => {
        it('should detect bad model names and missing fields', async () => {
            // Fixture: lowercase model/enum names, a model without createdAt,
            // and a foreign key (userId) with no matching @@index.
            vi.mocked(fs.readFile).mockResolvedValue(`
model user {
name String
}
model Post {
id String @id
userId String
createdAt DateTime
}
enum role { ADMIN, USER }
`);
            const issues = await validatePrismaSchema('/mock.prisma');
            expect(issues.some(i => i.includes("Model 'user' should be PascalCase"))).toBe(true);
            expect(issues.some(i => i.includes("Enum 'role' should be PascalCase"))).toBe(true);
            expect(issues.some(i => i.includes("missing createdAt"))).toBe(true); // for user
            expect(issues.some(i => i.includes("adding @@index([userId])"))).toBe(true); // for Post
        });
    });
});
describe('apiValidator', () => {
    beforeEach(() => {
        vi.clearAllMocks();
    });
    describe('checkOpenApiSpec', () => {
        it('should validate openapi json', async () => {
            // Minimal but fully-described spec: expected to yield zero issues.
            vi.mocked(fs.readFile).mockResolvedValue(JSON.stringify({
                openapi: "3.0.0",
                info: { title: "Test", version: "1", description: "Test API" },
                paths: {
                    "/test": { get: { responses: { 200: {} }, description: "desc" } }
                }
            }));
            const res = await checkOpenApiSpec('api.json');
            expect(res.issues.length).toBe(0);
        });
    });
    describe('checkApiCode', () => {
        it('should detect missing api practices', async () => {
            // Handler with no try/catch, no explicit status codes, no auth.
            vi.mocked(fs.readFile).mockResolvedValue(`
function handler() {
// no try, no status, no security check
return "hello";
}
`);
            const res = await checkApiCode('route.ts');
            expect(res.issues.some(i => i.includes('No error handling'))).toBe(true);
            expect(res.issues.some(i => i.includes('No explicit HTTP status'))).toBe(true);
            expect(res.passed.length).toBe(0);
        });
        it('should pass good practices', async () => {
            // Handler exercising zod validation, try/catch, jwt and status().
            vi.mocked(fs.readFile).mockResolvedValue(`
import { z } from 'zod';
function handler(req, res) {
try {
const jwtToken = "123";
return res.status(200).send("hello");
} catch(e) {}
}
`);
            const res = await checkApiCode('route.ts');
            expect(res.passed.some(i => i.includes('Error handling'))).toBe(true);
            expect(res.passed.some(i => i.includes('validation present'))).toBe(true);
            expect(res.passed.some(i => i.includes('status codes used'))).toBe(true);
        });
    });
});
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
// Unit tests for convertRules; fs/promises is mocked so no files are touched.
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { runConvertRules } from '../convertRules.js';
import * as fs from 'fs/promises';
vi.mock('fs/promises');
describe('convertRules', () => {
    beforeEach(() => {
        vi.clearAllMocks();
    });
    it('should fail if the rules directory does not exist', async () => {
        vi.mocked(fs.stat).mockRejectedValue(new Error('ENOENT'));
        const res = await runConvertRules('.');
        expect(res.passed).toBe(false);
        expect(res.report).toContain('[ERROR] Rules directory not found');
    });
    it('should correctly parse frontmatter and generate rules', async () => {
        // One rule file with YAML frontmatter (title/impact/tags) and a body.
        vi.mocked(fs.stat).mockResolvedValue({ isDirectory: () => true });
        vi.mocked(fs.readdir).mockResolvedValue(['async-waterfall.md']);
        vi.mocked(fs.readFile).mockResolvedValue(`---
title: Waterfall check
impact: HIGH
tags: perf
---
Content body of the rule here.`);
        vi.mocked(fs.mkdir).mockResolvedValue(undefined);
        vi.mocked(fs.writeFile).mockResolvedValue(undefined);
        const res = await runConvertRules('.');
        // FIX: removed leftover `console.log("ACTUAL REPORT:", res.report)`
        // debug statement that polluted every test run's output.
        expect(res.passed).toBe(true);
        expect(res.report).toContain('Generated 8 section files from 1 rules');
        // ensure valid output creation
        expect(fs.writeFile).toHaveBeenCalled();
        const callArgs = vi.mocked(fs.writeFile).mock.calls[0];
        const writtenContent = callArgs[1];
        expect(writtenContent).toContain('## Rule 1.1: Waterfall check');
        expect(writtenContent).toContain('**Impact:** HIGH');
        expect(writtenContent).toContain('Content body of the rule here.');
    });
});
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
// Unit tests for accessibilityChecker and uxAudit.
// fs/promises is mocked: file contents are supplied inline per test.
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { checkAccessibility } from '../accessibilityChecker.js';
import { runUxAudit } from '../uxAudit.js';
import * as fs from 'fs/promises';
vi.mock('fs/promises');
describe('accessibilityChecker', () => {
    beforeEach(() => {
        vi.clearAllMocks();
    });
    describe('checkAccessibility', () => {
        it('should detect input without label and img without alt', async () => {
            // Fragment without <html>, so the lang check must not fire.
            vi.mocked(fs.readFile).mockResolvedValue(`
<input type="text" name="bad" />
<button>Click me</button>
`);
            const issues = await checkAccessibility('test.html');
            expect(issues.some(i => i.includes('Input without label'))).toBe(true);
            expect(issues.some(i => i.includes('Missing lang'))).toBe(false); // only checks if <html> exists
        });
        it('should pass good inputs', async () => {
            // Fully labelled markup with lang attribute: expect no issues.
            vi.mocked(fs.readFile).mockResolvedValue(`
<html lang="en">
<input type="text" aria-label="Good" />
<button aria-label="Close">X</button>
</html>
`);
            const issues = await checkAccessibility('test.html');
            expect(issues).toEqual([]); // Skip link is only requested if <main> or <body> is present
        });
    });
});
describe('uxAudit', () => {
    beforeEach(() => {
        vi.clearAllMocks();
    });
    describe('runUxAudit', () => {
        it('should detect UX violations', async () => {
            // One fake .tsx file carrying several known violations: purple
            // usage, missing alt text, pure-black color, click handler with
            // no visual feedback.
            vi.mocked(fs.readdir).mockResolvedValue([{
                    name: 'test.tsx',
                    isDirectory: () => false
                }]);
            vi.mocked(fs.readFile).mockResolvedValue(`
<button onClick={() => {}}>Submit</button>
<img src="foo.jpg">
<p style="color: #000000; font-family: purple;">Hello</p>
`);
            const res = await runUxAudit('.');
            expect(res.passed).toBe(false);
            expect(res.report.includes('PURPLE DETECTED')).toBe(true);
            expect(res.report.includes('Missing img alt text')).toBe(true);
            expect(res.report.includes('Pure black')).toBe(true);
            expect(res.report.includes('Interactive elements lack immediate feedback')).toBe(true);
        });
    });
});
|