@stan-chen/simple-cli 0.2.1 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +55 -238
- package/dist/claw/jit.d.ts +5 -0
- package/dist/claw/jit.js +138 -0
- package/dist/claw/management.d.ts +3 -0
- package/dist/claw/management.js +107 -0
- package/dist/cli.js +306 -61
- package/dist/commands/git/commit.js +2 -1
- package/dist/commands/index.js +3 -2
- package/dist/context.js +13 -3
- package/dist/lib/agent.d.ts +4 -3
- package/dist/lib/agent.js +49 -17
- package/dist/lib/git.js +6 -1
- package/dist/lib/shim.d.ts +4 -0
- package/dist/lib/shim.js +30 -0
- package/dist/lib/ui.js +25 -0
- package/dist/mcp/manager.js +5 -1
- package/dist/prompts/provider.js +1 -0
- package/dist/providers/index.d.ts +21 -5
- package/dist/providers/index.js +75 -64
- package/dist/providers/multi.d.ts +2 -1
- package/dist/registry.d.ts +5 -0
- package/dist/registry.js +86 -22
- package/dist/repoMap.js +18 -18
- package/dist/router.js +21 -11
- package/dist/skills.js +10 -10
- package/dist/swarm/worker.d.ts +2 -0
- package/dist/swarm/worker.js +85 -15
- package/dist/tools/analyze_file.d.ts +16 -0
- package/dist/tools/analyze_file.js +43 -0
- package/dist/tools/clawBrain.d.ts +23 -0
- package/dist/tools/clawBrain.js +136 -0
- package/dist/tools/claw_brain.d.ts +23 -0
- package/dist/tools/claw_brain.js +139 -0
- package/dist/tools/deleteFile.d.ts +19 -0
- package/dist/tools/deleteFile.js +36 -0
- package/dist/tools/delete_file.d.ts +19 -0
- package/dist/tools/delete_file.js +36 -0
- package/dist/tools/fileOps.d.ts +22 -0
- package/dist/tools/fileOps.js +43 -0
- package/dist/tools/file_ops.d.ts +22 -0
- package/dist/tools/file_ops.js +43 -0
- package/dist/tools/grep.d.ts +2 -2
- package/dist/tools/linter.js +85 -27
- package/dist/tools/list_dir.d.ts +29 -0
- package/dist/tools/list_dir.js +50 -0
- package/dist/tools/organizer.d.ts +1 -0
- package/dist/tools/organizer.js +65 -0
- package/dist/tools/read_files.d.ts +25 -0
- package/dist/tools/read_files.js +31 -0
- package/dist/tools/reload_tools.d.ts +11 -0
- package/dist/tools/reload_tools.js +22 -0
- package/dist/tools/run_command.d.ts +32 -0
- package/dist/tools/run_command.js +103 -0
- package/dist/tools/scheduler.d.ts +25 -0
- package/dist/tools/scheduler.js +65 -0
- package/dist/tools/writeFiles.js +1 -1
- package/dist/tools/write_files.d.ts +84 -0
- package/dist/tools/write_files.js +91 -0
- package/dist/tools/write_to_file.d.ts +15 -0
- package/dist/tools/write_to_file.js +21 -0
- package/package.json +84 -78
package/dist/registry.js
CHANGED
@@ -8,6 +8,7 @@ import { join, dirname, basename, extname } from 'path';
 import { fileURLToPath, pathToFileURL } from 'url';
 import { z } from 'zod';
 import { getMCPManager } from './mcp/manager.js';
+import YAML from 'yaml';
 const __dirname = dirname(fileURLToPath(import.meta.url));
 const TOOLS_DIR = join(__dirname, 'tools');
 /**
@@ -41,16 +42,34 @@ function extractDocFromComments(content) {
 }
 /**
  * Parses a tool definition from a Markdown file (.md) or string
+ * Supports YAML frontmatter as per OpenClaw PRD.
  */
-function parseMarkdownTool(content, filename) {
-    …
-    const meta = {
+export function getMeta(content, filename) {
+    let meta = {
         name: basename(filename, extname(filename)),
         description: '',
         command: '',
         parameters: {},
         permission: 'execute'
     };
+    // 1. Try YAML frontmatter (OpenClaw style)
+    if (content.startsWith('---')) {
+        const end = content.indexOf('---', 3);
+        if (end > -1) {
+            try {
+                const yamlStr = content.slice(3, end);
+                const yamlMeta = YAML.parse(yamlStr);
+                if (yamlMeta) {
+                    return { ...meta, ...yamlMeta };
+                }
+            }
+            catch {
+                // Fall back to manual parsing if YAML fails
+            }
+        }
+    }
+    // 2. Fallback to manual Markdown section parsing
+    const lines = content.split('\n');
     let currentSection = '';
     for (const line of lines) {
         if (line.startsWith('# ')) {
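Per this hunk, a Markdown tool doc can now open with a YAML frontmatter block; when YAML.parse succeeds, the frontmatter is merged over the filename-derived defaults and the Markdown body is never section-parsed. A minimal sketch of the new getMeta behaviour, assuming a hypothetical word_count tool doc (field names mirror the defaults in meta; everything else is invented):

// Sketch only: a hypothetical Markdown tool doc fed through the new getMeta.
import { getMeta } from './registry.js';

const doc = [
  '---',
  'name: word_count',
  'description: Count words in a file',
  'command: wc -w',
  'permission: execute',
  '---',
  '# word_count',
  'Counts words in the given file.',
].join('\n');

// The frontmatter parses, so it wins over the filename-derived defaults.
console.log(getMeta(doc, 'word_count.md'));
// → { name: 'word_count', description: 'Count words in a file',
//     command: 'wc -w', parameters: {}, permission: 'execute' }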
@@ -77,11 +96,32 @@ function parseMarkdownTool(content, filename) {
 }
 // Helper to create a Tool from metadata (JSON or MD)
 function createScriptTool(meta, source, spec) {
+    let inputSchema = z.object({}).passthrough();
+    if (meta.parameters && typeof meta.parameters === 'object') {
+        const shape = {};
+        for (const [key, param] of Object.entries(meta.parameters)) {
+            const p = param;
+            let schema = z.string();
+            if (p.type === 'number' || p === 'number')
+                schema = z.number();
+            else if (p.type === 'boolean' || p === 'boolean')
+                schema = z.boolean();
+            else if (p.type === 'array' || p === 'array')
+                schema = z.array(z.any());
+            else if (p.type === 'object' || p === 'object')
+                schema = z.object({}).passthrough();
+            if (typeof p === 'object' && p.description) {
+                schema = schema.describe(p.description);
+            }
+            shape[key] = schema;
+        }
+        inputSchema = z.object(shape).passthrough();
+    }
     return {
         name: meta.name,
         description: meta.description || `Script tool: ${meta.command}`,
         permission: meta.permission || 'execute',
-        inputSchema
+        inputSchema,
         source,
         specification: spec,
         execute: async (args) => {
@@ -91,10 +131,15 @@ function createScriptTool(meta, source, spec) {
             if (isWindows && finalCommand.endsWith('.ps1')) {
                 finalCommand = `powershell -ExecutionPolicy Bypass -File ${finalCommand}`;
             }
+            const env = { ...process.env, TOOL_INPUT: JSON.stringify(args) };
+            // Pass arguments as INPUT_{NAME} for OpenClaw parity
+            for (const [key, value] of Object.entries(args)) {
+                env[`INPUT_${key.toUpperCase()}`] = typeof value === 'string' ? value : JSON.stringify(value);
+            }
             const child = spawn(finalCommand, {
                 shell: true,
                 cwd: process.cwd(),
-                env
+                env,
             });
             let stdout = '';
             let stderr = '';
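With this change a script tool receives its arguments twice: the whole object as TOOL_INPUT (JSON) and each argument individually as INPUT_<NAME>. A sketch of a Node-based script tool reading both, assuming a hypothetical `path` parameter:

// Sketch only: a generic script tool consuming the env vars set up above.
// `path` is an invented parameter name; string values arrive verbatim,
// non-strings arrive JSON-stringified.
const args = JSON.parse(process.env.TOOL_INPUT || '{}');
const path = process.env.INPUT_PATH; // equals args.path when it is a string
console.log(JSON.stringify({ received: path, args }));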
@@ -143,7 +188,7 @@ async function loadToolsFromDir(dir, source) {
             if (!tools.has(item)) {
                 const doc = await findDocInDir(fullPath, item);
                 if (doc && doc.file.endsWith('.md')) {
-                    const meta =
+                    const meta = getMeta(doc.content, doc.file);
                     if (meta && meta.command) {
                         tools.set(meta.name, createScriptTool(meta, source, doc.content));
                     }
@@ -158,19 +203,27 @@ async function loadToolsFromDir(dir, source) {
         if (item.includes('.test.'))
             continue;
         try {
-            …
+            // Peek at file content to see if it looks like a module with exports
+            // This avoids executing scripts with top-level side effects during import
+            const content = await readFile(fullPath, 'utf-8');
+            if (!content.includes('export ')) {
+                // Fall through to Case 2 (Generic Script)
+            }
+            else {
+                const module = await import(pathToFileURL(fullPath).href);
+                const toolDef = module.tool || module;
+                if (toolDef.name && toolDef.execute) {
+                    const schema = toolDef.inputSchema || toolDef.schema;
+                    tools.set(toolDef.name, {
+                        name: toolDef.name,
+                        description: toolDef.description || 'No description',
+                        permission: toolDef.permission || 'read',
+                        inputSchema: schema || z.object({}),
+                        execute: toolDef.execute,
+                        source,
+                    });
+                    continue;
+                }
             }
         }
         catch { /* might be a script, fall through */ }
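Under the new loader path, a project .js file is only imported as a module when its text contains "export ", and it registers as a tool when the module (or its `tool` export) exposes both `name` and `execute`. A minimal sketch of such a module, assuming a hypothetical tools/hello.js:

// Sketch only: a hypothetical project tool module that the dynamic-import
// branch above would register. It qualifies because the file contains
// "export " and exposes both `name` and `execute`.
import { z } from 'zod';

export const name = 'hello';
export const description = 'Say hello to someone';
export const permission = 'read';
export const inputSchema = z.object({ who: z.string() });
export const execute = async ({ who }) => `Hello, ${who}!`;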
@@ -189,12 +242,12 @@ async function loadToolsFromDir(dir, source) {
                     meta = JSON.parse(companion.content);
                 }
                 else {
-                    meta =
+                    meta = getMeta(companion.content, companion.file);
                     specContent = companion.content;
                 }
             }
             else if (internalDoc) {
-                meta =
+                meta = getMeta(internalDoc, item);
                 if (!meta.command) {
                     if (ext === '.py')
                         meta.command = `python ${fullPath}`;
@@ -223,7 +276,7 @@ async function loadToolsFromDir(dir, source) {
                 meta = JSON.parse(content);
             }
             else {
-                meta =
+                meta = getMeta(content, item);
             }
             if (meta && meta.name && meta.command) {
                 tools.set(meta.name, createScriptTool(meta, source, content));
@@ -243,6 +296,7 @@ export const loadTools = async () => {
     const customDirs = ['skills', 'scripts', 'tools', '.simple-cli/tools'];
     const builtinTools = await loadToolsFromDir(TOOLS_DIR, 'builtin');
     const allProjectTools = new Map();
+    // Local project dirs
     for (const d of customDirs) {
         const dirPath = join(process.cwd(), d);
         const tools = await loadToolsFromDir(dirPath, 'project');
@@ -250,6 +304,15 @@ export const loadTools = async () => {
             allProjectTools.set(name, tool);
         }
     }
+    // Global OpenClaw skills (from PRD)
+    const home = process.env.HOME || process.env.USERPROFILE || '';
+    const globalClawDir = join(home, '.openclaw', 'workspace', 'skills');
+    const globalTools = await loadToolsFromDir(globalClawDir, 'project');
+    for (const [name, tool] of globalTools) {
+        if (!allProjectTools.has(name)) {
+            allProjectTools.set(name, tool);
+        }
+    }
     return new Map([...builtinTools, ...allProjectTools]);
 };
 // Load MCP tools and merge with built-in tools
@@ -282,6 +345,7 @@ export const getToolDefinitions = (tools) => {
     const builtinTools = [];
     const projectTools = [];
     const mcpTools = [];
+    const isClaw = process.argv.includes('--claw') || process.argv.includes('-claw');
     for (const tool of tools.values()) {
         if (tool.source === 'mcp') {
             mcpTools.push(tool);
package/dist/repoMap.js
CHANGED
@@ -3,7 +3,7 @@
  * Uses ts-morph for TypeScript/JavaScript and simple parsing for others.
  */
 import { Project, ScriptTarget } from 'ts-morph';
-import { readdir } from 'fs/promises';
+import { readdir, readFile } from 'fs/promises';
 import { join, extname, relative } from 'path';
 const IGNORED_DIRS = new Set(['node_modules', '.git', 'dist', 'build', '.next', 'coverage']);
 const TS_EXTENSIONS = new Set(['.ts', '.tsx', '.js', '.jsx']);
@@ -36,37 +36,37 @@ export const generateRepoMap = async (rootDir = '.') => {
         }
         catch { /* ignore access errors */ }
     }
-    // 2. Process Files
+    // 2. Process Files in parallel
     // Limit to 50 files for now to avoid context explosion
     const filesToProcess = validFiles.slice(0, 50);
-    …
+    const results = await Promise.all(filesToProcess.map(async (filePath) => {
         const ext = extname(filePath);
         const relPath = relative(rootDir, filePath);
         if (TS_EXTENSIONS.has(ext)) {
             try {
-                …
-                const sourceFile = project.createSourceFile(filePath,
+                const content = await readFile(filePath, 'utf-8');
+                const sourceFile = project.createSourceFile(filePath, content, { overwrite: true });
                 const symbols = [];
-                sourceFile.getClasses().forEach(c => symbols.push(`class ${c.getName()}`));
-                sourceFile.getFunctions().forEach(f => symbols.push(`func ${f.getName()}`));
-                sourceFile.getInterfaces().forEach(i => symbols.push(`interface ${i.getName()}`));
-                sourceFile.getTypeAliases().forEach(t => symbols.push(`type ${t.getName()}`));
-                sourceFile.getVariableStatements().forEach(v => {
-                    v.getDeclarations().forEach(d => symbols.push(`const ${d.getName()}`));
+                sourceFile.getClasses().forEach((c) => symbols.push(`class ${c.getName()}`));
+                sourceFile.getFunctions().forEach((f) => symbols.push(`func ${f.getName()}`));
+                sourceFile.getInterfaces().forEach((i) => symbols.push(`interface ${i.getName()}`));
+                sourceFile.getTypeAliases().forEach((t) => symbols.push(`type ${t.getName()}`));
+                sourceFile.getVariableStatements().forEach((v) => {
+                    v.getDeclarations().forEach((d) => symbols.push(`const ${d.getName()}`));
                 });
-                …
-                fileMaps.push({ path: relPath, symbols });
-            }
+                return { path: relPath, symbols };
             }
             catch (e) {
-                …
+                return null;
             }
         }
         else {
-            …
-            // For now, let's just list the file path for completeness if it's source code
-            fileMaps.push({ path: relPath, symbols: [] });
+            return { path: relPath, symbols: [] };
         }
+    }));
+    for (const res of results) {
+        if (res)
+            fileMaps.push(res);
     }
     if (fileMaps.length === 0)
         return 'No source files found.';
package/dist/router.js
CHANGED
@@ -2,6 +2,7 @@
  * MoE Router: Mix of Experts task routing
  * Routes tasks to appropriate model tiers based on complexity
  */
+import { jsonrepair } from 'jsonrepair';
 import { z } from 'zod';
 // Schema for Orchestrator's routing response
 export const RoutingResponseSchema = z.object({
@@ -25,9 +26,9 @@ export const loadTierConfig = () => {
     for (const tier of [1, 2, 3, 4, 5]) {
         const envModel = process.env[`MOE_TIER_${tier}_MODEL`];
         const model = envModel || DEFAULT_TIERS[tier].defaultModel;
-        //
-        //
-        const provider = model.includes('/') ? model.split(
+        // Vercel AI SDK supports provider prefixes (e.g., "anthropic:claude-3", "google:gemini-pro")
+        // Some tests/tools use slash separators ("anthropic/claude-3-opus"). Accept either.
+        const provider = model.includes(':') || model.includes('/') ? model.split(/[:\/]/)[0] : 'openai';
         tiers.set(tier, {
             tier,
             role: DEFAULT_TIERS[tier].role,
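The provider is now derived from either separator, falling back to 'openai' when the model string carries no prefix. A small sketch of the same expression applied to a few illustrative model names:

// Sketch only: the provider-detection expression from this hunk, applied to
// illustrative model strings.
const providerOf = (model) =>
  model.includes(':') || model.includes('/') ? model.split(/[:\/]/)[0] : 'openai';

providerOf('anthropic:claude-3');      // 'anthropic' (colon prefix)
providerOf('anthropic/claude-3-opus'); // 'anthropic' (slash separator)
providerOf('gpt-4o-mini');             // 'openai'    (no prefix, default)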
@@ -67,14 +68,23 @@ export const routeTask = async (task, orchestratorCall) => {
         if (!jsonMatch) {
             return getDefaultRouting(task);
         }
-        …
+        try {
+            const repaired = jsonrepair(jsonMatch[0]);
+            const data = JSON.parse(repaired);
+            // Fuzzy mapping for recommendedTier
+            const tierValue = data.recommendedTier || data.tier || data.recommended_tier || 3;
+            const complexityValue = data.complexity || 5;
+            return {
+                tier: (Math.max(1, Math.min(5, Number(tierValue)))),
+                complexity: Number(complexityValue),
+                contextRequired: data.contextRequired === 'high' ? 'high' : 'low',
+                risk: data.risk === 'high' ? 'high' : 'low',
+                reasoning: data.reasoning || 'No reasoning provided'
+            };
+        }
+        catch (e) {
+            return getDefaultRouting(task);
+        }
     }
     catch (error) {
         console.error('Routing error, using default:', error);
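The routing parser now runs the matched text through jsonrepair before JSON.parse, then clamps the tier into 1–5 and tolerates alternate key spellings. A sketch of the kind of near-JSON reply this accepts (the reply text is invented):

// Sketch only: a malformed orchestrator reply the new fallback can handle.
import { jsonrepair } from 'jsonrepair';

const reply = "{ recommendedTier: '4', complexity: 6, risk: 'low', }";
const data = JSON.parse(jsonrepair(reply)); // unquoted keys, single quotes, trailing comma repaired
// routeTask then coerces and clamps: Number('4') → 4, which stays within 1..5.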
package/dist/skills.js
CHANGED
@@ -45,7 +45,7 @@ When making changes to files:
 2. Make precise, targeted changes using search/replace
 3. Verify changes don't break existing functionality
 4. Follow the existing code style`,
-        tools: ['
+        tools: ['read_files', 'write_files', 'run_command', 'glob', 'grep', 'lint', 'reload_tools', 'scheduler'],
     },
     // Architect skill for planning
     architect: {
@@ -66,7 +66,7 @@ When working on architecture:
 
 Focus on high-level design rather than implementation details.
 Generate diagrams and documentation when helpful.`,
-        tools: ['
+        tools: ['read_files', 'glob', 'grep', 'memory'],
         modelPreference: 'orchestrator',
     },
     // Ask skill for questions only
@@ -87,7 +87,7 @@ You should NOT:
 - Create new files
 
 Always read files before answering questions about them.`,
-        tools: ['
+        tools: ['read_files', 'glob', 'grep'],
     },
     // Help/docs skill
     help: {
@@ -102,7 +102,7 @@ Always read files before answering questions about them.`,
 
 Be concise and provide examples when helpful.
 Reference official documentation when available.`,
-        tools: ['
+        tools: ['read_files', 'scrape_url'],
     },
     // Test skill
     test: {
@@ -122,7 +122,7 @@ Test guidelines:
 - Use appropriate mocking when needed
 
 After writing tests, run them to verify they pass.`,
-        tools: ['
+        tools: ['read_files', 'write_files', 'run_command', 'glob', 'grep', 'lint', 'scheduler'],
     },
     // Debug skill
     debug: {
@@ -141,7 +141,7 @@ Debugging tips:
 - Check recent changes that might have caused the issue
 - Look for common patterns (null checks, async issues, etc.)
 - Use the linter to catch syntax errors`,
-        tools: ['
+        tools: ['read_files', 'write_files', 'run_command', 'grep', 'lint', 'git', 'scheduler'],
     },
     // Refactor skill
     refactor: {
@@ -162,7 +162,7 @@ Refactoring principles:
 - Follow SOLID principles
 
 Always run tests after refactoring to ensure nothing broke.`,
-        tools: ['
+        tools: ['read_files', 'write_files', 'run_command', 'glob', 'grep', 'lint', 'git', 'scheduler'],
     },
     // Review skill
     review: {
@@ -178,7 +178,7 @@ Always run tests after refactoring to ensure nothing broke.`,
 
 Provide constructive feedback with specific suggestions.
 Prioritize critical issues over minor style concerns.`,
-        tools: ['
+        tools: ['read_files', 'glob', 'grep', 'git', 'lint'],
     },
     // Shell skill
     shell: {
@@ -196,7 +196,7 @@ Safety guidelines:
 - Use safe defaults (no force flags unless needed)
 - Be careful with destructive operations
 - Test commands before applying to production`,
-        tools: ['
+        tools: ['run_command', 'read_files', 'write_files', 'glob'],
     },
     // Git skill
     git: {
@@ -215,7 +215,7 @@ Git guidelines:
 - Make small, focused commits
 - Keep branches up to date
 - Review changes before committing`,
-        tools: ['git', '
+        tools: ['git', 'read_files', 'glob', 'grep'],
     },
 };
 // Get active skill from environment or default
package/dist/swarm/worker.d.ts
CHANGED
package/dist/swarm/worker.js
CHANGED
@@ -12,6 +12,12 @@ export class Worker extends EventEmitter {
     startedAt = 0;
     output = '';
     options;
+    isTerminating = false;
+    onUnexpectedExit = (code) => {
+        if (!this.isTerminating) {
+            this.emit('error', new Error(`Worker process exited unexpectedly with code ${code}`));
+        }
+    };
     constructor(options) {
         super();
         this.id = `worker-${randomUUID().slice(0, 8)}`;
@@ -114,14 +120,19 @@ export class Worker extends EventEmitter {
             // Set timeout
             timeoutId = setTimeout(() => {
                 if (!resolved) {
-                    …
+                    (async () => {
+                        try {
+                            await this.kill();
+                        }
+                        catch (e) { /* best-effort */ }
+                        finish({
+                            success: false,
+                            filesChanged: [],
+                            error: `Task timed out after ${timeout}ms`,
+                            duration: timeout,
+                            output: this.output,
+                        });
+                    })();
                 }
             }, timeout);
             // Send task to stdin and close
@@ -177,15 +188,74 @@ export class Worker extends EventEmitter {
      * Kill the worker process
      */
     kill() {
-        …
+        // Keep backwards-compatible signature: return a Promise but allow callers to ignore it.
+        const graceMs = 5000;
+        const promise = (async () => {
+            if (!this.process)
+                return;
+            if (this.process.killed)
+                return;
+            this.isTerminating = true;
+            // stop reporting unexpected-exit while we intentionally terminate
+            try {
+                this.process.off('exit', this.onUnexpectedExit);
+            }
+            catch (e) { }
+            // If IPC is available, politely ask the child to shutdown
+            try {
+                if (this.process.connected) {
+                    try {
+                        this.process.send({ type: 'shutdown' });
+                    }
+                    catch (e) { }
+                }
+            }
+            catch (e) { }
+            try {
+                this.process.kill('SIGTERM');
+            }
+            catch (e) {
+                // ignore
+            }
+            // wait for exit up to graceMs, otherwise force-kill
+            await new Promise((resolve) => {
+                let resolved = false;
+                const onExit = () => {
+                    if (resolved)
+                        return;
+                    resolved = true;
+                    clearTimeout(timer);
+                    resolve(undefined);
+                };
+                const timer = setTimeout(() => {
+                    try {
+                        if (this.process && !this.process.killed)
+                            this.process.kill('SIGKILL');
+                    }
+                    catch (e) { }
+                    // still wait for exit event
+                }, graceMs);
+                try {
+                    if (this.process)
+                        this.process.once('exit', onExit);
+                }
+                catch (e) {
+                    resolve(undefined);
                 }
-    }
+            });
+            try {
+                this.process = null;
+            }
+            catch (e) { }
+        })();
+        // Log kill action for auditing
+        try {
+            const fs = require('fs');
+            const path = require('path');
+            fs.appendFileSync(path.join(process.cwd(), '.worker_kill.log'), `${new Date().toISOString()} kill requested for worker ${this.id}\n`);
         }
+        catch (e) { }
+        return promise;
     }
     /**
      * Check if worker is busy
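kill() now escalates: an IPC { type: 'shutdown' } message when the channel is connected, then SIGTERM, then SIGKILL after a 5-second grace period. A sketch of how a worker child process could cooperate; this handler is not part of the package:

// Sketch only: hypothetical child-side handling of the graceful-shutdown path above.
process.on('message', (msg) => {
  if (msg && msg.type === 'shutdown') {
    // flush any pending state, then exit before the grace period elapses
    process.exit(0);
  }
});
process.on('SIGTERM', () => process.exit(0));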
package/dist/tools/analyze_file.d.ts
ADDED
@@ -0,0 +1,16 @@
+/**
+ * Tool: analyzeFile
+ * Structured analysis of a Source File (TS/JS) using ts-morph
+ */
+import { z } from 'zod';
+export declare const name = "analyze_file";
+export declare const description = "Perform structured analysis of a TypeScript/JavaScript file to extract classes, functions, and interfaces.";
+export declare const permission: "read";
+export declare const schema: z.ZodObject<{
+    path: z.ZodString;
+}, "strip", z.ZodTypeAny, {
+    path: string;
+}, {
+    path: string;
+}>;
+export declare const execute: (args: Record<string, unknown>) => Promise<unknown>;
package/dist/tools/analyze_file.js
ADDED
@@ -0,0 +1,43 @@
+/**
+ * Tool: analyzeFile
+ * Structured analysis of a Source File (TS/JS) using ts-morph
+ */
+import { Project, ScriptTarget } from 'ts-morph';
+import { z } from 'zod';
+import { readFile } from 'fs/promises';
+export const name = 'analyze_file';
+export const description = 'Perform structured analysis of a TypeScript/JavaScript file to extract classes, functions, and interfaces.';
+export const permission = 'read';
+export const schema = z.object({
+    path: z.string().describe('Path to the file to analyze')
+});
+export const execute = async (args) => {
+    const parsed = schema.parse(args);
+    const path = parsed.path;
+    try {
+        const content = await readFile(path, 'utf-8');
+        const project = new Project({
+            compilerOptions: { target: ScriptTarget.ESNext, allowJs: true },
+            useInMemoryFileSystem: true
+        });
+        const sourceFile = project.createSourceFile(path, content);
+        return {
+            path,
+            classes: sourceFile.getClasses().map(c => ({
+                name: c.getName(),
+                methods: c.getMethods().map(m => m.getName()),
+                properties: c.getProperties().map(p => p.getName())
+            })),
+            functions: sourceFile.getFunctions().map(f => ({
+                name: f.getName(),
+                params: f.getParameters().map(p => p.getName())
+            })),
+            interfaces: sourceFile.getInterfaces().map(i => i.getName()),
+            types: sourceFile.getTypeAliases().map(t => t.getName()),
+            exports: sourceFile.getExportedDeclarations().keys()
+        };
+    }
+    catch (error) {
+        throw new Error(`Failed to analyze file ${path}: ${error instanceof Error ? error.message : error}`);
+    }
+};
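A usage sketch for the new analyze_file tool; the file path is illustrative:

// Sketch only: calling the new tool's execute directly.
import { execute } from './tools/analyze_file.js';

const report = await execute({ path: 'src/index.ts' });
console.log(report.classes, report.functions, report.interfaces);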
package/dist/tools/clawBrain.d.ts
ADDED
@@ -0,0 +1,23 @@
+/**
+ * ClawBrain - Agentic Reasoning and Persistence for Autonomous Mode
+ */
+import { z } from 'zod';
+import type { Tool } from '../registry.js';
+export declare const inputSchema: z.ZodObject<{
+    action: z.ZodEnum<["set_goal", "update_status", "log_reflection", "get_summary", "prune", "link_files"]>;
+    content: z.ZodOptional<z.ZodString>;
+    status: z.ZodOptional<z.ZodEnum<["planning", "executing", "completed", "failed"]>>;
+    links: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
+}, "strip", z.ZodTypeAny, {
+    action: "log_reflection" | "get_summary" | "prune" | "set_goal" | "update_status" | "link_files";
+    status?: "completed" | "failed" | "planning" | "executing" | undefined;
+    content?: string | undefined;
+    links?: string[] | undefined;
+}, {
+    action: "log_reflection" | "get_summary" | "prune" | "set_goal" | "update_status" | "link_files";
+    status?: "completed" | "failed" | "planning" | "executing" | undefined;
+    content?: string | undefined;
+    links?: string[] | undefined;
+}>;
+export declare const execute: (args: Record<string, unknown>, cwd?: string) => Promise<any>;
+export declare const tool: Tool;
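The declaration fixes the input shape for the clawBrain tool (an action plus optional content, status, and links); the implementation in clawBrain.js is not shown in this excerpt. A call shaped by that schema, with invented goal text:

// Sketch only: an invocation matching the declared inputSchema.
import { execute } from './tools/clawBrain.js';

await execute({
  action: 'set_goal',
  content: 'Track the registry migration to YAML frontmatter docs',
  links: ['dist/registry.js'],
});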