neoagent 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +28 -0
- package/LICENSE +21 -0
- package/README.md +42 -0
- package/bin/neoagent.js +8 -0
- package/com.neoagent.plist +45 -0
- package/docs/configuration.md +45 -0
- package/docs/skills.md +45 -0
- package/lib/manager.js +459 -0
- package/package.json +61 -0
- package/server/db/database.js +239 -0
- package/server/index.js +442 -0
- package/server/middleware/auth.js +35 -0
- package/server/public/app.html +559 -0
- package/server/public/css/app.css +608 -0
- package/server/public/css/styles.css +472 -0
- package/server/public/favicon.svg +17 -0
- package/server/public/js/app.js +3283 -0
- package/server/public/login.html +313 -0
- package/server/routes/agents.js +125 -0
- package/server/routes/auth.js +105 -0
- package/server/routes/browser.js +116 -0
- package/server/routes/mcp.js +164 -0
- package/server/routes/memory.js +193 -0
- package/server/routes/messaging.js +153 -0
- package/server/routes/protocols.js +87 -0
- package/server/routes/scheduler.js +63 -0
- package/server/routes/settings.js +98 -0
- package/server/routes/skills.js +107 -0
- package/server/routes/store.js +1192 -0
- package/server/services/ai/compaction.js +82 -0
- package/server/services/ai/engine.js +1690 -0
- package/server/services/ai/models.js +46 -0
- package/server/services/ai/multiStep.js +112 -0
- package/server/services/ai/providers/anthropic.js +181 -0
- package/server/services/ai/providers/base.js +40 -0
- package/server/services/ai/providers/google.js +187 -0
- package/server/services/ai/providers/grok.js +121 -0
- package/server/services/ai/providers/ollama.js +162 -0
- package/server/services/ai/providers/openai.js +167 -0
- package/server/services/ai/toolRunner.js +218 -0
- package/server/services/browser/controller.js +320 -0
- package/server/services/cli/executor.js +204 -0
- package/server/services/mcp/client.js +260 -0
- package/server/services/memory/embeddings.js +126 -0
- package/server/services/memory/manager.js +431 -0
- package/server/services/messaging/base.js +23 -0
- package/server/services/messaging/discord.js +238 -0
- package/server/services/messaging/manager.js +328 -0
- package/server/services/messaging/telegram.js +243 -0
- package/server/services/messaging/telnyx.js +693 -0
- package/server/services/messaging/whatsapp.js +304 -0
- package/server/services/scheduler/cron.js +312 -0
- package/server/services/websocket.js +191 -0
- package/server/utils/security.js +71 -0
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
const { BaseProvider } = require('./base');
|
|
2
|
+
|
|
3
|
+
class OllamaProvider extends BaseProvider {
  /**
   * Chat provider backed by a local Ollama server's HTTP API.
   *
   * @param {object} [config] - Optional settings; `baseUrl` overrides the
   *   server address (falls back to OLLAMA_URL, then localhost:11434) and
   *   `model` sets the default model name.
   */
  constructor(config = {}) {
    super(config);
    this.name = 'ollama';
    this.baseUrl = config.baseUrl || process.env.OLLAMA_URL || 'http://localhost:11434';
    this.models = [];
  }

  /**
   * List locally installed models via GET /api/tags.
   * Best-effort: returns [] when the server is unreachable, responds with
   * an error status, or returns invalid JSON (Ollama may simply not be
   * running — callers treat that as "no models").
   * @returns {Promise<string[]>} model names; also cached on this.models
   */
  async listModels() {
    try {
      const res = await fetch(`${this.baseUrl}/api/tags`);
      if (!res.ok) return []; // treat HTTP errors like an unreachable server
      const data = await res.json();
      this.models = (data.models || []).map(m => m.name);
      return this.models;
    } catch {
      return [];
    }
  }

  /**
   * Context window size for a model. Ollama does not expose per-model
   * window sizes through this client, so a fixed 128k is assumed.
   * @param {string} model - unused; kept for interface parity with other providers
   * @returns {number}
   */
  getContextWindow(model) {
    return 128000;
  }

  /**
   * Convert generic tool definitions into Ollama's OpenAI-style
   * function-tool format.
   * @param {Array<{name: string, description: string, parameters?: object}>} tools
   * @returns {Array<object>}
   */
  formatToolsForOllama(tools) {
    return tools.map(tool => ({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters || { type: 'object', properties: {} }
      }
    }));
  }

  /**
   * Build the /api/chat request body. Shared by chat() and stream(),
   * which previously duplicated this construction verbatim.
   * @param {boolean} stream - whether to request a streaming response
   * @returns {object} JSON-serializable request body
   * @private
   */
  _buildRequestBody(messages, tools, options, stream) {
    const body = {
      model: options.model || this.config.model || 'llama3.1',
      messages: messages.map(m => ({
        role: m.role,
        content: m.content || '',
        ...(m.tool_calls ? { tool_calls: m.tool_calls } : {}),
        ...(m.tool_call_id ? { tool_call_id: m.tool_call_id } : {})
      })),
      stream,
      options: {
        temperature: options.temperature ?? 0.7,
        num_predict: options.maxTokens || 16384
      }
    };
    if (tools.length > 0) {
      body.tools = this.formatToolsForOllama(tools);
    }
    return body;
  }

  /**
   * Normalize Ollama tool calls (whose arguments arrive as an object)
   * into the OpenAI-style shape with stringified arguments and
   * synthetic, locally-unique ids.
   * @private
   */
  _normalizeToolCalls(toolCalls) {
    return (toolCalls || []).map((tc, i) => ({
      id: `call_ollama_${Date.now()}_${i}`,
      type: 'function',
      function: {
        name: tc.function.name,
        arguments: JSON.stringify(tc.function.arguments || {})
      }
    }));
  }

  /**
   * Non-streaming chat completion via POST /api/chat.
   * @returns {Promise<{content, toolCalls, finishReason, usage, model}>}
   * @throws {Error} on a non-2xx HTTP status (previously a failed
   *   request was silently turned into an empty assistant reply)
   */
  async chat(messages, tools = [], options = {}) {
    const body = this._buildRequestBody(messages, tools, options, false);

    const res = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(body)
    });
    if (!res.ok) {
      throw new Error(`Ollama chat failed: HTTP ${res.status}`);
    }

    const data = await res.json();
    const msg = data.message || {};
    const toolCalls = this._normalizeToolCalls(msg.tool_calls);

    return {
      content: msg.content || '',
      toolCalls,
      finishReason: toolCalls.length > 0 ? 'tool_calls' : 'stop',
      usage: data.prompt_eval_count ? {
        promptTokens: data.prompt_eval_count || 0,
        completionTokens: data.eval_count || 0,
        totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0)
      } : null,
      model: data.model || body.model
    };
  }

  /**
   * Streaming chat completion over Ollama's newline-delimited JSON stream.
   * Yields {type:'content', content} per chunk and a final {type:'done'}
   * event carrying accumulated content, normalized tool calls, finish
   * reason, and usage (when the server reports eval counts).
   * @throws {Error} on a non-2xx HTTP status
   */
  async *stream(messages, tools = [], options = {}) {
    const body = this._buildRequestBody(messages, tools, options, true);

    const res = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(body)
    });
    if (!res.ok) {
      throw new Error(`Ollama stream failed: HTTP ${res.status}`);
    }

    const reader = res.body.getReader();
    const decoder = new TextDecoder();
    let content = '';
    let buffer = '';

    while (true) {
      const { done, value } = await reader.read();
      // On the final read, flush the decoder and process whatever is left
      // in the buffer — the original dropped a last NDJSON line that
      // arrived without a terminating newline.
      buffer += done ? decoder.decode() : decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      buffer = done ? '' : (lines.pop() || '');

      for (const line of lines) {
        if (!line.trim()) continue;
        let data;
        try {
          data = JSON.parse(line);
        } catch {
          continue; // skip a malformed NDJSON line rather than abort the stream
        }
        if (data.message?.content) {
          content += data.message.content;
          yield { type: 'content', content: data.message.content };
        }
        if (data.done) {
          const toolCalls = this._normalizeToolCalls(data.message?.tool_calls);
          yield {
            type: 'done',
            content,
            toolCalls,
            finishReason: toolCalls.length > 0 ? 'tool_calls' : 'stop',
            usage: data.prompt_eval_count ? {
              promptTokens: data.prompt_eval_count || 0,
              completionTokens: data.eval_count || 0,
              totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0)
            } : null
          };
        }
      }

      if (done) break;
    }
  }
}
|
|
161
|
+
|
|
162
|
+
module.exports = { OllamaProvider };
|
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
const OpenAI = require('openai');
|
|
2
|
+
const { BaseProvider } = require('./base');
|
|
3
|
+
|
|
4
|
+
class OpenAIProvider extends BaseProvider {
  /**
   * Chat provider backed by the OpenAI Chat Completions API.
   *
   * @param {object} [config] - `apiKey` overrides OPENAI_API_KEY;
   *   `model` sets the default model name.
   */
  constructor(config = {}) {
    super(config);
    this.name = 'openai';
    this.models = [
      'gpt-5',
      'gpt-5-mini',
      'gpt-5-nano',
      'gpt-5.2',
      'gpt-4.1',
      'o3',
      'o4-mini'
    ];
    // Reasoning models: no temperature, use max_completion_tokens, support reasoning_effort
    this.reasoningModels = new Set(['gpt-5', 'gpt-5-mini', 'gpt-5-nano', 'gpt-5.2', 'gpt-5.1', 'o1', 'o3', 'o4-mini', 'o3-mini']);
    this.contextWindows = {
      'gpt-5': 400000,
      'gpt-5-mini': 200000,
      'gpt-5-nano': 128000,
      'gpt-5.2': 400000,
      'gpt-5.1': 400000,
      'gpt-4.1': 1047576,
      'o3': 200000,
      'o4-mini': 200000,
      'o3-mini': 200000
    };
    this.client = new OpenAI({
      apiKey: config.apiKey || process.env.OPENAI_API_KEY
    });
  }

  /**
   * Whether `model` is a reasoning model (exact id or dated variant,
   * e.g. gpt-5-2025-08-07). Reasoning models reject `temperature` and
   * use `max_completion_tokens` instead of `max_tokens`.
   * @returns {boolean}
   */
  isReasoningModel(model) {
    for (const id of this.reasoningModels) {
      if (model === id || model.startsWith(id + '-')) return true;
    }
    return false;
  }

  /**
   * Context window size for a model id or dated variant.
   *
   * Exact id wins; otherwise the LONGEST matching prefix is used. The
   * original iterated in insertion order, so 'gpt-5-mini' matched the
   * 'gpt-5-' prefix first and every gpt-5-* variant wrongly reported
   * gpt-5's 400k window.
   * @returns {number} window size in tokens (128k fallback for unknown ids)
   */
  getContextWindow(model) {
    if (Object.hasOwn(this.contextWindows, model)) {
      return this.contextWindows[model];
    }
    const ids = Object.keys(this.contextWindows).sort((a, b) => b.length - a.length);
    for (const id of ids) {
      if (model.startsWith(id + '-')) return this.contextWindows[id];
    }
    return 128000;
  }

  /**
   * Build Chat Completions request params shared by chat() and stream().
   * Reasoning models get `developer`-role system messages,
   * `max_completion_tokens`, optional `reasoning_effort`, and no
   * temperature; other models get `temperature` + `max_tokens`.
   * @private
   */
  _buildParams(model, messages, tools, options) {
    const isReasoning = this.isReasoningModel(model);
    const formattedMessages = isReasoning
      ? messages.map(m => m.role === 'system' ? { ...m, role: 'developer' } : m)
      : messages;

    const params = {
      model,
      messages: formattedMessages
    };

    if (isReasoning) {
      // max_completion_tokens (not max_tokens) for reasoning models
      params.max_completion_tokens = options.maxTokens || 16384;
      // reasoning_effort: low/medium/high (API default applies when unset)
      if (options.reasoningEffort || options.reasoning_effort) {
        params.reasoning_effort = options.reasoningEffort || options.reasoning_effort;
      }
      // No temperature for reasoning models
    } else {
      params.temperature = options.temperature ?? 0.7;
      params.max_tokens = options.maxTokens || 16384;
    }

    if (tools && tools.length > 0) {
      params.tools = this.formatTools(tools);
      params.tool_choice = options.toolChoice || 'auto';
    }

    return params;
  }

  /**
   * Non-streaming chat completion.
   * @returns {Promise<{content, toolCalls, finishReason, usage, model}>}
   */
  async chat(messages, tools = [], options = {}) {
    const model = options.model || this.config.model || this.getDefaultModel();
    const params = this._buildParams(model, messages, tools, options);

    const response = await this.client.chat.completions.create(params);
    const choice = response.choices[0];

    return {
      content: choice.message.content,
      toolCalls: choice.message.tool_calls || [],
      finishReason: choice.finish_reason,
      usage: response.usage ? {
        promptTokens: response.usage.prompt_tokens,
        completionTokens: response.usage.completion_tokens,
        totalTokens: response.usage.total_tokens
      } : null,
      model: response.model
    };
  }

  /**
   * Streaming chat completion. Yields {type:'content'} deltas and one
   * final {type:'done'} event with accumulated content, assembled tool
   * calls, finish reason, and usage.
   *
   * With stream_options.include_usage the API delivers usage in a
   * trailing chunk with an EMPTY choices array, AFTER the chunk that
   * carries finish_reason. The original yielded 'done' at finish_reason,
   * when finalUsage was still null, so usage was never reported; we now
   * record finish_reason and emit 'done' once the stream has closed.
   */
  async *stream(messages, tools = [], options = {}) {
    const model = options.model || this.config.model || this.getDefaultModel();
    const params = this._buildParams(model, messages, tools, options);
    params.stream = true;
    params.stream_options = { include_usage: true };
    const stream = await this.client.chat.completions.create(params);

    let currentToolCalls = [];
    let content = '';
    let finalUsage = null;
    let finishReason = null;

    for await (const chunk of stream) {
      if (chunk.usage) {
        finalUsage = {
          promptTokens: chunk.usage.prompt_tokens,
          completionTokens: chunk.usage.completion_tokens,
          totalTokens: chunk.usage.total_tokens
        };
      }

      const choice = chunk.choices?.[0];
      if (!choice) continue; // usage-only chunk has no choices

      if (choice.finish_reason) {
        finishReason = choice.finish_reason;
      }

      const delta = choice.delta;
      if (!delta) continue;

      if (delta.content) {
        content += delta.content;
        yield { type: 'content', content: delta.content };
      }

      if (delta.tool_calls) {
        // Tool-call fragments arrive keyed by index; accumulate name and
        // argument strings until the stream completes.
        for (const tc of delta.tool_calls) {
          if (tc.index !== undefined) {
            if (!currentToolCalls[tc.index]) {
              currentToolCalls[tc.index] = {
                id: tc.id || '',
                type: 'function',
                function: { name: '', arguments: '' }
              };
            }
            if (tc.id) currentToolCalls[tc.index].id = tc.id;
            if (tc.function?.name) currentToolCalls[tc.index].function.name += tc.function.name;
            if (tc.function?.arguments) currentToolCalls[tc.index].function.arguments += tc.function.arguments;
          }
        }
      }
    }

    yield {
      type: 'done',
      content,
      toolCalls: currentToolCalls.filter(tc => tc.id),
      finishReason: finishReason || 'stop',
      usage: finalUsage
    };
  }
}
|
|
166
|
+
|
|
167
|
+
module.exports = { OpenAIProvider };
|
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
const fs = require('fs');
|
|
2
|
+
const path = require('path');
|
|
3
|
+
const db = require('../../db/database');
|
|
4
|
+
|
|
5
|
+
const SKILLS_DIR = path.join(__dirname, '..', '..', '..', 'agent-data', 'skills');
|
|
6
|
+
|
|
7
|
+
class SkillRunner {
  /**
   * Loads, parses, and executes markdown-defined "skills" stored under
   * SKILLS_DIR and tracked in the `skills` DB table.
   */
  constructor() {
    // name -> parsed skill record (see parseSkillMd for shape)
    this.skills = new Map();
  }

  /**
   * Rebuild the in-memory skill map from disk (recursively scanning
   * SKILLS_DIR for SKILL.md files and loose *.md files) plus any
   * enabled DB-registered skills whose files still exist.
   */
  async loadSkills() {
    this.skills.clear();
    if (!fs.existsSync(SKILLS_DIR)) return;

    const loadDir = (dir) => {
      const entries = fs.readdirSync(dir, { withFileTypes: true });
      for (const entry of entries) {
        const fullPath = path.join(dir, entry.name);
        if (entry.isDirectory()) {
          const skillFile = path.join(fullPath, 'SKILL.md');
          if (fs.existsSync(skillFile)) {
            this.loadSkillFile(skillFile);
          }
          loadDir(fullPath);
        } else if (entry.name.endsWith('.md')) {
          this.loadSkillFile(fullPath);
        }
      }
    };

    loadDir(SKILLS_DIR);

    const dbSkills = db.prepare('SELECT * FROM skills WHERE enabled = 1').all();
    for (const skill of dbSkills) {
      if (fs.existsSync(skill.file_path)) {
        this.loadSkillFile(skill.file_path);
      }
    }
  }

  /**
   * Parse one skill markdown file and register it by name.
   * Unreadable or unparsable files are logged and skipped.
   */
  loadSkillFile(filePath) {
    try {
      const content = fs.readFileSync(filePath, 'utf-8');
      const skill = this.parseSkillMd(content, filePath);
      if (skill) {
        this.skills.set(skill.name, skill);
      }
    } catch (err) {
      console.error(`Failed to load skill from ${filePath}:`, err.message);
    }
  }

  /**
   * Parse a SKILL.md document: a `---`-delimited frontmatter of
   * `key: value` lines followed by a markdown instruction body.
   * Values that look like JSON objects/arrays are JSON-parsed
   * (best-effort); 'true'/'false' become booleans.
   * @returns {{name, description, metadata, instructions, filePath, dir}|null}
   *   null when frontmatter is missing or has no `name` key
   */
  parseSkillMd(content, filePath) {
    const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---\n?([\s\S]*)/);
    if (!frontmatterMatch) return null;

    const frontmatter = frontmatterMatch[1];
    const body = frontmatterMatch[2];

    const metadata = {};
    const lines = frontmatter.split('\n');
    for (const line of lines) {
      const match = line.match(/^(\w[\w-]*)\s*:\s*(.+)$/);
      if (match) {
        const key = match[1].trim();
        let value = match[2].trim();
        if (value.startsWith('{') || value.startsWith('[')) {
          try { value = JSON.parse(value); } catch {}
        } else if (value === 'true') value = true;
        else if (value === 'false') value = false;
        metadata[key] = value;
      }
    }

    if (!metadata.name) return null;

    return {
      name: metadata.name,
      description: metadata.description || '',
      metadata,
      instructions: body.trim(),
      filePath,
      dir: path.dirname(filePath)
    };
  }

  /**
   * Render a system-prompt section listing each skill's name,
   * description, and the first 500 chars of its instructions.
   * @returns {string} '' when no skills are loaded
   */
  getSkillsForPrompt() {
    const skills = Array.from(this.skills.values());
    if (skills.length === 0) return '';

    let prompt = '\n## Available Skills\n';
    for (const skill of skills) {
      prompt += `\n### ${skill.name}\n${skill.description}\n`;
      if (skill.instructions) {
        prompt += `${skill.instructions.slice(0, 500)}\n`;
      }
    }
    return prompt;
  }

  /**
   * Tool definitions for skills whose frontmatter sets `tool` —
   * exposed to the AI engine as callable functions.
   * @returns {Array<{name, description, parameters}>}
   */
  getToolDefinitions() {
    const tools = [];
    for (const skill of this.skills.values()) {
      if (skill.metadata.tool) {
        tools.push({
          name: skill.name,
          description: skill.description,
          parameters: skill.metadata.parameters || { type: 'object', properties: {} }
        });
      }
    }
    return tools;
  }

  /**
   * Execute a skill invoked as a tool. Skills with a `command` in their
   * frontmatter run it via CLIExecutor (with {placeholder} substitution
   * from args); others return their instructions for the model to follow.
   * @returns {Promise<object|null>} null when no such skill is loaded
   */
  async executeTool(toolName, args) {
    const skill = this.skills.get(toolName);
    if (!skill) return null;

    if (skill.metadata.command) {
      const { CLIExecutor } = require('../cli/executor');
      const executor = new CLIExecutor();
      let command = skill.metadata.command;
      for (const [key, value] of Object.entries(args)) {
        // replaceAll: the original used replace() with a string pattern,
        // which substitutes only the FIRST occurrence of each placeholder.
        // NOTE(review): values are interpolated into a command string
        // unescaped — confirm CLIExecutor sanitizes/quotes them, otherwise
        // model-supplied args can inject shell syntax.
        command = command.replaceAll(`{${key}}`, String(value));
      }
      return await executor.execute(command, { cwd: skill.dir });
    }

    return { skill: skill.name, instructions: skill.instructions, args };
  }

  /**
   * Create (or overwrite) a skill on disk and register it in the DB.
   * The name is slugified to [a-z0-9-].
   * @returns {{success: true, name: string, path: string}}
   */
  createSkill(name, description, instructions, metadata = {}) {
    const safeName = name.replace(/[^a-z0-9-]/gi, '-').toLowerCase();
    const skillDir = path.join(SKILLS_DIR, safeName);
    if (!fs.existsSync(skillDir)) fs.mkdirSync(skillDir, { recursive: true });

    const frontmatter = this._buildFrontmatter(safeName, description, metadata);
    const filePath = path.join(skillDir, 'SKILL.md');
    fs.writeFileSync(filePath, frontmatter + `\n\n${instructions}`);

    db.prepare('INSERT OR REPLACE INTO skills (name, description, file_path, metadata, auto_created, updated_at) VALUES (?, ?, ?, ?, 1, datetime(\'now\'))')
      .run(safeName, description, filePath, JSON.stringify(metadata));

    this.loadSkillFile(filePath);

    return { success: true, name: safeName, path: filePath };
  }

  /**
   * Update a loaded skill's description/instructions/metadata in place
   * (file + DB), then reload it.
   * @returns {{success: true, name, path}|{error: string}}
   */
  updateSkill(name, { description, instructions, metadata } = {}) {
    const skill = this.skills.get(name);
    if (!skill) return { error: `Skill '${name}' not found` };

    const newDesc = description !== undefined ? description : skill.description;
    const newInstructions = instructions !== undefined ? instructions : skill.instructions;
    // Merge: if metadata provided use it, otherwise preserve existing non-name/description fields
    let metaToWrite = {};
    if (metadata !== undefined) {
      metaToWrite = metadata;
    } else {
      const existing = { ...skill.metadata };
      delete existing.name;
      delete existing.description;
      metaToWrite = existing;
    }

    const frontmatter = this._buildFrontmatter(name, newDesc, metaToWrite);
    fs.writeFileSync(skill.filePath, frontmatter + `\n\n${newInstructions}`);
    db.prepare('UPDATE skills SET description = ?, updated_at = datetime(\'now\') WHERE name = ?').run(newDesc, name);
    this.loadSkillFile(skill.filePath);

    return { success: true, name, path: skill.filePath };
  }

  /**
   * Delete a skill's file (and its directory if now empty), its DB row,
   * and its in-memory entry. File removal is best-effort.
   * @returns {{success: true, deleted: string}|{error: string}}
   */
  deleteSkill(name) {
    const skill = this.skills.get(name);
    if (!skill) return { error: `Skill '${name}' not found` };

    try {
      fs.unlinkSync(skill.filePath);
      const dir = path.dirname(skill.filePath);
      if (path.basename(skill.filePath) === 'SKILL.md') {
        const remaining = fs.readdirSync(dir);
        if (remaining.length === 0) fs.rmdirSync(dir);
      }
    } catch (e) { /* best-effort: DB/memory cleanup still proceeds */ }

    db.prepare('DELETE FROM skills WHERE name = ?').run(name);
    this.skills.delete(name);

    return { success: true, deleted: name };
  }

  /**
   * Serialize frontmatter for a SKILL.md file. `name`/`description`
   * keys inside metadata are skipped (they are written explicitly);
   * object values are JSON-stringified.
   * @private
   */
  _buildFrontmatter(name, description, metadata = {}) {
    let fm = `---\nname: ${name}\ndescription: ${description}\n`;
    if (metadata && typeof metadata === 'object') {
      for (const [key, val] of Object.entries(metadata)) {
        if (key === 'name' || key === 'description') continue;
        fm += typeof val === 'object'
          ? `${key}: ${JSON.stringify(val)}\n`
          : `${key}: ${val}\n`;
      }
    }
    fm += `---`;
    return fm;
  }

  /** Summaries of all loaded skills (no instruction bodies). */
  getAll() {
    return Array.from(this.skills.values()).map(s => ({
      name: s.name,
      description: s.description,
      metadata: s.metadata,
      filePath: s.filePath
    }));
  }
}
|
|
217
|
+
|
|
218
|
+
module.exports = { SkillRunner };
|