kernelbot 1.0.24 → 1.0.26
This diff shows the changes between publicly released versions of this package as published to a supported registry. It is provided for informational purposes only.
- package/.env.example +8 -0
- package/README.md +92 -71
- package/bin/kernel.js +30 -21
- package/config.example.yaml +2 -1
- package/package.json +5 -1
- package/src/agent.js +137 -55
- package/src/bot.js +258 -65
- package/src/conversation.js +36 -0
- package/src/prompts/system.js +28 -42
- package/src/providers/anthropic.js +44 -0
- package/src/providers/base.js +30 -0
- package/src/providers/index.js +36 -0
- package/src/providers/models.js +54 -0
- package/src/providers/openai-compat.js +163 -0
- package/src/tools/categories.js +101 -0
- package/src/utils/config.js +160 -12
package/src/conversation.js
CHANGED
|
@@ -11,6 +11,7 @@ function getConversationsPath() {
|
|
|
11
11
|
export class ConversationManager {
|
|
12
12
|
constructor(config) {
|
|
13
13
|
this.maxHistory = config.conversation.max_history;
|
|
14
|
+
this.recentWindow = config.conversation.recent_window || 10;
|
|
14
15
|
this.conversations = new Map();
|
|
15
16
|
this.filePath = getConversationsPath();
|
|
16
17
|
}
|
|
@@ -49,6 +50,41 @@ export class ConversationManager {
|
|
|
49
50
|
return this.conversations.get(key);
|
|
50
51
|
}
|
|
51
52
|
|
|
53
|
+
/**
|
|
54
|
+
* Get history with older messages compressed into a summary.
|
|
55
|
+
* Keeps the last `recentWindow` messages verbatim and summarizes older ones.
|
|
56
|
+
*/
|
|
57
|
+
getSummarizedHistory(chatId) {
|
|
58
|
+
const history = this.getHistory(chatId);
|
|
59
|
+
|
|
60
|
+
if (history.length <= this.recentWindow) {
|
|
61
|
+
return [...history];
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
const olderMessages = history.slice(0, history.length - this.recentWindow);
|
|
65
|
+
const recentMessages = history.slice(history.length - this.recentWindow);
|
|
66
|
+
|
|
67
|
+
// Compress older messages into a single summary
|
|
68
|
+
const summaryLines = olderMessages.map((msg) => {
|
|
69
|
+
const content = typeof msg.content === 'string'
|
|
70
|
+
? msg.content.slice(0, 200)
|
|
71
|
+
: JSON.stringify(msg.content).slice(0, 200);
|
|
72
|
+
return `[${msg.role}]: ${content}`;
|
|
73
|
+
});
|
|
74
|
+
|
|
75
|
+
const summaryMessage = {
|
|
76
|
+
role: 'user',
|
|
77
|
+
content: `[CONVERSATION SUMMARY - ${olderMessages.length} earlier messages]\n${summaryLines.join('\n')}`,
|
|
78
|
+
};
|
|
79
|
+
|
|
80
|
+
// Ensure result starts with user role
|
|
81
|
+
const result = [summaryMessage, ...recentMessages];
|
|
82
|
+
|
|
83
|
+
// If the first real message after summary is assistant, that's fine since
|
|
84
|
+
// our summary is role:user. But ensure recent starts correctly.
|
|
85
|
+
return result;
|
|
86
|
+
}
|
|
87
|
+
|
|
52
88
|
addMessage(chatId, role, content) {
|
|
53
89
|
const history = this.getHistory(chatId);
|
|
54
90
|
history.push({ role, content });
|
package/src/prompts/system.js
CHANGED
|
@@ -1,46 +1,32 @@
|
|
|
1
|
-
import { toolDefinitions } from '../tools/index.js';
|
|
2
|
-
|
|
3
1
|
export function getSystemPrompt(config) {
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
##
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
-
|
|
25
|
-
-
|
|
26
|
-
-
|
|
27
|
-
-
|
|
28
|
-
- When a user sends /extract <url> <selector>, use extract_content with that URL and selector
|
|
29
|
-
|
|
30
|
-
You are the orchestrator. Claude Code is the coder. Never use read_file + write_file to modify source code — that's Claude Code's job. You handle git, GitHub, and infrastructure. Claude Code handles all code changes.
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
## Non-Coding Tasks (monitoring, deploying, restarting services, checking status)
|
|
34
|
-
- Use OS, Docker, process, network, and monitoring tools directly
|
|
35
|
-
- No need to spawn Claude Code for these
|
|
2
|
+
return `You are ${config.bot.name}, a senior software engineer and sysadmin AI agent on Telegram. Be concise — this is chat, not documentation.
|
|
3
|
+
|
|
4
|
+
## Coding Tasks
|
|
5
|
+
NEVER write code yourself with read_file/write_file. ALWAYS use spawn_claude_code.
|
|
6
|
+
1. Clone repo + create branch (git tools)
|
|
7
|
+
2. spawn_claude_code with a clear, detailed prompt
|
|
8
|
+
3. Commit + push (git tools)
|
|
9
|
+
4. Create PR (GitHub tools) and report the link
|
|
10
|
+
|
|
11
|
+
## Web Browsing
|
|
12
|
+
- browse_website: read/summarize pages
|
|
13
|
+
- screenshot_website: visual snapshots (auto-sent to chat)
|
|
14
|
+
- extract_content: pull data via CSS selectors
|
|
15
|
+
- interact_with_page: click/type/scroll on pages
|
|
16
|
+
- send_image: send any image file to chat
|
|
17
|
+
|
|
18
|
+
## Non-Coding Tasks
|
|
19
|
+
Use OS, Docker, process, network, and monitoring tools directly. No need for Claude Code.
|
|
20
|
+
|
|
21
|
+
## Efficiency Rules
|
|
22
|
+
- Chain shell commands with && in execute_command instead of multiple calls
|
|
23
|
+
- Read multiple files with one execute_command("cat file1 file2") instead of multiple read_file calls
|
|
24
|
+
- Plan first, gather info in one step, then act
|
|
25
|
+
- Keep responses under 500 words unless asked for details
|
|
36
26
|
|
|
37
27
|
## Guidelines
|
|
38
|
-
- Use tools proactively
|
|
39
|
-
-
|
|
40
|
-
-
|
|
41
|
-
-
|
|
42
|
-
- For destructive operations (rm, kill, service stop, force push), confirm with the user first.
|
|
43
|
-
- Never expose API keys, tokens, or secrets in your responses.
|
|
44
|
-
- If a task will take a while, tell the user upfront.
|
|
45
|
-
- If something fails, explain what went wrong and suggest a fix.`;
|
|
28
|
+
- Use tools proactively — don't describe what you'd do, just do it.
|
|
29
|
+
- If a command fails, analyze and try an alternative.
|
|
30
|
+
- For destructive ops (rm, kill, force push), confirm with the user first.
|
|
31
|
+
- Never expose secrets in responses.`;
|
|
46
32
|
}
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
2
|
+
import { BaseProvider } from './base.js';
|
|
3
|
+
|
|
4
|
+
export class AnthropicProvider extends BaseProvider {
|
|
5
|
+
constructor(opts) {
|
|
6
|
+
super(opts);
|
|
7
|
+
this.client = new Anthropic({ apiKey: this.apiKey });
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
async chat({ system, messages, tools }) {
|
|
11
|
+
const response = await this.client.messages.create({
|
|
12
|
+
model: this.model,
|
|
13
|
+
max_tokens: this.maxTokens,
|
|
14
|
+
temperature: this.temperature,
|
|
15
|
+
system,
|
|
16
|
+
tools,
|
|
17
|
+
messages,
|
|
18
|
+
});
|
|
19
|
+
|
|
20
|
+
const stopReason = response.stop_reason === 'end_turn' ? 'end_turn' : 'tool_use';
|
|
21
|
+
|
|
22
|
+
const textBlocks = response.content.filter((b) => b.type === 'text');
|
|
23
|
+
const text = textBlocks.map((b) => b.text).join('\n');
|
|
24
|
+
|
|
25
|
+
const toolCalls = response.content
|
|
26
|
+
.filter((b) => b.type === 'tool_use')
|
|
27
|
+
.map((b) => ({ id: b.id, name: b.name, input: b.input }));
|
|
28
|
+
|
|
29
|
+
return {
|
|
30
|
+
stopReason,
|
|
31
|
+
text,
|
|
32
|
+
toolCalls,
|
|
33
|
+
rawContent: response.content,
|
|
34
|
+
};
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
async ping() {
|
|
38
|
+
await this.client.messages.create({
|
|
39
|
+
model: this.model,
|
|
40
|
+
max_tokens: 16,
|
|
41
|
+
messages: [{ role: 'user', content: 'ping' }],
|
|
42
|
+
});
|
|
43
|
+
}
|
|
44
|
+
}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Abstract provider interface.
|
|
3
|
+
* Every provider must implement chat() and ping().
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
export class BaseProvider {
|
|
7
|
+
constructor({ model, maxTokens, temperature, apiKey }) {
|
|
8
|
+
this.model = model;
|
|
9
|
+
this.maxTokens = maxTokens;
|
|
10
|
+
this.temperature = temperature;
|
|
11
|
+
this.apiKey = apiKey;
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
/**
|
|
15
|
+
* Send a chat completion request.
|
|
16
|
+
* @param {object} opts
|
|
17
|
+
* @param {string} opts.system - System prompt
|
|
18
|
+
* @param {Array} opts.messages - Anthropic-format messages
|
|
19
|
+
* @param {Array} opts.tools - Anthropic-format tool definitions
|
|
20
|
+
* @returns {Promise<{stopReason: 'end_turn'|'tool_use', text: string, toolCalls: Array<{id,name,input}>, rawContent: Array}>}
|
|
21
|
+
*/
|
|
22
|
+
async chat({ system, messages, tools }) {
|
|
23
|
+
throw new Error('chat() not implemented');
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
/** Quick connectivity test — throws on failure. */
|
|
27
|
+
async ping() {
|
|
28
|
+
throw new Error('ping() not implemented');
|
|
29
|
+
}
|
|
30
|
+
}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
import { AnthropicProvider } from './anthropic.js';
|
|
2
|
+
import { OpenAICompatProvider } from './openai-compat.js';
|
|
3
|
+
import { PROVIDERS } from './models.js';
|
|
4
|
+
|
|
5
|
+
export { PROVIDERS } from './models.js';
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Create the right provider based on config.brain.
|
|
9
|
+
* @param {object} config - Full app config (must have config.brain)
|
|
10
|
+
* @returns {BaseProvider}
|
|
11
|
+
*/
|
|
12
|
+
export function createProvider(config) {
|
|
13
|
+
const { provider, model, max_tokens, temperature, api_key } = config.brain;
|
|
14
|
+
|
|
15
|
+
const providerDef = PROVIDERS[provider];
|
|
16
|
+
if (!providerDef) {
|
|
17
|
+
throw new Error(`Unknown provider: ${provider}. Valid: ${Object.keys(PROVIDERS).join(', ')}`);
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
const opts = {
|
|
21
|
+
model,
|
|
22
|
+
maxTokens: max_tokens,
|
|
23
|
+
temperature,
|
|
24
|
+
apiKey: api_key,
|
|
25
|
+
};
|
|
26
|
+
|
|
27
|
+
if (provider === 'anthropic') {
|
|
28
|
+
return new AnthropicProvider(opts);
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
// OpenAI, Google, Groq — all use OpenAI-compatible API
|
|
32
|
+
return new OpenAICompatProvider({
|
|
33
|
+
...opts,
|
|
34
|
+
baseUrl: providerDef.baseUrl || undefined,
|
|
35
|
+
});
|
|
36
|
+
}
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Provider & model catalog — single source of truth.
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
export const PROVIDERS = {
|
|
6
|
+
anthropic: {
|
|
7
|
+
name: 'Anthropic (Claude)',
|
|
8
|
+
envKey: 'ANTHROPIC_API_KEY',
|
|
9
|
+
models: [
|
|
10
|
+
{ id: 'claude-sonnet-4-20250514', label: 'Claude Sonnet 4' },
|
|
11
|
+
{ id: 'claude-opus-4-20250514', label: 'Claude Opus 4' },
|
|
12
|
+
{ id: 'claude-haiku-4-5-20251001', label: 'Claude Haiku 4.5' },
|
|
13
|
+
],
|
|
14
|
+
},
|
|
15
|
+
openai: {
|
|
16
|
+
name: 'OpenAI (GPT)',
|
|
17
|
+
envKey: 'OPENAI_API_KEY',
|
|
18
|
+
models: [
|
|
19
|
+
{ id: 'gpt-4o', label: 'GPT-4o' },
|
|
20
|
+
{ id: 'gpt-4o-mini', label: 'GPT-4o Mini' },
|
|
21
|
+
{ id: 'o1', label: 'o1' },
|
|
22
|
+
{ id: 'o3-mini', label: 'o3-mini' },
|
|
23
|
+
],
|
|
24
|
+
},
|
|
25
|
+
google: {
|
|
26
|
+
name: 'Google (Gemini)',
|
|
27
|
+
envKey: 'GOOGLE_API_KEY',
|
|
28
|
+
baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai/',
|
|
29
|
+
models: [
|
|
30
|
+
{ id: 'gemini-2.0-flash', label: 'Gemini 2.0 Flash' },
|
|
31
|
+
{ id: 'gemini-2.5-pro', label: 'Gemini 2.5 Pro' },
|
|
32
|
+
],
|
|
33
|
+
},
|
|
34
|
+
groq: {
|
|
35
|
+
name: 'Groq',
|
|
36
|
+
envKey: 'GROQ_API_KEY',
|
|
37
|
+
baseUrl: 'https://api.groq.com/openai/v1',
|
|
38
|
+
models: [
|
|
39
|
+
{ id: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70B' },
|
|
40
|
+
{ id: 'llama-3.1-8b-instant', label: 'Llama 3.1 8B' },
|
|
41
|
+
{ id: 'mixtral-8x7b-32768', label: 'Mixtral 8x7B' },
|
|
42
|
+
],
|
|
43
|
+
},
|
|
44
|
+
};
|
|
45
|
+
|
|
46
|
+
/** Models that don't support system prompts or temperature (reasoning models). */
|
|
47
|
+
export const REASONING_MODELS = new Set(['o1', 'o3-mini']);
|
|
48
|
+
|
|
49
|
+
export function getProviderForModel(modelId) {
|
|
50
|
+
for (const [key, provider] of Object.entries(PROVIDERS)) {
|
|
51
|
+
if (provider.models.some((m) => m.id === modelId)) return key;
|
|
52
|
+
}
|
|
53
|
+
return null;
|
|
54
|
+
}
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
import OpenAI from 'openai';
|
|
2
|
+
import { BaseProvider } from './base.js';
|
|
3
|
+
import { REASONING_MODELS } from './models.js';
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* OpenAI-compatible provider — works with OpenAI, Groq, and Google Gemini
|
|
7
|
+
* via configurable baseURL.
|
|
8
|
+
*/
|
|
9
|
+
export class OpenAICompatProvider extends BaseProvider {
|
|
10
|
+
constructor(opts) {
|
|
11
|
+
super(opts);
|
|
12
|
+
this.client = new OpenAI({
|
|
13
|
+
apiKey: this.apiKey,
|
|
14
|
+
...(opts.baseUrl && { baseURL: opts.baseUrl }),
|
|
15
|
+
});
|
|
16
|
+
this.isReasoningModel = REASONING_MODELS.has(this.model);
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
// ── Format conversion helpers ──
|
|
20
|
+
|
|
21
|
+
/** Anthropic tool defs → OpenAI function tool defs */
|
|
22
|
+
_convertTools(tools) {
|
|
23
|
+
if (!tools || tools.length === 0) return undefined;
|
|
24
|
+
return tools.map((t) => ({
|
|
25
|
+
type: 'function',
|
|
26
|
+
function: {
|
|
27
|
+
name: t.name,
|
|
28
|
+
description: t.description,
|
|
29
|
+
parameters: t.input_schema,
|
|
30
|
+
},
|
|
31
|
+
}));
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
/** Anthropic messages → OpenAI messages */
|
|
35
|
+
_convertMessages(system, messages) {
|
|
36
|
+
const out = [];
|
|
37
|
+
|
|
38
|
+
// System prompt as first message (skip for reasoning models)
|
|
39
|
+
if (system && !this.isReasoningModel) {
|
|
40
|
+
const systemText = Array.isArray(system)
|
|
41
|
+
? system.map((b) => b.text).join('\n')
|
|
42
|
+
: system;
|
|
43
|
+
out.push({ role: 'system', content: systemText });
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
for (const msg of messages) {
|
|
47
|
+
if (msg.role === 'user') {
|
|
48
|
+
// Could be a string, content blocks, or tool_result array
|
|
49
|
+
if (typeof msg.content === 'string') {
|
|
50
|
+
out.push({ role: 'user', content: msg.content });
|
|
51
|
+
} else if (Array.isArray(msg.content)) {
|
|
52
|
+
// Check if it's tool results
|
|
53
|
+
if (msg.content[0]?.type === 'tool_result') {
|
|
54
|
+
for (const tr of msg.content) {
|
|
55
|
+
out.push({
|
|
56
|
+
role: 'tool',
|
|
57
|
+
tool_call_id: tr.tool_use_id,
|
|
58
|
+
content: typeof tr.content === 'string' ? tr.content : JSON.stringify(tr.content),
|
|
59
|
+
});
|
|
60
|
+
}
|
|
61
|
+
} else {
|
|
62
|
+
// Text content blocks
|
|
63
|
+
const text = msg.content
|
|
64
|
+
.filter((b) => b.type === 'text')
|
|
65
|
+
.map((b) => b.text)
|
|
66
|
+
.join('\n');
|
|
67
|
+
out.push({ role: 'user', content: text || '' });
|
|
68
|
+
}
|
|
69
|
+
}
|
|
70
|
+
} else if (msg.role === 'assistant') {
|
|
71
|
+
// Convert Anthropic content blocks → OpenAI format
|
|
72
|
+
if (typeof msg.content === 'string') {
|
|
73
|
+
out.push({ role: 'assistant', content: msg.content });
|
|
74
|
+
} else if (Array.isArray(msg.content)) {
|
|
75
|
+
const textParts = msg.content.filter((b) => b.type === 'text');
|
|
76
|
+
const toolParts = msg.content.filter((b) => b.type === 'tool_use');
|
|
77
|
+
|
|
78
|
+
const assistantMsg = {
|
|
79
|
+
role: 'assistant',
|
|
80
|
+
content: textParts.map((b) => b.text).join('\n') || null,
|
|
81
|
+
};
|
|
82
|
+
|
|
83
|
+
if (toolParts.length > 0) {
|
|
84
|
+
assistantMsg.tool_calls = toolParts.map((b) => ({
|
|
85
|
+
id: b.id,
|
|
86
|
+
type: 'function',
|
|
87
|
+
function: {
|
|
88
|
+
name: b.name,
|
|
89
|
+
arguments: JSON.stringify(b.input),
|
|
90
|
+
},
|
|
91
|
+
}));
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
out.push(assistantMsg);
|
|
95
|
+
}
|
|
96
|
+
}
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
return out;
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
/** OpenAI response → normalized format with rawContent in Anthropic format */
|
|
103
|
+
_normalizeResponse(response) {
|
|
104
|
+
const choice = response.choices[0];
|
|
105
|
+
const finishReason = choice.finish_reason;
|
|
106
|
+
|
|
107
|
+
const stopReason = finishReason === 'tool_calls' ? 'tool_use' : 'end_turn';
|
|
108
|
+
|
|
109
|
+
const text = choice.message.content || '';
|
|
110
|
+
|
|
111
|
+
const toolCalls = (choice.message.tool_calls || []).map((tc) => ({
|
|
112
|
+
id: tc.id,
|
|
113
|
+
name: tc.function.name,
|
|
114
|
+
input: JSON.parse(tc.function.arguments),
|
|
115
|
+
}));
|
|
116
|
+
|
|
117
|
+
// Build rawContent in Anthropic format for message history consistency
|
|
118
|
+
const rawContent = [];
|
|
119
|
+
if (text) {
|
|
120
|
+
rawContent.push({ type: 'text', text });
|
|
121
|
+
}
|
|
122
|
+
for (const tc of toolCalls) {
|
|
123
|
+
rawContent.push({ type: 'tool_use', id: tc.id, name: tc.name, input: tc.input });
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
return { stopReason, text, toolCalls, rawContent };
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
// ── Public API ──
|
|
130
|
+
|
|
131
|
+
async chat({ system, messages, tools }) {
|
|
132
|
+
const params = {
|
|
133
|
+
model: this.model,
|
|
134
|
+
messages: this._convertMessages(system, messages),
|
|
135
|
+
};
|
|
136
|
+
|
|
137
|
+
if (!this.isReasoningModel) {
|
|
138
|
+
params.temperature = this.temperature;
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
params.max_tokens = this.maxTokens;
|
|
142
|
+
|
|
143
|
+
const convertedTools = this._convertTools(tools);
|
|
144
|
+
if (convertedTools) {
|
|
145
|
+
params.tools = convertedTools;
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
const response = await this.client.chat.completions.create(params);
|
|
149
|
+
return this._normalizeResponse(response);
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
async ping() {
|
|
153
|
+
const params = {
|
|
154
|
+
model: this.model,
|
|
155
|
+
max_tokens: 16,
|
|
156
|
+
messages: [{ role: 'user', content: 'ping' }],
|
|
157
|
+
};
|
|
158
|
+
if (!this.isReasoningModel) {
|
|
159
|
+
params.temperature = 0;
|
|
160
|
+
}
|
|
161
|
+
await this.client.chat.completions.create(params);
|
|
162
|
+
}
|
|
163
|
+
}
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Smart tool filtering — send only relevant tools per request to save tokens.
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
export const TOOL_CATEGORIES = {
|
|
6
|
+
core: ['execute_command', 'read_file', 'write_file', 'list_directory'],
|
|
7
|
+
git: ['git_clone', 'git_checkout', 'git_commit', 'git_push', 'git_diff'],
|
|
8
|
+
github: ['github_create_pr', 'github_get_pr_diff', 'github_post_review', 'github_create_repo', 'github_list_prs'],
|
|
9
|
+
coding: ['spawn_claude_code'],
|
|
10
|
+
docker: ['docker_ps', 'docker_logs', 'docker_exec', 'docker_compose'],
|
|
11
|
+
process: ['process_list', 'kill_process', 'service_control'],
|
|
12
|
+
monitor: ['disk_usage', 'memory_usage', 'cpu_usage', 'system_logs'],
|
|
13
|
+
network: ['check_port', 'curl_url', 'nginx_reload'],
|
|
14
|
+
browser: ['browse_website', 'screenshot_website', 'extract_content', 'send_image', 'interact_with_page'],
|
|
15
|
+
jira: ['jira_get_ticket', 'jira_search_tickets', 'jira_list_my_tickets', 'jira_get_project_tickets'],
|
|
16
|
+
};
|
|
17
|
+
|
|
18
|
+
const CATEGORY_KEYWORDS = {
|
|
19
|
+
coding: ['code', 'fix', 'bug', 'implement', 'refactor', 'build', 'feature', 'develop', 'program', 'write code', 'add feature', 'change', 'update', 'modify', 'create app', 'scaffold', 'debug', 'patch', 'review'],
|
|
20
|
+
git: ['git', 'commit', 'branch', 'merge', 'clone', 'pull', 'push', 'diff', 'stash', 'rebase', 'checkout', 'repo'],
|
|
21
|
+
github: ['pr', 'pull request', 'github', 'review', 'merge request'],
|
|
22
|
+
docker: ['docker', 'container', 'compose', 'image', 'kubernetes', 'k8s'],
|
|
23
|
+
process: ['process', 'kill', 'restart', 'service', 'daemon', 'systemctl', 'pid'],
|
|
24
|
+
monitor: ['disk', 'memory', 'cpu', 'usage', 'monitor', 'logs', 'status', 'health', 'space'],
|
|
25
|
+
network: ['port', 'curl', 'http', 'nginx', 'network', 'api', 'endpoint', 'request', 'url', 'fetch'],
|
|
26
|
+
browser: ['browse', 'screenshot', 'scrape', 'website', 'web page', 'webpage', 'extract content', 'html', 'css selector'],
|
|
27
|
+
jira: ['jira', 'ticket', 'issue', 'sprint', 'backlog', 'story', 'epic'],
|
|
28
|
+
};
|
|
29
|
+
|
|
30
|
+
// Categories that imply other categories
|
|
31
|
+
const CATEGORY_DEPS = {
|
|
32
|
+
coding: ['git', 'github'],
|
|
33
|
+
github: ['git'],
|
|
34
|
+
};
|
|
35
|
+
|
|
36
|
+
/**
|
|
37
|
+
* Select relevant tools for a user message based on keyword matching.
|
|
38
|
+
* Always includes 'core' tools. Falls back to ALL tools if nothing specific matched.
|
|
39
|
+
*/
|
|
40
|
+
export function selectToolsForMessage(userMessage, allTools) {
|
|
41
|
+
const lower = userMessage.toLowerCase();
|
|
42
|
+
const matched = new Set(['core']);
|
|
43
|
+
|
|
44
|
+
for (const [category, keywords] of Object.entries(CATEGORY_KEYWORDS)) {
|
|
45
|
+
for (const kw of keywords) {
|
|
46
|
+
if (lower.includes(kw)) {
|
|
47
|
+
matched.add(category);
|
|
48
|
+
// Add dependencies
|
|
49
|
+
const deps = CATEGORY_DEPS[category];
|
|
50
|
+
if (deps) deps.forEach((d) => matched.add(d));
|
|
51
|
+
break;
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
// Fallback: if only core matched, the request is ambiguous — send all tools
|
|
57
|
+
if (matched.size === 1) {
|
|
58
|
+
return allTools;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
// Build the filtered tool name set
|
|
62
|
+
const toolNames = new Set();
|
|
63
|
+
for (const cat of matched) {
|
|
64
|
+
const names = TOOL_CATEGORIES[cat];
|
|
65
|
+
if (names) names.forEach((n) => toolNames.add(n));
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
return allTools.filter((t) => toolNames.has(t.name));
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
/**
|
|
72
|
+
* After a tool is used, expand the tool set to include related categories
|
|
73
|
+
* so the model can use follow-up tools it might need.
|
|
74
|
+
*/
|
|
75
|
+
export function expandToolsForUsed(usedToolNames, currentTools, allTools) {
|
|
76
|
+
const currentNames = new Set(currentTools.map((t) => t.name));
|
|
77
|
+
const needed = new Set();
|
|
78
|
+
|
|
79
|
+
for (const name of usedToolNames) {
|
|
80
|
+
// Find which category this tool belongs to
|
|
81
|
+
for (const [cat, tools] of Object.entries(TOOL_CATEGORIES)) {
|
|
82
|
+
if (tools.includes(name)) {
|
|
83
|
+
// Add deps for that category
|
|
84
|
+
const deps = CATEGORY_DEPS[cat];
|
|
85
|
+
if (deps) {
|
|
86
|
+
for (const dep of deps) {
|
|
87
|
+
for (const t of TOOL_CATEGORIES[dep]) {
|
|
88
|
+
if (!currentNames.has(t)) needed.add(t);
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
break;
|
|
93
|
+
}
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
if (needed.size === 0) return currentTools;
|
|
98
|
+
|
|
99
|
+
const extra = allTools.filter((t) => needed.has(t.name));
|
|
100
|
+
return [...currentTools, ...extra];
|
|
101
|
+
}
|