kernelbot 1.0.24 → 1.0.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +8 -0
- package/README.md +92 -71
- package/bin/kernel.js +30 -21
- package/config.example.yaml +2 -1
- package/package.json +5 -1
- package/src/agent.js +137 -55
- package/src/bot.js +258 -65
- package/src/conversation.js +36 -0
- package/src/prompts/system.js +28 -42
- package/src/providers/anthropic.js +44 -0
- package/src/providers/base.js +30 -0
- package/src/providers/index.js +36 -0
- package/src/providers/models.js +54 -0
- package/src/providers/openai-compat.js +163 -0
- package/src/tools/categories.js +101 -0
- package/src/utils/config.js +160 -12
package/src/utils/config.js
CHANGED
|
@@ -5,20 +5,23 @@ import { createInterface } from 'readline';
|
|
|
5
5
|
import yaml from 'js-yaml';
|
|
6
6
|
import dotenv from 'dotenv';
|
|
7
7
|
import chalk from 'chalk';
|
|
8
|
+
import { PROVIDERS } from '../providers/models.js';
|
|
8
9
|
|
|
9
10
|
const DEFAULTS = {
|
|
10
11
|
bot: {
|
|
11
12
|
name: 'KernelBot',
|
|
12
13
|
description: 'AI engineering agent with full OS control',
|
|
13
14
|
},
|
|
14
|
-
|
|
15
|
+
brain: {
|
|
16
|
+
provider: 'anthropic',
|
|
15
17
|
model: 'claude-sonnet-4-20250514',
|
|
16
|
-
max_tokens:
|
|
18
|
+
max_tokens: 4096,
|
|
17
19
|
temperature: 0.3,
|
|
18
|
-
max_tool_depth:
|
|
20
|
+
max_tool_depth: 12,
|
|
19
21
|
},
|
|
20
22
|
telegram: {
|
|
21
23
|
allowed_users: [],
|
|
24
|
+
batch_window_ms: 3000,
|
|
22
25
|
},
|
|
23
26
|
claude_code: {
|
|
24
27
|
model: 'claude-opus-4-6',
|
|
@@ -44,6 +47,7 @@ const DEFAULTS = {
|
|
|
44
47
|
},
|
|
45
48
|
conversation: {
|
|
46
49
|
max_history: 50,
|
|
50
|
+
recent_window: 10,
|
|
47
51
|
},
|
|
48
52
|
};
|
|
49
53
|
|
|
@@ -90,9 +94,126 @@ function ask(rl, question) {
|
|
|
90
94
|
return new Promise((res) => rl.question(question, res));
|
|
91
95
|
}
|
|
92
96
|
|
|
97
|
+
/**
|
|
98
|
+
* Migrate legacy `anthropic` config section → `brain` section.
|
|
99
|
+
*/
|
|
100
|
+
function migrateAnthropicConfig(config) {
|
|
101
|
+
if (config.anthropic && !config.brain) {
|
|
102
|
+
config.brain = {
|
|
103
|
+
provider: 'anthropic',
|
|
104
|
+
model: config.anthropic.model || DEFAULTS.brain.model,
|
|
105
|
+
max_tokens: config.anthropic.max_tokens || DEFAULTS.brain.max_tokens,
|
|
106
|
+
temperature: config.anthropic.temperature ?? DEFAULTS.brain.temperature,
|
|
107
|
+
max_tool_depth: config.anthropic.max_tool_depth || DEFAULTS.brain.max_tool_depth,
|
|
108
|
+
};
|
|
109
|
+
if (config.anthropic.api_key) {
|
|
110
|
+
config.brain.api_key = config.anthropic.api_key;
|
|
111
|
+
}
|
|
112
|
+
}
|
|
113
|
+
return config;
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
/**
 * Interactive provider → model picker.
 * Lists every provider in PROVIDERS, lets the user pick one by number,
 * then lists that provider's models and lets the user pick a model.
 * Re-prompts until a valid number is entered.
 * @param {object} rl - readline interface used for prompting
 * @returns {Promise<{providerKey: string, modelId: string}>} chosen provider key and model id
 */
export async function promptProviderSelection(rl) {
  // Shared prompt loop: keep asking until the 1-based answer maps to [0, max).
  const pickIndex = async (prompt, max) => {
    for (;;) {
      const raw = await ask(rl, chalk.cyan(prompt));
      const idx = parseInt(raw.trim(), 10) - 1;
      if (idx >= 0 && idx < max) return idx;
      console.log(chalk.dim(' Invalid choice, try again.'));
    }
  };

  const providerKeys = Object.keys(PROVIDERS);

  console.log(chalk.bold('\n Select AI provider:\n'));
  for (const [i, key] of providerKeys.entries()) {
    console.log(` ${chalk.cyan(`${i + 1}.`)} ${PROVIDERS[key].name}`);
  }
  console.log('');

  const providerKey = providerKeys[await pickIndex(' Provider (number): ', providerKeys.length)];
  const provider = PROVIDERS[providerKey];

  console.log(chalk.bold(`\n Select model for ${provider.name}:\n`));
  for (const [i, m] of provider.models.entries()) {
    console.log(` ${chalk.cyan(`${i + 1}.`)} ${m.label} (${m.id})`);
  }
  console.log('');

  const chosen = provider.models[await pickIndex(' Model (number): ', provider.models.length)];
  return { providerKey, modelId: chosen.id };
}
|
|
156
|
+
|
|
157
|
+
/**
 * Save provider and model to config.yaml.
 * Merges into any existing `brain` section, removes the legacy `anthropic`
 * section, and creates the config directory when missing.
 * @param {string} providerKey - key into PROVIDERS (e.g. 'anthropic')
 * @param {string} modelId - model identifier to persist
 * @returns {string} path of the config file that was written
 */
export function saveProviderToYaml(providerKey, modelId) {
  const dir = getConfigDir();
  mkdirSync(dir, { recursive: true });
  const configPath = join(dir, 'config.yaml');

  const current = existsSync(configPath)
    ? (yaml.load(readFileSync(configPath, 'utf-8')) || {})
    : {};

  current.brain = {
    ...(current.brain || {}),
    provider: providerKey,
    model: modelId,
  };

  // Remove legacy anthropic section if migrating
  delete current.anthropic;

  writeFileSync(configPath, yaml.dump(current, { lineWidth: -1 }));
  return configPath;
}
|
|
182
|
+
|
|
183
|
+
/**
 * Full interactive flow: change brain model + optionally enter API key.
 * Runs the provider/model picker, persists the choice to config.yaml,
 * updates the in-memory config, and — when the provider's env key is not
 * already set — prompts for and stores an API key via saveCredential.
 * @param {object} config - live config object (mutated in place)
 * @param {object} rl - readline interface used for prompting
 * @returns {Promise<object>} the same config object, updated
 */
export async function changeBrainModel(config, rl) {
  const selection = await promptProviderSelection(rl);
  const providerDef = PROVIDERS[selection.providerKey];

  const savedPath = saveProviderToYaml(selection.providerKey, selection.modelId);
  console.log(chalk.dim(`\n Saved to ${savedPath}`));

  // Update live config
  config.brain.provider = selection.providerKey;
  config.brain.model = selection.modelId;

  // Check if we have the API key for this provider
  const { envKey } = providerDef;
  const existingKey = process.env[envKey];
  if (existingKey) {
    config.brain.api_key = existingKey;
  } else {
    const entered = await ask(rl, chalk.cyan(`\n ${providerDef.name} API key (${envKey}): `));
    const trimmed = entered.trim();
    if (trimmed) {
      saveCredential(config, envKey, trimmed);
      config.brain.api_key = trimmed;
      console.log(chalk.dim(' Saved.\n'));
    }
  }

  return config;
}
|
|
213
|
+
|
|
93
214
|
async function promptForMissing(config) {
|
|
94
215
|
const missing = [];
|
|
95
|
-
if (!config.
|
|
216
|
+
if (!config.brain.api_key) missing.push('brain_api_key');
|
|
96
217
|
if (!config.telegram.bot_token) missing.push('TELEGRAM_BOT_TOKEN');
|
|
97
218
|
|
|
98
219
|
if (missing.length === 0) return config;
|
|
@@ -110,10 +231,19 @@ async function promptForMissing(config) {
|
|
|
110
231
|
existingEnv = readFileSync(envPath, 'utf-8');
|
|
111
232
|
}
|
|
112
233
|
|
|
113
|
-
if (!mutableConfig.
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
234
|
+
if (!mutableConfig.brain.api_key) {
|
|
235
|
+
// Run provider selection flow
|
|
236
|
+
const { providerKey, modelId } = await promptProviderSelection(rl);
|
|
237
|
+
mutableConfig.brain.provider = providerKey;
|
|
238
|
+
mutableConfig.brain.model = modelId;
|
|
239
|
+
saveProviderToYaml(providerKey, modelId);
|
|
240
|
+
|
|
241
|
+
const providerDef = PROVIDERS[providerKey];
|
|
242
|
+
const envKey = providerDef.envKey;
|
|
243
|
+
|
|
244
|
+
const key = await ask(rl, chalk.cyan(`\n ${providerDef.name} API key: `));
|
|
245
|
+
mutableConfig.brain.api_key = key.trim();
|
|
246
|
+
envLines.push(`${envKey}=${key.trim()}`);
|
|
117
247
|
}
|
|
118
248
|
|
|
119
249
|
if (!mutableConfig.telegram.bot_token) {
|
|
@@ -164,12 +294,21 @@ export function loadConfig() {
|
|
|
164
294
|
fileConfig = yaml.load(raw) || {};
|
|
165
295
|
}
|
|
166
296
|
|
|
297
|
+
// Backward compat: migrate anthropic → brain
|
|
298
|
+
migrateAnthropicConfig(fileConfig);
|
|
299
|
+
|
|
167
300
|
const config = deepMerge(DEFAULTS, fileConfig);
|
|
168
301
|
|
|
169
|
-
// Overlay env vars for
|
|
170
|
-
|
|
171
|
-
|
|
302
|
+
// Overlay env vars for brain API key based on provider
|
|
303
|
+
const providerDef = PROVIDERS[config.brain.provider];
|
|
304
|
+
if (providerDef && process.env[providerDef.envKey]) {
|
|
305
|
+
config.brain.api_key = process.env[providerDef.envKey];
|
|
172
306
|
}
|
|
307
|
+
// Legacy fallback: ANTHROPIC_API_KEY for anthropic provider
|
|
308
|
+
if (config.brain.provider === 'anthropic' && !config.brain.api_key && process.env.ANTHROPIC_API_KEY) {
|
|
309
|
+
config.brain.api_key = process.env.ANTHROPIC_API_KEY;
|
|
310
|
+
}
|
|
311
|
+
|
|
173
312
|
if (process.env.TELEGRAM_BOT_TOKEN) {
|
|
174
313
|
config.telegram.bot_token = process.env.TELEGRAM_BOT_TOKEN;
|
|
175
314
|
}
|
|
@@ -221,7 +360,16 @@ export function saveCredential(config, envKey, value) {
|
|
|
221
360
|
config.github.token = value;
|
|
222
361
|
break;
|
|
223
362
|
case 'ANTHROPIC_API_KEY':
|
|
224
|
-
config.anthropic.api_key = value;
|
|
363
|
+
if (config.brain.provider === 'anthropic') config.brain.api_key = value;
|
|
364
|
+
break;
|
|
365
|
+
case 'OPENAI_API_KEY':
|
|
366
|
+
if (config.brain.provider === 'openai') config.brain.api_key = value;
|
|
367
|
+
break;
|
|
368
|
+
case 'GOOGLE_API_KEY':
|
|
369
|
+
if (config.brain.provider === 'google') config.brain.api_key = value;
|
|
370
|
+
break;
|
|
371
|
+
case 'GROQ_API_KEY':
|
|
372
|
+
if (config.brain.provider === 'groq') config.brain.api_key = value;
|
|
225
373
|
break;
|
|
226
374
|
case 'TELEGRAM_BOT_TOKEN':
|
|
227
375
|
config.telegram.bot_token = value;
|