kernelbot 1.0.24 → 1.0.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.env.example CHANGED
@@ -1,5 +1,13 @@
1
+ # AI provider API keys (only the one matching your brain.provider is required)
1
2
  ANTHROPIC_API_KEY=sk-ant-...
3
+ OPENAI_API_KEY=sk-...
4
+ GOOGLE_API_KEY=AIza...
5
+ GROQ_API_KEY=gsk_...
6
+
7
+ # Required
2
8
  TELEGRAM_BOT_TOKEN=123456:ABC-DEF...
9
+
10
+ # Optional
3
11
  GITHUB_TOKEN=ghp_...
4
12
  JIRA_BASE_URL=https://yourcompany.atlassian.net
5
13
  JIRA_EMAIL=you@company.com
package/README.md CHANGED
@@ -1,13 +1,14 @@
1
1
  # KernelBot
2
2
 
3
- [kernelbot.io](https://kernelbot.io) | [npm](https://www.npmjs.com/package/kernelbot) | [GitHub](https://github.com/KernelCode/KERNEL)
3
+ [kernelbot.io](https://kernelbot.io) | [npm](https://www.npmjs.com/package/kernelbot) | [GitHub](https://github.com/KernelCode/kernelbot)
4
4
 
5
- AI engineering agent — a Telegram bot backed by Claude with full OS control via tool use.
5
+ AI engineering agent — a Telegram bot backed by Claude, GPT, Gemini, or Groq with full OS control via tool use.
6
6
 
7
7
  Send a message in Telegram, and KernelBot will read files, write code, run commands, browse the web, manage infrastructure, and respond with the results. It's your personal engineering assistant with direct access to your machine.
8
8
 
9
9
  ## Features
10
10
 
11
+ - **Multi-model support** — choose your AI brain: Anthropic (Claude), OpenAI (GPT), Google (Gemini), or Groq (Llama/Mixtral). Switch models anytime from the CLI menu
11
12
  - **Autonomous agent loop** — send one message and KernelBot chains tool calls until the task is done, no hand-holding needed
12
13
  - **Full shell access** — run any command, install packages, build projects, run tests
13
14
  - **File management** — read, write, and create files with automatic directory creation
@@ -30,14 +31,14 @@ Send a message in Telegram, and KernelBot will read files, write code, run comma
30
31
  ## How It Works
31
32
 
32
33
  ```text
33
- You (Telegram) → KernelBot → Claude (Anthropic API)
34
+ You (Telegram) → KernelBot → AI Brain (Claude / GPT / Gemini / Groq)
34
35
 
35
36
  Tools (shell, files, git, docker, browser, etc.)
36
37
 
37
38
  Claude Code CLI (coding tasks)
38
39
  ```
39
40
 
40
- KernelBot runs a **tool-use loop**: Claude decides which tools to call, KernelBot executes them on your OS, feeds results back, and Claude continues until the task is done. One message can trigger dozens of tool calls autonomously.
41
+ KernelBot runs a **tool-use loop**: the AI decides which tools to call, KernelBot executes them on your OS, feeds results back, and the AI continues until the task is done. One message can trigger dozens of tool calls autonomously.
41
42
 
42
43
  For complex coding tasks, KernelBot can spawn **Claude Code CLI** as a sub-agent — giving it a dedicated coding environment with its own tool loop for writing, editing, and debugging code.
43
44
 
@@ -45,85 +46,85 @@ For complex coding tasks, KernelBot can spawn **Claude Code CLI** as a sub-agent
45
46
 
46
47
  ### File System & Shell
47
48
 
48
- | Tool | Description |
49
- | --- | --- |
50
- | `execute_command` | Run any shell command (git, npm, python, etc.) |
51
- | `read_file` | Read file contents with optional line limits |
52
- | `write_file` | Write/create files, auto-creates parent directories |
53
- | `list_directory` | List directory contents, optionally recursive |
49
+ | Tool | Description |
50
+ | ----------------- | --------------------------------------------------- |
51
+ | `execute_command` | Run any shell command (git, npm, python, etc.) |
52
+ | `read_file` | Read file contents with optional line limits |
53
+ | `write_file` | Write/create files, auto-creates parent directories |
54
+ | `list_directory` | List directory contents, optionally recursive |
54
55
 
55
56
  ### Git & GitHub
56
57
 
57
- | Tool | Description |
58
- | --- | --- |
59
- | `git_clone` | Clone a repo (`org/repo` shorthand or full URL) |
60
- | `git_checkout` | Checkout or create branches |
61
- | `git_commit` | Stage all changes and commit |
62
- | `git_push` | Push current branch to remote |
63
- | `git_diff` | Show uncommitted changes |
64
- | `github_create_pr` | Create a pull request |
65
- | `github_get_pr_diff` | Get the diff of a PR |
66
- | `github_post_review` | Post a review on a PR |
67
- | `github_create_repo` | Create a new GitHub repository |
68
- | `github_list_prs` | List pull requests for a repo |
58
+ | Tool | Description |
59
+ | -------------------- | ----------------------------------------------- |
60
+ | `git_clone` | Clone a repo (`org/repo` shorthand or full URL) |
61
+ | `git_checkout` | Checkout or create branches |
62
+ | `git_commit` | Stage all changes and commit |
63
+ | `git_push` | Push current branch to remote |
64
+ | `git_diff` | Show uncommitted changes |
65
+ | `github_create_pr` | Create a pull request |
66
+ | `github_get_pr_diff` | Get the diff of a PR |
67
+ | `github_post_review` | Post a review on a PR |
68
+ | `github_create_repo` | Create a new GitHub repository |
69
+ | `github_list_prs` | List pull requests for a repo |
69
70
 
70
71
  ### Web Browsing
71
72
 
72
- | Tool | Description |
73
- | --- | --- |
74
- | `browse_website` | Navigate to a URL and extract page content (title, headings, text, links) |
75
- | `screenshot_website` | Take a screenshot of a website, supports full-page and element capture |
76
- | `extract_content` | Extract specific content using CSS selectors |
77
- | `interact_with_page` | Click, type, scroll, and run JS on a webpage |
78
- | `send_image` | Send an image/screenshot directly to the Telegram chat |
73
+ | Tool | Description |
74
+ | -------------------- | ------------------------------------------------------------------------- |
75
+ | `browse_website` | Navigate to a URL and extract page content (title, headings, text, links) |
76
+ | `screenshot_website` | Take a screenshot of a website, supports full-page and element capture |
77
+ | `extract_content` | Extract specific content using CSS selectors |
78
+ | `interact_with_page` | Click, type, scroll, and run JS on a webpage |
79
+ | `send_image` | Send an image/screenshot directly to the Telegram chat |
79
80
 
80
81
  ### JIRA
81
82
 
82
- | Tool | Description |
83
- | --- | --- |
84
- | `jira_get_ticket` | Get details of a specific JIRA ticket |
85
- | `jira_search_tickets` | Search tickets using JQL queries |
86
- | `jira_list_my_tickets` | List tickets assigned to the current user |
87
- | `jira_get_project_tickets` | Get tickets from a specific JIRA project |
83
+ | Tool | Description |
84
+ | -------------------------- | ----------------------------------------- |
85
+ | `jira_get_ticket` | Get details of a specific JIRA ticket |
86
+ | `jira_search_tickets` | Search tickets using JQL queries |
87
+ | `jira_list_my_tickets` | List tickets assigned to the current user |
88
+ | `jira_get_project_tickets` | Get tickets from a specific JIRA project |
88
89
 
89
90
  ### Docker
90
91
 
91
- | Tool | Description |
92
- | --- | --- |
93
- | `docker_ps` | List containers |
94
- | `docker_logs` | Get container logs |
95
- | `docker_exec` | Execute a command inside a running container |
96
- | `docker_compose` | Run docker compose commands |
92
+ | Tool | Description |
93
+ | ---------------- | -------------------------------------------- |
94
+ | `docker_ps` | List containers |
95
+ | `docker_logs` | Get container logs |
96
+ | `docker_exec` | Execute a command inside a running container |
97
+ | `docker_compose` | Run docker compose commands |
97
98
 
98
99
  ### Process & System
99
100
 
100
- | Tool | Description |
101
- | --- | --- |
102
- | `process_list` | List running processes, optionally filter by name |
103
- | `kill_process` | Kill a process by PID or name |
101
+ | Tool | Description |
102
+ | ----------------- | ------------------------------------------------------ |
103
+ | `process_list` | List running processes, optionally filter by name |
104
+ | `kill_process` | Kill a process by PID or name |
104
105
  | `service_control` | Manage systemd services (start, stop, restart, status) |
105
106
 
106
107
  ### Monitoring
107
108
 
108
- | Tool | Description |
109
- | --- | --- |
110
- | `disk_usage` | Show disk space usage |
111
- | `memory_usage` | Show RAM usage |
112
- | `cpu_usage` | Show CPU load |
113
- | `system_logs` | Read system or application logs |
109
+ | Tool | Description |
110
+ | -------------- | ------------------------------- |
111
+ | `disk_usage` | Show disk space usage |
112
+ | `memory_usage` | Show RAM usage |
113
+ | `cpu_usage` | Show CPU load |
114
+ | `system_logs` | Read system or application logs |
114
115
 
115
116
  ### Networking
116
117
 
117
- | Tool | Description |
118
- | --- | --- |
119
- | `check_port` | Check if a port is open and listening |
120
- | `curl_url` | Make HTTP requests and return the response |
121
- | `nginx_reload` | Test nginx config and reload if valid |
118
+ | Tool | Description |
119
+ | -------------- | ------------------------------------------ |
120
+ | `check_port` | Check if a port is open and listening |
121
+ | `curl_url` | Make HTTP requests and return the response |
122
+ | `nginx_reload` | Test nginx config and reload if valid |
122
123
 
123
124
  ### Coding
124
125
 
125
- | Tool | Description |
126
- | --- | --- |
126
+ | Tool | Description |
127
+ | ------------------- | ----------------------------------------------------------------------------------------- |
127
128
  | `spawn_claude_code` | Spawn Claude Code CLI for coding tasks — writing, fixing, reviewing, and scaffolding code |
128
129
 
129
130
  ## Disclaimer
@@ -144,10 +145,13 @@ kernelbot
144
145
 
145
146
  That's it. On first run, KernelBot will:
146
147
 
147
- 1. Detect missing credentials and prompt for them
148
- 2. Save them to `~/.kernelbot/.env`
149
- 3. Verify API connections
150
- 4. Launch the Telegram bot
148
+ 1. Prompt you to select an AI provider and model
149
+ 2. Ask for your API key and Telegram bot token
150
+ 3. Save credentials to `~/.kernelbot/.env`
151
+ 4. Verify API connections
152
+ 5. Launch the Telegram bot
153
+
154
+ You can change your AI provider/model anytime from the CLI menu (option 5).
151
155
 
152
156
  ## Configuration
153
157
 
@@ -158,7 +162,12 @@ KernelBot auto-detects config from the current directory or `~/.kernelbot/`. Eve
158
162
  Set these in `.env` or as system environment variables:
159
163
 
160
164
  ```text
161
- ANTHROPIC_API_KEY=sk-ant-...
165
+ # AI provider key (only the one matching your provider is required)
166
+ ANTHROPIC_API_KEY=sk-ant-... # for Anthropic (Claude)
167
+ OPENAI_API_KEY=sk-... # for OpenAI (GPT)
168
+ GOOGLE_API_KEY=AIza... # for Google (Gemini)
169
+ GROQ_API_KEY=gsk_... # for Groq (Llama/Mixtral)
170
+
162
171
  TELEGRAM_BOT_TOKEN=123456:ABC-DEF...
163
172
  GITHUB_TOKEN=ghp_... # optional, for GitHub tools
164
173
  JIRA_BASE_URL=https://yourcompany.atlassian.net # optional, for JIRA tools
@@ -174,7 +183,8 @@ Drop a `config.yaml` in your working directory or `~/.kernelbot/` to customize b
174
183
  bot:
175
184
  name: KernelBot
176
185
 
177
- anthropic:
186
+ brain:
187
+ provider: anthropic # anthropic | openai | google | groq
178
188
  model: claude-sonnet-4-20250514
179
189
  max_tokens: 8192
180
190
  temperature: 0.3
@@ -209,11 +219,11 @@ conversation:
209
219
 
210
220
  ## Telegram Commands
211
221
 
212
- | Command | Description |
213
- | --- | --- |
214
- | `/clean` | Clear conversation and start fresh |
215
- | `/history` | Show message count in memory |
216
- | `/help` | Show help message |
222
+ | Command | Description |
223
+ | ---------- | ---------------------------------- |
224
+ | `/clean` | Clear conversation and start fresh |
225
+ | `/history` | Show message count in memory |
226
+ | `/help` | Show help message |
217
227
 
218
228
  ## Security
219
229
 
@@ -256,12 +266,18 @@ KernelBot/
256
266
  ├── bin/
257
267
  │ └── kernel.js # Entry point + CLI menu
258
268
  ├── src/
259
- │ ├── agent.js # Claude tool-use loop
269
+ │ ├── agent.js # AI tool-use loop (provider-agnostic)
260
270
  │ ├── bot.js # Telegram bot (polling, auth, message handling)
261
271
  │ ├── coder.js # Claude Code CLI spawner + smart output
262
272
  │ ├── conversation.js # Per-chat conversation history
263
273
  │ ├── prompts/
264
274
  │ │ └── system.js # System prompt
275
+ │ ├── providers/
276
+ │ │ ├── models.js # Provider & model catalog
277
+ │ │ ├── base.js # Abstract provider interface
278
+ │ │ ├── anthropic.js # Anthropic (Claude) provider
279
+ │ │ ├── openai-compat.js # OpenAI / Gemini / Groq provider
280
+ │ │ └── index.js # Provider factory
265
281
  │ ├── security/
266
282
  │ │ ├── auth.js # User allowlist
267
283
  │ │ ├── audit.js # Tool call audit logging
@@ -290,7 +306,11 @@ KernelBot/
290
306
  ## Requirements
291
307
 
292
308
  - Node.js 18+
293
- - [Anthropic API key](https://console.anthropic.com/)
309
+ - AI provider API key (one of):
310
+ - [Anthropic API key](https://console.anthropic.com/) (Claude)
311
+ - [OpenAI API key](https://platform.openai.com/api-keys) (GPT)
312
+ - [Google AI API key](https://aistudio.google.com/apikey) (Gemini)
313
+ - [Groq API key](https://console.groq.com/keys) (Llama/Mixtral)
294
314
  - [Telegram Bot Token](https://t.me/BotFather)
295
315
  - Chromium/Chrome (for browser tools — installed automatically by Puppeteer)
296
316
  - [GitHub Token](https://github.com/settings/tokens) (optional, for GitHub tools)
package/bin/kernel.js CHANGED
@@ -9,7 +9,7 @@ import { readFileSync, existsSync } from 'fs';
9
9
  import { join } from 'path';
10
10
  import { homedir } from 'os';
11
11
  import chalk from 'chalk';
12
- import { loadConfig, loadConfigInteractive } from '../src/utils/config.js';
12
+ import { loadConfig, loadConfigInteractive, changeBrainModel } from '../src/utils/config.js';
13
13
  import { createLogger, getLogger } from '../src/utils/logger.js';
14
14
  import {
15
15
  showLogo,
@@ -21,16 +21,23 @@ import { createAuditLogger } from '../src/security/audit.js';
21
21
  import { ConversationManager } from '../src/conversation.js';
22
22
  import { Agent } from '../src/agent.js';
23
23
  import { startBot } from '../src/bot.js';
24
- import Anthropic from '@anthropic-ai/sdk';
24
+ import { createProvider, PROVIDERS } from '../src/providers/index.js';
25
25
 
26
- function showMenu() {
26
+ function showMenu(config) {
27
+ const providerDef = PROVIDERS[config.brain.provider];
28
+ const providerName = providerDef ? providerDef.name : config.brain.provider;
29
+ const modelId = config.brain.model;
30
+
31
+ console.log('');
32
+ console.log(chalk.dim(` Current brain: ${providerName} / ${modelId}`));
27
33
  console.log('');
28
34
  console.log(chalk.bold(' What would you like to do?\n'));
29
35
  console.log(` ${chalk.cyan('1.')} Start bot`);
30
36
  console.log(` ${chalk.cyan('2.')} Check connections`);
31
37
  console.log(` ${chalk.cyan('3.')} View logs`);
32
38
  console.log(` ${chalk.cyan('4.')} View audit logs`);
33
- console.log(` ${chalk.cyan('5.')} Exit`);
39
+ console.log(` ${chalk.cyan('5.')} Change brain model`);
40
+ console.log(` ${chalk.cyan('6.')} Exit`);
34
41
  console.log('');
35
42
  }
36
43
 
@@ -70,21 +77,21 @@ function viewLog(filename) {
70
77
  }
71
78
 
72
79
  async function runCheck(config) {
73
- await showStartupCheck('ANTHROPIC_API_KEY', async () => {
74
- if (!config.anthropic.api_key) throw new Error('Not set');
80
+ const providerDef = PROVIDERS[config.brain.provider];
81
+ const providerLabel = providerDef ? providerDef.name : config.brain.provider;
82
+ const envKeyLabel = providerDef ? providerDef.envKey : 'API_KEY';
83
+
84
+ await showStartupCheck(envKeyLabel, async () => {
85
+ if (!config.brain.api_key) throw new Error('Not set');
75
86
  });
76
87
 
77
88
  await showStartupCheck('TELEGRAM_BOT_TOKEN', async () => {
78
89
  if (!config.telegram.bot_token) throw new Error('Not set');
79
90
  });
80
91
 
81
- await showStartupCheck('Anthropic API connection', async () => {
82
- const client = new Anthropic({ apiKey: config.anthropic.api_key });
83
- await client.messages.create({
84
- model: config.anthropic.model,
85
- max_tokens: 16,
86
- messages: [{ role: 'user', content: 'ping' }],
87
- });
92
+ await showStartupCheck(`${providerLabel} API connection`, async () => {
93
+ const provider = createProvider(config);
94
+ await provider.ping();
88
95
  });
89
96
 
90
97
  await showStartupCheck('Telegram Bot API', async () => {
@@ -102,16 +109,15 @@ async function startBotFlow(config) {
102
109
  createAuditLogger();
103
110
  const logger = getLogger();
104
111
 
112
+ const providerDef = PROVIDERS[config.brain.provider];
113
+ const providerLabel = providerDef ? providerDef.name : config.brain.provider;
114
+
105
115
  const checks = [];
106
116
 
107
117
  checks.push(
108
- await showStartupCheck('Anthropic API', async () => {
109
- const client = new Anthropic({ apiKey: config.anthropic.api_key });
110
- await client.messages.create({
111
- model: config.anthropic.model,
112
- max_tokens: 16,
113
- messages: [{ role: 'user', content: 'ping' }],
114
- });
118
+ await showStartupCheck(`${providerLabel} API`, async () => {
119
+ const provider = createProvider(config);
120
+ await provider.ping();
115
121
  }),
116
122
  );
117
123
 
@@ -148,7 +154,7 @@ async function main() {
148
154
 
149
155
  let running = true;
150
156
  while (running) {
151
- showMenu();
157
+ showMenu(config);
152
158
  const choice = await ask(rl, chalk.cyan(' > '));
153
159
 
154
160
  switch (choice.trim()) {
@@ -168,6 +174,9 @@ async function main() {
168
174
  viewLog('kernel-audit.log');
169
175
  break;
170
176
  case '5':
177
+ await changeBrainModel(config, rl);
178
+ break;
179
+ case '6':
171
180
  running = false;
172
181
  break;
173
182
  default:
@@ -5,7 +5,8 @@ bot:
5
5
  name: KernelBot
6
6
  description: AI engineering agent with full OS control
7
7
 
8
- anthropic:
8
+ brain:
9
+ provider: anthropic # anthropic | openai | google | groq
9
10
  model: claude-sonnet-4-20250514
10
11
  max_tokens: 8192
11
12
  temperature: 0.3
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "kernelbot",
3
- "version": "1.0.24",
3
+ "version": "1.0.25",
4
4
  "description": "KernelBot — AI engineering agent with full OS control",
5
5
  "type": "module",
6
6
  "author": "Abdullah Al-Taheri <abdullah@altaheri.me>",
@@ -15,6 +15,9 @@
15
15
  "agent",
16
16
  "telegram",
17
17
  "anthropic",
18
+ "openai",
19
+ "gemini",
20
+ "groq",
18
21
  "tools"
19
22
  ],
20
23
  "repository": {
@@ -37,6 +40,7 @@
37
40
  "gradient-string": "^3.0.0",
38
41
  "js-yaml": "^4.1.0",
39
42
  "node-telegram-bot-api": "^0.66.0",
43
+ "openai": "^4.82.0",
40
44
  "ora": "^8.1.1",
41
45
  "puppeteer": "^24.37.3",
42
46
  "simple-git": "^3.31.1",
package/src/agent.js CHANGED
@@ -1,4 +1,4 @@
1
- import Anthropic from '@anthropic-ai/sdk';
1
+ import { createProvider } from './providers/index.js';
2
2
  import { toolDefinitions, executeTool, checkConfirmation } from './tools/index.js';
3
3
  import { getSystemPrompt } from './prompts/system.js';
4
4
  import { getLogger } from './utils/logger.js';
@@ -8,7 +8,7 @@ export class Agent {
8
8
  constructor({ config, conversationManager }) {
9
9
  this.config = config;
10
10
  this.conversationManager = conversationManager;
11
- this.client = new Anthropic({ apiKey: config.anthropic.api_key });
11
+ this.provider = createProvider(config);
12
12
  this.systemPrompt = getSystemPrompt(config);
13
13
  this._pending = new Map(); // chatId -> pending state
14
14
  }
@@ -33,7 +33,7 @@ export class Agent {
33
33
  }
34
34
  }
35
35
 
36
- const { max_tool_depth } = this.config.anthropic;
36
+ const { max_tool_depth } = this.config.brain;
37
37
 
38
38
  // Add user message to persistent history
39
39
  this.conversationManager.addMessage(chatId, 'user', userMessage);
@@ -160,7 +160,7 @@ export class Agent {
160
160
  }
161
161
 
162
162
  pending.messages.push({ role: 'user', content: pending.toolResults });
163
- const { max_tool_depth } = this.config.anthropic;
163
+ const { max_tool_depth } = this.config.brain;
164
164
  return await this._runLoop(chatId, pending.messages, user, 0, max_tool_depth);
165
165
  }
166
166
 
@@ -203,56 +203,44 @@ export class Agent {
203
203
 
204
204
  async _runLoop(chatId, messages, user, startDepth, maxDepth) {
205
205
  const logger = getLogger();
206
- const { model, max_tokens, temperature } = this.config.anthropic;
207
206
 
208
207
  for (let depth = startDepth; depth < maxDepth; depth++) {
209
208
  logger.debug(`Agent loop iteration ${depth + 1}/${maxDepth}`);
210
209
 
211
- const response = await this.client.messages.create({
212
- model,
213
- max_tokens,
214
- temperature,
210
+ const response = await this.provider.chat({
215
211
  system: this.systemPrompt,
216
- tools: toolDefinitions,
217
212
  messages,
213
+ tools: toolDefinitions,
218
214
  });
219
215
 
220
- if (response.stop_reason === 'end_turn') {
221
- const textBlocks = response.content
222
- .filter((b) => b.type === 'text')
223
- .map((b) => b.text);
224
- const reply = textBlocks.join('\n');
225
-
216
+ if (response.stopReason === 'end_turn') {
217
+ const reply = response.text || '';
226
218
  this.conversationManager.addMessage(chatId, 'assistant', reply);
227
219
  return reply;
228
220
  }
229
221
 
230
- if (response.stop_reason === 'tool_use') {
231
- messages.push({ role: 'assistant', content: response.content });
222
+ if (response.stopReason === 'tool_use') {
223
+ messages.push({ role: 'assistant', content: response.rawContent });
232
224
 
233
- // Send Claude's thinking text to the user
234
- const thinkingBlocks = response.content.filter((b) => b.type === 'text' && b.text.trim());
235
- if (thinkingBlocks.length > 0) {
236
- const thinking = thinkingBlocks.map((b) => b.text).join('\n');
237
- logger.info(`Agent thinking: ${thinking.slice(0, 200)}`);
238
- await this._sendUpdate(`💭 ${thinking}`);
225
+ // Send thinking text to the user
226
+ if (response.text && response.text.trim()) {
227
+ logger.info(`Agent thinking: ${response.text.slice(0, 200)}`);
228
+ await this._sendUpdate(`💭 ${response.text}`);
239
229
  }
240
230
 
241
- const toolUseBlocks = response.content.filter((b) => b.type === 'tool_use');
242
231
  const toolResults = [];
243
232
 
244
- for (let i = 0; i < toolUseBlocks.length; i++) {
245
- const block = toolUseBlocks[i];
233
+ for (let i = 0; i < response.toolCalls.length; i++) {
234
+ const block = response.toolCalls[i];
235
+
236
+ // Build a block-like object for _checkPause (needs .type for remainingBlocks filter)
237
+ const blockObj = { type: 'tool_use', id: block.id, name: block.name, input: block.input };
246
238
 
247
239
  // Check if we need to pause (missing cred or dangerous action)
248
- const pauseMsg = this._checkPause(
249
- chatId,
250
- block,
251
- user,
252
- toolResults,
253
- toolUseBlocks.slice(i + 1),
254
- messages,
255
- );
240
+ const remaining = response.toolCalls.slice(i + 1).map((tc) => ({
241
+ type: 'tool_use', id: tc.id, name: tc.name, input: tc.input,
242
+ }));
243
+ const pauseMsg = this._checkPause(chatId, blockObj, user, toolResults, remaining, messages);
256
244
  if (pauseMsg) return pauseMsg;
257
245
 
258
246
  const summary = this._formatToolSummary(block.name, block.input);
@@ -278,14 +266,10 @@ export class Agent {
278
266
  }
279
267
 
280
268
  // Unexpected stop reason
281
- logger.warn(`Unexpected stop_reason: ${response.stop_reason}`);
282
- const fallbackText = response.content
283
- .filter((b) => b.type === 'text')
284
- .map((b) => b.text)
285
- .join('\n');
286
- if (fallbackText) {
287
- this.conversationManager.addMessage(chatId, 'assistant', fallbackText);
288
- return fallbackText;
269
+ logger.warn(`Unexpected stopReason: ${response.stopReason}`);
270
+ if (response.text) {
271
+ this.conversationManager.addMessage(chatId, 'assistant', response.text);
272
+ return response.text;
289
273
  }
290
274
  return 'Something went wrong — unexpected response from the model.';
291
275
  }
@@ -0,0 +1,44 @@
1
+ import Anthropic from '@anthropic-ai/sdk';
2
+ import { BaseProvider } from './base.js';
3
+
4
+ export class AnthropicProvider extends BaseProvider {
5
+ constructor(opts) {
6
+ super(opts);
7
+ this.client = new Anthropic({ apiKey: this.apiKey });
8
+ }
9
+
10
+ async chat({ system, messages, tools }) {
11
+ const response = await this.client.messages.create({
12
+ model: this.model,
13
+ max_tokens: this.maxTokens,
14
+ temperature: this.temperature,
15
+ system,
16
+ tools,
17
+ messages,
18
+ });
19
+
20
+ const stopReason = response.stop_reason === 'end_turn' ? 'end_turn' : 'tool_use';
21
+
22
+ const textBlocks = response.content.filter((b) => b.type === 'text');
23
+ const text = textBlocks.map((b) => b.text).join('\n');
24
+
25
+ const toolCalls = response.content
26
+ .filter((b) => b.type === 'tool_use')
27
+ .map((b) => ({ id: b.id, name: b.name, input: b.input }));
28
+
29
+ return {
30
+ stopReason,
31
+ text,
32
+ toolCalls,
33
+ rawContent: response.content,
34
+ };
35
+ }
36
+
37
+ async ping() {
38
+ await this.client.messages.create({
39
+ model: this.model,
40
+ max_tokens: 16,
41
+ messages: [{ role: 'user', content: 'ping' }],
42
+ });
43
+ }
44
+ }
@@ -0,0 +1,30 @@
1
+ /**
2
+ * Abstract provider interface.
3
+ * Every provider must implement chat() and ping().
4
+ */
5
+
6
+ export class BaseProvider {
7
+ constructor({ model, maxTokens, temperature, apiKey }) {
8
+ this.model = model;
9
+ this.maxTokens = maxTokens;
10
+ this.temperature = temperature;
11
+ this.apiKey = apiKey;
12
+ }
13
+
14
+ /**
15
+ * Send a chat completion request.
16
+ * @param {object} opts
17
+ * @param {string} opts.system - System prompt
18
+ * @param {Array} opts.messages - Anthropic-format messages
19
+ * @param {Array} opts.tools - Anthropic-format tool definitions
20
+ * @returns {Promise<{stopReason: 'end_turn'|'tool_use', text: string, toolCalls: Array<{id,name,input}>, rawContent: Array}>}
21
+ */
22
+ async chat({ system, messages, tools }) {
23
+ throw new Error('chat() not implemented');
24
+ }
25
+
26
+ /** Quick connectivity test — throws on failure. */
27
+ async ping() {
28
+ throw new Error('ping() not implemented');
29
+ }
30
+ }
@@ -0,0 +1,36 @@
1
+ import { AnthropicProvider } from './anthropic.js';
2
+ import { OpenAICompatProvider } from './openai-compat.js';
3
+ import { PROVIDERS } from './models.js';
4
+
5
+ export { PROVIDERS } from './models.js';
6
+
7
+ /**
8
+ * Create the right provider based on config.brain.
9
+ * @param {object} config - Full app config (must have config.brain)
10
+ * @returns {BaseProvider}
11
+ */
12
+ export function createProvider(config) {
13
+ const { provider, model, max_tokens, temperature, api_key } = config.brain;
14
+
15
+ const providerDef = PROVIDERS[provider];
16
+ if (!providerDef) {
17
+ throw new Error(`Unknown provider: ${provider}. Valid: ${Object.keys(PROVIDERS).join(', ')}`);
18
+ }
19
+
20
+ const opts = {
21
+ model,
22
+ maxTokens: max_tokens,
23
+ temperature,
24
+ apiKey: api_key,
25
+ };
26
+
27
+ if (provider === 'anthropic') {
28
+ return new AnthropicProvider(opts);
29
+ }
30
+
31
+ // OpenAI, Google, Groq — all use OpenAI-compatible API
32
+ return new OpenAICompatProvider({
33
+ ...opts,
34
+ baseUrl: providerDef.baseUrl || undefined,
35
+ });
36
+ }
@@ -0,0 +1,54 @@
1
+ /**
2
+ * Provider & model catalog — single source of truth.
3
+ */
4
+
5
+ export const PROVIDERS = {
6
+ anthropic: {
7
+ name: 'Anthropic (Claude)',
8
+ envKey: 'ANTHROPIC_API_KEY',
9
+ models: [
10
+ { id: 'claude-sonnet-4-20250514', label: 'Claude Sonnet 4' },
11
+ { id: 'claude-opus-4-20250514', label: 'Claude Opus 4' },
12
+ { id: 'claude-haiku-4-5-20251001', label: 'Claude Haiku 4.5' },
13
+ ],
14
+ },
15
+ openai: {
16
+ name: 'OpenAI (GPT)',
17
+ envKey: 'OPENAI_API_KEY',
18
+ models: [
19
+ { id: 'gpt-4o', label: 'GPT-4o' },
20
+ { id: 'gpt-4o-mini', label: 'GPT-4o Mini' },
21
+ { id: 'o1', label: 'o1' },
22
+ { id: 'o3-mini', label: 'o3-mini' },
23
+ ],
24
+ },
25
+ google: {
26
+ name: 'Google (Gemini)',
27
+ envKey: 'GOOGLE_API_KEY',
28
+ baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai/',
29
+ models: [
30
+ { id: 'gemini-2.0-flash', label: 'Gemini 2.0 Flash' },
31
+ { id: 'gemini-2.5-pro', label: 'Gemini 2.5 Pro' },
32
+ ],
33
+ },
34
+ groq: {
35
+ name: 'Groq',
36
+ envKey: 'GROQ_API_KEY',
37
+ baseUrl: 'https://api.groq.com/openai/v1',
38
+ models: [
39
+ { id: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70B' },
40
+ { id: 'llama-3.1-8b-instant', label: 'Llama 3.1 8B' },
41
+ { id: 'mixtral-8x7b-32768', label: 'Mixtral 8x7B' },
42
+ ],
43
+ },
44
+ };
45
+
46
+ /** Models that don't support system prompts or temperature (reasoning models). */
47
+ export const REASONING_MODELS = new Set(['o1', 'o3-mini']);
48
+
49
+ export function getProviderForModel(modelId) {
50
+ for (const [key, provider] of Object.entries(PROVIDERS)) {
51
+ if (provider.models.some((m) => m.id === modelId)) return key;
52
+ }
53
+ return null;
54
+ }
@@ -0,0 +1,163 @@
1
+ import OpenAI from 'openai';
2
+ import { BaseProvider } from './base.js';
3
+ import { REASONING_MODELS } from './models.js';
4
+
5
/**
 * OpenAI-compatible provider — works with OpenAI, Groq, and Google Gemini
 * via configurable baseURL.
 *
 * Accepts Anthropic-shaped inputs (system prompt, content-block messages,
 * tool definitions) and returns a normalized response whose `rawContent`
 * mirrors Anthropic's content-block format, so callers can keep a single
 * message-history representation regardless of provider.
 */
export class OpenAICompatProvider extends BaseProvider {
  /**
   * @param {object} opts - BaseProvider options (apiKey, model, maxTokens,
   *   temperature, …) plus an optional `baseUrl` for non-OpenAI endpoints.
   */
  constructor(opts) {
    super(opts);
    this.client = new OpenAI({
      apiKey: this.apiKey,
      ...(opts.baseUrl && { baseURL: opts.baseUrl }),
    });
    // Reasoning models (o1, o3-mini) reject system prompts, `temperature`,
    // and the `max_tokens` request parameter — see chat()/ping() below.
    this.isReasoningModel = REASONING_MODELS.has(this.model);
  }

  // ── Format conversion helpers ──

  /** Anthropic tool defs → OpenAI function tool defs. */
  _convertTools(tools) {
    if (!tools || tools.length === 0) return undefined;
    return tools.map((t) => ({
      type: 'function',
      function: {
        name: t.name,
        description: t.description,
        parameters: t.input_schema,
      },
    }));
  }

  /**
   * Anthropic messages → OpenAI messages.
   *
   * - `system` (a string or an array of text blocks) becomes the leading
   *   'system' message; skipped entirely for reasoning models.
   * - A user message whose content is an array of `tool_result` blocks
   *   becomes one OpenAI `tool` message per result.
   * - Assistant `tool_use` blocks become OpenAI `tool_calls` entries with
   *   JSON-stringified arguments.
   */
  _convertMessages(system, messages) {
    const out = [];

    // System prompt as first message (skip for reasoning models).
    if (system && !this.isReasoningModel) {
      const systemText = Array.isArray(system)
        ? system.map((b) => b.text).join('\n')
        : system;
      out.push({ role: 'system', content: systemText });
    }

    for (const msg of messages) {
      if (msg.role === 'user') {
        // Could be a string, content blocks, or tool_result array.
        if (typeof msg.content === 'string') {
          out.push({ role: 'user', content: msg.content });
        } else if (Array.isArray(msg.content)) {
          // NOTE(review): only the first block's type is inspected — assumes a
          // user message never mixes tool_result and text blocks; confirm
          // against how the agent loop builds tool-result messages.
          if (msg.content[0]?.type === 'tool_result') {
            for (const tr of msg.content) {
              out.push({
                role: 'tool',
                tool_call_id: tr.tool_use_id,
                content: typeof tr.content === 'string' ? tr.content : JSON.stringify(tr.content),
              });
            }
          } else {
            // Text content blocks — non-text blocks are dropped.
            const text = msg.content
              .filter((b) => b.type === 'text')
              .map((b) => b.text)
              .join('\n');
            out.push({ role: 'user', content: text || '' });
          }
        }
      } else if (msg.role === 'assistant') {
        // Convert Anthropic content blocks → OpenAI format.
        if (typeof msg.content === 'string') {
          out.push({ role: 'assistant', content: msg.content });
        } else if (Array.isArray(msg.content)) {
          const textParts = msg.content.filter((b) => b.type === 'text');
          const toolParts = msg.content.filter((b) => b.type === 'tool_use');

          const assistantMsg = {
            role: 'assistant',
            // OpenAI expects null (not '') when a tool-call turn has no text.
            content: textParts.map((b) => b.text).join('\n') || null,
          };

          if (toolParts.length > 0) {
            assistantMsg.tool_calls = toolParts.map((b) => ({
              id: b.id,
              type: 'function',
              function: {
                name: b.name,
                arguments: JSON.stringify(b.input),
              },
            }));
          }

          out.push(assistantMsg);
        }
      }
    }

    return out;
  }

  /**
   * OpenAI response → normalized `{ stopReason, text, toolCalls, rawContent }`
   * where `rawContent` is in Anthropic block format for history consistency.
   */
  _normalizeResponse(response) {
    const choice = response.choices[0];
    const finishReason = choice.finish_reason;

    // NOTE(review): 'length' (token-cap cutoff) also maps to 'end_turn' here;
    // confirm callers don't need a distinct 'max_tokens' stop reason.
    const stopReason = finishReason === 'tool_calls' ? 'tool_use' : 'end_turn';

    const text = choice.message.content || '';

    const toolCalls = (choice.message.tool_calls || []).map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      // Some OpenAI-compatible backends send '' (not '{}') for zero-argument
      // tool calls; JSON.parse('') throws, so treat empty as no arguments.
      input: tc.function.arguments ? JSON.parse(tc.function.arguments) : {},
    }));

    // Build rawContent in Anthropic format for message history consistency.
    const rawContent = [];
    if (text) {
      rawContent.push({ type: 'text', text });
    }
    for (const tc of toolCalls) {
      rawContent.push({ type: 'tool_use', id: tc.id, name: tc.name, input: tc.input });
    }

    return { stopReason, text, toolCalls, rawContent };
  }

  // ── Public API ──

  /**
   * Send one chat turn.
   *
   * @param {{system?: string|object[], messages: object[], tools?: object[]}} args
   *   Anthropic-shaped request (see _convertMessages / _convertTools).
   * @returns {Promise<{stopReason: string, text: string, toolCalls: object[], rawContent: object[]}>}
   * @throws whatever the OpenAI SDK throws (auth, rate limit, network).
   */
  async chat({ system, messages, tools }) {
    const params = {
      model: this.model,
      messages: this._convertMessages(system, messages),
    };

    if (this.isReasoningModel) {
      // Reasoning models reject `max_tokens`; the API requires the cap to be
      // sent as `max_completion_tokens` (and no temperature) for them.
      params.max_completion_tokens = this.maxTokens;
    } else {
      params.temperature = this.temperature;
      params.max_tokens = this.maxTokens;
    }

    const convertedTools = this._convertTools(tools);
    if (convertedTools) {
      params.tools = convertedTools;
    }

    const response = await this.client.chat.completions.create(params);
    return this._normalizeResponse(response);
  }

  /** Cheap liveness/auth check: one tiny completion; throws on failure. */
  async ping() {
    const params = {
      model: this.model,
      messages: [{ role: 'user', content: 'ping' }],
    };
    if (this.isReasoningModel) {
      // Same reasoning-model constraint as chat(): no temperature, and the
      // token cap goes in `max_completion_tokens`.
      params.max_completion_tokens = 16;
    } else {
      params.temperature = 0;
      params.max_tokens = 16;
    }
    await this.client.chat.completions.create(params);
  }
}
@@ -5,13 +5,15 @@ import { createInterface } from 'readline';
5
5
  import yaml from 'js-yaml';
6
6
  import dotenv from 'dotenv';
7
7
  import chalk from 'chalk';
8
+ import { PROVIDERS } from '../providers/models.js';
8
9
 
9
10
  const DEFAULTS = {
10
11
  bot: {
11
12
  name: 'KernelBot',
12
13
  description: 'AI engineering agent with full OS control',
13
14
  },
14
- anthropic: {
15
+ brain: {
16
+ provider: 'anthropic',
15
17
  model: 'claude-sonnet-4-20250514',
16
18
  max_tokens: 8192,
17
19
  temperature: 0.3,
@@ -90,9 +92,126 @@ function ask(rl, question) {
90
92
  return new Promise((res) => rl.question(question, res));
91
93
  }
92
94
 
95
/**
 * Backward compatibility: rewrite a legacy top-level `anthropic` config
 * section into the newer `brain` section (mutates `config` in place).
 *
 * Runs only when `anthropic` exists and `brain` does not — an explicit
 * `brain` section always wins. `temperature` falls back with `??` so an
 * explicit 0 is preserved; the integer fields use `||` (0 is not a valid
 * value for them).
 *
 * @param {object} config - parsed config.yaml contents
 * @returns {object} the same config object
 */
function migrateAnthropicConfig(config) {
  const legacy = config.anthropic;
  if (!legacy || config.brain) return config;

  const brain = {
    provider: 'anthropic',
    model: legacy.model || DEFAULTS.brain.model,
    max_tokens: legacy.max_tokens || DEFAULTS.brain.max_tokens,
    temperature: legacy.temperature ?? DEFAULTS.brain.temperature,
    max_tool_depth: legacy.max_tool_depth || DEFAULTS.brain.max_tool_depth,
  };
  if (legacy.api_key) {
    brain.api_key = legacy.api_key;
  }
  config.brain = brain;
  return config;
}
113
+
114
/**
 * Interactive provider → model picker.
 *
 * Prints a numbered menu of providers, then a numbered menu of the chosen
 * provider's models, re-prompting until a valid number is entered.
 *
 * @param {import('readline').Interface} rl - readline interface for prompts
 * @returns {Promise<{providerKey: string, modelId: string}>}
 */
export async function promptProviderSelection(rl) {
  // Shared numbered-menu loop: keeps asking until a 1-based index is valid.
  const pickIndex = async (prompt, count) => {
    while (true) {
      const answer = await ask(rl, chalk.cyan(prompt));
      const idx = parseInt(answer.trim(), 10) - 1;
      if (idx >= 0 && idx < count) return idx;
      console.log(chalk.dim(' Invalid choice, try again.'));
    }
  };

  const providerKeys = Object.keys(PROVIDERS);

  console.log(chalk.bold('\n Select AI provider:\n'));
  providerKeys.forEach((key, i) => {
    console.log(` ${chalk.cyan(`${i + 1}.`)} ${PROVIDERS[key].name}`);
  });
  console.log('');

  const providerKey = providerKeys[await pickIndex(' Provider (number): ', providerKeys.length)];
  const provider = PROVIDERS[providerKey];

  console.log(chalk.bold(`\n Select model for ${provider.name}:\n`));
  provider.models.forEach((m, i) => {
    console.log(` ${chalk.cyan(`${i + 1}.`)} ${m.label} (${m.id})`);
  });
  console.log('');

  const chosen = provider.models[await pickIndex(' Model (number): ', provider.models.length)];
  return { providerKey, modelId: chosen.id };
}
154
+
155
/**
 * Persist the chosen provider and model into config.yaml, preserving every
 * other setting already present in the file.
 *
 * Also drops the legacy top-level `anthropic` section, which the `brain`
 * section supersedes.
 *
 * @param {string} providerKey - key into PROVIDERS (e.g. 'openai')
 * @param {string} modelId - model id stored under brain.model
 * @returns {string} path of the config file that was written
 */
export function saveProviderToYaml(providerKey, modelId) {
  const configDir = getConfigDir();
  mkdirSync(configDir, { recursive: true });
  const configPath = join(configDir, 'config.yaml');

  const current = existsSync(configPath)
    ? yaml.load(readFileSync(configPath, 'utf-8')) || {}
    : {};

  current.brain = {
    ...(current.brain || {}),
    provider: providerKey,
    model: modelId,
  };

  // Remove legacy anthropic section if migrating.
  delete current.anthropic;

  writeFileSync(configPath, yaml.dump(current, { lineWidth: -1 }));
  return configPath;
}
180
+
181
/**
 * Full interactive flow for switching the bot's brain: pick a provider and
 * model, persist the choice to config.yaml, update the live config, and make
 * sure an API key for the chosen provider is available (env var first,
 * otherwise prompt and save).
 *
 * @param {object} config - live config object (mutated in place)
 * @param {import('readline').Interface} rl - readline interface for prompts
 * @returns {Promise<object>} the same config, updated
 */
export async function changeBrainModel(config, rl) {
  const { providerKey, modelId } = await promptProviderSelection(rl);
  const providerDef = PROVIDERS[providerKey];

  const savedPath = saveProviderToYaml(providerKey, modelId);
  console.log(chalk.dim(`\n Saved to ${savedPath}`));

  // Reflect the new choice in the in-memory config immediately.
  config.brain.provider = providerKey;
  config.brain.model = modelId;

  // Prefer an already-exported env var for this provider's key.
  const envKey = providerDef.envKey;
  const existingKey = process.env[envKey];
  if (existingKey) {
    config.brain.api_key = existingKey;
    return config;
  }

  // No env key — prompt; a blank answer leaves the key unset.
  const entered = await ask(rl, chalk.cyan(`\n ${providerDef.name} API key (${envKey}): `));
  const trimmed = entered.trim();
  if (trimmed) {
    saveCredential(config, envKey, trimmed);
    config.brain.api_key = trimmed;
    console.log(chalk.dim(' Saved.\n'));
  }

  return config;
}
211
+
93
212
  async function promptForMissing(config) {
94
213
  const missing = [];
95
- if (!config.anthropic.api_key) missing.push('ANTHROPIC_API_KEY');
214
+ if (!config.brain.api_key) missing.push('brain_api_key');
96
215
  if (!config.telegram.bot_token) missing.push('TELEGRAM_BOT_TOKEN');
97
216
 
98
217
  if (missing.length === 0) return config;
@@ -110,10 +229,19 @@ async function promptForMissing(config) {
110
229
  existingEnv = readFileSync(envPath, 'utf-8');
111
230
  }
112
231
 
113
- if (!mutableConfig.anthropic.api_key) {
114
- const key = await ask(rl, chalk.cyan(' Anthropic API key: '));
115
- mutableConfig.anthropic.api_key = key.trim();
116
- envLines.push(`ANTHROPIC_API_KEY=${key.trim()}`);
232
+ if (!mutableConfig.brain.api_key) {
233
+ // Run provider selection flow
234
+ const { providerKey, modelId } = await promptProviderSelection(rl);
235
+ mutableConfig.brain.provider = providerKey;
236
+ mutableConfig.brain.model = modelId;
237
+ saveProviderToYaml(providerKey, modelId);
238
+
239
+ const providerDef = PROVIDERS[providerKey];
240
+ const envKey = providerDef.envKey;
241
+
242
+ const key = await ask(rl, chalk.cyan(`\n ${providerDef.name} API key: `));
243
+ mutableConfig.brain.api_key = key.trim();
244
+ envLines.push(`${envKey}=${key.trim()}`);
117
245
  }
118
246
 
119
247
  if (!mutableConfig.telegram.bot_token) {
@@ -164,12 +292,21 @@ export function loadConfig() {
164
292
  fileConfig = yaml.load(raw) || {};
165
293
  }
166
294
 
295
+ // Backward compat: migrate anthropic → brain
296
+ migrateAnthropicConfig(fileConfig);
297
+
167
298
  const config = deepMerge(DEFAULTS, fileConfig);
168
299
 
169
- // Overlay env vars for secrets
170
- if (process.env.ANTHROPIC_API_KEY) {
171
- config.anthropic.api_key = process.env.ANTHROPIC_API_KEY;
300
+ // Overlay env vars for brain API key based on provider
301
+ const providerDef = PROVIDERS[config.brain.provider];
302
+ if (providerDef && process.env[providerDef.envKey]) {
303
+ config.brain.api_key = process.env[providerDef.envKey];
172
304
  }
305
+ // Legacy fallback: ANTHROPIC_API_KEY for anthropic provider
306
+ if (config.brain.provider === 'anthropic' && !config.brain.api_key && process.env.ANTHROPIC_API_KEY) {
307
+ config.brain.api_key = process.env.ANTHROPIC_API_KEY;
308
+ }
309
+
173
310
  if (process.env.TELEGRAM_BOT_TOKEN) {
174
311
  config.telegram.bot_token = process.env.TELEGRAM_BOT_TOKEN;
175
312
  }
@@ -221,7 +358,16 @@ export function saveCredential(config, envKey, value) {
221
358
  config.github.token = value;
222
359
  break;
223
360
  case 'ANTHROPIC_API_KEY':
224
- config.anthropic.api_key = value;
361
+ if (config.brain.provider === 'anthropic') config.brain.api_key = value;
362
+ break;
363
+ case 'OPENAI_API_KEY':
364
+ if (config.brain.provider === 'openai') config.brain.api_key = value;
365
+ break;
366
+ case 'GOOGLE_API_KEY':
367
+ if (config.brain.provider === 'google') config.brain.api_key = value;
368
+ break;
369
+ case 'GROQ_API_KEY':
370
+ if (config.brain.provider === 'groq') config.brain.api_key = value;
225
371
  break;
226
372
  case 'TELEGRAM_BOT_TOKEN':
227
373
  config.telegram.bot_token = value;