kernelbot 1.0.23 → 1.0.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/kernel.js CHANGED
@@ -9,7 +9,7 @@ import { readFileSync, existsSync } from 'fs';
9
9
  import { join } from 'path';
10
10
  import { homedir } from 'os';
11
11
  import chalk from 'chalk';
12
- import { loadConfig, loadConfigInteractive } from '../src/utils/config.js';
12
+ import { loadConfig, loadConfigInteractive, changeBrainModel } from '../src/utils/config.js';
13
13
  import { createLogger, getLogger } from '../src/utils/logger.js';
14
14
  import {
15
15
  showLogo,
@@ -21,16 +21,23 @@ import { createAuditLogger } from '../src/security/audit.js';
21
21
  import { ConversationManager } from '../src/conversation.js';
22
22
  import { Agent } from '../src/agent.js';
23
23
  import { startBot } from '../src/bot.js';
24
- import Anthropic from '@anthropic-ai/sdk';
24
+ import { createProvider, PROVIDERS } from '../src/providers/index.js';
25
25
 
26
- function showMenu() {
26
+ function showMenu(config) {
27
+ const providerDef = PROVIDERS[config.brain.provider];
28
+ const providerName = providerDef ? providerDef.name : config.brain.provider;
29
+ const modelId = config.brain.model;
30
+
31
+ console.log('');
32
+ console.log(chalk.dim(` Current brain: ${providerName} / ${modelId}`));
27
33
  console.log('');
28
34
  console.log(chalk.bold(' What would you like to do?\n'));
29
35
  console.log(` ${chalk.cyan('1.')} Start bot`);
30
36
  console.log(` ${chalk.cyan('2.')} Check connections`);
31
37
  console.log(` ${chalk.cyan('3.')} View logs`);
32
38
  console.log(` ${chalk.cyan('4.')} View audit logs`);
33
- console.log(` ${chalk.cyan('5.')} Exit`);
39
+ console.log(` ${chalk.cyan('5.')} Change brain model`);
40
+ console.log(` ${chalk.cyan('6.')} Exit`);
34
41
  console.log('');
35
42
  }
36
43
 
@@ -70,21 +77,21 @@ function viewLog(filename) {
70
77
  }
71
78
 
72
79
  async function runCheck(config) {
73
- await showStartupCheck('ANTHROPIC_API_KEY', async () => {
74
- if (!config.anthropic.api_key) throw new Error('Not set');
80
+ const providerDef = PROVIDERS[config.brain.provider];
81
+ const providerLabel = providerDef ? providerDef.name : config.brain.provider;
82
+ const envKeyLabel = providerDef ? providerDef.envKey : 'API_KEY';
83
+
84
+ await showStartupCheck(envKeyLabel, async () => {
85
+ if (!config.brain.api_key) throw new Error('Not set');
75
86
  });
76
87
 
77
88
  await showStartupCheck('TELEGRAM_BOT_TOKEN', async () => {
78
89
  if (!config.telegram.bot_token) throw new Error('Not set');
79
90
  });
80
91
 
81
- await showStartupCheck('Anthropic API connection', async () => {
82
- const client = new Anthropic({ apiKey: config.anthropic.api_key });
83
- await client.messages.create({
84
- model: config.anthropic.model,
85
- max_tokens: 16,
86
- messages: [{ role: 'user', content: 'ping' }],
87
- });
92
+ await showStartupCheck(`${providerLabel} API connection`, async () => {
93
+ const provider = createProvider(config);
94
+ await provider.ping();
88
95
  });
89
96
 
90
97
  await showStartupCheck('Telegram Bot API', async () => {
@@ -102,16 +109,15 @@ async function startBotFlow(config) {
102
109
  createAuditLogger();
103
110
  const logger = getLogger();
104
111
 
112
+ const providerDef = PROVIDERS[config.brain.provider];
113
+ const providerLabel = providerDef ? providerDef.name : config.brain.provider;
114
+
105
115
  const checks = [];
106
116
 
107
117
  checks.push(
108
- await showStartupCheck('Anthropic API', async () => {
109
- const client = new Anthropic({ apiKey: config.anthropic.api_key });
110
- await client.messages.create({
111
- model: config.anthropic.model,
112
- max_tokens: 16,
113
- messages: [{ role: 'user', content: 'ping' }],
114
- });
118
+ await showStartupCheck(`${providerLabel} API`, async () => {
119
+ const provider = createProvider(config);
120
+ await provider.ping();
115
121
  }),
116
122
  );
117
123
 
@@ -148,7 +154,7 @@ async function main() {
148
154
 
149
155
  let running = true;
150
156
  while (running) {
151
- showMenu();
157
+ showMenu(config);
152
158
  const choice = await ask(rl, chalk.cyan(' > '));
153
159
 
154
160
  switch (choice.trim()) {
@@ -168,6 +174,9 @@ async function main() {
168
174
  viewLog('kernel-audit.log');
169
175
  break;
170
176
  case '5':
177
+ await changeBrainModel(config, rl);
178
+ break;
179
+ case '6':
171
180
  running = false;
172
181
  break;
173
182
  default:
@@ -5,7 +5,8 @@ bot:
5
5
  name: KernelBot
6
6
  description: AI engineering agent with full OS control
7
7
 
8
- anthropic:
8
+ brain:
9
+ provider: anthropic # anthropic | openai | google | groq
9
10
  model: claude-sonnet-4-20250514
10
11
  max_tokens: 8192
11
12
  temperature: 0.3
@@ -20,6 +21,11 @@ github:
20
21
  default_branch: main
21
22
  # default_org: my-org
22
23
 
24
+ jira:
25
+ # base_url: https://yourcompany.atlassian.net # or self-hosted JIRA server URL
26
+ # email: you@company.com # JIRA account email (Cloud) or username (Server)
27
+ # api_token: your-api-token # API token from https://id.atlassian.net/manage-profile/security/api-tokens
28
+
23
29
  telegram:
24
30
  # List Telegram user IDs allowed to interact. Empty = allow all (dev mode).
25
31
  allowed_users: []
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "kernelbot",
3
- "version": "1.0.23",
3
+ "version": "1.0.25",
4
4
  "description": "KernelBot — AI engineering agent with full OS control",
5
5
  "type": "module",
6
6
  "author": "Abdullah Al-Taheri <abdullah@altaheri.me>",
@@ -15,6 +15,9 @@
15
15
  "agent",
16
16
  "telegram",
17
17
  "anthropic",
18
+ "openai",
19
+ "gemini",
20
+ "groq",
18
21
  "tools"
19
22
  ],
20
23
  "repository": {
@@ -24,7 +27,7 @@
24
27
  "bugs": {
25
28
  "url": "https://github.com/KernelCode/KernelBot/issues"
26
29
  },
27
- "homepage": "https://github.com/KernelCode/KernelBot#readme",
30
+ "homepage": "https://kernelbot.io",
28
31
  "license": "MIT",
29
32
  "dependencies": {
30
33
  "@anthropic-ai/sdk": "^0.39.0",
@@ -37,6 +40,7 @@
37
40
  "gradient-string": "^3.0.0",
38
41
  "js-yaml": "^4.1.0",
39
42
  "node-telegram-bot-api": "^0.66.0",
43
+ "openai": "^4.82.0",
40
44
  "ora": "^8.1.1",
41
45
  "puppeteer": "^24.37.3",
42
46
  "simple-git": "^3.31.1",
package/src/agent.js CHANGED
@@ -1,4 +1,4 @@
1
- import Anthropic from '@anthropic-ai/sdk';
1
+ import { createProvider } from './providers/index.js';
2
2
  import { toolDefinitions, executeTool, checkConfirmation } from './tools/index.js';
3
3
  import { getSystemPrompt } from './prompts/system.js';
4
4
  import { getLogger } from './utils/logger.js';
@@ -8,7 +8,7 @@ export class Agent {
8
8
  constructor({ config, conversationManager }) {
9
9
  this.config = config;
10
10
  this.conversationManager = conversationManager;
11
- this.client = new Anthropic({ apiKey: config.anthropic.api_key });
11
+ this.provider = createProvider(config);
12
12
  this.systemPrompt = getSystemPrompt(config);
13
13
  this._pending = new Map(); // chatId -> pending state
14
14
  }
@@ -33,7 +33,7 @@ export class Agent {
33
33
  }
34
34
  }
35
35
 
36
- const { max_tool_depth } = this.config.anthropic;
36
+ const { max_tool_depth } = this.config.brain;
37
37
 
38
38
  // Add user message to persistent history
39
39
  this.conversationManager.addMessage(chatId, 'user', userMessage);
@@ -160,7 +160,7 @@ export class Agent {
160
160
  }
161
161
 
162
162
  pending.messages.push({ role: 'user', content: pending.toolResults });
163
- const { max_tool_depth } = this.config.anthropic;
163
+ const { max_tool_depth } = this.config.brain;
164
164
  return await this._runLoop(chatId, pending.messages, user, 0, max_tool_depth);
165
165
  }
166
166
 
@@ -203,56 +203,44 @@ export class Agent {
203
203
 
204
204
  async _runLoop(chatId, messages, user, startDepth, maxDepth) {
205
205
  const logger = getLogger();
206
- const { model, max_tokens, temperature } = this.config.anthropic;
207
206
 
208
207
  for (let depth = startDepth; depth < maxDepth; depth++) {
209
208
  logger.debug(`Agent loop iteration ${depth + 1}/${maxDepth}`);
210
209
 
211
- const response = await this.client.messages.create({
212
- model,
213
- max_tokens,
214
- temperature,
210
+ const response = await this.provider.chat({
215
211
  system: this.systemPrompt,
216
- tools: toolDefinitions,
217
212
  messages,
213
+ tools: toolDefinitions,
218
214
  });
219
215
 
220
- if (response.stop_reason === 'end_turn') {
221
- const textBlocks = response.content
222
- .filter((b) => b.type === 'text')
223
- .map((b) => b.text);
224
- const reply = textBlocks.join('\n');
225
-
216
+ if (response.stopReason === 'end_turn') {
217
+ const reply = response.text || '';
226
218
  this.conversationManager.addMessage(chatId, 'assistant', reply);
227
219
  return reply;
228
220
  }
229
221
 
230
- if (response.stop_reason === 'tool_use') {
231
- messages.push({ role: 'assistant', content: response.content });
222
+ if (response.stopReason === 'tool_use') {
223
+ messages.push({ role: 'assistant', content: response.rawContent });
232
224
 
233
- // Send Claude's thinking text to the user
234
- const thinkingBlocks = response.content.filter((b) => b.type === 'text' && b.text.trim());
235
- if (thinkingBlocks.length > 0) {
236
- const thinking = thinkingBlocks.map((b) => b.text).join('\n');
237
- logger.info(`Agent thinking: ${thinking.slice(0, 200)}`);
238
- await this._sendUpdate(`💭 ${thinking}`);
225
+ // Send thinking text to the user
226
+ if (response.text && response.text.trim()) {
227
+ logger.info(`Agent thinking: ${response.text.slice(0, 200)}`);
228
+ await this._sendUpdate(`💭 ${response.text}`);
239
229
  }
240
230
 
241
- const toolUseBlocks = response.content.filter((b) => b.type === 'tool_use');
242
231
  const toolResults = [];
243
232
 
244
- for (let i = 0; i < toolUseBlocks.length; i++) {
245
- const block = toolUseBlocks[i];
233
+ for (let i = 0; i < response.toolCalls.length; i++) {
234
+ const block = response.toolCalls[i];
235
+
236
+ // Build a block-like object for _checkPause (needs .type for remainingBlocks filter)
237
+ const blockObj = { type: 'tool_use', id: block.id, name: block.name, input: block.input };
246
238
 
247
239
  // Check if we need to pause (missing cred or dangerous action)
248
- const pauseMsg = this._checkPause(
249
- chatId,
250
- block,
251
- user,
252
- toolResults,
253
- toolUseBlocks.slice(i + 1),
254
- messages,
255
- );
240
+ const remaining = response.toolCalls.slice(i + 1).map((tc) => ({
241
+ type: 'tool_use', id: tc.id, name: tc.name, input: tc.input,
242
+ }));
243
+ const pauseMsg = this._checkPause(chatId, blockObj, user, toolResults, remaining, messages);
256
244
  if (pauseMsg) return pauseMsg;
257
245
 
258
246
  const summary = this._formatToolSummary(block.name, block.input);
@@ -278,14 +266,10 @@ export class Agent {
278
266
  }
279
267
 
280
268
  // Unexpected stop reason
281
- logger.warn(`Unexpected stop_reason: ${response.stop_reason}`);
282
- const fallbackText = response.content
283
- .filter((b) => b.type === 'text')
284
- .map((b) => b.text)
285
- .join('\n');
286
- if (fallbackText) {
287
- this.conversationManager.addMessage(chatId, 'assistant', fallbackText);
288
- return fallbackText;
269
+ logger.warn(`Unexpected stopReason: ${response.stopReason}`);
270
+ if (response.text) {
271
+ this.conversationManager.addMessage(chatId, 'assistant', response.text);
272
+ return response.text;
289
273
  }
290
274
  return 'Something went wrong — unexpected response from the model.';
291
275
  }
@@ -0,0 +1,44 @@
1
+ import Anthropic from '@anthropic-ai/sdk';
2
+ import { BaseProvider } from './base.js';
3
+
4
export class AnthropicProvider extends BaseProvider {
  /**
   * @param {object} opts - { model, maxTokens, temperature, apiKey }
   */
  constructor(opts) {
    super(opts);
    this.client = new Anthropic({ apiKey: this.apiKey });
  }

  /**
   * Send a chat completion request in Anthropic-native format.
   * @param {object} opts
   * @param {string} opts.system - System prompt
   * @param {Array} opts.messages - Anthropic-format messages
   * @param {Array} opts.tools - Anthropic-format tool definitions
   * @returns {Promise<{stopReason: string, text: string, toolCalls: Array<{id, name, input}>, rawContent: Array}>}
   */
  async chat({ system, messages, tools }) {
    const response = await this.client.messages.create({
      model: this.model,
      max_tokens: this.maxTokens,
      temperature: this.temperature,
      system,
      tools,
      messages,
    });

    // Fix: previously every non-'end_turn' stop reason (including
    // 'max_tokens' truncation and 'stop_sequence') was reported as
    // 'tool_use' even with zero tool calls, which made the agent loop
    // push an assistant turn and then send an empty tool-result message.
    // Pass the real reason through; the caller already handles
    // unexpected reasons with a text fallback.
    const stopReason = response.stop_reason === 'tool_use' ? 'tool_use' : response.stop_reason;

    const text = response.content
      .filter((b) => b.type === 'text')
      .map((b) => b.text)
      .join('\n');

    const toolCalls = response.content
      .filter((b) => b.type === 'tool_use')
      .map((b) => ({ id: b.id, name: b.name, input: b.input }));

    return {
      stopReason,
      text,
      toolCalls,
      rawContent: response.content,
    };
  }

  /** Minimal round-trip request — throws if the key/model is invalid. */
  async ping() {
    await this.client.messages.create({
      model: this.model,
      max_tokens: 16,
      messages: [{ role: 'user', content: 'ping' }],
    });
  }
}
@@ -0,0 +1,30 @@
1
/**
 * Abstract provider interface.
 * Concrete providers subclass this and override chat() and ping().
 */

export class BaseProvider {
  /**
   * @param {object} opts
   * @param {string} opts.model - Model identifier to request.
   * @param {number} opts.maxTokens - Response token budget.
   * @param {number} opts.temperature - Sampling temperature.
   * @param {string} opts.apiKey - Provider credential.
   */
  constructor({ model, maxTokens, temperature, apiKey }) {
    Object.assign(this, { model, maxTokens, temperature, apiKey });
  }

  /**
   * Send one chat completion request.
   * @param {object} opts
   * @param {string} opts.system - System prompt
   * @param {Array} opts.messages - Anthropic-format messages
   * @param {Array} opts.tools - Anthropic-format tool definitions
   * @returns {Promise<{stopReason: 'end_turn'|'tool_use', text: string, toolCalls: Array<{id,name,input}>, rawContent: Array}>}
   */
  async chat({ system, messages, tools }) {
    throw new Error('chat() not implemented');
  }

  /** Quick connectivity probe — rejects on failure. */
  async ping() {
    throw new Error('ping() not implemented');
  }
}
@@ -0,0 +1,36 @@
1
+ import { AnthropicProvider } from './anthropic.js';
2
+ import { OpenAICompatProvider } from './openai-compat.js';
3
+ import { PROVIDERS } from './models.js';
4
+
5
+ export { PROVIDERS } from './models.js';
6
+
7
/**
 * Build the provider implementation selected by config.brain.
 * @param {object} config - Full app config (must have config.brain)
 * @returns {BaseProvider}
 * @throws {Error} When config.brain.provider is not in the catalog.
 */
export function createProvider(config) {
  const brain = config.brain;
  const def = PROVIDERS[brain.provider];

  if (!def) {
    throw new Error(`Unknown provider: ${brain.provider}. Valid: ${Object.keys(PROVIDERS).join(', ')}`);
  }

  const opts = {
    model: brain.model,
    maxTokens: brain.max_tokens,
    temperature: brain.temperature,
    apiKey: brain.api_key,
  };

  // Anthropic speaks its own API; OpenAI, Google, and Groq all go
  // through the OpenAI-compatible client with a per-provider base URL.
  return brain.provider === 'anthropic'
    ? new AnthropicProvider(opts)
    : new OpenAICompatProvider({ ...opts, baseUrl: def.baseUrl || undefined });
}
@@ -0,0 +1,54 @@
1
/**
 * Provider & model catalog — single source of truth.
 * Each entry: display name, env-var label for its API key, optional
 * OpenAI-compatible base URL, and the selectable model list.
 */

export const PROVIDERS = {
  anthropic: {
    name: 'Anthropic (Claude)',
    envKey: 'ANTHROPIC_API_KEY',
    models: [
      { id: 'claude-sonnet-4-20250514', label: 'Claude Sonnet 4' },
      { id: 'claude-opus-4-20250514', label: 'Claude Opus 4' },
      { id: 'claude-haiku-4-5-20251001', label: 'Claude Haiku 4.5' },
    ],
  },
  openai: {
    name: 'OpenAI (GPT)',
    envKey: 'OPENAI_API_KEY',
    models: [
      { id: 'gpt-4o', label: 'GPT-4o' },
      { id: 'gpt-4o-mini', label: 'GPT-4o Mini' },
      { id: 'o1', label: 'o1' },
      { id: 'o3-mini', label: 'o3-mini' },
    ],
  },
  google: {
    name: 'Google (Gemini)',
    envKey: 'GOOGLE_API_KEY',
    baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai/',
    models: [
      { id: 'gemini-2.0-flash', label: 'Gemini 2.0 Flash' },
      { id: 'gemini-2.5-pro', label: 'Gemini 2.5 Pro' },
    ],
  },
  groq: {
    name: 'Groq',
    envKey: 'GROQ_API_KEY',
    baseUrl: 'https://api.groq.com/openai/v1',
    models: [
      { id: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70B' },
      { id: 'llama-3.1-8b-instant', label: 'Llama 3.1 8B' },
      { id: 'mixtral-8x7b-32768', label: 'Mixtral 8x7B' },
    ],
  },
};

/** Models that don't support system prompts or temperature (reasoning models). */
export const REASONING_MODELS = new Set(['o1', 'o3-mini']);

/**
 * Reverse lookup: which provider owns a given model id.
 * @param {string} modelId
 * @returns {string|null} Provider key, or null when no provider lists it.
 */
export function getProviderForModel(modelId) {
  const hit = Object.entries(PROVIDERS).find(
    ([, provider]) => provider.models.some((m) => m.id === modelId),
  );
  return hit ? hit[0] : null;
}
@@ -0,0 +1,163 @@
1
+ import OpenAI from 'openai';
2
+ import { BaseProvider } from './base.js';
3
+ import { REASONING_MODELS } from './models.js';
4
+
5
/**
 * OpenAI-compatible provider — works with OpenAI, Groq, and Google Gemini
 * via configurable baseURL.
 */
export class OpenAICompatProvider extends BaseProvider {
  /**
   * @param {object} opts - { model, maxTokens, temperature, apiKey, baseUrl? }
   */
  constructor(opts) {
    super(opts);
    this.client = new OpenAI({
      apiKey: this.apiKey,
      ...(opts.baseUrl && { baseURL: opts.baseUrl }),
    });
    this.isReasoningModel = REASONING_MODELS.has(this.model);
  }

  // ── Format conversion helpers ──

  /** Anthropic tool defs → OpenAI function tool defs. */
  _convertTools(tools) {
    if (!tools || tools.length === 0) return undefined;
    return tools.map((t) => ({
      type: 'function',
      function: {
        name: t.name,
        description: t.description,
        parameters: t.input_schema,
      },
    }));
  }

  /** Anthropic messages → OpenAI messages. */
  _convertMessages(system, messages) {
    const out = [];

    // System prompt as first message (skip for reasoning models, which
    // reject the system role).
    if (system && !this.isReasoningModel) {
      const systemText = Array.isArray(system)
        ? system.map((b) => b.text).join('\n')
        : system;
      out.push({ role: 'system', content: systemText });
    }

    for (const msg of messages) {
      if (msg.role === 'user') {
        // Could be a string, content blocks, or tool_result array
        if (typeof msg.content === 'string') {
          out.push({ role: 'user', content: msg.content });
        } else if (Array.isArray(msg.content)) {
          // Check if it's tool results
          if (msg.content[0]?.type === 'tool_result') {
            // Each Anthropic tool_result becomes its own OpenAI 'tool' message.
            for (const tr of msg.content) {
              out.push({
                role: 'tool',
                tool_call_id: tr.tool_use_id,
                content: typeof tr.content === 'string' ? tr.content : JSON.stringify(tr.content),
              });
            }
          } else {
            // Text content blocks
            const text = msg.content
              .filter((b) => b.type === 'text')
              .map((b) => b.text)
              .join('\n');
            out.push({ role: 'user', content: text || '' });
          }
        }
      } else if (msg.role === 'assistant') {
        // Convert Anthropic content blocks → OpenAI format
        if (typeof msg.content === 'string') {
          out.push({ role: 'assistant', content: msg.content });
        } else if (Array.isArray(msg.content)) {
          const textParts = msg.content.filter((b) => b.type === 'text');
          const toolParts = msg.content.filter((b) => b.type === 'tool_use');

          const assistantMsg = {
            role: 'assistant',
            content: textParts.map((b) => b.text).join('\n') || null,
          };

          if (toolParts.length > 0) {
            assistantMsg.tool_calls = toolParts.map((b) => ({
              id: b.id,
              type: 'function',
              function: {
                name: b.name,
                arguments: JSON.stringify(b.input),
              },
            }));
          }

          out.push(assistantMsg);
        }
      }
    }

    return out;
  }

  /**
   * Defensively parse a tool-call arguments string.
   * Fix: models occasionally emit empty or malformed argument JSON; a bare
   * JSON.parse would throw and abort the whole agent turn. Fall back to {}.
   */
  _parseToolArguments(raw) {
    if (!raw) return {};
    try {
      return JSON.parse(raw);
    } catch {
      return {};
    }
  }

  /** OpenAI response → normalized format with rawContent in Anthropic format. */
  _normalizeResponse(response) {
    const choice = response.choices[0];
    const finishReason = choice.finish_reason;

    const stopReason = finishReason === 'tool_calls' ? 'tool_use' : 'end_turn';

    const text = choice.message.content || '';

    const toolCalls = (choice.message.tool_calls || []).map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      input: this._parseToolArguments(tc.function.arguments),
    }));

    // Build rawContent in Anthropic format for message history consistency
    const rawContent = [];
    if (text) {
      rawContent.push({ type: 'text', text });
    }
    for (const tc of toolCalls) {
      rawContent.push({ type: 'tool_use', id: tc.id, name: tc.name, input: tc.input });
    }

    return { stopReason, text, toolCalls, rawContent };
  }

  /**
   * Apply the token-limit/temperature params appropriate for the model.
   * Fix: o-series reasoning models reject `max_tokens` (and `temperature`);
   * they require `max_completion_tokens` instead, so the previous code made
   * chat() and ping() fail for exactly the models REASONING_MODELS covers.
   */
  _applySamplingParams(params, maxTokens, temperature) {
    if (this.isReasoningModel) {
      params.max_completion_tokens = maxTokens;
    } else {
      params.max_tokens = maxTokens;
      params.temperature = temperature;
    }
  }

  // ── Public API ──

  /**
   * Send one chat completion request.
   * @param {{system: string, messages: Array, tools: Array}} opts - Anthropic-format inputs.
   * @returns {Promise<{stopReason: string, text: string, toolCalls: Array, rawContent: Array}>}
   */
  async chat({ system, messages, tools }) {
    const params = {
      model: this.model,
      messages: this._convertMessages(system, messages),
    };

    this._applySamplingParams(params, this.maxTokens, this.temperature);

    const convertedTools = this._convertTools(tools);
    if (convertedTools) {
      params.tools = convertedTools;
    }

    const response = await this.client.chat.completions.create(params);
    return this._normalizeResponse(response);
  }

  /** Minimal round-trip request — throws if the key/model/baseURL is invalid. */
  async ping() {
    const params = {
      model: this.model,
      messages: [{ role: 'user', content: 'ping' }],
    };
    this._applySamplingParams(params, 16, 0);
    await this.client.chat.completions.create(params);
  }
}
@@ -7,6 +7,7 @@ import { definitions as gitDefinitions, handlers as gitHandlers } from './git.js
7
7
  import { definitions as githubDefinitions, handlers as githubHandlers } from './github.js';
8
8
  import { definitions as codingDefinitions, handlers as codingHandlers } from './coding.js';
9
9
  import { definitions as browserDefinitions, handlers as browserHandlers } from './browser.js';
10
+ import { definitions as jiraDefinitions, handlers as jiraHandlers } from './jira.js';
10
11
  import { logToolCall } from '../security/audit.js';
11
12
  import { requiresConfirmation } from '../security/confirm.js';
12
13
 
@@ -20,6 +21,7 @@ export const toolDefinitions = [
20
21
  ...githubDefinitions,
21
22
  ...codingDefinitions,
22
23
  ...browserDefinitions,
24
+ ...jiraDefinitions,
23
25
  ];
24
26
 
25
27
  const handlerMap = {
@@ -32,6 +34,7 @@ const handlerMap = {
32
34
  ...githubHandlers,
33
35
  ...codingHandlers,
34
36
  ...browserHandlers,
37
+ ...jiraHandlers,
35
38
  };
36
39
 
37
40
  export function checkConfirmation(name, params, config) {