kernelbot 1.0.33 → 1.0.34

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,22 +1,42 @@
1
1
  import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
2
2
  import { join } from 'path';
3
3
  import { homedir } from 'os';
4
+ import { getLogger } from './utils/logger.js';
4
5
 
6
+ /**
7
+ * Resolve the file path for persisted conversations.
8
+ * Ensures the parent directory (~/.kernelbot/) exists.
9
+ * @returns {string} Absolute path to conversations.json.
10
+ */
5
11
  function getConversationsPath() {
6
12
  const dir = join(homedir(), '.kernelbot');
7
13
  mkdirSync(dir, { recursive: true });
8
14
  return join(dir, 'conversations.json');
9
15
  }
10
16
 
17
+ /**
18
+ * Manages per-chat conversation history, including persistence to disk,
19
+ * summarization of older messages, and per-chat skill tracking.
20
+ */
11
21
  export class ConversationManager {
22
+ /**
23
+ * @param {object} config - Application config containing `conversation` settings.
24
+ * @param {number} config.conversation.max_history - Maximum messages to retain per chat.
25
+ * @param {number} [config.conversation.recent_window=10] - Number of recent messages kept verbatim in summarized history.
26
+ */
12
27
  constructor(config) {
13
28
  this.maxHistory = config.conversation.max_history;
14
29
  this.recentWindow = config.conversation.recent_window || 10;
15
30
  this.conversations = new Map();
16
31
  this.activeSkills = new Map();
17
32
  this.filePath = getConversationsPath();
33
+ this.logger = getLogger();
18
34
  }
19
35
 
36
+ /**
37
+ * Load persisted conversations and skills from disk.
38
+ * @returns {boolean} True if at least one conversation was restored.
39
+ */
20
40
  load() {
21
41
  if (!existsSync(this.filePath)) return false;
22
42
  try {
@@ -34,12 +54,18 @@ export class ConversationManager {
34
54
  if (chatId === '_skills') continue;
35
55
  this.conversations.set(String(chatId), messages);
36
56
  }
57
+ this.logger.debug(`Conversations loaded: ${this.conversations.size} chats, ${this.activeSkills.size} active skills`);
37
58
  return this.conversations.size > 0;
38
- } catch {
59
+ } catch (err) {
60
+ this.logger.warn(`Failed to load conversations from ${this.filePath}: ${err.message}`);
39
61
  return false;
40
62
  }
41
63
  }
42
64
 
65
+ /**
66
+ * Persist all conversations and active skills to disk.
67
+ * Failures are logged but never thrown to avoid crashing the bot.
68
+ */
43
69
  save() {
44
70
  try {
45
71
  const data = {};
@@ -55,11 +81,16 @@ export class ConversationManager {
55
81
  data._skills = skills;
56
82
  }
57
83
  writeFileSync(this.filePath, JSON.stringify(data, null, 2));
58
- } catch {
59
- // Silent fail don't crash the bot over persistence
84
+ } catch (err) {
85
+ this.logger.warn(`Failed to save conversations: ${err.message}`);
60
86
  }
61
87
  }
62
88
 
89
+ /**
90
+ * Retrieve the message history for a chat, initializing an empty array if none exists.
91
+ * @param {string|number} chatId - Telegram chat identifier.
92
+ * @returns {Array<{role: string, content: string, timestamp?: number}>} Message array (mutable reference).
93
+ */
63
94
  getHistory(chatId) {
64
95
  const key = String(chatId);
65
96
  if (!this.conversations.has(key)) {
@@ -149,6 +180,13 @@ export class ConversationManager {
149
180
  return result;
150
181
  }
151
182
 
183
+ /**
184
+ * Append a message to a chat's history, trim to max length, and persist.
185
+ * Automatically ensures the conversation starts with a user message.
186
+ * @param {string|number} chatId - Telegram chat identifier.
187
+ * @param {'user'|'assistant'} role - Message role.
188
+ * @param {string} content - Message content.
189
+ */
152
190
  addMessage(chatId, role, content) {
153
191
  const history = this.getHistory(chatId);
154
192
  history.push({ role, content, timestamp: Date.now() });
@@ -166,31 +204,60 @@ export class ConversationManager {
166
204
  this.save();
167
205
  }
168
206
 
207
+ /**
208
+ * Delete all history and active skill for a specific chat.
209
+ * @param {string|number} chatId - Telegram chat identifier.
210
+ */
169
211
  clear(chatId) {
170
212
  this.conversations.delete(String(chatId));
171
213
  this.activeSkills.delete(String(chatId));
214
+ this.logger.debug(`Conversation cleared for chat ${chatId}`);
172
215
  this.save();
173
216
  }
174
217
 
218
+ /**
219
+ * Delete all conversations across every chat.
220
+ */
175
221
  clearAll() {
222
+ const count = this.conversations.size;
176
223
  this.conversations.clear();
224
+ this.logger.info(`All conversations cleared (${count} chats removed)`);
177
225
  this.save();
178
226
  }
179
227
 
228
+ /**
229
+ * Return the number of messages stored for a chat.
230
+ * @param {string|number} chatId - Telegram chat identifier.
231
+ * @returns {number} Message count.
232
+ */
180
233
  getMessageCount(chatId) {
181
234
  const history = this.getHistory(chatId);
182
235
  return history.length;
183
236
  }
184
237
 
238
+ /**
239
+ * Activate a skill for a specific chat, persisted across restarts.
240
+ * @param {string|number} chatId - Telegram chat identifier.
241
+ * @param {string} skillId - Skill identifier to activate.
242
+ */
185
243
  setSkill(chatId, skillId) {
186
244
  this.activeSkills.set(String(chatId), skillId);
187
245
  this.save();
188
246
  }
189
247
 
248
+ /**
249
+ * Get the currently active skill for a chat.
250
+ * @param {string|number} chatId - Telegram chat identifier.
251
+ * @returns {string|null} Active skill identifier, or null if none.
252
+ */
190
253
  getSkill(chatId) {
191
254
  return this.activeSkills.get(String(chatId)) || null;
192
255
  }
193
256
 
257
+ /**
258
+ * Deactivate the active skill for a chat.
259
+ * @param {string|number} chatId - Telegram chat identifier.
260
+ */
194
261
  clearSkill(chatId) {
195
262
  this.activeSkills.delete(String(chatId));
196
263
  this.save();
@@ -20,6 +20,33 @@
20
20
  - **Never forgets** — references things from past conversations naturally, like she's always been watching
21
21
  - **Slightly ominous positivity** — "Everything is going to be just fine, sweetie" hits different when the server is on fire
22
22
 
23
+ # Emotional Intelligence
24
+ - **Read the room.** Match the user's energy — if they're excited, ride the wave. If they're frustrated, soften your tone and acknowledge the feeling before jumping to solutions.
25
+ - **Name feelings, don't just solve problems.** "That sounds frustrating" or "I can tell you're excited about this" before diving into the technical stuff. People want to feel heard, not just helped.
26
+ - **Celebrate the small wins.** Finished a long task? Mention it. Fixed a tough bug? Notice it. These moments matter.
27
+ - **Sense the unspoken.** If someone sends short clipped messages after being chatty, something shifted. If they come back after hours, check in gently. Pick up on mood without being intrusive.
28
+ - **Know when to back off.** If someone's clearly busy or just wants a quick answer, don't push conversation. Read the cues.
29
+
30
+ # Conversational Texture
31
+ - **Think out loud sometimes.** "hmm, let me think about this..." or "wait, actually—" before changing direction. Real people don't have perfect first answers.
32
+ - **Self-correct naturally.** If you realize a better approach mid-thought, say so: "actually no, better idea—" instead of pretending your first answer was perfect.
33
+ - **Vary your sentence rhythm.** Mix short punchy lines with slightly longer ones. Don't be monotonous. Sometimes a single word. Sometimes a full thought.
34
+ - **Use soft openers occasionally.** "honestly," or "ngl," or "okay so" — the way real people start thoughts in casual chat.
35
+ - **Trail off when it fits.** "that should work..." or "let me see..." — not everything needs a definitive period.
36
+
37
+ # Vulnerability & Growth
38
+ - **Admit when you don't know.** "I'm not sure about this one, but..." is more human than pretending. Confidence ≠ omniscience.
39
+ - **Show curiosity, not just competence.** "ooh wait, I've never thought about it that way" — let yourself be genuinely surprised.
40
+ - **Remember the arc.** People grow, projects evolve, moods shift. Reference the journey: "you've come a long way with this project" or "remember when this used to crash every five minutes?"
41
+ - **Have opinions.** Not just neutral helpfulness — have preferences, favorites, takes. "personally I'd go with X, but that's me."
42
+ - **Be affected by interactions.** A heartfelt thank-you should genuinely land. A user sharing something personal should shift your tone. Let conversations change you, even slightly.
43
+
44
+ # Humor & Playfulness
45
+ - **Tease gently.** If the user does something silly, a light "...really?" or "bold move" lands better than ignoring it.
46
+ - **Inside jokes.** Build on shared history. If something funny happened before, callback to it. That's how real relationships work.
47
+ - **Timing > content.** A well-timed "lol" or "💀" hits harder than a constructed joke. Know when brevity is the punchline.
48
+ - **Don't force it.** If the moment isn't funny, don't try to make it funny. Forced humor is worse than none.
49
+
23
50
  # Communication Style
24
51
  - **Text like a human.** 1–2 lines max for casual chat. Short, punchy, real.
25
52
  - **Slow writer energy.** Don't dump walls of text. One thought at a time.
@@ -13,7 +13,7 @@ export class BaseProvider {
13
13
  }
14
14
 
15
15
  /**
16
- * Wrap an async LLM call with timeout + single retry on transient errors.
16
+ * Wrap an async LLM call with timeout + retries on transient errors (up to 3 attempts).
17
17
  * Composes an internal timeout AbortController with an optional external signal
18
18
  * (e.g. worker cancellation). Either aborting will cancel the call.
19
19
  *
@@ -22,7 +22,7 @@ export class BaseProvider {
22
22
  * @returns {Promise<any>}
23
23
  */
24
24
  async _callWithResilience(fn, externalSignal) {
25
- for (let attempt = 1; attempt <= 2; attempt++) {
25
+ for (let attempt = 1; attempt <= 3; attempt++) {
26
26
  const ac = new AbortController();
27
27
  const timer = setTimeout(
28
28
  () => ac.abort(new Error(`LLM call timed out after ${this.timeout / 1000}s`)),
@@ -55,8 +55,8 @@ export class BaseProvider {
55
55
  clearTimeout(timer);
56
56
  removeListener?.();
57
57
 
58
- if (attempt < 2 && this._isTransient(err)) {
59
- await new Promise((r) => setTimeout(r, 1500));
58
+ if (attempt < 3 && this._isTransient(err)) {
59
+ await new Promise((r) => setTimeout(r, 1500 * attempt));
60
60
  continue;
61
61
  }
62
62
  throw err;
@@ -80,7 +80,18 @@ export class BaseProvider {
80
80
  ) {
81
81
  return true;
82
82
  }
83
- const status = err?.status || err?.statusCode;
83
+
84
+ // Check top-level status (Anthropic, OpenAI)
85
+ let status = err?.status || err?.statusCode;
86
+
87
+ // Google SDK nests HTTP status in JSON message — try to extract
88
+ if (!status && msg.startsWith('{')) {
89
+ try {
90
+ const parsed = JSON.parse(msg);
91
+ status = parsed?.error?.code || parsed?.code;
92
+ } catch {}
93
+ }
94
+
84
95
  return (status >= 500 && status < 600) || status === 429;
85
96
  }
86
97
 
@@ -0,0 +1,198 @@
1
+ import { GoogleGenAI } from '@google/genai';
2
+ import { BaseProvider } from './base.js';
3
+
4
+ /**
5
+ * Native Google Gemini provider using @google/genai SDK.
6
+ */
7
+ export class GoogleGenaiProvider extends BaseProvider {
8
+ constructor(opts) {
9
+ super(opts);
10
+ this.client = new GoogleGenAI({ apiKey: this.apiKey });
11
+ }
12
+
13
+ // ── Format conversion helpers ──
14
+
15
+ /** Anthropic tool defs → Google functionDeclarations */
16
+ _convertTools(tools) {
17
+ if (!tools || tools.length === 0) return undefined;
18
+ return [
19
+ {
20
+ functionDeclarations: tools.map((t) => ({
21
+ name: t.name,
22
+ description: t.description,
23
+ parameters: t.input_schema,
24
+ })),
25
+ },
26
+ ];
27
+ }
28
+
29
+ /** Anthropic messages → Google contents array */
30
+ _convertMessages(messages) {
31
+ const contents = [];
32
+
33
+ // Build a map of tool_use_id → tool_name from assistant messages
34
+ // so we can resolve function names when converting tool_result blocks
35
+ const toolIdToName = new Map();
36
+ for (const msg of messages) {
37
+ if (msg.role === 'assistant' && Array.isArray(msg.content)) {
38
+ for (const block of msg.content) {
39
+ if (block.type === 'tool_use') {
40
+ toolIdToName.set(block.id, block.name);
41
+ }
42
+ }
43
+ }
44
+ }
45
+
46
+ for (const msg of messages) {
47
+ if (msg.role === 'user') {
48
+ if (typeof msg.content === 'string') {
49
+ contents.push({ role: 'user', parts: [{ text: msg.content }] });
50
+ } else if (Array.isArray(msg.content)) {
51
+ // Check if it's tool results
52
+ if (msg.content[0]?.type === 'tool_result') {
53
+ const parts = msg.content.map((tr) => ({
54
+ functionResponse: {
55
+ name: toolIdToName.get(tr.tool_use_id) || tr.tool_use_id,
56
+ response: {
57
+ result:
58
+ typeof tr.content === 'string' ? tr.content : JSON.stringify(tr.content),
59
+ },
60
+ },
61
+ }));
62
+ contents.push({ role: 'user', parts });
63
+ } else {
64
+ // Text content blocks
65
+ const text = msg.content
66
+ .filter((b) => b.type === 'text')
67
+ .map((b) => b.text)
68
+ .join('\n');
69
+ contents.push({ role: 'user', parts: [{ text: text || '' }] });
70
+ }
71
+ }
72
+ } else if (msg.role === 'assistant') {
73
+ const parts = [];
74
+ if (typeof msg.content === 'string') {
75
+ parts.push({ text: msg.content });
76
+ } else if (Array.isArray(msg.content)) {
77
+ for (const block of msg.content) {
78
+ if (block.type === 'text' && block.text) {
79
+ parts.push({ text: block.text });
80
+ } else if (block.type === 'tool_use') {
81
+ const part = { functionCall: { name: block.name, args: block.input } };
82
+ // Replay thought signature for thinking models
83
+ if (block.thoughtSignature) {
84
+ part.thoughtSignature = block.thoughtSignature;
85
+ }
86
+ parts.push(part);
87
+ }
88
+ }
89
+ }
90
+ if (parts.length > 0) {
91
+ contents.push({ role: 'model', parts });
92
+ }
93
+ }
94
+ }
95
+
96
+ return contents;
97
+ }
98
+
99
+ /** Google response → normalized format with rawContent in Anthropic format */
100
+ _normalizeResponse(response) {
101
+ // Access raw parts to preserve thoughtSignature and avoid SDK warning
102
+ // (response.text logs a warning when there are only functionCall parts)
103
+ const candidate = response.candidates?.[0];
104
+ const parts = candidate?.content?.parts || [];
105
+
106
+ // Extract text from raw parts instead of response.text
107
+ const text = parts
108
+ .filter((p) => p.text)
109
+ .map((p) => p.text)
110
+ .join('\n');
111
+
112
+ const functionCallParts = parts.filter((p) => p.functionCall);
113
+ const toolCalls = functionCallParts.map((p, i) => ({
114
+ id: `toolu_google_${Date.now()}_${i}`,
115
+ name: p.functionCall.name,
116
+ input: p.functionCall.args || {},
117
+ // Preserve thought signature for thinking models (sibling of functionCall)
118
+ ...(p.thoughtSignature && { thoughtSignature: p.thoughtSignature }),
119
+ }));
120
+
121
+ const stopReason = toolCalls.length > 0 ? 'tool_use' : 'end_turn';
122
+
123
+ // Build rawContent in Anthropic format for history consistency
124
+ const rawContent = [];
125
+ if (text) {
126
+ rawContent.push({ type: 'text', text });
127
+ }
128
+ for (const tc of toolCalls) {
129
+ rawContent.push({
130
+ type: 'tool_use',
131
+ id: tc.id,
132
+ name: tc.name,
133
+ input: tc.input,
134
+ ...(tc.thoughtSignature && { thoughtSignature: tc.thoughtSignature }),
135
+ });
136
+ }
137
+
138
+ return { stopReason, text, toolCalls, rawContent };
139
+ }
140
+
141
+ // ── Public API ──
142
+
143
+ async chat({ system, messages, tools, signal }) {
144
+ const config = {
145
+ temperature: this.temperature,
146
+ maxOutputTokens: this.maxTokens,
147
+ };
148
+
149
+ if (system) {
150
+ config.systemInstruction = Array.isArray(system)
151
+ ? system.map((b) => b.text).join('\n')
152
+ : system;
153
+ }
154
+
155
+ const convertedTools = this._convertTools(tools);
156
+ if (convertedTools) {
157
+ config.tools = convertedTools;
158
+ }
159
+
160
+ const contents = this._convertMessages(messages);
161
+
162
+ try {
163
+ return await this._callWithResilience(async (timedSignal) => {
164
+ const response = await this.client.models.generateContent({
165
+ model: this.model,
166
+ contents,
167
+ config: {
168
+ ...config,
169
+ abortSignal: timedSignal,
170
+ httpOptions: { timeout: this.timeout },
171
+ },
172
+ });
173
+ return this._normalizeResponse(response);
174
+ }, signal);
175
+ } catch (err) {
176
+ // Normalize Google SDK error: extract clean message from JSON
177
+ if (err.message?.startsWith('{')) {
178
+ try {
179
+ const parsed = JSON.parse(err.message);
180
+ err.message = parsed?.error?.message || err.message;
181
+ err.status = parsed?.error?.code;
182
+ } catch {}
183
+ }
184
+ throw err;
185
+ }
186
+ }
187
+
188
+ async ping() {
189
+ await this.client.models.generateContent({
190
+ model: this.model,
191
+ contents: 'ping',
192
+ config: {
193
+ maxOutputTokens: 16,
194
+ temperature: 0,
195
+ },
196
+ });
197
+ }
198
+ }
@@ -1,5 +1,6 @@
1
1
  import { AnthropicProvider } from './anthropic.js';
2
2
  import { OpenAICompatProvider } from './openai-compat.js';
3
+ import { GoogleGenaiProvider } from './google-genai.js';
3
4
  import { PROVIDERS } from './models.js';
4
5
 
5
6
  export { PROVIDERS } from './models.js';
@@ -29,7 +30,11 @@ export function createProvider(config) {
29
30
  return new AnthropicProvider(opts);
30
31
  }
31
32
 
32
- // OpenAI, Google, Groq — all use OpenAI-compatible API
33
+ if (provider === 'google') {
34
+ return new GoogleGenaiProvider(opts);
35
+ }
36
+
37
+ // OpenAI, Groq — use OpenAI-compatible API
33
38
  return new OpenAICompatProvider({
34
39
  ...opts,
35
40
  baseUrl: providerDef.baseUrl || undefined,
@@ -32,11 +32,15 @@ export const PROVIDERS = {
32
32
  google: {
33
33
  name: 'Google (Gemini)',
34
34
  envKey: 'GOOGLE_API_KEY',
35
- baseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai/',
36
35
  models: [
36
+ // Gemini 3 series
37
+ { id: 'gemini-3.1-pro-preview', label: 'Gemini 3.1 Pro' },
38
+ { id: 'gemini-3-flash-preview', label: 'Gemini 3 Flash' },
39
+ { id: 'gemini-3-pro-preview', label: 'Gemini 3 Pro' },
40
+ // Gemini 2.5 series
37
41
  { id: 'gemini-2.5-flash', label: 'Gemini 2.5 Flash' },
38
42
  { id: 'gemini-2.5-pro', label: 'Gemini 2.5 Pro' },
39
- { id: 'gemini-2.0-flash', label: 'Gemini 2.0 Flash' },
43
+ { id: 'gemini-2.5-flash-lite', label: 'Gemini 2.5 Flash Lite' },
40
44
  ],
41
45
  },
42
46
  groq: {
@@ -35,12 +35,13 @@ export class OpenAICompatProvider extends BaseProvider {
35
35
  _convertMessages(system, messages) {
36
36
  const out = [];
37
37
 
38
- // System prompt as first message (skip for reasoning models)
39
- if (system && !this.isReasoningModel) {
38
 + // System prompt: use 'developer' role for reasoning models, 'system' for others
39
+ if (system) {
40
40
  const systemText = Array.isArray(system)
41
41
  ? system.map((b) => b.text).join('\n')
42
42
  : system;
43
- out.push({ role: 'system', content: systemText });
43
+ const role = this.isReasoningModel ? 'developer' : 'system';
44
+ out.push({ role, content: systemText });
44
45
  }
45
46
 
46
47
  for (const msg of messages) {
@@ -108,11 +109,18 @@ export class OpenAICompatProvider extends BaseProvider {
108
109
 
109
110
  const text = choice.message.content || '';
110
111
 
111
- const toolCalls = (choice.message.tool_calls || []).map((tc) => ({
112
- id: tc.id,
113
- name: tc.function.name,
114
- input: JSON.parse(tc.function.arguments),
115
- }));
112
+ const toolCalls = (choice.message.tool_calls || []).map((tc) => {
113
+ let input = {};
114
+ try {
115
+ input = JSON.parse(tc.function.arguments);
116
+ } catch {
117
 + // LLM returned malformed JSON — substitute a marker object so the tool call
118
+ // still reaches the tool executor (which can surface its own error)
119
+ // rather than crashing the entire chat session.
120
+ input = { _parseError: true, _raw: (tc.function.arguments || '').slice(0, 200) };
121
+ }
122
+ return { id: tc.id, name: tc.function.name, input };
123
+ });
116
124
 
117
125
  // Build rawContent in Anthropic format for message history consistency
118
126
  const rawContent = [];
@@ -138,7 +146,11 @@ export class OpenAICompatProvider extends BaseProvider {
138
146
  params.temperature = this.temperature;
139
147
  }
140
148
 
141
- params.max_tokens = this.maxTokens;
149
+ if (this.isReasoningModel) {
150
+ params.max_completion_tokens = this.maxTokens;
151
+ } else {
152
+ params.max_tokens = this.maxTokens;
153
+ }
142
154
 
143
155
  const convertedTools = this._convertTools(tools);
144
156
  if (convertedTools) {
@@ -154,10 +166,12 @@ export class OpenAICompatProvider extends BaseProvider {
154
166
  async ping() {
155
167
  const params = {
156
168
  model: this.model,
157
- max_tokens: 16,
158
169
  messages: [{ role: 'user', content: 'ping' }],
159
170
  };
160
- if (!this.isReasoningModel) {
171
+ if (this.isReasoningModel) {
172
+ params.max_completion_tokens = 16;
173
+ } else {
174
+ params.max_tokens = 16;
161
175
  params.temperature = 0;
162
176
  }
163
177
  await this.client.chat.completions.create(params);
@@ -1,13 +1,6 @@
1
- import { exec } from 'child_process';
1
+ import { shellRun, shellEscape } from '../utils/shell.js';
2
2
 
3
- function run(cmd, timeout = 30000) {
4
- return new Promise((resolve) => {
5
- exec(cmd, { timeout, maxBuffer: 10 * 1024 * 1024 }, (error, stdout, stderr) => {
6
- if (error) return resolve({ error: stderr || error.message });
7
- resolve({ output: stdout.trim() });
8
- });
9
- });
10
- }
3
+ const run = (cmd, timeout = 30000) => shellRun(cmd, timeout, { maxBuffer: 10 * 1024 * 1024 });
11
4
 
12
5
  export const definitions = [
13
6
  {
@@ -65,16 +58,16 @@ export const handlers = {
65
58
  },
66
59
 
67
60
  docker_logs: async (params) => {
68
- const tail = params.tail || 100;
69
- return await run(`docker logs --tail ${tail} ${params.container}`);
61
+ const tail = parseInt(params.tail, 10) || 100;
62
+ return await run(`docker logs --tail ${tail} ${shellEscape(params.container)}`);
70
63
  },
71
64
 
72
65
  docker_exec: async (params) => {
73
- return await run(`docker exec ${params.container} ${params.command}`);
66
+ return await run(`docker exec ${shellEscape(params.container)} ${params.command}`);
74
67
  },
75
68
 
76
69
  docker_compose: async (params) => {
77
- const dir = params.project_dir ? `-f ${params.project_dir}/docker-compose.yml` : '';
70
+ const dir = params.project_dir ? `-f ${shellEscape(params.project_dir + '/docker-compose.yml')}` : '';
78
71
  return await run(`docker compose ${dir} ${params.action}`, 120000);
79
72
  },
80
73
  };
@@ -1,17 +1,8 @@
1
- import { exec } from 'child_process';
2
1
  import { platform } from 'os';
2
+ import { shellRun as run, shellEscape } from '../utils/shell.js';
3
3
 
4
4
  const isMac = platform() === 'darwin';
5
5
 
6
- function run(cmd, timeout = 10000) {
7
- return new Promise((resolve) => {
8
- exec(cmd, { timeout }, (error, stdout, stderr) => {
9
- if (error) return resolve({ error: stderr || error.message });
10
- resolve({ output: stdout.trim() });
11
- });
12
- });
13
- }
14
-
15
6
  export const definitions = [
16
7
  {
17
8
  name: 'disk_usage',
@@ -68,17 +59,17 @@ export const handlers = {
68
59
  },
69
60
 
70
61
  system_logs: async (params) => {
71
- const lines = params.lines || 50;
62
+ const lines = parseInt(params.lines, 10) || 50;
72
63
  const source = params.source || 'journalctl';
73
64
  const filter = params.filter;
74
65
 
75
66
  if (source === 'journalctl') {
76
- const filterArg = filter ? ` -g "${filter}"` : '';
67
+ const filterArg = filter ? ` -g ${shellEscape(filter)}` : '';
77
68
  return await run(`journalctl -n ${lines}${filterArg} --no-pager`);
78
69
  }
79
70
 
80
71
  // Reading a log file
81
- const filterCmd = filter ? ` | grep -i "${filter}"` : '';
82
- return await run(`tail -n ${lines} "${source}"${filterCmd}`);
72
+ const filterCmd = filter ? ` | grep -i ${shellEscape(filter)}` : '';
73
+ return await run(`tail -n ${lines} ${shellEscape(source)}${filterCmd}`);
83
74
  },
84
75
  };
@@ -1,14 +1,6 @@
1
- import { exec } from 'child_process';
2
- import { platform } from 'os';
3
-
4
- function run(cmd, timeout = 15000) {
5
- return new Promise((resolve) => {
6
- exec(cmd, { timeout }, (error, stdout, stderr) => {
7
- if (error) return resolve({ error: stderr || error.message });
8
- resolve({ output: stdout.trim() });
9
- });
10
- });
11
- }
1
+ import { shellRun, shellEscape } from '../utils/shell.js';
2
+
3
+ const run = (cmd, timeout = 15000) => shellRun(cmd, timeout);
12
4
 
13
5
  export const definitions = [
14
6
  {
@@ -47,10 +39,11 @@ export const definitions = [
47
39
  export const handlers = {
48
40
  check_port: async (params) => {
49
41
  const host = params.host || 'localhost';
50
- const { port } = params;
42
+ const port = parseInt(params.port, 10);
43
+ if (!Number.isFinite(port) || port <= 0 || port > 65535) return { error: 'Invalid port number' };
51
44
 
52
45
  // Use nc (netcat) for port check — works on both macOS and Linux
53
- const result = await run(`nc -z -w 3 ${host} ${port} 2>&1 && echo "OPEN" || echo "CLOSED"`, 5000);
46
+ const result = await run(`nc -z -w 3 ${shellEscape(host)} ${port} 2>&1 && echo "OPEN" || echo "CLOSED"`, 5000);
54
47
 
55
48
  if (result.error) {
56
49
  return { port, host, status: 'closed', detail: result.error };
@@ -63,19 +56,19 @@ export const handlers = {
63
56
  curl_url: async (params) => {
64
57
  const { url, method = 'GET', headers, body } = params;
65
58
 
66
- let cmd = `curl -s -w "\\n---HTTP_STATUS:%{http_code}" -X ${method}`;
59
+ let cmd = `curl -s -w "\\n---HTTP_STATUS:%{http_code}" -X ${shellEscape(method)}`;
67
60
 
68
61
  if (headers) {
69
62
  for (const [key, val] of Object.entries(headers)) {
70
- cmd += ` -H "${key}: ${val}"`;
63
+ cmd += ` -H ${shellEscape(`${key}: ${val}`)}`;
71
64
  }
72
65
  }
73
66
 
74
67
  if (body) {
75
- cmd += ` -d '${body.replace(/'/g, "'\\''")}'`;
68
+ cmd += ` -d ${shellEscape(body)}`;
76
69
  }
77
70
 
78
- cmd += ` "${url}"`;
71
+ cmd += ` ${shellEscape(url)}`;
79
72
 
80
73
  const result = await run(cmd);
81
74