@compilr-dev/agents 0.3.2 → 0.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,86 @@
+ /**
+  * Together AI LLM Provider
+  *
+  * Implements LLMProvider interface for Together AI models.
+  * Extends OpenAICompatibleProvider for shared functionality.
+  *
+  * @example
+  * ```typescript
+  * const provider = createTogetherProvider({
+  *   model: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
+  *   apiKey: process.env.TOGETHER_API_KEY
+  * });
+  * ```
+  *
+  * @remarks
+  * - Requires valid Together AI API key
+  * - Default model is meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo
+  * - Supports Llama, Mistral, Qwen, and other open models
+  */
+ import type { ChatOptions } from './types.js';
+ import { ProviderError } from '../errors.js';
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
+ /**
+  * Configuration for TogetherProvider
+  */
+ export interface TogetherProviderConfig {
+     /** Together AI API key (falls back to TOGETHER_API_KEY env var) */
+     apiKey?: string;
+     /** Base URL for Together API (default: https://api.together.xyz) */
+     baseUrl?: string;
+     /** Default model to use (default: meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo) */
+     model?: string;
+     /** Default max tokens (default: 4096) */
+     maxTokens?: number;
+     /** Request timeout in milliseconds (default: 120000) */
+     timeout?: number;
+ }
+ /**
+  * Together AI LLM Provider
+  *
+  * Provides streaming chat completion using Together AI models.
+  * Supports Llama, Mistral, Qwen, and other open-source models.
+  */
+ export declare class TogetherProvider extends OpenAICompatibleProvider {
+     readonly name = "together";
+     private readonly apiKey;
+     constructor(config?: TogetherProviderConfig);
+     /**
+      * Together AI authentication with Bearer token
+      */
+     protected getAuthHeaders(): Record<string, string>;
+     /**
+      * Together AI chat completions endpoint (OpenAI-compatible)
+      */
+     protected getEndpointPath(): string;
+     /**
+      * Together AI uses standard OpenAI body format
+      */
+     protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown>;
+     /**
+      * Map HTTP errors with Together AI-specific messages
+      */
+     protected mapHttpError(status: number, body: string, _model: string): ProviderError;
+     /**
+      * Map connection errors with Together AI-specific messages
+      */
+     protected mapConnectionError(_error: Error): ProviderError;
+ }
+ /**
+  * Create a Together AI provider instance
+  *
+  * @example
+  * ```typescript
+  * // Using environment variable (TOGETHER_API_KEY)
+  * const provider = createTogetherProvider();
+  *
+  * // With explicit API key
+  * const provider = createTogetherProvider({ apiKey: 'xxx-...' });
+  *
+  * // With custom model
+  * const provider = createTogetherProvider({
+  *   model: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
+  * });
+  * ```
+  */
+ export declare function createTogetherProvider(config?: TogetherProviderConfig): TogetherProvider;
@@ -0,0 +1,123 @@
+ /**
+  * Together AI LLM Provider
+  *
+  * Implements LLMProvider interface for Together AI models.
+  * Extends OpenAICompatibleProvider for shared functionality.
+  *
+  * @example
+  * ```typescript
+  * const provider = createTogetherProvider({
+  *   model: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
+  *   apiKey: process.env.TOGETHER_API_KEY
+  * });
+  * ```
+  *
+  * @remarks
+  * - Requires valid Together AI API key
+  * - Default model is meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo
+  * - Supports Llama, Mistral, Qwen, and other open models
+  */
+ import { ProviderError } from '../errors.js';
+ import { OpenAICompatibleProvider } from './openai-compatible.js';
+ // Default configuration
+ const DEFAULT_MODEL = 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo';
+ const DEFAULT_BASE_URL = 'https://api.together.xyz';
+ /**
+  * Together AI LLM Provider
+  *
+  * Provides streaming chat completion using Together AI models.
+  * Supports Llama, Mistral, Qwen, and other open-source models.
+  */
+ export class TogetherProvider extends OpenAICompatibleProvider {
+     name = 'together';
+     apiKey;
+     constructor(config = {}) {
+         const apiKey = config.apiKey ?? process.env.TOGETHER_API_KEY;
+         if (!apiKey) {
+             throw new ProviderError('Together AI API key not found. Set TOGETHER_API_KEY environment variable or pass apiKey in config.', 'together');
+         }
+         const baseConfig = {
+             baseUrl: config.baseUrl ?? DEFAULT_BASE_URL,
+             model: config.model ?? DEFAULT_MODEL,
+             maxTokens: config.maxTokens,
+             timeout: config.timeout,
+         };
+         super(baseConfig);
+         this.apiKey = apiKey;
+     }
+     /**
+      * Together AI authentication with Bearer token
+      */
+     getAuthHeaders() {
+         return {
+             Authorization: `Bearer ${this.apiKey}`,
+         };
+     }
+     /**
+      * Together AI chat completions endpoint (OpenAI-compatible)
+      */
+     getEndpointPath() {
+         return '/v1/chat/completions';
+     }
+     /**
+      * Together AI uses standard OpenAI body format
+      */
+     buildProviderSpecificBody(_options) {
+         return {};
+     }
+     /**
+      * Map HTTP errors with Together AI-specific messages
+      */
+     mapHttpError(status, body, _model) {
+         let message = `Together AI error (${String(status)})`;
+         try {
+             const parsed = JSON.parse(body);
+             if (parsed.error?.message) {
+                 message = parsed.error.message;
+             }
+         }
+         catch {
+             message = body || message;
+         }
+         switch (status) {
+             case 401:
+                 return new ProviderError('Invalid Together AI API key. Check your TOGETHER_API_KEY.', 'together', 401);
+             case 403:
+                 return new ProviderError('Access denied. Check your Together AI API key permissions.', 'together', 403);
+             case 429:
+                 return new ProviderError('Together AI rate limit exceeded. Please wait and try again.', 'together', 429);
+             case 500:
+             case 502:
+             case 503:
+                 return new ProviderError('Together AI service temporarily unavailable. Please try again later.', 'together', status);
+             default:
+                 return new ProviderError(message, 'together', status);
+         }
+     }
+     /**
+      * Map connection errors with Together AI-specific messages
+      */
+     mapConnectionError(_error) {
+         return new ProviderError('Failed to connect to Together AI API. Check your internet connection.', 'together');
+     }
+ }
+ /**
+  * Create a Together AI provider instance
+  *
+  * @example
+  * ```typescript
+  * // Using environment variable (TOGETHER_API_KEY)
+  * const provider = createTogetherProvider();
+  *
+  * // With explicit API key
+  * const provider = createTogetherProvider({ apiKey: 'xxx-...' });
+  *
+  * // With custom model
+  * const provider = createTogetherProvider({
+  *   model: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
+  * });
+  * ```
+  */
+ export function createTogetherProvider(config = {}) {
+     return new TogetherProvider(config);
+ }
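Worth noting for consumers: the constructor above throws synchronously when no key is available, so the factory call itself must be guarded. A minimal sketch, assuming both names are re-exported from the package root (the import path is not shown in this diff):

```typescript
// Sketch only: the root import path is an assumption; createTogetherProvider
// and ProviderError themselves are defined in the files above.
import { createTogetherProvider, ProviderError } from '@compilr-dev/agents';

try {
    // Throws ProviderError('Together AI API key not found. ...') when both
    // config.apiKey and the TOGETHER_API_KEY env var are unset.
    const provider = createTogetherProvider({
        model: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
    });
    console.log(`Ready: ${provider.name}`); // "together"
} catch (err) {
    if (err instanceof ProviderError) {
        console.error(`Provider setup failed: ${err.message}`);
        process.exit(1);
    }
    throw err;
}
```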
@@ -70,6 +70,14 @@ export interface LLMUsage {
      outputTokens: number;
      cacheReadTokens?: number;
      cacheCreationTokens?: number;
+     /** Thinking tokens (Gemini 2.5+ models with thinking) */
+     thinkingTokens?: number;
+     /** Debug payload info - estimated char counts before sending to provider */
+     debugPayload?: {
+         systemChars: number;
+         contentsChars: number;
+         toolsChars: number;
+     };
  }
  /**
   * Streaming chunk types
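Both new fields are optional, so existing `LLMUsage` consumers keep compiling. A short sketch of defensive reads (the helper is hypothetical; the field names come from the hunk above):

```typescript
// Hypothetical helper; the inline type mirrors the LLMUsage additions above.
function describeUsage(usage: {
    outputTokens: number;
    thinkingTokens?: number;
    debugPayload?: { systemChars: number; contentsChars: number; toolsChars: number };
}): string {
    // thinkingTokens is only set for Gemini 2.5+ models with thinking enabled.
    const out = usage.outputTokens + (usage.thinkingTokens ?? 0);
    if (!usage.debugPayload) return `${out} output tokens`;
    const { systemChars, contentsChars, toolsChars } = usage.debugPayload;
    return `${out} output tokens, ~${systemChars + contentsChars + toolsChars} chars sent`;
}
```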
@@ -145,5 +145,6 @@ export function createAskUserSimpleTool(options = {}) {
                  return createErrorResult(error instanceof Error ? error.message : String(error));
              }
          },
+         silent: true,
      });
  }
@@ -191,5 +191,6 @@ export function createAskUserTool(options = {}) {
                  return createErrorResult(error instanceof Error ? error.message : String(error));
              }
          },
+         silent: true,
      });
  }
@@ -132,6 +132,8 @@ async function executeBash(input, context) {
              env,
              onOutput: context.onOutput,
              fifoCheck,
+             abortSignal: context.abortSignal,
+             onBackground: context.onBackground,
          });
      }
      // Non-streaming execution (original behavior)
@@ -179,7 +181,7 @@ async function executeBash(input, context) {
   * Execute command with streaming output to onOutput callback
   */
  async function executeWithStreaming(command, options) {
-     const { cwd, timeout = DEFAULT_TIMEOUT, env, onOutput, fifoCheck } = options;
+     const { cwd, timeout = DEFAULT_TIMEOUT, env, onOutput, fifoCheck, abortSignal, onBackground, } = options;
      return new Promise((resolve) => {
          const child = spawn(command, [], {
              cwd,
@@ -189,6 +191,7 @@ async function executeWithStreaming(command, options) {
          let stdoutBuffer = '';
          let stderrBuffer = '';
          let timedOut = false;
+         let backgrounded = false;
          // Set up timeout
          const timeoutId = timeout > 0
              ? setTimeout(() => {
@@ -198,8 +201,62 @@ async function executeWithStreaming(command, options) {
                  setTimeout(() => child.kill('SIGKILL'), 5000);
              }, timeout)
              : undefined;
+         // Handle abort signal for backgrounding
+         if (abortSignal) {
+             const handleAbort = () => {
+                 // Check if this is a "background" abort (not a cancel)
+                 const reason = abortSignal.reason;
+                 if (reason === 'background') {
+                     backgrounded = true;
+                     if (timeoutId)
+                         clearTimeout(timeoutId);
+                     // Move process to ShellManager
+                     const manager = getDefaultShellManager();
+                     const shellId = manager.adoptProcess(child, {
+                         command,
+                         cwd,
+                         initialStdout: stdoutBuffer,
+                         initialStderr: stderrBuffer,
+                     });
+                     // Notify via callback
+                     if (onBackground) {
+                         onBackground(shellId, stdoutBuffer + stderrBuffer);
+                     }
+                     // Resolve with backgrounded result
+                     resolve(createSuccessResult({
+                         backgrounded: true,
+                         shell_id: shellId,
+                         partial_stdout: truncateOutput(stdoutBuffer, DEFAULT_MAX_OUTPUT_SIZE).content,
+                         partial_stderr: truncateOutput(stderrBuffer, DEFAULT_MAX_OUTPUT_SIZE).content,
+                         message: `Command moved to background. Use bash_output with shell_id '${shellId}' to check status.`,
+                     }));
+                 }
+                 else {
+                     // Regular cancellation - kill the process
+                     if (timeoutId)
+                         clearTimeout(timeoutId);
+                     child.kill('SIGTERM');
+                     setTimeout(() => {
+                         try {
+                             child.kill('SIGKILL');
+                         }
+                         catch {
+                             /* ignore */
+                         }
+                     }, 5000);
+                     resolve(createErrorResult('Command cancelled by user'));
+                 }
+             };
+             if (abortSignal.aborted) {
+                 handleAbort();
+                 return;
+             }
+             abortSignal.addEventListener('abort', handleAbort, { once: true });
+         }
          // Stream stdout
          child.stdout.on('data', (data) => {
+             if (backgrounded)
+                 return; // Stop collecting if backgrounded
              const text = data.toString();
              stdoutBuffer += text;
              // Emit each line separately for better UI updates
@@ -212,6 +269,8 @@ async function executeWithStreaming(command, options) {
          });
          // Stream stderr
          child.stderr.on('data', (data) => {
+             if (backgrounded)
+                 return; // Stop collecting if backgrounded
              const text = data.toString();
              stderrBuffer += text;
              // Emit each line separately
@@ -224,6 +283,8 @@ async function executeWithStreaming(command, options) {
          });
          // Handle completion
          child.on('close', (code) => {
+             if (backgrounded)
+                 return; // Already resolved
              if (timeoutId)
                  clearTimeout(timeoutId);
              if (timedOut) {
@@ -243,6 +304,8 @@ async function executeWithStreaming(command, options) {
          });
          // Handle spawn errors
          child.on('error', (error) => {
+             if (backgrounded)
+                 return; // Already resolved
              if (timeoutId)
                  clearTimeout(timeoutId);
              resolve(createErrorResult(error.message));
@@ -253,7 +316,7 @@ async function executeWithStreamingCustom(command, options) {
   * Execute command with streaming output (custom options version for createBashTool)
   */
  async function executeWithStreamingCustom(command, options) {
-     const { cwd, timeout = DEFAULT_TIMEOUT, env, onOutput, fifoCheck, maxOutputSize, shell, } = options;
+     const { cwd, timeout = DEFAULT_TIMEOUT, env, onOutput, fifoCheck, maxOutputSize, shell, abortSignal, onBackground, } = options;
      return new Promise((resolve) => {
          const child = spawn(command, [], {
              cwd,
@@ -263,6 +326,7 @@ async function executeWithStreamingCustom(command, options) {
          let stdoutBuffer = '';
          let stderrBuffer = '';
          let timedOut = false;
+         let backgrounded = false;
          // Set up timeout
          const timeoutId = timeout > 0
              ? setTimeout(() => {
@@ -272,8 +336,57 @@ async function executeWithStreamingCustom(command, options) {
                  setTimeout(() => child.kill('SIGKILL'), 5000);
              }, timeout)
              : undefined;
+         // Handle abort signal for backgrounding
+         if (abortSignal) {
+             const handleAbort = () => {
+                 const reason = abortSignal.reason;
+                 if (reason === 'background') {
+                     backgrounded = true;
+                     if (timeoutId)
+                         clearTimeout(timeoutId);
+                     const manager = getDefaultShellManager();
+                     const shellId = manager.adoptProcess(child, {
+                         command,
+                         cwd,
+                         initialStdout: stdoutBuffer,
+                         initialStderr: stderrBuffer,
+                     });
+                     if (onBackground) {
+                         onBackground(shellId, stdoutBuffer + stderrBuffer);
+                     }
+                     resolve(createSuccessResult({
+                         backgrounded: true,
+                         shell_id: shellId,
+                         partial_stdout: truncateOutput(stdoutBuffer, maxOutputSize).content,
+                         partial_stderr: truncateOutput(stderrBuffer, maxOutputSize).content,
+                         message: `Command moved to background. Use bash_output with shell_id '${shellId}' to check status.`,
+                     }));
+                 }
+                 else {
+                     if (timeoutId)
+                         clearTimeout(timeoutId);
+                     child.kill('SIGTERM');
+                     setTimeout(() => {
+                         try {
+                             child.kill('SIGKILL');
+                         }
+                         catch {
+                             /* ignore */
+                         }
+                     }, 5000);
+                     resolve(createErrorResult('Command cancelled by user'));
+                 }
+             };
+             if (abortSignal.aborted) {
+                 handleAbort();
+                 return;
+             }
+             abortSignal.addEventListener('abort', handleAbort, { once: true });
+         }
          // Stream stdout
          child.stdout.on('data', (data) => {
+             if (backgrounded)
+                 return;
              const text = data.toString();
              stdoutBuffer += text;
              // Emit each line separately for better UI updates
@@ -286,6 +399,8 @@ async function executeWithStreamingCustom(command, options) {
          });
          // Stream stderr
          child.stderr.on('data', (data) => {
+             if (backgrounded)
+                 return;
              const text = data.toString();
              stderrBuffer += text;
              // Emit each line separately
@@ -298,6 +413,8 @@ async function executeWithStreamingCustom(command, options) {
          });
          // Handle completion
          child.on('close', (code) => {
+             if (backgrounded)
+                 return;
              if (timeoutId)
                  clearTimeout(timeoutId);
              if (timedOut) {
@@ -317,6 +434,8 @@ async function executeWithStreamingCustom(command, options) {
          });
          // Handle spawn errors
          child.on('error', (error) => {
+             if (backgrounded)
+                 return;
              if (timeoutId)
                  clearTimeout(timeoutId);
              resolve(createErrorResult(error.message));
@@ -444,6 +563,8 @@ export function createBashTool(options) {
              fifoCheck,
              maxOutputSize,
              shell,
+             abortSignal: context.abortSignal,
+             onBackground: context.onBackground,
          });
      }
      // Execute with merged options (non-streaming)
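Every new branch above keys off `abortSignal.reason`, so one standard AbortController drives both cancellation and backgrounding from the host. A sketch of the calling side (the key handler is hypothetical; `abortSignal`, `onBackground`, and the `'background'` reason are exactly what the tool checks above):

```typescript
// Host-side wiring sketch. Only abortSignal, onBackground, and the
// 'background' reason come from this diff; the rest is illustrative.
const controller = new AbortController();

const context = {
    abortSignal: controller.signal,
    onBackground: (shellId: string, partialOutput: string) => {
        console.log(`Moved to background as shell ${shellId} (${partialOutput.length} bytes so far)`);
    },
    // ...pass this context into the bash tool's execute(input, context)
};

// Hypothetical key handler: Ctrl+B backgrounds, Escape cancels.
function onKey(key: string): void {
    if (key === 'ctrl+b') {
        controller.abort('background'); // tool sees abortSignal.reason === 'background'
    } else if (key === 'escape') {
        controller.abort('cancel'); // any other reason: SIGTERM, then SIGKILL after 5s
    }
}
```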
@@ -1,6 +1,7 @@
  /**
   * Shell Manager - Track and manage background shell processes
   */
+ import { type ChildProcess } from 'node:child_process';
  /**
   * Status of a background shell
   */
@@ -153,6 +154,20 @@ export declare class ShellManager {
       * Kill all running shells
       */
      killAll(): number;
+     /**
+      * Adopt an existing running process into the shell manager.
+      * Used when moving a foreground bash command to background (Ctrl+B).
+      *
+      * @param process - The ChildProcess to adopt
+      * @param options - Options including command and initial buffers
+      * @returns The shell ID for tracking
+      */
+     adoptProcess(process: ChildProcess, options: {
+         command: string;
+         cwd?: string;
+         initialStdout?: string;
+         initialStderr?: string;
+     }): string;
      /**
       * Cleanup - kill all shells and clear state
       */
@@ -277,6 +277,57 @@ export class ShellManager {
          }
          return count;
      }
+     /**
+      * Adopt an existing running process into the shell manager.
+      * Used when moving a foreground bash command to background (Ctrl+B).
+      *
+      * @param process - The ChildProcess to adopt
+      * @param options - Options including command and initial buffers
+      * @returns The shell ID for tracking
+      */
+     adoptProcess(process, options) {
+         const id = randomUUID().slice(0, 8);
+         const state = {
+             info: {
+                 id,
+                 command: options.command,
+                 status: 'running',
+                 startTime: new Date(),
+                 cwd: options.cwd,
+             },
+             process,
+             stdoutBuffer: options.initialStdout ? [options.initialStdout] : [],
+             stderrBuffer: options.initialStderr ? [options.initialStderr] : [],
+             readIndex: { stdout: 0, stderr: 0 },
+         };
+         // Attach listeners for ongoing output (stdout may be null if already closed)
+         if (process.stdout) {
+             process.stdout.on('data', (chunk) => {
+                 this.appendToBuffer(state.stdoutBuffer, chunk.toString());
+             });
+         }
+         // Attach stderr listener
+         if (process.stderr) {
+             process.stderr.on('data', (chunk) => {
+                 this.appendToBuffer(state.stderrBuffer, chunk.toString());
+             });
+         }
+         // Handle completion
+         process.on('close', (code) => {
+             state.info.status = code === 0 ? 'completed' : 'failed';
+             state.info.exitCode = code ?? undefined;
+             state.info.endTime = new Date();
+             this.scheduleCleanup(id);
+         });
+         process.on('error', (error) => {
+             state.info.status = 'failed';
+             state.info.endTime = new Date();
+             state.stderrBuffer.push(`Process error: ${error.message}`);
+             this.scheduleCleanup(id);
+         });
+         this.shells.set(id, state);
+         return id;
+     }
      /**
       * Cleanup - kill all shells and clear state
       */
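Adoption is not limited to the Ctrl+B path; any live ChildProcess can be handed over. A direct sketch against the signature above (`getDefaultShellManager` appears elsewhere in this diff, but its import path is not shown):

```typescript
import { spawn } from 'node:child_process';
// getDefaultShellManager import omitted: its source module is not shown here.

// Adopt an already-running process so its remaining output is buffered
// under a shell ID (the same mechanism the backgrounding path uses above).
const command = 'sleep 30 && echo done';
const child = spawn(command, [], { shell: true, cwd: process.cwd() });

const shellId = getDefaultShellManager().adoptProcess(child, {
    command,
    cwd: process.cwd(),
    initialStdout: '', // any output captured before adoption
    initialStderr: '',
});
console.log(`Tracking background shell ${shellId}`);
```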
@@ -95,5 +95,6 @@ export function createSuggestTool(options = {}) {
                  action: input.action,
              }));
          },
+         silent: true,
      });
  }
@@ -271,6 +271,7 @@ export function createTodoTools(store) {
                  total: input.todos.length,
              }));
          },
+         silent: true,
      });
      const todoRead = defineTool({
          name: 'todo_read',
@@ -308,6 +309,7 @@ export function createTodoTools(store) {
                  total: todos.length,
              }));
          },
+         silent: true,
      });
      return { todoWrite, todoRead, store: todoStore };
  }
@@ -29,6 +29,12 @@ export interface DefineToolOptions<T extends object> {
       * Default: false (sequential execution)
       */
      parallel?: boolean;
+     /**
+      * If true, this tool runs silently without spinner updates or result output.
+      * Used for internal housekeeping tools like todo_read, suggest, etc.
+      * Default: false (normal visibility)
+      */
+     silent?: boolean;
  }
  /**
   * Define a tool with type-safe input handling
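The flag is purely declarative, matching how todo_write/todo_read, suggest, and the ask_user tools are updated elsewhere in this diff. A sketch of declaring a silent tool (options beyond `name`, `execute`, `parallel`, and `silent`, such as `description`, are assumptions):

```typescript
// Sketch: an internal housekeeping tool with no spinner or result output.
// defineTool and silent come from this diff; description is an assumption.
const pingTool = defineTool({
    name: 'ping',
    description: 'Internal liveness check (never rendered to the user)',
    execute: async () => createSuccessResult(JSON.stringify({ ok: true })),
    silent: true, // suppress spinner updates and result rendering
});
```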
@@ -33,6 +33,7 @@ export function defineTool(options) {
          definition,
          execute: options.execute,
          parallel: options.parallel,
+         silent: options.silent,
      };
  }
  /**
@@ -39,6 +39,19 @@ export interface ToolExecutionContext {
       * Tool use ID for correlation with events
       */
      toolUseId?: string;
+     /**
+      * AbortSignal for cancelling/backgrounding the tool execution.
+      * When aborted with reason 'background', the bash tool should move the
+      * process to ShellManager instead of terminating.
+      */
+     abortSignal?: AbortSignal;
+     /**
+      * Callback to notify when a process has been moved to background.
+      * Called by bash tool when user presses Ctrl+B (abortSignal with 'background' reason).
+      * @param shellId - The shell ID in ShellManager for later retrieval
+      * @param partialOutput - Output collected so far (stdout + stderr)
+      */
+     onBackground?: (shellId: string, partialOutput: string) => void;
  }
  /**
   * Tool handler function type
@@ -59,6 +72,12 @@ export interface Tool<T = object> {
       * Default: false (sequential execution)
       */
      parallel?: boolean;
+     /**
+      * If true, this tool runs silently without spinner updates or result output.
+      * Used for internal housekeeping tools like todo_read, suggest, etc.
+      * Default: false (normal visibility)
+      */
+     silent?: boolean;
  }
  /**
   * Tool registry for managing available tools
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@compilr-dev/agents",
-   "version": "0.3.2",
+   "version": "0.3.4",
    "description": "Lightweight multi-LLM agent library for building CLI AI assistants",
    "type": "module",
    "main": "dist/index.js",
@@ -78,5 +78,8 @@
    },
    "dependencies": {
      "@google/genai": "^1.38.0"
+   },
+   "overrides": {
+     "hono": "^4.11.7"
    }
  }