@zds-ai/cli 0.1.8 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. package/README.md +387 -34
  2. package/dist/agent/context-manager.d.ts +70 -0
  3. package/dist/agent/context-manager.js +138 -0
  4. package/dist/agent/context-manager.js.map +1 -0
  5. package/dist/agent/hook-manager.d.ts +194 -0
  6. package/dist/agent/hook-manager.js +676 -0
  7. package/dist/agent/hook-manager.js.map +1 -0
  8. package/dist/agent/llm-agent.d.ts +469 -100
  9. package/dist/agent/llm-agent.js +781 -1580
  10. package/dist/agent/llm-agent.js.map +1 -1
  11. package/dist/agent/message-processor.d.ts +103 -0
  12. package/dist/agent/message-processor.js +225 -0
  13. package/dist/agent/message-processor.js.map +1 -0
  14. package/dist/agent/prompt-variables.d.ts +103 -40
  15. package/dist/agent/prompt-variables.js +250 -113
  16. package/dist/agent/prompt-variables.js.map +1 -1
  17. package/dist/agent/session-manager.d.ts +75 -0
  18. package/dist/agent/session-manager.js +194 -0
  19. package/dist/agent/session-manager.js.map +1 -0
  20. package/dist/agent/tool-executor.d.ts +111 -0
  21. package/dist/agent/tool-executor.js +397 -0
  22. package/dist/agent/tool-executor.js.map +1 -0
  23. package/dist/bin/generate_image_sd.sh +19 -12
  24. package/dist/bin/joycaption.sh +37 -0
  25. package/dist/grok/client.d.ts +52 -0
  26. package/dist/grok/client.js +127 -19
  27. package/dist/grok/client.js.map +1 -1
  28. package/dist/grok/tools.js +42 -8
  29. package/dist/grok/tools.js.map +1 -1
  30. package/dist/hooks/use-input-handler.d.ts +1 -1
  31. package/dist/hooks/use-input-handler.js +100 -13
  32. package/dist/hooks/use-input-handler.js.map +1 -1
  33. package/dist/index.js +25 -3
  34. package/dist/index.js.map +1 -1
  35. package/dist/mcp/config.d.ts +1 -0
  36. package/dist/mcp/config.js +45 -7
  37. package/dist/mcp/config.js.map +1 -1
  38. package/dist/tools/character-tool.js +13 -1
  39. package/dist/tools/character-tool.js.map +1 -1
  40. package/dist/tools/image-tool.d.ts +11 -1
  41. package/dist/tools/image-tool.js +109 -2
  42. package/dist/tools/image-tool.js.map +1 -1
  43. package/dist/tools/introspect-tool.js +131 -30
  44. package/dist/tools/introspect-tool.js.map +1 -1
  45. package/dist/tools/morph-editor.d.ts +21 -9
  46. package/dist/tools/morph-editor.js +21 -9
  47. package/dist/tools/morph-editor.js.map +1 -1
  48. package/dist/ui/components/active-task-status.d.ts +1 -1
  49. package/dist/ui/components/api-key-input.d.ts +1 -1
  50. package/dist/ui/components/backend-status.d.ts +1 -1
  51. package/dist/ui/components/chat-history.d.ts +1 -1
  52. package/dist/ui/components/chat-interface.d.ts +1 -1
  53. package/dist/ui/components/chat-interface.js +1 -1
  54. package/dist/ui/components/chat-interface.js.map +1 -1
  55. package/dist/ui/components/context-status.d.ts +1 -1
  56. package/dist/ui/components/mood-status.d.ts +1 -1
  57. package/dist/ui/components/persona-status.d.ts +1 -1
  58. package/dist/utils/chat-history-manager.d.ts +12 -4
  59. package/dist/utils/chat-history-manager.js +26 -11
  60. package/dist/utils/chat-history-manager.js.map +1 -1
  61. package/dist/utils/hook-executor.d.ts +53 -2
  62. package/dist/utils/hook-executor.js +258 -36
  63. package/dist/utils/hook-executor.js.map +1 -1
  64. package/dist/utils/rephrase-handler.d.ts +1 -1
  65. package/dist/utils/settings-manager.d.ts +41 -11
  66. package/dist/utils/settings-manager.js +172 -40
  67. package/dist/utils/settings-manager.js.map +1 -1
  68. package/dist/utils/slash-commands.d.ts +3 -3
  69. package/dist/utils/slash-commands.js +11 -5
  70. package/dist/utils/slash-commands.js.map +1 -1
  71. package/dist/utils/startup-hook.js +9 -2
  72. package/dist/utils/startup-hook.js.map +1 -1
  73. package/package.json +10 -8
@@ -1,7 +1,13 @@
1
- import { LLMMessage, LLMToolCall } from "../grok/client.js";
1
+ import { LLMToolCall } from "../grok/client.js";
2
2
  import type { ChatCompletionContentPart } from "openai/resources/chat/completions.js";
3
3
  import { ToolResult } from "../types/index.js";
4
4
  import { EventEmitter } from "events";
5
+ import { SessionState } from "../utils/chat-history-manager.js";
6
+ /**
7
+ * Represents a single entry in the conversation history.
8
+ * Supports various message types including user messages, assistant responses,
9
+ * tool calls, tool results, and system messages.
10
+ */
5
11
  export interface ChatEntry {
6
12
  type: "user" | "assistant" | "tool_result" | "tool_call" | "system";
7
13
  content?: string | ChatCompletionContentPart[];
@@ -22,6 +28,10 @@ export interface ChatEntry {
22
28
  [key: string]: any;
23
29
  };
24
30
  }
31
+ /**
32
+ * Represents a chunk of data in the streaming response.
33
+ * Used for real-time communication between the agent and UI components.
34
+ */
25
35
  export interface StreamingChunk {
26
36
  type: "content" | "tool_calls" | "tool_result" | "done" | "token_count" | "user_message";
27
37
  content?: string;
@@ -32,6 +42,60 @@ export interface StreamingChunk {
32
42
  userEntry?: ChatEntry;
33
43
  systemMessages?: ChatEntry[];
34
44
  }
45
+ /**
46
+ * Main LLM Agent class that orchestrates AI conversations with tool execution capabilities.
47
+ *
48
+ * ## Architecture Overview
49
+ *
50
+ * The LLMAgent serves as the central coordinator for AI-powered conversations, managing:
51
+ * - **Conversation Flow**: Handles user messages, AI responses, and multi-turn conversations
52
+ * - **Tool Execution**: Coordinates with various tools (file editing, shell commands, web search, etc.)
53
+ * - **Context Management**: Tracks conversation history and manages token limits
54
+ * - **Session State**: Maintains persona, mood, active tasks, and other session data
55
+ * - **Streaming Support**: Provides real-time response streaming for better UX
56
+ *
57
+ * ## Delegation Architecture
58
+ *
59
+ * The agent delegates specialized functionality to focused manager classes:
60
+ * - **ToolExecutor**: Handles all tool execution, validation, and approval workflows
61
+ * - **HookManager**: Manages persona/mood/task hooks and backend testing
62
+ * - **SessionManager**: Handles session persistence and state restoration
63
+ * - **MessageProcessor**: Processes user input, handles rephrasing, and XML parsing
64
+ * - **ContextManager**: Manages context warnings, compaction, and token tracking
65
+ *
66
+ * ## Key Features
67
+ *
68
+ * - **Multi-Model Support**: Works with various LLM backends (Grok, OpenAI, etc.)
69
+ * - **Tool Integration**: Seamlessly integrates with 15+ built-in tools
70
+ * - **MCP Support**: Extends capabilities via Model Context Protocol servers
71
+ * - **Vision Support**: Handles image inputs for vision-capable models
72
+ * - **Streaming Responses**: Real-time response generation with token counting
73
+ * - **Context Awareness**: Intelligent context management and automatic compaction
74
+ * - **Hook System**: Extensible hook system for custom behaviors
75
+ * - **Session Persistence**: Maintains conversation state across restarts
76
+ *
77
+ * ## Usage Patterns
78
+ *
79
+ * ```typescript
80
+ * // Initialize agent
81
+ * const agent = new LLMAgent(apiKey, baseURL, model);
82
+ * await agent.initialize();
83
+ *
84
+ * // Process messages (non-streaming)
85
+ * const entries = await agent.processUserMessage("Hello, world!");
86
+ *
87
+ * // Process messages (streaming)
88
+ * for await (const chunk of agent.processUserMessageStream("Write a file")) {
89
+ * console.log(chunk);
90
+ * }
91
+ *
92
+ * // Manage session state
93
+ * await agent.setPersona("helpful assistant");
94
+ * await agent.startActiveTask("coding", "writing tests");
95
+ * ```
96
+ *
97
+ * @extends EventEmitter Emits 'contextChange' events for token usage updates
98
+ */
35
99
  export declare class LLMAgent extends EventEmitter {
36
100
  private llmClient;
37
101
  private textEditor;
@@ -58,8 +122,6 @@ export declare class LLMAgent extends EventEmitter {
58
122
  private temperature;
59
123
  private maxTokens;
60
124
  private firstMessageProcessed;
61
- private contextWarningAt80;
62
- private contextWarningAt90;
63
125
  private persona;
64
126
  private personaColor;
65
127
  private mood;
@@ -70,72 +132,314 @@ export declare class LLMAgent extends EventEmitter {
70
132
  private apiKeyEnvVar;
71
133
  private pendingContextEditSession;
72
134
  private rephraseState;
73
- private hookPrefillText;
135
+ private toolExecutor;
136
+ private hookManager;
137
+ private sessionManager;
138
+ private messageProcessor;
139
+ private contextManager;
140
+ private maxContextSize;
141
+ /**
142
+ * Cleans up incomplete tool calls in the message history.
143
+ * Ensures all tool calls have corresponding tool results to prevent API errors.
144
+ *
145
+ * This method scans the last assistant message for tool calls and adds
146
+ * "[Cancelled by user]" results for any tool calls that don't have results.
147
+ *
148
+ * @private
149
+ */
150
+ private cleanupIncompleteToolCalls;
151
+ /**
152
+ * Executes the instance hook if it hasn't been run yet.
153
+ *
154
+ * The instance hook runs once per agent session and can:
155
+ * - Set prompt variables
156
+ * - Add system messages
157
+ * - Provide prefill text for responses
158
+ *
159
+ * @private
160
+ */
161
+ private executeInstanceHookIfNeeded;
162
+ /**
163
+ * Creates a new LLMAgent instance.
164
+ *
165
+ * @param apiKey - API key for the LLM service
166
+ * @param baseURL - Optional base URL for the API endpoint
167
+ * @param model - Optional model name (defaults to saved model or "grok-code-fast-1")
168
+ * @param maxToolRounds - Maximum number of tool execution rounds (default: 400)
169
+ * @param debugLogFile - Optional path for MCP debug logging
170
+ * @param startupHookOutput - Optional output from startup hook execution
171
+ * @param temperature - Optional temperature for API requests (0.0-2.0)
172
+ * @param maxTokens - Optional maximum tokens for API responses
173
+ */
74
174
  constructor(apiKey: string, baseURL?: string, model?: string, maxToolRounds?: number, debugLogFile?: string, startupHookOutput?: string, temperature?: number, maxTokens?: number);
75
175
  private startupHookOutput?;
76
176
  private systemPrompt;
77
177
  private hasRunInstanceHook;
78
178
  /**
79
- * Initialize the agent with dynamic system prompt
80
- * Must be called after construction
179
+ * Initialize the agent with dynamic system prompt.
180
+ *
181
+ * This method must be called after construction to:
182
+ * - Build the system message with current tool availability
183
+ * - Set up the initial conversation context
184
+ * - Execute the instance hook if configured
185
+ *
186
+ * @throws {Error} If system message generation fails
81
187
  */
82
188
  initialize(): Promise<void>;
83
189
  /**
84
- * Build/rebuild the system message with current tool availability
85
- * Updates this.systemPrompt which is always used for messages[0]
190
+ * Build/rebuild the system message with current tool availability.
191
+ *
192
+ * This method:
193
+ * - Generates a dynamic tool list using the introspect tool
194
+ * - Sets the APP:TOOLS variable for template rendering
195
+ * - Renders the full SYSTEM template with all variables
196
+ * - Updates messages[0] with the new system prompt
197
+ *
198
+ * The system prompt is always at messages[0] and contains the core
199
+ * instructions, tool descriptions, and current context information.
86
200
  */
87
201
  buildSystemMessage(): Promise<void>;
202
+ /**
203
+ * Render system message with current variable state
204
+ * Called before LLM API calls and task processing to ensure fresh content
205
+ */
206
+ renderSystemMessage(): void;
207
+ /**
208
+ * Load initial conversation history from persistence.
209
+ *
210
+ * This method:
211
+ * - Loads the chat history (excluding system messages)
212
+ * - Sets or generates the system prompt
213
+ * - Converts history to API message format
214
+ * - Handles tool call/result matching
215
+ * - Updates token counts
216
+ *
217
+ * @param history - Array of chat entries to load
218
+ * @param systemPrompt - Optional system prompt (will generate if not provided)
219
+ */
88
220
  loadInitialHistory(history: ChatEntry[], systemPrompt?: string): Promise<void>;
221
+ /**
222
+ * Initialize Model Context Protocol (MCP) servers in the background.
223
+ *
224
+ * This method loads MCP configuration and initializes any configured
225
+ * servers without blocking agent construction. Errors are logged but
226
+ * don't prevent agent operation.
227
+ *
228
+ * @param debugLogFile - Optional path for MCP debug output
229
+ * @private
230
+ */
89
231
  private initializeMCP;
232
+ /**
233
+ * Checks if the current model is a Grok model.
234
+ * Used to enable Grok-specific features like web search.
235
+ *
236
+ * @returns True if the current model name contains "grok"
237
+ * @private
238
+ */
90
239
  private isGrokModel;
240
+ /**
241
+ * Heuristic to determine if web search should be enabled for a message.
242
+ *
243
+ * Analyzes the message content for keywords that suggest the user is
244
+ * asking for current information, news, or time-sensitive data.
245
+ *
246
+ * @param message - The user message to analyze
247
+ * @returns True if web search should be enabled
248
+ * @private
249
+ */
91
250
  private shouldUseSearchFor;
251
+ /**
252
+ * Process a user message and return all conversation entries generated.
253
+ *
254
+ * This is the main non-streaming message processing method that:
255
+ * - Handles rephrase commands and message preprocessing
256
+ * - Manages the agent loop with tool execution
257
+ * - Processes multiple rounds of AI responses and tool calls
258
+ * - Handles errors and context management
259
+ * - Returns all new conversation entries
260
+ *
261
+ * ## Processing Flow
262
+ *
263
+ * 1. **Setup**: Parse rephrase commands, clean incomplete tool calls
264
+ * 2. **Message Processing**: Parse images, assemble content, add to history
265
+ * 3. **Agent Loop**: Continue until no more tool calls or max rounds reached
266
+ * - Get AI response
267
+ * - Execute any tool calls
268
+ * - Add results to conversation
269
+ * - Get next response if needed
270
+ * 4. **Cleanup**: Handle errors, update context, return entries
271
+ *
272
+ * @param message - The user message to process
273
+ * @returns Promise resolving to array of new conversation entries
274
+ * @throws {Error} If message processing fails critically
275
+ */
92
276
  processUserMessage(message: string): Promise<ChatEntry[]>;
93
277
  /**
94
- * Parse XML-formatted tool calls from message content (x.ai format)
95
- * Converts <xai:function_call> elements to standard LLMToolCall format
278
+ * Process a user message with real-time streaming response.
279
+ *
280
+ * This is the main streaming message processing method that yields
281
+ * chunks of data as the conversation progresses. Provides real-time
282
+ * updates for:
283
+ * - User message processing
284
+ * - AI response streaming (content as it's generated)
285
+ * - Tool execution progress
286
+ * - Token count updates
287
+ * - System messages from hooks
288
+ *
289
+ * ## Streaming Flow
290
+ *
291
+ * 1. **Setup**: Process user message, yield user entry
292
+ * 2. **Agent Loop**: Stream AI responses and execute tools
293
+ * - Stream AI response content in real-time
294
+ * - Yield tool calls when detected
295
+ * - Execute tools and yield results
296
+ * - Continue until completion
297
+ * 3. **Completion**: Yield final token counts and done signal
298
+ *
299
+ * ## Chunk Types
300
+ *
301
+ * - `user_message`: Initial user message entry
302
+ * - `content`: Streaming AI response content
303
+ * - `tool_calls`: Tool calls detected in AI response
304
+ * - `tool_result`: Results from tool execution
305
+ * - `token_count`: Updated token usage
306
+ * - `done`: Processing complete
307
+ *
308
+ * @param message - The user message to process
309
+ * @yields StreamingChunk objects with real-time updates
310
+ * @throws {Error} If streaming fails critically
96
311
  */
97
- private parseXMLToolCalls;
98
- private messageReducer;
99
312
  processUserMessageStream(message: string): AsyncGenerator<StreamingChunk, void, unknown>;
100
313
  /**
101
314
  * Apply default parameter values for tools
102
315
  * This ensures the approval hook sees the same parameters that will be used during execution
103
316
  */
104
- private applyToolParameterDefaults;
105
317
  /**
106
318
  * Validate tool arguments against the tool's schema
107
319
  * Returns null if valid, or an error message if invalid
108
320
  */
109
- private validateToolArguments;
110
- private executeTool;
111
- private executeMCPTool;
321
+ /**
322
+ * Get a copy of the current chat history.
323
+ * @returns Array of chat entries (defensive copy)
324
+ */
112
325
  getChatHistory(): ChatEntry[];
326
+ /**
327
+ * Set the chat history to a new array of entries.
328
+ * @param history - New chat history entries
329
+ */
113
330
  setChatHistory(history: ChatEntry[]): void;
331
+ /**
332
+ * Get the current system prompt.
333
+ * @returns The system prompt string
334
+ */
114
335
  getSystemPrompt(): string;
336
+ /**
337
+ * Set a new system prompt and update the first message.
338
+ * @param prompt - Ignored (deprecated) - system prompt is always rendered from variables
339
+ */
115
340
  setSystemPrompt(prompt: string): void;
341
+ /**
342
+ * Get a copy of the current API messages array.
343
+ * @returns Array of LLM messages (defensive copy)
344
+ */
116
345
  getMessages(): any[];
346
+ /**
347
+ * Get the current token count for the conversation.
348
+ * @returns Number of tokens in the current message context
349
+ */
117
350
  getCurrentTokenCount(): number;
351
+ /**
352
+ * Get the maximum context size for the current model.
353
+ * @returns Maximum number of tokens supported
354
+ * @todo Make this model-specific for different context windows
355
+ */
118
356
  getMaxContextSize(): number;
357
+ /**
358
+ * Get the current context usage as a percentage.
359
+ * @returns Percentage of context window used (0-100)
360
+ */
119
361
  getContextUsagePercent(): number;
120
362
  /**
121
- * Convert context messages to markdown format for viewing
363
+ * Convert the conversation context to markdown format for viewing.
364
+ *
365
+ * Creates a human-readable markdown representation of the conversation
366
+ * including:
367
+ * - Header with context file path and token usage
368
+ * - Numbered messages with timestamps
369
+ * - Formatted tool calls and results
370
+ * - Proper attribution (User/Assistant/System)
371
+ *
122
372
  * Format: (N) Name (role) - timestamp
373
+ *
374
+ * @returns Promise resolving to markdown-formatted conversation
123
375
  */
124
376
  convertContextToMarkdown(): Promise<string>;
377
+ /**
378
+ * Get the current persona setting.
379
+ * @returns Current persona string
380
+ */
125
381
  getPersona(): string;
382
+ /**
383
+ * Get the current persona display color.
384
+ * @returns Color name for persona display
385
+ */
126
386
  getPersonaColor(): string;
387
+ /**
388
+ * Get the current mood setting.
389
+ * @returns Current mood string
390
+ */
127
391
  getMood(): string;
392
+ /**
393
+ * Get the current mood display color.
394
+ * @returns Color name for mood display
395
+ */
128
396
  getMoodColor(): string;
397
+ /**
398
+ * Get the current active task.
399
+ * @returns Current active task string
400
+ */
129
401
  getActiveTask(): string;
402
+ /**
403
+ * Get the current active task action.
404
+ * @returns Current task action string
405
+ */
130
406
  getActiveTaskAction(): string;
407
+ /**
408
+ * Get the current active task display color.
409
+ * @returns Color name for task display
410
+ */
131
411
  getActiveTaskColor(): string;
412
+ /**
413
+ * Set a pending context edit session for file-based context editing.
414
+ * @param tmpJsonPath - Path to temporary JSON file
415
+ * @param contextFilePath - Path to actual context file
416
+ */
132
417
  setPendingContextEditSession(tmpJsonPath: string, contextFilePath: string): void;
418
+ /**
419
+ * Get the current pending context edit session.
420
+ * @returns Edit session info or null if none pending
421
+ */
133
422
  getPendingContextEditSession(): {
134
423
  tmpJsonPath: string;
135
424
  contextFilePath: string;
136
425
  } | null;
426
+ /**
427
+ * Clear the pending context edit session.
428
+ */
137
429
  clearPendingContextEditSession(): void;
430
+ /**
431
+ * Set the rephrase state for message editing operations.
432
+ * @param originalAssistantMessageIndex - Index of original assistant message
433
+ * @param rephraseRequestIndex - Index of rephrase request
434
+ * @param newResponseIndex - Index of new response (-1 if not yet created)
435
+ * @param messageType - Type of message being rephrased
436
+ * @param prefillText - Optional prefill text for the response
437
+ */
138
438
  setRephraseState(originalAssistantMessageIndex: number, rephraseRequestIndex: number, newResponseIndex: number, messageType: "user" | "system", prefillText?: string): void;
439
+ /**
440
+ * Get the current rephrase state.
441
+ * @returns Rephrase state info or null if none active
442
+ */
139
443
  getRephraseState(): {
140
444
  originalAssistantMessageIndex: number;
141
445
  rephraseRequestIndex: number;
@@ -143,135 +447,200 @@ export declare class LLMAgent extends EventEmitter {
143
447
  messageType: "user" | "system";
144
448
  prefillText?: string;
145
449
  } | null;
450
+ /**
451
+ * Clear the current rephrase state.
452
+ */
146
453
  clearRephraseState(): void;
454
+ /**
455
+ * Set the agent's persona with optional color.
456
+ *
457
+ * Executes the persona hook if configured and updates the agent's
458
+ * persona state on success.
459
+ *
460
+ * @param persona - The persona description
461
+ * @param color - Optional display color (defaults to "white")
462
+ * @returns Promise resolving to success/error result
463
+ */
147
464
  setPersona(persona: string, color?: string): Promise<{
148
465
  success: boolean;
149
466
  error?: string;
150
467
  }>;
468
+ /**
469
+ * Set the agent's mood with optional color.
470
+ *
471
+ * Executes the mood hook if configured and updates the agent's
472
+ * mood state on success.
473
+ *
474
+ * @param mood - The mood description
475
+ * @param color - Optional display color (defaults to "white")
476
+ * @returns Promise resolving to success/error result
477
+ */
151
478
  setMood(mood: string, color?: string): Promise<{
152
479
  success: boolean;
153
480
  error?: string;
154
481
  }>;
482
+ /**
483
+ * Start an active task with specified action and color.
484
+ *
485
+ * Executes the task start hook if configured and updates the agent's
486
+ * task state on success.
487
+ *
488
+ * @param activeTask - The task description
489
+ * @param action - The current action within the task
490
+ * @param color - Optional display color (defaults to "white")
491
+ * @returns Promise resolving to success/error result
492
+ */
155
493
  startActiveTask(activeTask: string, action: string, color?: string): Promise<{
156
494
  success: boolean;
157
495
  error?: string;
158
496
  }>;
497
+ /**
498
+ * Transition the active task to a new action/status.
499
+ *
500
+ * Updates the current task action without changing the task itself.
501
+ *
502
+ * @param action - The new action description
503
+ * @param color - Optional display color (defaults to current color)
504
+ * @returns Promise resolving to success/error result
505
+ */
159
506
  transitionActiveTaskStatus(action: string, color?: string): Promise<{
160
507
  success: boolean;
161
508
  error?: string;
162
509
  }>;
510
+ /**
511
+ * Stop the current active task with reason and documentation.
512
+ *
513
+ * Executes the task stop hook if configured and clears the agent's
514
+ * task state on success.
515
+ *
516
+ * @param reason - Reason for stopping the task
517
+ * @param documentationFile - Path to documentation file
518
+ * @param color - Optional display color (defaults to "white")
519
+ * @returns Promise resolving to success/error result
520
+ */
163
521
  stopActiveTask(reason: string, documentationFile: string, color?: string): Promise<{
164
522
  success: boolean;
165
523
  error?: string;
166
524
  }>;
167
- private emitContextChange;
168
- private addContextWarningIfNeeded;
169
- executeCommand(command: string, skipConfirmation?: boolean): Promise<ToolResult>;
170
- getCurrentModel(): string;
171
- setModel(model: string): void;
172
- /**
173
- * Strip in-progress tool calls from messages for backend/model testing
174
- * Removes tool_calls from the last assistant message and any corresponding tool results
175
- * @returns Cleaned copy of messages array, or original if no stripping needed
176
- */
177
- static stripInProgressToolCalls(messages: LLMMessage[]): LLMMessage[];
178
525
  /**
179
- * Test a model change by making a test API call with current conversation context
180
- * Rolls back to previous model if test fails
181
- * @param newModel Model to test
182
- * @returns Promise with success status and optional error message
526
+ * Delegation method for hook processing (used by ToolExecutor).
527
+ *
528
+ * Processes hook results through the HookManager to handle commands,
529
+ * variable transformations, and other hook-specific logic.
530
+ *
531
+ * @param hookResult - Result object from hook execution
532
+ * @param envKey - Optional environment key for variable transformation
533
+ * @returns Promise resolving to processing result
183
534
  */
184
- testModel(newModel: string): Promise<{
535
+ processHookResult(hookResult: {
536
+ commands?: any[];
537
+ }, envKey?: string): Promise<{
185
538
  success: boolean;
186
- error?: string;
539
+ transformedValue?: string;
187
540
  }>;
188
541
  /**
189
- * Test backend/baseUrl/model changes by making a test API call with current conversation context
190
- * Rolls back all changes if test fails
191
- * @param backend Backend display name
192
- * @param baseUrl Base URL for API calls
193
- * @param apiKeyEnvVar Name of environment variable containing API key
194
- * @param model Model to use (optional, uses current model if not specified)
195
- * @returns Promise with success status and optional error message
542
+ * Execute a shell command through the ZSH tool.
543
+ *
544
+ * @param command - Shell command to execute
545
+ * @param skipConfirmation - Whether to skip confirmation prompts
546
+ * @returns Promise resolving to tool execution result
196
547
  */
197
- testBackendModelChange(backend: string, baseUrl: string, apiKeyEnvVar: string, model?: string): Promise<{
198
- success: boolean;
199
- error?: string;
200
- }>;
548
+ executeCommand(command: string, skipConfirmation?: boolean): Promise<ToolResult>;
201
549
  /**
202
- * Process hook result including commands and transformations
203
- * Handles ENV transformations, model/backend testing, and error messaging
204
- * @param hookResult Hook execution result
205
- * @param envKey Optional ENV key to check for transformation (e.g., ZDS_AI_AGENT_PERSONA)
206
- * @returns Object with success status and transformed value (if any)
550
+ * Get the current LLM model name.
551
+ * @returns Current model identifier
207
552
  */
208
- private processHookResult;
553
+ getCurrentModel(): string;
209
554
  /**
210
- * Process hook commands (MODEL, BACKEND, BASE_URL, SYSTEM, ENV)
211
- * Handles model/backend testing and error messaging
212
- * @param commands Hook commands from applyHookCommands()
555
+ * Set a new LLM model and update related components.
556
+ *
557
+ * This method:
558
+ * - Updates the LLM client model
559
+ * - Resets vision support flag
560
+ * - Updates the token counter for the new model
561
+ * - Handles model name suffixes (e.g., :nothinking)
562
+ *
563
+ * @param model - New model identifier
564
+ */
565
+ setModel(model: string): void;
566
+ /**
567
+ * Get the backend name (e.g., "grok", "openai").
568
+ * @returns Backend identifier string
213
569
  */
214
- private processHookCommands;
215
570
  getBackend(): string;
571
+ /**
572
+ * Abort the current operation if one is in progress.
573
+ *
574
+ * This will cancel streaming responses and tool execution.
575
+ */
216
576
  abortCurrentOperation(): void;
577
+ /**
578
+ * Clear the conversation cache and reinitialize the agent.
579
+ *
580
+ * This method:
581
+ * - Backs up current conversation to timestamped files
582
+ * - Clears chat history and messages
583
+ * - Resets context warnings and processing flags
584
+ * - Re-executes startup and instance hooks
585
+ * - Saves the cleared state
586
+ * - Emits context change events
587
+ *
588
+ * Used when context becomes too large or user requests a fresh start.
589
+ */
217
590
  clearCache(): Promise<void>;
218
591
  /**
219
- * Get current session state for persistence
220
- */
221
- getSessionState(): {
222
- session: string;
223
- persona: string;
224
- personaColor: string;
225
- mood: string;
226
- moodColor: string;
227
- activeTask: string;
228
- activeTaskAction: string;
229
- activeTaskColor: string;
230
- cwd: string;
231
- contextCurrent: number;
232
- contextMax: number;
233
- backend: string;
234
- baseUrl: string;
235
- apiKeyEnvVar: string;
236
- model: string;
237
- supportsVision: boolean;
238
- };
592
+ * Get current session state for persistence.
593
+ *
594
+ * Collects all session-related state including:
595
+ * - Model and backend configuration
596
+ * - Persona, mood, and task settings
597
+ * - Context usage statistics
598
+ * - API key environment variable
599
+ *
600
+ * @returns Complete session state object
601
+ */
602
+ getSessionState(): SessionState;
603
+ /**
604
+ * Restore session state from persistence.
605
+ *
606
+ * Restores all session-related state including:
607
+ * - Model and backend configuration
608
+ * - Persona, mood, and task settings
609
+ * - Token counter and API client setup
610
+ *
611
+ * @param state - Session state to restore
612
+ */
613
+ restoreSessionState(state: SessionState): Promise<void>;
239
614
  /**
240
- * Restore session state from persistence
241
- */
242
- restoreSessionState(state: {
243
- session?: string;
244
- persona: string;
245
- personaColor: string;
246
- mood: string;
247
- moodColor: string;
248
- activeTask: string;
249
- activeTaskAction: string;
250
- activeTaskColor: string;
251
- cwd: string;
252
- contextCurrent?: number;
253
- contextMax?: number;
254
- backend?: string;
255
- baseUrl?: string;
256
- apiKeyEnvVar?: string;
257
- model?: string;
258
- supportsVision?: boolean;
259
- }): Promise<void>;
260
- /**
261
- * Compact conversation context by keeping system prompt and last N messages
262
- * Reduces context size when it grows too large for backend to handle
615
+ * Compact conversation context by keeping system prompt and last N messages.
616
+ *
617
+ * Reduces context size when it grows too large for the backend to handle.
618
+ * Removes older messages while preserving the system prompt and recent context.
619
+ *
620
+ * @param keepLastMessages - Number of recent messages to keep (default: 20)
263
621
  * @returns Number of messages removed
264
622
  */
265
623
  compactContext(keepLastMessages?: number): number;
266
624
  /**
267
- * Get all tool instances and their class names for display purposes
625
+ * Get all tool instances and their class names for display purposes.
626
+ *
627
+ * Uses reflection to find all tool instances and extract their
628
+ * class names and handled method names for introspection.
629
+ *
630
+ * @returns Array of tool info objects with class names and methods
268
631
  */
269
632
  getToolClassInfo(): Array<{
270
633
  className: string;
271
634
  methods: string[];
272
635
  }>;
273
636
  /**
274
- * Get all tool instances via reflection
637
+ * Get all tool instances via reflection.
638
+ *
639
+ * Scans all properties of the agent instance to find objects that
640
+ * implement the tool interface (have getHandledToolNames method).
641
+ *
642
+ * @returns Array of tool instances with their class names
643
+ * @private
275
644
  */
276
645
  private getToolInstances;
277
646
  }