todo-agent 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,210 @@
+ """
+ Command-line interface for todo.sh LLM agent.
+ """
+
+ import threading
+ import time
+ from typing import Optional
+
+ try:
+     from rich.console import Console
+     from rich.live import Live
+     from rich.spinner import Spinner
+     from rich.text import Text
+     from todo_agent.core.todo_manager import TodoManager
+     from todo_agent.infrastructure.config import Config
+     from todo_agent.infrastructure.todo_shell import TodoShell
+     from todo_agent.infrastructure.logger import Logger
+     from todo_agent.infrastructure.inference import Inference
+     from todo_agent.interface.tools import ToolCallHandler
+ except ImportError:
+     from rich.console import Console
+     from rich.live import Live
+     from rich.spinner import Spinner
+     from rich.text import Text
+     from core.todo_manager import TodoManager
+     from infrastructure.config import Config
+     from infrastructure.todo_shell import TodoShell
+     from infrastructure.logger import Logger
+     from infrastructure.inference import Inference
+     from interface.tools import ToolCallHandler
+
+
+ class CLI:
+     """User interaction loop and input/output handling."""
+
+     def __init__(self):
+         # Initialize logger first
+         self.logger = Logger("cli")
+         self.logger.info("Initializing CLI")
+
+         self.config = Config()
+         self.config.validate()
+         self.logger.debug("Configuration validated")
+
+         # Initialize infrastructure
+         self.todo_shell = TodoShell(self.config.todo_file_path, self.logger)
+         self.logger.debug("Infrastructure components initialized")
+
+         # Initialize core
+         self.todo_manager = TodoManager(self.todo_shell)
+         self.logger.debug("Core components initialized")
+
+         # Initialize interface
+         self.tool_handler = ToolCallHandler(self.todo_manager, self.logger)
+         self.logger.debug("Interface components initialized")
+
+         # Initialize inference engine
+         self.inference = Inference(self.config, self.tool_handler, self.logger)
+         self.logger.debug("Inference engine initialized")
+
+         # Initialize rich console for animations
+         self.console = Console()
+
+         self.logger.info("CLI initialization completed")
+
+     def _create_thinking_spinner(self, message: str = "Thinking...") -> Spinner:
+         """
+         Create a thinking spinner with the given message.
+
+         Args:
+             message: The message to display alongside the spinner
+
+         Returns:
+             Spinner object ready for display
+         """
+         return Spinner("dots", text=Text(message, style="cyan"))
+
+     def _get_thinking_live(self) -> Live:
+         """
+         Create a live display context for the thinking spinner.
+
+         Returns:
+             Live display context manager
+         """
+         initial_spinner = self._create_thinking_spinner("Thinking...")
+         return Live(initial_spinner, console=self.console, refresh_per_second=10)
+
+
+
+     def run(self):
+         """Main CLI interaction loop."""
+         self.logger.info("Starting CLI interaction loop")
+         print("Todo.sh LLM Agent - Type 'quit' to exit")
+         print("Commands: 'clear' (clear conversation), 'history' (show stats), 'help'")
+         print("=" * 50)
+
+         while True:
+             try:
+                 user_input = input("\n> ").strip()
+
+                 if user_input.lower() in ["quit", "exit", "q"]:
+                     self.logger.info("User requested exit")
+                     print("Goodbye!")
+                     break
+
+                 if not user_input:
+                     continue
+
+                 # Handle special commands
+                 if user_input.lower() == "clear":
+                     self.logger.info("User requested conversation clear")
+                     self.inference.clear_conversation()
+                     print("Conversation history cleared.")
+                     continue
+
+                 if user_input.lower() == "history":
+                     self.logger.debug("User requested conversation history")
+                     summary = self.inference.get_conversation_summary()
+                     print(f"Conversation Stats:")
+                     print(f" Total messages: {summary['total_messages']}")
+                     print(f" User messages: {summary['user_messages']}")
+                     print(f" Assistant messages: {summary['assistant_messages']}")
+                     print(f" Tool messages: {summary['tool_messages']}")
+                     print(f" Estimated tokens: {summary['estimated_tokens']}")
+
+                     # Display thinking time statistics if available
+                     if 'thinking_time_count' in summary and summary['thinking_time_count'] > 0:
+                         print(f" Thinking time stats:")
+                         print(f" Total thinking time: {summary['total_thinking_time']:.2f}s")
+                         print(f" Average thinking time: {summary['average_thinking_time']:.2f}s")
+                         print(f" Min thinking time: {summary['min_thinking_time']:.2f}s")
+                         print(f" Max thinking time: {summary['max_thinking_time']:.2f}s")
+                         print(f" Requests with timing: {summary['thinking_time_count']}")
+                     continue
+
+                 if user_input.lower() == "help":
+                     self.logger.debug("User requested help")
+                     print("Available commands:")
+                     print(" clear - Clear conversation history")
+                     print(" history - Show conversation statistics")
+                     print(" help - Show this help message")
+                     print(" list - List all tasks (no LLM interaction)")
+                     print(" quit - Exit the application")
+                     print(" Or just type your request naturally!")
+                     continue
+
+                 if user_input.lower() == "list":
+                     self.logger.debug("User requested task list")
+                     try:
+                         output = self.todo_shell.list_tasks()
+                         print(output)
+                     except Exception as e:
+                         self.logger.error(f"Error listing tasks: {str(e)}")
+                         print(f"Error: Failed to list tasks: {str(e)}")
+                     continue
+
+                 self.logger.info(f"Processing user request: {user_input[:50]}{'...' if len(user_input) > 50 else ''}")
+                 response = self.handle_request(user_input)
+                 print(response)
+
+             except KeyboardInterrupt:
+                 self.logger.info("User interrupted with Ctrl+C")
+                 print("\nGoodbye!")
+                 break
+             except Exception as e:
+                 self.logger.error(f"Error in CLI loop: {str(e)}")
+                 print(f"Error: {str(e)}")
+
+     def handle_request(self, user_input: str) -> str:
+         """
+         Handle user request with LLM-driven tool orchestration and conversation memory.
+
+         Args:
+             user_input: Natural language user request
+
+         Returns:
+             Formatted response for user
+         """
+         # Show thinking spinner during LLM processing
+         with self._get_thinking_live() as live:
+             try:
+                 # Process request through inference engine
+                 response, thinking_time = self.inference.process_request(user_input)
+
+                 # Update spinner with completion message and thinking time
+                 live.update(self._create_thinking_spinner(f"(thought for {thinking_time:.1f}s)"))
+
+                 return response
+             except Exception as e:
+                 # Update spinner with error message
+                 live.update(self._create_thinking_spinner("Request failed"))
+
+                 # Log the error
+                 self.logger.error(f"Error in handle_request: {str(e)}")
+
+                 # Return error message
+                 return f"Error: {str(e)}"
+
+     def run_single_request(self, user_input: str) -> str:
+         """
+         Run a single request without entering the interactive loop.
+
+         Args:
+             user_input: Natural language user request
+
+         Returns:
+             Formatted response
+         """
+         self.logger.info(f"Running single request: {user_input[:50]}{'...' if len(user_input) > 50 else ''}")
+         return self.handle_request(user_input)
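
For context, a minimal sketch of how the CLI class shown in the diff might be driven from an entry point. This is illustrative only and not part of the published wheel; the import path (todo_agent.interface.cli) and the main() wrapper are assumptions inferred from the package-style imports above.

# Illustrative sketch, not package code. Assumes the CLI class lives at
# todo_agent/interface/cli.py, consistent with the try-branch imports in the diff.
import sys

from todo_agent.interface.cli import CLI  # assumed module path

def main() -> None:
    cli = CLI()
    if len(sys.argv) > 1:
        # One-shot mode: forward the request without entering the interactive loop.
        print(cli.run_single_request(" ".join(sys.argv[1:])))
    else:
        # Interactive mode: REPL supporting 'clear', 'history', 'help', 'list', 'quit'.
        cli.run()

if __name__ == "__main__":
    main()

Because run_single_request() delegates to handle_request(), both modes share the same thinking spinner and error handling shown in the diff.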