code-puppy 0.0.97__py3-none-any.whl → 0.0.118__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/__init__.py +2 -5
- code_puppy/__main__.py +10 -0
- code_puppy/agent.py +125 -40
- code_puppy/agent_prompts.py +30 -24
- code_puppy/callbacks.py +152 -0
- code_puppy/command_line/command_handler.py +359 -0
- code_puppy/command_line/load_context_completion.py +59 -0
- code_puppy/command_line/model_picker_completion.py +14 -21
- code_puppy/command_line/motd.py +44 -28
- code_puppy/command_line/prompt_toolkit_completion.py +42 -23
- code_puppy/config.py +266 -26
- code_puppy/http_utils.py +122 -0
- code_puppy/main.py +570 -383
- code_puppy/message_history_processor.py +195 -104
- code_puppy/messaging/__init__.py +46 -0
- code_puppy/messaging/message_queue.py +288 -0
- code_puppy/messaging/queue_console.py +293 -0
- code_puppy/messaging/renderers.py +305 -0
- code_puppy/messaging/spinner/__init__.py +55 -0
- code_puppy/messaging/spinner/console_spinner.py +200 -0
- code_puppy/messaging/spinner/spinner_base.py +66 -0
- code_puppy/messaging/spinner/textual_spinner.py +97 -0
- code_puppy/model_factory.py +73 -105
- code_puppy/plugins/__init__.py +32 -0
- code_puppy/reopenable_async_client.py +225 -0
- code_puppy/state_management.py +60 -21
- code_puppy/summarization_agent.py +56 -35
- code_puppy/token_utils.py +7 -9
- code_puppy/tools/__init__.py +1 -4
- code_puppy/tools/command_runner.py +187 -32
- code_puppy/tools/common.py +44 -35
- code_puppy/tools/file_modifications.py +335 -118
- code_puppy/tools/file_operations.py +368 -95
- code_puppy/tools/token_check.py +27 -11
- code_puppy/tools/tools_content.py +53 -0
- code_puppy/tui/__init__.py +10 -0
- code_puppy/tui/app.py +1050 -0
- code_puppy/tui/components/__init__.py +21 -0
- code_puppy/tui/components/chat_view.py +512 -0
- code_puppy/tui/components/command_history_modal.py +218 -0
- code_puppy/tui/components/copy_button.py +139 -0
- code_puppy/tui/components/custom_widgets.py +58 -0
- code_puppy/tui/components/input_area.py +167 -0
- code_puppy/tui/components/sidebar.py +309 -0
- code_puppy/tui/components/status_bar.py +182 -0
- code_puppy/tui/messages.py +27 -0
- code_puppy/tui/models/__init__.py +8 -0
- code_puppy/tui/models/chat_message.py +25 -0
- code_puppy/tui/models/command_history.py +89 -0
- code_puppy/tui/models/enums.py +24 -0
- code_puppy/tui/screens/__init__.py +13 -0
- code_puppy/tui/screens/help.py +130 -0
- code_puppy/tui/screens/settings.py +256 -0
- code_puppy/tui/screens/tools.py +74 -0
- code_puppy/tui/tests/__init__.py +1 -0
- code_puppy/tui/tests/test_chat_message.py +28 -0
- code_puppy/tui/tests/test_chat_view.py +88 -0
- code_puppy/tui/tests/test_command_history.py +89 -0
- code_puppy/tui/tests/test_copy_button.py +191 -0
- code_puppy/tui/tests/test_custom_widgets.py +27 -0
- code_puppy/tui/tests/test_disclaimer.py +27 -0
- code_puppy/tui/tests/test_enums.py +15 -0
- code_puppy/tui/tests/test_file_browser.py +60 -0
- code_puppy/tui/tests/test_help.py +38 -0
- code_puppy/tui/tests/test_history_file_reader.py +107 -0
- code_puppy/tui/tests/test_input_area.py +33 -0
- code_puppy/tui/tests/test_settings.py +44 -0
- code_puppy/tui/tests/test_sidebar.py +33 -0
- code_puppy/tui/tests/test_sidebar_history.py +153 -0
- code_puppy/tui/tests/test_sidebar_history_navigation.py +132 -0
- code_puppy/tui/tests/test_status_bar.py +54 -0
- code_puppy/tui/tests/test_timestamped_history.py +52 -0
- code_puppy/tui/tests/test_tools.py +82 -0
- code_puppy/version_checker.py +26 -3
- {code_puppy-0.0.97.dist-info → code_puppy-0.0.118.dist-info}/METADATA +9 -2
- code_puppy-0.0.118.dist-info/RECORD +86 -0
- code_puppy-0.0.97.dist-info/RECORD +0 -32
- {code_puppy-0.0.97.data → code_puppy-0.0.118.data}/data/code_puppy/models.json +0 -0
- {code_puppy-0.0.97.dist-info → code_puppy-0.0.118.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.97.dist-info → code_puppy-0.0.118.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.97.dist-info → code_puppy-0.0.118.dist-info}/licenses/LICENSE +0 -0
code_puppy/messaging/message_queue.py (new file)

```diff
@@ -0,0 +1,288 @@
+"""
+Message queue system for decoupling Rich console output from renderers.
+
+This allows both TUI and interactive modes to consume the same messages
+but render them differently based on their capabilities.
+"""
+
+import asyncio
+import queue
+import threading
+from dataclasses import dataclass
+from datetime import datetime, timezone
+from enum import Enum
+from typing import Any, Dict, Optional, Union
+
+from rich.text import Text
+
+
+class MessageType(Enum):
+    """Types of messages that can be sent through the queue."""
+
+    # Basic content types
+    INFO = "info"
+    SUCCESS = "success"
+    WARNING = "warning"
+    ERROR = "error"
+    DIVIDER = "divider"
+
+    # Tool-specific types
+    TOOL_OUTPUT = "tool_output"
+    COMMAND_OUTPUT = "command_output"
+    FILE_OPERATION = "file_operation"
+
+    # Agent-specific types
+    AGENT_REASONING = "agent_reasoning"
+    PLANNED_NEXT_STEPS = "planned_next_steps"
+    AGENT_RESPONSE = "agent_response"
+    AGENT_STATUS = "agent_status"
+
+    # System types
+    SYSTEM = "system"
+    DEBUG = "debug"
+
+
+@dataclass
+class UIMessage:
+    """A message to be displayed in the UI."""
+
+    type: MessageType
+    content: Union[str, Text, Any]  # Can be Rich Text, Table, Markdown, etc.
+    timestamp: datetime = None
+    metadata: Dict[str, Any] = None
+
+    def __post_init__(self):
+        if self.timestamp is None:
+            self.timestamp = datetime.now(timezone.utc)
+        if self.metadata is None:
+            self.metadata = {}
+
+
+class MessageQueue:
+    """Thread-safe message queue for UI messages."""
+
+    def __init__(self, maxsize: int = 1000):
+        self._queue = queue.Queue(maxsize=maxsize)
+        self._async_queue = None  # Will be created when needed
+        self._async_queue_maxsize = maxsize
+        self._listeners = []
+        self._running = False
+        self._thread = None
+        self._startup_buffer = []  # Buffer messages before any renderer starts
+        self._has_active_renderer = False
+        self._event_loop = None  # Store reference to the event loop
+
+    def start(self):
+        """Start the queue processing."""
+        if self._running:
+            return
+
+        self._running = True
+        self._thread = threading.Thread(target=self._process_messages, daemon=True)
+        self._thread.start()
+
+    def get_buffered_messages(self):
+        """Get all currently buffered messages without waiting."""
+        # First get any startup buffered messages
+        messages = list(self._startup_buffer)
+
+        # Then get any queued messages
+        while True:
+            try:
+                message = self._queue.get_nowait()
+                messages.append(message)
+            except queue.Empty:
+                break
+        return messages
+
+    def clear_startup_buffer(self):
+        """Clear the startup buffer after processing."""
+        self._startup_buffer.clear()
+
+    def stop(self):
+        """Stop the queue processing."""
+        self._running = False
+        if self._thread and self._thread.is_alive():
+            self._thread.join(timeout=1.0)
+
+    def emit(self, message: UIMessage):
+        """Emit a message to the queue."""
+        # If no renderer is active yet, buffer the message for startup
+        if not self._has_active_renderer:
+            self._startup_buffer.append(message)
+            return
+
+        try:
+            self._queue.put_nowait(message)
+        except queue.Full:
+            # Drop oldest message to make room
+            try:
+                self._queue.get_nowait()
+                self._queue.put_nowait(message)
+            except queue.Empty:
+                pass
+
+    def emit_simple(self, message_type: MessageType, content: Any, **metadata):
+        """Emit a simple message with just type and content."""
+        msg = UIMessage(type=message_type, content=content, metadata=metadata)
+        self.emit(msg)
+
+    def get_nowait(self) -> Optional[UIMessage]:
+        """Get a message without blocking."""
+        try:
+            return self._queue.get_nowait()
+        except queue.Empty:
+            return None
+
+    async def get_async(self) -> UIMessage:
+        """Get a message asynchronously."""
+        # Lazy initialization of async queue and store event loop reference
+        if self._async_queue is None:
+            self._async_queue = asyncio.Queue(maxsize=self._async_queue_maxsize)
+            self._event_loop = asyncio.get_running_loop()
+        return await self._async_queue.get()
+
+    def _process_messages(self):
+        """Process messages from sync to async queue."""
+        while self._running:
+            try:
+                message = self._queue.get(timeout=0.1)
+
+                # Try to put in async queue if we have an event loop reference
+                if self._event_loop is not None and self._async_queue is not None:
+                    # Use thread-safe call to put message in async queue
+                    # Create a bound method to avoid closure issues
+                    try:
+                        self._event_loop.call_soon_threadsafe(
+                            self._async_queue.put_nowait, message
+                        )
+                    except Exception:
+                        # Handle any errors with the async queue operation
+                        pass
+
+                # Notify listeners immediately for sync processing
+                for listener in self._listeners:
+                    try:
+                        listener(message)
+                    except Exception:
+                        pass  # Don't let listener errors break processing
+
+            except queue.Empty:
+                continue
+
+    def add_listener(self, callback):
+        """Add a listener for messages (for direct sync consumption)."""
+        self._listeners.append(callback)
+        # Mark that we have an active renderer
+        self._has_active_renderer = True
+
+    def remove_listener(self, callback):
+        """Remove a listener."""
+        if callback in self._listeners:
+            self._listeners.remove(callback)
+        # If no more listeners, mark as no active renderer
+        if not self._listeners:
+            self._has_active_renderer = False
+
+    def mark_renderer_active(self):
+        """Mark that a renderer is now active and consuming messages."""
+        self._has_active_renderer = True
+
+    def mark_renderer_inactive(self):
+        """Mark that no renderer is currently active."""
+        self._has_active_renderer = False
+
+
+# Global message queue instance
+_global_queue: Optional[MessageQueue] = None
+_queue_lock = threading.Lock()
+
+
+def get_global_queue() -> MessageQueue:
+    """Get or create the global message queue."""
+    global _global_queue
+
+    with _queue_lock:
+        if _global_queue is None:
+            _global_queue = MessageQueue()
+            _global_queue.start()
+
+    return _global_queue
+
+
+def get_buffered_startup_messages():
+    """Get any messages that were buffered before renderers started."""
+    queue = get_global_queue()
+    # Only return startup buffer messages, don't clear them yet
+    messages = list(queue._startup_buffer)
+    return messages
+
+
+def emit_message(message_type: MessageType, content: Any, **metadata):
+    """Convenience function to emit a message to the global queue."""
+    queue = get_global_queue()
+    queue.emit_simple(message_type, content, **metadata)
+
+
+def emit_info(content: Any, **metadata):
+    """Emit an info message."""
+    emit_message(MessageType.INFO, content, **metadata)
+
+
+def emit_success(content: Any, **metadata):
+    """Emit a success message."""
+    emit_message(MessageType.SUCCESS, content, **metadata)
+
+
+def emit_warning(content: Any, **metadata):
+    """Emit a warning message."""
+    emit_message(MessageType.WARNING, content, **metadata)
+
+
+def emit_error(content: Any, **metadata):
+    """Emit an error message."""
+    emit_message(MessageType.ERROR, content, **metadata)
+
+
+def emit_tool_output(content: Any, tool_name: str = None, **metadata):
+    """Emit tool output."""
+    if tool_name:
+        metadata["tool_name"] = tool_name
+    emit_message(MessageType.TOOL_OUTPUT, content, **metadata)
+
+
+def emit_command_output(content: Any, command: str = None, **metadata):
+    """Emit command output."""
+    if command:
+        metadata["command"] = command
+    emit_message(MessageType.COMMAND_OUTPUT, content, **metadata)
+
+
+def emit_agent_reasoning(content: Any, **metadata):
+    """Emit agent reasoning."""
+    emit_message(MessageType.AGENT_REASONING, content, **metadata)
+
+
+def emit_planned_next_steps(content: Any, **metadata):
+    """Emit planned_next_steps"""
+    emit_message(MessageType.PLANNED_NEXT_STEPS, content, **metadata)
+
+
+def emit_agent_response(content: Any, **metadata):
+    """Emit agent_response"""
+    emit_message(MessageType.AGENT_RESPONSE, content, **metadata)
+
+
+def emit_system_message(content: Any, **metadata):
+    """Emit a system message."""
+    emit_message(MessageType.SYSTEM, content, **metadata)
+
+
+def emit_divider(content: str = "[dim]" + "─" * 100 + "\n" + "[/dim]", **metadata):
+    """Emit a divider line"""
+    from code_puppy.state_management import is_tui_mode
+
+    if not is_tui_mode():
+        emit_message(MessageType.DIVIDER, content, **metadata)
+    else:
+        pass
```
code_puppy/messaging/queue_console.py (new file)

```diff
@@ -0,0 +1,293 @@
+"""
+Queue-based console that mimics Rich Console but sends messages to a queue.
+
+This allows tools to use the same Rich console interface while having
+their output captured and routed through our message queue system.
+"""
+
+import traceback
+from typing import Any, Optional
+
+from rich.console import Console
+from rich.markdown import Markdown
+from rich.table import Table
+from rich.text import Text
+
+from .message_queue import MessageQueue, MessageType, get_global_queue
+
+
+class QueueConsole:
+    """
+    Console-like interface that sends messages to a queue instead of stdout.
+
+    This is designed to be a drop-in replacement for Rich Console that
+    routes messages through our queue system.
+    """
+
+    def __init__(
+        self,
+        queue: Optional[MessageQueue] = None,
+        fallback_console: Optional[Console] = None,
+    ):
+        self.queue = queue or get_global_queue()
+        self.fallback_console = fallback_console or Console()
+
+    def print(
+        self,
+        *values: Any,
+        sep: str = " ",
+        end: str = "\n",
+        style: Optional[str] = None,
+        highlight: bool = True,
+        **kwargs,
+    ):
+        """Print values to the message queue."""
+        # Handle Rich objects properly
+        if len(values) == 1 and hasattr(values[0], "__rich_console__"):
+            # Single Rich object - pass it through directly
+            content = values[0]
+            message_type = self._infer_message_type_from_rich_object(content, style)
+        else:
+            # Convert to string, but handle Rich objects properly
+            processed_values = []
+            for v in values:
+                if hasattr(v, "__rich_console__"):
+                    # For Rich objects, try to extract their text content
+                    from io import StringIO
+
+                    from rich.console import Console
+
+                    string_io = StringIO()
+                    # Use markup=False to prevent interpretation of square brackets as markup
+                    temp_console = Console(
+                        file=string_io, width=80, legacy_windows=False, markup=False
+                    )
+                    temp_console.print(v)
+                    processed_values.append(string_io.getvalue().rstrip("\n"))
+                else:
+                    processed_values.append(str(v))
+
+            content = sep.join(processed_values) + end
+            message_type = self._infer_message_type(content, style)
+
+        # Create Rich Text object if style is provided and content is string
+        if style and isinstance(content, str):
+            content = Text(content, style=style)
+
+        # Emit to queue
+        self.queue.emit_simple(
+            message_type, content, style=style, highlight=highlight, **kwargs
+        )
+
+    def print_exception(
+        self,
+        *,
+        width: Optional[int] = None,
+        extra_lines: int = 3,
+        theme: Optional[str] = None,
+        word_wrap: bool = False,
+        show_locals: bool = False,
+        indent_guides: bool = True,
+        suppress: tuple = (),
+        max_frames: int = 100,
+    ):
+        """Print exception information to the queue."""
+        # Get the exception traceback
+        exc_text = traceback.format_exc()
+
+        # Emit as error message
+        self.queue.emit_simple(
+            MessageType.ERROR,
+            f"Exception:\n{exc_text}",
+            exception=True,
+            show_locals=show_locals,
+        )
+
+    def log(
+        self,
+        *values: Any,
+        sep: str = " ",
+        end: str = "\n",
+        style: Optional[str] = None,
+        justify: Optional[str] = None,
+        emoji: Optional[bool] = None,
+        markup: Optional[bool] = None,
+        highlight: Optional[bool] = None,
+        log_locals: bool = False,
+    ):
+        """Log a message (similar to print but with logging semantics)."""
+        content = sep.join(str(v) for v in values) + end
+
+        # Log messages are typically informational
+        message_type = MessageType.INFO
+        if style:
+            message_type = self._infer_message_type(content, style)
+
+        if style and isinstance(content, str):
+            content = Text(content, style=style)
+
+        self.queue.emit_simple(
+            message_type, content, log=True, style=style, log_locals=log_locals
+        )
+
+    def _infer_message_type_from_rich_object(
+        self, content: Any, style: Optional[str] = None
+    ) -> MessageType:
+        """Infer message type from Rich object type and style."""
+        if style:
+            style_lower = style.lower()
+            if "red" in style_lower or "error" in style_lower:
+                return MessageType.ERROR
+            elif "yellow" in style_lower or "warning" in style_lower:
+                return MessageType.WARNING
+            elif "green" in style_lower or "success" in style_lower:
+                return MessageType.SUCCESS
+            elif "blue" in style_lower:
+                return MessageType.INFO
+            elif "purple" in style_lower or "magenta" in style_lower:
+                return MessageType.AGENT_REASONING
+            elif "dim" in style_lower:
+                return MessageType.SYSTEM
+
+        # Infer from object type
+        if isinstance(content, Markdown):
+            return MessageType.AGENT_REASONING
+        elif isinstance(content, Table):
+            return MessageType.TOOL_OUTPUT
+        elif hasattr(content, "lexer_name"):  # Syntax object
+            return MessageType.TOOL_OUTPUT
+
+        return MessageType.INFO
+
+    def _infer_message_type(
+        self, content: str, style: Optional[str] = None
+    ) -> MessageType:
+        """Infer message type from content and style."""
+        if style:
+            style_lower = style.lower()
+            if "red" in style_lower or "error" in style_lower:
+                return MessageType.ERROR
+            elif "yellow" in style_lower or "warning" in style_lower:
+                return MessageType.WARNING
+            elif "green" in style_lower or "success" in style_lower:
+                return MessageType.SUCCESS
+            elif "blue" in style_lower:
+                return MessageType.INFO
+            elif "purple" in style_lower or "magenta" in style_lower:
+                return MessageType.AGENT_REASONING
+            elif "dim" in style_lower:
+                return MessageType.SYSTEM
+
+        # Infer from content patterns
+        content_lower = content.lower()
+        if any(word in content_lower for word in ["error", "failed", "exception"]):
+            return MessageType.ERROR
+        elif any(word in content_lower for word in ["warning", "warn"]):
+            return MessageType.WARNING
+        elif any(word in content_lower for word in ["success", "completed", "done"]):
+            return MessageType.SUCCESS
+        elif any(word in content_lower for word in ["tool", "command", "running"]):
+            return MessageType.TOOL_OUTPUT
+
+        return MessageType.INFO
+
+    # Additional methods to maintain Rich Console compatibility
+    def rule(self, title: str = "", *, align: str = "center", style: str = "rule.line"):
+        """Print a horizontal rule."""
+        self.queue.emit_simple(
+            MessageType.SYSTEM,
+            f"─── {title} ───" if title else "─" * 40,
+            rule=True,
+            style=style,
+        )
+
+    def status(self, status: str, *, spinner: str = "dots"):
+        """Show a status message (simplified)."""
+        self.queue.emit_simple(
+            MessageType.INFO, f"⏳ {status}", status=True, spinner=spinner
+        )
+
+    def input(self, prompt: str = "") -> str:
+        """Get user input without spinner interference.
+
+        This method coordinates with the TUI to pause any running spinners
+        and properly display the user input prompt.
+        """
+        # Set the global flag that we're awaiting user input
+        from code_puppy.tools.command_runner import set_awaiting_user_input
+
+        set_awaiting_user_input(True)
+
+        # Signal TUI to pause spinner and prepare for user input (legacy method)
+        try:
+            # Try to get the current TUI app instance and pause spinner
+            from textual.app import App
+
+            current_app = App.get_running_app()
+            if hasattr(current_app, "pause_spinner_for_input"):
+                current_app.pause_spinner_for_input()
+        except Exception:
+            # If we can't pause the spinner (not in TUI mode), continue anyway
+            pass
+
+        # Emit the prompt as a system message so it shows in the TUI chat
+        if prompt:
+            self.queue.emit_simple(MessageType.SYSTEM, prompt, requires_user_input=True)
+
+        # Create a new, isolated console instance specifically for input
+        # This bypasses any spinner or queue system interference
+        input_console = Console(file=__import__("sys").stderr, force_terminal=True)
+
+        # Clear any spinner artifacts and position cursor properly
+        if prompt:
+            input_console.print(prompt, end="", style="bold cyan")
+
+        # Use regular input() which will read from stdin
+        # Since we printed the prompt to stderr, this should work cleanly
+        try:
+            user_response = input()
+
+            # Show the user's response in the chat as well
+            if user_response:
+                self.queue.emit_simple(
+                    MessageType.USER, f"User response: {user_response}"
+                )
+
+            return user_response
+        except (KeyboardInterrupt, EOFError):
+            # Handle interruption gracefully
+            input_console.print("\n[yellow]Input cancelled[/yellow]")
+            self.queue.emit_simple(MessageType.WARNING, "User input cancelled")
+            return ""
+        finally:
+            # Clear the global flag for awaiting user input
+            from code_puppy.tools.command_runner import set_awaiting_user_input
+
+            set_awaiting_user_input(False)
+
+            # Signal TUI to resume spinner if needed (legacy method)
+            try:
+                from textual.app import App
+
+                current_app = App.get_running_app()
+                if hasattr(current_app, "resume_spinner_after_input"):
+                    current_app.resume_spinner_after_input()
+            except Exception:
+                # If we can't resume the spinner, continue anyway
+                pass
+
+    # File-like interface for compatibility
+    @property
+    def file(self):
+        """Get the current file (for compatibility)."""
+        return self.fallback_console.file
+
+    @file.setter
+    def file(self, value):
+        """Set the current file (for compatibility)."""
+        self.fallback_console.file = value
+
+
+def get_queue_console(queue: Optional[MessageQueue] = None) -> QueueConsole:
+    """Get a QueueConsole instance."""
+    return QueueConsole(queue or get_global_queue())
```