code-puppy 0.0.97__py3-none-any.whl → 0.0.118__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- code_puppy/__init__.py +2 -5
- code_puppy/__main__.py +10 -0
- code_puppy/agent.py +125 -40
- code_puppy/agent_prompts.py +30 -24
- code_puppy/callbacks.py +152 -0
- code_puppy/command_line/command_handler.py +359 -0
- code_puppy/command_line/load_context_completion.py +59 -0
- code_puppy/command_line/model_picker_completion.py +14 -21
- code_puppy/command_line/motd.py +44 -28
- code_puppy/command_line/prompt_toolkit_completion.py +42 -23
- code_puppy/config.py +266 -26
- code_puppy/http_utils.py +122 -0
- code_puppy/main.py +570 -383
- code_puppy/message_history_processor.py +195 -104
- code_puppy/messaging/__init__.py +46 -0
- code_puppy/messaging/message_queue.py +288 -0
- code_puppy/messaging/queue_console.py +293 -0
- code_puppy/messaging/renderers.py +305 -0
- code_puppy/messaging/spinner/__init__.py +55 -0
- code_puppy/messaging/spinner/console_spinner.py +200 -0
- code_puppy/messaging/spinner/spinner_base.py +66 -0
- code_puppy/messaging/spinner/textual_spinner.py +97 -0
- code_puppy/model_factory.py +73 -105
- code_puppy/plugins/__init__.py +32 -0
- code_puppy/reopenable_async_client.py +225 -0
- code_puppy/state_management.py +60 -21
- code_puppy/summarization_agent.py +56 -35
- code_puppy/token_utils.py +7 -9
- code_puppy/tools/__init__.py +1 -4
- code_puppy/tools/command_runner.py +187 -32
- code_puppy/tools/common.py +44 -35
- code_puppy/tools/file_modifications.py +335 -118
- code_puppy/tools/file_operations.py +368 -95
- code_puppy/tools/token_check.py +27 -11
- code_puppy/tools/tools_content.py +53 -0
- code_puppy/tui/__init__.py +10 -0
- code_puppy/tui/app.py +1050 -0
- code_puppy/tui/components/__init__.py +21 -0
- code_puppy/tui/components/chat_view.py +512 -0
- code_puppy/tui/components/command_history_modal.py +218 -0
- code_puppy/tui/components/copy_button.py +139 -0
- code_puppy/tui/components/custom_widgets.py +58 -0
- code_puppy/tui/components/input_area.py +167 -0
- code_puppy/tui/components/sidebar.py +309 -0
- code_puppy/tui/components/status_bar.py +182 -0
- code_puppy/tui/messages.py +27 -0
- code_puppy/tui/models/__init__.py +8 -0
- code_puppy/tui/models/chat_message.py +25 -0
- code_puppy/tui/models/command_history.py +89 -0
- code_puppy/tui/models/enums.py +24 -0
- code_puppy/tui/screens/__init__.py +13 -0
- code_puppy/tui/screens/help.py +130 -0
- code_puppy/tui/screens/settings.py +256 -0
- code_puppy/tui/screens/tools.py +74 -0
- code_puppy/tui/tests/__init__.py +1 -0
- code_puppy/tui/tests/test_chat_message.py +28 -0
- code_puppy/tui/tests/test_chat_view.py +88 -0
- code_puppy/tui/tests/test_command_history.py +89 -0
- code_puppy/tui/tests/test_copy_button.py +191 -0
- code_puppy/tui/tests/test_custom_widgets.py +27 -0
- code_puppy/tui/tests/test_disclaimer.py +27 -0
- code_puppy/tui/tests/test_enums.py +15 -0
- code_puppy/tui/tests/test_file_browser.py +60 -0
- code_puppy/tui/tests/test_help.py +38 -0
- code_puppy/tui/tests/test_history_file_reader.py +107 -0
- code_puppy/tui/tests/test_input_area.py +33 -0
- code_puppy/tui/tests/test_settings.py +44 -0
- code_puppy/tui/tests/test_sidebar.py +33 -0
- code_puppy/tui/tests/test_sidebar_history.py +153 -0
- code_puppy/tui/tests/test_sidebar_history_navigation.py +132 -0
- code_puppy/tui/tests/test_status_bar.py +54 -0
- code_puppy/tui/tests/test_timestamped_history.py +52 -0
- code_puppy/tui/tests/test_tools.py +82 -0
- code_puppy/version_checker.py +26 -3
- {code_puppy-0.0.97.dist-info → code_puppy-0.0.118.dist-info}/METADATA +9 -2
- code_puppy-0.0.118.dist-info/RECORD +86 -0
- code_puppy-0.0.97.dist-info/RECORD +0 -32
- {code_puppy-0.0.97.data → code_puppy-0.0.118.data}/data/code_puppy/models.json +0 -0
- {code_puppy-0.0.97.dist-info → code_puppy-0.0.118.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.97.dist-info → code_puppy-0.0.118.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.97.dist-info → code_puppy-0.0.118.dist-info}/licenses/LICENSE +0 -0
code_puppy/message_history_processor.py

@@ -1,58 +1,35 @@
 import json
-from typing import List, Set
-import os
-from pathlib import Path
+from typing import Any, List, Set, Tuple
 
 import pydantic
-from pydantic_ai.messages import
-    ModelMessage,
-    TextPart,
-    ModelResponse,
-    ModelRequest,
-    ToolCallPart,
-)
+from pydantic_ai.messages import ModelMessage, ModelRequest, TextPart, ToolCallPart
 
-from code_puppy.
+from code_puppy.config import (
+    get_model_name,
+    get_protected_token_count,
+    get_summarization_threshold,
+)
+from code_puppy.messaging import emit_error, emit_info, emit_warning
 from code_puppy.model_factory import ModelFactory
-from code_puppy.
-
-
-
-
-
-
-
-except ImportError:
-    STATUS_DISPLAY_AVAILABLE = False
-
-# Import summarization agent
-try:
-    from code_puppy.summarization_agent import (
-        get_summarization_agent as _get_summarization_agent,
-    )
-
-    SUMMARIZATION_AVAILABLE = True
-
-    # Make the function available in this module's namespace for mocking
-    def get_summarization_agent():
-        return _get_summarization_agent()
-
-except ImportError:
-    SUMMARIZATION_AVAILABLE = False
-    console.print(
-        "[yellow]Warning: Summarization agent not available. Message history will be truncated instead of summarized.[/yellow]"
-    )
+from code_puppy.state_management import (
+    add_compacted_message_hash,
+    get_compacted_message_hashes,
+    get_message_history,
+    hash_message,
+    set_message_history,
+)
+from code_puppy.summarization_agent import run_summarization_sync
 
-
-
+# Protected tokens are now configurable via get_protected_token_count()
+# Default is 50000 but can be customized in ~/.code_puppy/puppy.cfg
 
 
-
-def get_tokenizer_for_model(model_name: str):
+def estimate_token_count(text: str) -> int:
     """
-
+    Simple token estimation using len(message) - 4.
+    This replaces tiktoken with a much simpler approach.
     """
-    return
+    return max(1, len(text) - 4)
 
 
 def stringify_message_part(part) -> str:
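The new `estimate_token_count` drops the tiktoken dependency for a pure character-count heuristic. Below is a minimal, runnable restatement of the function from the hunk above with a tiny demo; since English averages roughly four characters per token, this estimate runs high, so compaction tends to trigger early rather than late.

```python
def estimate_token_count(text: str) -> int:
    # One "token" per character, minus a constant 4, floored at 1:
    # exactly the heuristic added in the hunk above.
    return max(1, len(text) - 4)

if __name__ == "__main__":
    for sample in ("hi", "def main(): pass", "x" * 1000):
        print(f"{len(sample):5d} chars -> {estimate_token_count(sample):5d} estimated tokens")
```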
@@ -97,53 +74,123 @@ def stringify_message_part(part) -> str:
 
 def estimate_tokens_for_message(message: ModelMessage) -> int:
     """
-    Estimate the number of tokens in a message using
-
+    Estimate the number of tokens in a message using len(message) - 4.
+    Simple and fast replacement for tiktoken.
     """
     total_tokens = 0
 
     for part in message.parts:
         part_str = stringify_message_part(part)
         if part_str:
-            total_tokens +=
+            total_tokens += estimate_token_count(part_str)
 
     return max(1, total_tokens)
 
 
-def
-
-
-
-
-
-
+def split_messages_for_protected_summarization(
+    messages: List[ModelMessage],
+) -> Tuple[List[ModelMessage], List[ModelMessage]]:
+    """
+    Split messages into two groups: messages to summarize and protected recent messages.
+
+    Returns:
+        Tuple of (messages_to_summarize, protected_messages)
+
+    The protected_messages are the most recent messages that total up to the configured protected token count.
+    The system message (first message) is always protected.
+    All other messages that don't fit in the protected zone will be summarized.
+    """
+    if len(messages) <= 1:  # Just system message or empty
+        return [], messages
+
+    # Always protect the system message (first message)
+    system_message = messages[0]
+    system_tokens = estimate_tokens_for_message(system_message)
+
+    if len(messages) == 1:
+        return [], messages
+
+    # Get the configured protected token count
+    protected_tokens_limit = get_protected_token_count()
+
+    # Calculate tokens for messages from most recent backwards (excluding system message)
+    protected_messages = []
+    protected_token_count = system_tokens  # Start with system message tokens
+
+    # Go backwards through non-system messages to find protected zone
+    for i in range(len(messages) - 1, 0, -1):  # Stop at 1, not 0 (skip system message)
+        message = messages[i]
+        message_tokens = estimate_tokens_for_message(message)
+
+        # If adding this message would exceed protected tokens, stop here
+        if protected_token_count + message_tokens > protected_tokens_limit:
+            break
+
+        protected_messages.insert(0, message)  # Insert at beginning to maintain order
+        protected_token_count += message_tokens
+
+    # Add system message at the beginning of protected messages
+    protected_messages.insert(0, system_message)
+
+    # Messages to summarize are everything between system message and protected zone
+    protected_start_idx = (
+        len(messages) - len(protected_messages) + 1
+    )  # +1 because system message is protected
+    messages_to_summarize = messages[
+        1:protected_start_idx
+    ]  # Start from 1 to skip system message
+
+    emit_info(
+        f"🔒 Protecting {len(protected_messages)} recent messages ({protected_token_count} tokens, limit: {protected_tokens_limit})"
+    )
+    emit_info(f"📝 Summarizing {len(messages_to_summarize)} older messages")
+
+    return messages_to_summarize, protected_messages
+
+
+def summarize_messages(
+    messages: List[ModelMessage], with_protection=True
+) -> Tuple[List[ModelMessage], List[ModelMessage]]:
+    """
+    Summarize messages while protecting recent messages up to PROTECTED_TOKENS.
+
+    Returns:
+        List of messages: [system_message, summary_of_old_messages, ...protected_recent_messages]
+    """
+    messages_to_summarize, protected_messages = messages, []
+    if with_protection:
+        messages_to_summarize, protected_messages = (
+            split_messages_for_protected_summarization(messages)
+        )
+
+    if not messages_to_summarize:
+        # Nothing to summarize, return protected messages as-is
+        return protected_messages, messages_to_summarize
+
     instructions = (
-        "
+        "The input will be a log of Agentic AI steps that have been taken"
         " as well as user queries, etc. Summarize the contents of these steps."
         " The high level details should remain but the bulk of the content from tool-call"
         " responses should be compacted and summarized. For example if you see a tool-call"
         " reading a file, and the file contents are large, then in your summary you might just"
         " write: * used read_file on space_invaders.cpp - contents removed."
         "\n Make sure your result is a bulleted list of all steps and interactions."
+        "\n\nNOTE: This summary represents older conversation history. Recent messages are preserved separately."
     )
+
     try:
-
-
+        new_messages = run_summarization_sync(
+            instructions, message_history=messages_to_summarize
+        )
+        # Return: [system_message, summary, ...protected_recent_messages]
+        result = new_messages + protected_messages[1:]
+        return prune_interrupted_tool_calls(result), messages_to_summarize
     except Exception as e:
-
-        return
-
-
-    # New: single-message summarization helper used by tests
-    # - If the message has a ToolCallPart, return original message (no summarization)
-    # - If the message has system/instructions, return original message
-    # - Otherwise, summarize and return a new ModelRequest with the summarized content
-    # - On any error, return the original message
+        emit_error(f"Summarization failed during compaction: {e}")
+        return messages, messages_to_summarize  # Return original messages on failure
 
 
 def summarize_message(message: ModelMessage) -> ModelMessage:
-    if not SUMMARIZATION_AVAILABLE:
-        return message
     try:
         # If the message looks like a system/instructions message, skip summarization
         instructions = getattr(message, "instructions", None)
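The protected-zone split above walks the history newest-to-oldest, reserving recent messages up to the configured budget and queuing everything between the system message and that zone for summarization. Here is a self-contained toy of the same walk; `FakeMessage` and the 60-token limit are illustrative stand-ins so it runs without pydantic_ai.

```python
from typing import List, Tuple

class FakeMessage:
    """Illustrative stand-in for pydantic_ai's ModelMessage."""
    def __init__(self, text: str):
        self.text = text

def tokens(m: FakeMessage) -> int:
    return max(1, len(m.text) - 4)  # same heuristic as estimate_token_count

def split(messages: List[FakeMessage], limit: int) -> Tuple[List[FakeMessage], List[FakeMessage]]:
    if len(messages) <= 1:
        return [], messages
    protected: List[FakeMessage] = []
    budget = tokens(messages[0])  # system message always counts against the budget
    for m in reversed(messages[1:]):  # newest first, never the system message
        cost = tokens(m)
        if budget + cost > limit:
            break
        protected.insert(0, m)  # insert at front to keep chronological order
        budget += cost
    protected.insert(0, messages[0])
    cut = len(messages) - len(protected) + 1  # +1: system message is protected
    return messages[1:cut], protected

history = [FakeMessage("system prompt")] + [FakeMessage(f"turn {i}: " + "x" * 20) for i in range(6)]
old, kept = split(history, limit=60)
print(len(old), "to summarize;", len(kept), "protected")  # -> 4 to summarize; 3 protected
```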
@@ -164,12 +211,11 @@ def summarize_message(message: ModelMessage) -> ModelMessage:
         prompt = "Please summarize the following user message:\n" + "\n".join(
             content_bits
         )
-
-
-        summarized = ModelRequest([TextPart(result.output)])
+        output_text = run_summarization_sync(prompt)
+        summarized = ModelRequest([TextPart(output_text)])
         return summarized
     except Exception as e:
-
+        emit_error(f"Summarization failed: {e}")
         return message
 
 
@@ -177,14 +223,7 @@ def get_model_context_length() -> int:
     """
     Get the context length for the currently configured model from models.json
     """
-
-    models_path = os.environ.get("MODELS_JSON_PATH")
-    if not models_path:
-        models_path = Path(__file__).parent / "models.json"
-    else:
-        models_path = Path(models_path)
-
-    model_configs = ModelFactory.load_config(str(models_path))
+    model_configs = ModelFactory.load_config()
     model_name = get_model_name()
 
     # Get context length from model config
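The call site above no longer resolves `MODELS_JSON_PATH` itself, so that lookup has presumably moved inside `ModelFactory.load_config()`. A hedged sketch of that kind of default-path resolution, reusing the names from the removed lines; this is an assumption about code_puppy's internals, not its actual implementation.

```python
import json
import os
from pathlib import Path

def load_config(models_path: str | None = None) -> dict:
    # Assumed resolution order: explicit argument, MODELS_JSON_PATH env var,
    # then the models.json bundled next to this module.
    path = Path(
        models_path
        or os.environ.get("MODELS_JSON_PATH")
        or Path(__file__).parent / "models.json"
    )
    with open(path) as f:
        return json.load(f)
```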
@@ -241,8 +280,8 @@ def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMess
         pruned.append(msg)
 
     if dropped_count:
-
-            f"
+        emit_warning(
+            f"Pruned {dropped_count} message(s) with mismatched tool_call_id pairs"
         )
     return pruned
 
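Only the new `emit_warning` call of `prune_interrupted_tool_calls` appears in this diff; the matching logic itself is out of frame. As a rough illustration of what pruning mismatched `tool_call_id` pairs means, here is a guess at the general shape, using plain dicts in place of pydantic_ai message parts (the real code emits through the messaging queue, not `print`).

```python
from typing import List

def prune_interrupted_tool_calls(messages: List[dict]) -> List[dict]:
    calls = {m["tool_call_id"] for m in messages if m.get("kind") == "tool_call"}
    returns = {m["tool_call_id"] for m in messages if m.get("kind") == "tool_return"}
    complete = calls & returns  # ids that have both a call and a return
    pruned, dropped = [], 0
    for m in messages:
        if m.get("kind") in ("tool_call", "tool_return") and m["tool_call_id"] not in complete:
            dropped += 1  # call without return (interrupted), or orphaned return
            continue
        pruned.append(m)
    if dropped:
        print(f"Pruned {dropped} message(s) with mismatched tool_call_id pairs")
    return pruned

msgs = [
    {"kind": "text", "content": "hello"},
    {"kind": "tool_call", "tool_call_id": "a"},
    {"kind": "tool_return", "tool_call_id": "a"},
    {"kind": "tool_call", "tool_call_id": "b"},  # interrupted: no matching return
]
assert len(prune_interrupted_tool_calls(msgs)) == 3
```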
@@ -255,31 +294,83 @@ def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage
 
     proportion_used = total_current_tokens / model_max
 
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Check if we're in TUI mode and can update the status bar
+    from code_puppy.state_management import get_tui_app_instance, is_tui_mode
+
+    if is_tui_mode():
+        tui_app = get_tui_app_instance()
+        if tui_app:
+            try:
+                # Update the status bar instead of emitting a chat message
+                status_bar = tui_app.query_one("StatusBar")
+                status_bar.update_token_info(
+                    total_current_tokens, model_max, proportion_used
+                )
+            except Exception:
+                # Fallback to chat message if status bar update fails
+                emit_info(
+                    f"\n[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used:.2f} [/bold white on blue] \n",
+                    message_group="token_context_status",
+                )
+        else:
+            # Fallback if no TUI app instance
+            emit_info(
+                f"\n[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used:.2f} [/bold white on blue] \n",
+                message_group="token_context_status",
+            )
+    else:
+        # Non-TUI mode - emit to console as before
+        emit_info(
+            f"\n[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used:.2f} [/bold white on blue] \n"
+        )
 
-    #
-
+    # Get the configured summarization threshold
+    summarization_threshold = get_summarization_threshold()
 
-    if proportion_used >
-
-        result_messages = [messages[0], summary]
+    if proportion_used > summarization_threshold:
+        result_messages, summarized_messages = summarize_messages(messages)
         final_token_count = sum(
             estimate_tokens_for_message(msg) for msg in result_messages
         )
-
+        # Update status bar with final token count if in TUI mode
+        if is_tui_mode():
+            tui_app = get_tui_app_instance()
+            if tui_app:
+                try:
+                    status_bar = tui_app.query_one("StatusBar")
+                    status_bar.update_token_info(
+                        final_token_count, model_max, final_token_count / model_max
+                    )
+                except Exception:
+                    emit_info(
+                        f"Final token count after processing: {final_token_count}",
+                        message_group="token_context_status",
+                    )
+            else:
+                emit_info(
+                    f"Final token count after processing: {final_token_count}",
+                    message_group="token_context_status",
+                )
+        else:
+            emit_info(f"Final token count after processing: {final_token_count}")
+        set_message_history(result_messages)
+        for m in summarized_messages:
+            add_compacted_message_hash(hash_message(m))
         return result_messages
     return messages
+
+
+def message_history_accumulator(messages: List[Any]):
+    _message_history = get_message_history()
+    message_history_hashes = set([hash_message(m) for m in _message_history])
+    for msg in messages:
+        if (
+            hash_message(msg) not in message_history_hashes
+            and hash_message(msg) not in get_compacted_message_hashes()
+        ):
+            _message_history.append(msg)
+
+    # Apply message history trimming using the main processor
+    # This ensures we maintain global state while still managing context limits
+    message_history_processor(_message_history)
+    return get_message_history()
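`message_history_accumulator` dedups incoming messages against both the global history and the set of already-compacted messages, keyed by `hash_message`, whose implementation is not part of this hunk. Any stable content digest would serve as that key; a plausible (assumed, not actual) version:

```python
import hashlib

def hash_message(message) -> str:
    # Stable digest over the message's repr; an assumption, not the actual
    # code_puppy.state_management implementation.
    return hashlib.sha256(repr(message).encode("utf-8")).hexdigest()
```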
code_puppy/messaging/__init__.py (new file)

@@ -0,0 +1,46 @@
+from .message_queue import (
+    MessageQueue,
+    MessageType,
+    UIMessage,
+    emit_agent_reasoning,
+    emit_agent_response,
+    emit_command_output,
+    emit_divider,
+    emit_error,
+    emit_info,
+    emit_message,
+    emit_planned_next_steps,
+    emit_success,
+    emit_system_message,
+    emit_tool_output,
+    emit_warning,
+    get_buffered_startup_messages,
+    get_global_queue,
+)
+from .queue_console import QueueConsole, get_queue_console
+from .renderers import InteractiveRenderer, SynchronousInteractiveRenderer, TUIRenderer
+
+__all__ = [
+    "MessageQueue",
+    "MessageType",
+    "UIMessage",
+    "get_global_queue",
+    "emit_message",
+    "emit_info",
+    "emit_success",
+    "emit_warning",
+    "emit_divider",
+    "emit_error",
+    "emit_tool_output",
+    "emit_command_output",
+    "emit_agent_reasoning",
+    "emit_planned_next_steps",
+    "emit_agent_response",
+    "emit_system_message",
+    "get_buffered_startup_messages",
+    "InteractiveRenderer",
+    "TUIRenderer",
+    "SynchronousInteractiveRenderer",
+    "QueueConsole",
+    "get_queue_console",
+]
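The new `code_puppy/messaging/__init__.py` re-exports the queue-based messaging API that the history processor above emits through. A short usage sketch; the producer/consumer split (emit_* helpers feeding a global queue that a console or TUI renderer drains) is inferred from the exported names, and the message strings are illustrative.

```python
# Imports match the exports listed in the hunk above.
from code_puppy.messaging import emit_info, emit_warning, get_global_queue

emit_info("indexing project files...", message_group="startup")  # message_group kwarg as used earlier in this diff
emit_warning("models.json entry has no context_length; using default")
queue = get_global_queue()  # a renderer (console or TUI) presumably consumes from here
```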